stm32: determine all cpu types and use common examine
[openocd/jflash.git] / src / target / etb.c
blobbc38b3aa3bc5e7ed124094ea2dde5fd37f6cc64b
1 /***************************************************************************
2 * Copyright (C) 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
21 #ifdef HAVE_CONFIG_H
22 #include "config.h"
23 #endif
25 #include "arm.h"
26 #include "etm.h"
27 #include "etb.h"
28 #include "register.h"
/* Human-readable names for the nine ETB registers; the array index is
 * the ETB register address (0..8) and must stay in sync with the
 * ETB_* index constants used below (ETB_RAM_DEPTH, ETB_CTRL, ...). */
static char *etb_reg_list[] = {
	"ETB_identification",		/* 0: identification register */
	"ETB_ram_depth",		/* 1: trace RAM depth, in frames */
	"ETB_ram_width",		/* 2: trace RAM width */
	"ETB_status",			/* 3: Full/Triggered/AcqComp/DFEmpty */
	"ETB_ram_data",			/* 4: RAM data read/write port */
	"ETB_ram_read_pointer",		/* 5 */
	"ETB_ram_write_pointer",	/* 6 */
	"ETB_trigger_counter",		/* 7 */
	"ETB_control",			/* 8 */
};
42 static int etb_get_reg(struct reg *reg);
44 static int etb_set_instr(struct etb *etb, uint32_t new_instr)
46 struct jtag_tap *tap;
48 tap = etb->tap;
49 if (tap == NULL)
50 return ERROR_FAIL;
52 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
53 struct scan_field field;
55 field.num_bits = tap->ir_length;
56 void *t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
57 field.out_value = t;
58 buf_set_u32(t, 0, field.num_bits, new_instr);
60 field.in_value = NULL;
62 jtag_add_ir_scan(tap, &field, TAP_IDLE);
64 free(t);
67 return ERROR_OK;
70 static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
72 if (etb->cur_scan_chain != new_scan_chain) {
73 struct scan_field field;
75 field.num_bits = 5;
76 void *t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
77 field.out_value = t;
78 buf_set_u32(t, 0, field.num_bits, new_scan_chain);
80 field.in_value = NULL;
82 /* select INTEST instruction */
83 etb_set_instr(etb, 0x2);
84 jtag_add_dr_scan(etb->tap, 1, &field, TAP_IDLE);
86 etb->cur_scan_chain = new_scan_chain;
88 free(t);
91 return ERROR_OK;
94 static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
95 static int etb_set_reg_w_exec(struct reg *, uint8_t *);
/* Schedule a read of an ETB register with no in-flight verification;
 * the value lands in reg->value once the JTAG queue is executed. */
static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}
102 static int etb_get_reg(struct reg *reg)
104 int retval;
106 retval = etb_read_reg(reg);
107 if (retval != ERROR_OK) {
108 LOG_ERROR("BUG: error scheduling ETB register read");
109 return retval;
112 retval = jtag_execute_queue();
113 if (retval != ERROR_OK) {
114 LOG_ERROR("ETB register read failed");
115 return retval;
118 return ERROR_OK;
/* Accessors the generic register cache uses for ETB registers. */
static const struct reg_arch_type etb_reg_type = {
	.get = etb_get_reg,
	.set = etb_set_reg_w_exec,
};
126 struct reg_cache *etb_build_reg_cache(struct etb *etb)
128 struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
129 struct reg *reg_list = NULL;
130 struct etb_reg *arch_info = NULL;
131 int num_regs = 9;
132 int i;
134 /* the actual registers are kept in two arrays */
135 reg_list = calloc(num_regs, sizeof(struct reg));
136 arch_info = calloc(num_regs, sizeof(struct etb_reg));
138 /* fill in values for the reg cache */
139 reg_cache->name = "etb registers";
140 reg_cache->next = NULL;
141 reg_cache->reg_list = reg_list;
142 reg_cache->num_regs = num_regs;
144 /* set up registers */
145 for (i = 0; i < num_regs; i++) {
146 reg_list[i].name = etb_reg_list[i];
147 reg_list[i].size = 32;
148 reg_list[i].dirty = 0;
149 reg_list[i].valid = 0;
150 reg_list[i].value = calloc(1, 4);
151 reg_list[i].arch_info = &arch_info[i];
152 reg_list[i].type = &etb_reg_type;
153 reg_list[i].size = 32;
154 arch_info[i].addr = i;
155 arch_info[i].etb = etb;
158 return reg_cache;
/* JTAG callback: convert the 32-bit capture at 'arg' from raw scan
 * buffer layout to a host-order uint32_t, in place.
 * NOTE(review): assumes 'arg' points at a uint32_t-aligned 4-byte
 * buffer — holds for the data[] words handed over by etb_read_ram(). */
static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
169 static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
171 struct scan_field fields[3];
172 int i;
174 etb_scann(etb, 0x0);
175 etb_set_instr(etb, 0xc);
177 fields[0].num_bits = 32;
178 fields[0].out_value = NULL;
179 fields[0].in_value = NULL;
181 fields[1].num_bits = 7;
182 uint8_t temp1;
183 fields[1].out_value = &temp1;
184 buf_set_u32(&temp1, 0, 7, 4);
185 fields[1].in_value = NULL;
187 fields[2].num_bits = 1;
188 uint8_t temp2;
189 fields[2].out_value = &temp2;
190 buf_set_u32(&temp2, 0, 1, 0);
191 fields[2].in_value = NULL;
193 jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);
195 for (i = 0; i < num_frames; i++) {
196 /* ensure nR/W reamins set to read */
197 buf_set_u32(&temp2, 0, 1, 0);
199 /* address remains set to 0x4 (RAM data) until we read the last frame */
200 if (i < num_frames - 1)
201 buf_set_u32(&temp1, 0, 7, 4);
202 else
203 buf_set_u32(&temp1, 0, 7, 0);
205 fields[0].in_value = (uint8_t *)(data + i);
206 jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);
208 jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
211 jtag_execute_queue();
213 return ERROR_OK;
/* Schedule a read of the ETB register behind 'reg'.  Two DR scans are
 * queued: the first shifts out the 7-bit register address with
 * nR/W = read; the second captures the value (the existing comment
 * below explains why the second scan addresses the identification
 * register).  When check_value/check_mask are non-NULL, the capture is
 * verified in-flight via jtag_add_dr_scan_check() instead of merely
 * stored in reg->value.  Nothing executes until the JTAG queue runs. */
static int etb_read_reg_w_check(struct reg *reg,
	uint8_t *check_value, uint8_t *check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;	/* 7-bit register address */
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	/* first scan: select the register to read */
	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);	/* nR/W = 0: read access */
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(&temp1, 0, 7, 0x0);
	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	jtag_add_dr_scan_check(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}
265 static int etb_write_reg(struct reg *, uint32_t);
267 static int etb_set_reg(struct reg *reg, uint32_t value)
269 int retval;
271 retval = etb_write_reg(reg, value);
272 if (retval != ERROR_OK) {
273 LOG_ERROR("BUG: error scheduling ETB register write");
274 return retval;
277 buf_set_u32(reg->value, 0, reg->size, value);
278 reg->valid = 1;
279 reg->dirty = 0;
281 return ERROR_OK;
284 static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
286 int retval;
288 etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));
290 retval = jtag_execute_queue();
291 if (retval != ERROR_OK) {
292 LOG_ERROR("ETB: register write failed");
293 return retval;
295 return ERROR_OK;
298 static int etb_write_reg(struct reg *reg, uint32_t value)
300 struct etb_reg *etb_reg = reg->arch_info;
301 uint8_t reg_addr = etb_reg->addr & 0x7f;
302 struct scan_field fields[3];
304 LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);
306 etb_scann(etb_reg->etb, 0x0);
307 etb_set_instr(etb_reg->etb, 0xc);
309 fields[0].num_bits = 32;
310 uint8_t temp0[4];
311 fields[0].out_value = temp0;
312 buf_set_u32(&temp0, 0, 32, value);
313 fields[0].in_value = NULL;
315 fields[1].num_bits = 7;
316 uint8_t temp1;
317 fields[1].out_value = &temp1;
318 buf_set_u32(&temp1, 0, 7, reg_addr);
319 fields[1].in_value = NULL;
321 fields[2].num_bits = 1;
322 uint8_t temp2;
323 fields[2].out_value = &temp2;
324 buf_set_u32(&temp2, 0, 1, 1);
325 fields[2].in_value = NULL;
327 jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);
329 return ERROR_OK;
332 COMMAND_HANDLER(handle_etb_config_command)
334 struct target *target;
335 struct jtag_tap *tap;
336 struct arm *arm;
338 if (CMD_ARGC != 2)
339 return ERROR_COMMAND_SYNTAX_ERROR;
341 target = get_target(CMD_ARGV[0]);
343 if (!target) {
344 LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
345 return ERROR_FAIL;
348 arm = target_to_arm(target);
349 if (!is_arm(arm)) {
350 command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
351 return ERROR_FAIL;
354 tap = jtag_tap_by_string(CMD_ARGV[1]);
355 if (tap == NULL) {
356 command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
357 return ERROR_FAIL;
360 if (arm->etm) {
361 struct etb *etb = malloc(sizeof(struct etb));
363 arm->etm->capture_driver_priv = etb;
365 etb->tap = tap;
366 etb->cur_scan_chain = 0xffffffff;
367 etb->reg_cache = NULL;
368 etb->ram_width = 0;
369 etb->ram_depth = 0;
370 } else {
371 LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
372 return ERROR_FAIL;
375 return ERROR_OK;
378 COMMAND_HANDLER(handle_etb_trigger_percent_command)
380 struct target *target;
381 struct arm *arm;
382 struct etm_context *etm;
383 struct etb *etb;
385 target = get_current_target(CMD_CTX);
386 arm = target_to_arm(target);
387 if (!is_arm(arm)) {
388 command_print(CMD_CTX, "ETB: current target isn't an ARM");
389 return ERROR_FAIL;
392 etm = arm->etm;
393 if (!etm) {
394 command_print(CMD_CTX, "ETB: target has no ETM configured");
395 return ERROR_FAIL;
397 if (etm->capture_driver != &etb_capture_driver) {
398 command_print(CMD_CTX, "ETB: target not using ETB");
399 return ERROR_FAIL;
401 etb = arm->etm->capture_driver_priv;
403 if (CMD_ARGC > 0) {
404 uint32_t new_value;
406 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
407 if ((new_value < 2) || (new_value > 100))
408 command_print(CMD_CTX,
409 "valid percentages are 2%% to 100%%");
410 else
411 etb->trigger_percent = (unsigned) new_value;
414 command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
415 etb->trigger_percent);
417 return ERROR_OK;
/* Subcommands of "etb": configuration and trigger tuning. */
static const struct command_registration etb_config_command_handlers[] = {
	{
		/* NOTE: with ADIv5, ETBs are accessed using DAP operations,
		 * possibly over SWD, not through separate TAPs...
		 */
		.name = "config",
		.handler = handle_etb_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Associate ETB with target and JTAG TAP.",
		.usage = "target tap",
	},
	{
		.name = "trigger_percent",
		.handler = handle_etb_trigger_percent_command,
		.mode = COMMAND_EXEC,
		.help = "Set percent of trace buffer to be filled "
			"after the trigger occurs (2..100).",
		.usage = "[percent]",
	},
	COMMAND_REGISTRATION_DONE
};
441 static const struct command_registration etb_command_handlers[] = {
443 .name = "etb",
444 .mode = COMMAND_ANY,
445 .help = "Emebdded Trace Buffer command group",
446 .chain = etb_config_command_handlers,
448 COMMAND_REGISTRATION_DONE
451 static int etb_init(struct etm_context *etm_ctx)
453 struct etb *etb = etm_ctx->capture_driver_priv;
455 etb->etm_ctx = etm_ctx;
457 /* identify ETB RAM depth and width */
458 etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
459 etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
460 jtag_execute_queue();
462 etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
463 etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);
465 etb->trigger_percent = 50;
467 return ERROR_OK;
470 static trace_status_t etb_status(struct etm_context *etm_ctx)
472 struct etb *etb = etm_ctx->capture_driver_priv;
473 struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
474 struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
475 trace_status_t retval = 0;
476 int etb_timeout = 100;
478 etb->etm_ctx = etm_ctx;
480 /* read control and status registers */
481 etb_read_reg(control);
482 etb_read_reg(status);
483 jtag_execute_queue();
485 /* See if it's (still) active */
486 retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;
488 /* check Full bit to identify wraparound/overflow */
489 if (buf_get_u32(status->value, 0, 1) == 1)
490 retval |= TRACE_OVERFLOWED;
492 /* check Triggered bit to identify trigger condition */
493 if (buf_get_u32(status->value, 1, 1) == 1)
494 retval |= TRACE_TRIGGERED;
496 /* check AcqComp to see if trigger counter dropped to zero */
497 if (buf_get_u32(status->value, 2, 1) == 1) {
498 /* wait for DFEmpty */
499 while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
500 etb_get_reg(status);
502 if (etb_timeout == 0)
503 LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
504 (unsigned) buf_get_u32(status->value, 0, 4));
506 if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
507 LOG_WARNING("ETB: trace complete without triggering?");
509 retval |= TRACE_COMPLETED;
512 /* NOTE: using a trigger is optional; and at least ETB11 has a mode
513 * where it can ignore the trigger counter.
516 /* update recorded state */
517 etm_ctx->capture_status = retval;
519 return retval;
/* Capture-driver .read_trace handler: drain the ETB trace RAM and
 * unpack it into etm_ctx->trace_data, replacing any previous capture.
 * Each 32-bit RAM frame expands to three cycles on a 4-bit ETM port,
 * two cycles on an 8-bit port, or one cycle otherwise; per cycle the
 * frame carries pipestat, packet, and a tracesync flag bit.  A STAT_TR
 * pipestat marks a trigger cycle whose real pipestat rides in the low
 * packet bits.
 * NOTE(review): jtag_execute_queue() and the two malloc() results are
 * used unchecked here — preexisting behavior left as-is. */
static int etb_read_trace(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	int first_frame = 0;
	int num_frames = etb->ram_depth;
	uint32_t *trace_data = NULL;
	int i, j;

	etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
	jtag_execute_queue();

	/* check if we overflowed, and adjust first frame of the trace accordingly
	 * if we didn't overflow, read only up to the frame that would be written next,
	 * i.e. don't read invalid entries
	 */
	if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
		first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value,
				0, 32);
	else
		num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value,
				0, 32);

	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

	/* read data into temporary array for unpacking */
	trace_data = malloc(sizeof(uint32_t) * num_frames);
	etb_read_ram(etb, trace_data, num_frames);

	if (etm_ctx->trace_depth > 0)
		free(etm_ctx->trace_data);

	/* one RAM frame holds 3, 2 or 1 trace cycles depending on port width */
	if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		etm_ctx->trace_depth = num_frames * 3;
	else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		etm_ctx->trace_depth = num_frames * 2;
	else
		etm_ctx->trace_depth = num_frames;

	etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);

	for (i = 0, j = 0; i < num_frames; i++) {
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT) {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80) >> 7)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				/* trigger cycle: actual pipestat is in the packet bits */
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet &
					0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x8000) >> 15)
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR) {
				etm_ctx->trace_data[j +
					1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 2 */
			etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
			etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
			etm_ctx->trace_data[j + 2].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR) {
				etm_ctx->trace_data[j +
					2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 3;
		} else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT) {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x800) >> 11)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet &
					0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR) {
				etm_ctx->trace_data[j +
					1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 2;
		} else {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80000) >> 19)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet &
					0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 1;
		}
	}

	free(trace_data);

	return ERROR_OK;
}
652 static int etb_start_capture(struct etm_context *etm_ctx)
654 struct etb *etb = etm_ctx->capture_driver_priv;
655 uint32_t etb_ctrl_value = 0x1;
656 uint32_t trigger_count;
658 if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED) {
659 if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT) {
660 LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
661 return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
663 etb_ctrl_value |= 0x2;
666 if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
667 LOG_ERROR("ETB: can't run in multiplexed mode");
668 return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
671 trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;
673 etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
674 etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
675 etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
676 jtag_execute_queue();
678 /* we're starting a new trace, initialize capture status */
679 etm_ctx->capture_status = TRACE_RUNNING;
681 return ERROR_OK;
684 static int etb_stop_capture(struct etm_context *etm_ctx)
686 struct etb *etb = etm_ctx->capture_driver_priv;
687 struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];
689 etb_write_reg(etb_ctrl_reg, 0x0);
690 jtag_execute_queue();
692 /* trace stopped, just clear running flag, but preserve others */
693 etm_ctx->capture_status &= ~TRACE_RUNNING;
695 return ERROR_OK;
/* ETM capture driver backed by an Embedded Trace Buffer on its own
 * JTAG TAP; registered with the ETM layer via this vtable. */
struct etm_capture_driver etb_capture_driver = {
	.name = "etb",
	.commands = etb_command_handlers,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};