jtag: cut down on usage of unintended modification of global end state
[openocd/cortex.git] / src / target / etb.c
blob99710700ec4eaabbe924f3a0bb7e2bd3541fb25a
1 /***************************************************************************
2 * Copyright (C) 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
24 #include "arm.h"
25 #include "etm.h"
26 #include "etb.h"
27 #include "register.h"
/* Human-readable names for the nine ETB registers; the array index
 * matches the hardware register address used in struct etb_reg. */
static char* etb_reg_list[] =
{
	"ETB_identification",
	"ETB_ram_depth",
	"ETB_ram_width",
	"ETB_status",
	"ETB_ram_data",
	"ETB_ram_read_pointer",
	"ETB_ram_write_pointer",
	"ETB_trigger_counter",
	"ETB_control",
};
43 static int etb_get_reg(struct reg *reg);
45 static int etb_set_instr(struct etb *etb, uint32_t new_instr)
47 struct jtag_tap *tap;
49 tap = etb->tap;
50 if (tap == NULL)
51 return ERROR_FAIL;
53 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
55 struct scan_field field;
57 field.num_bits = tap->ir_length;
58 field.out_value = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
59 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
61 field.in_value = NULL;
63 jtag_add_ir_scan(tap, &field, jtag_get_end_state());
65 free(field.out_value);
68 return ERROR_OK;
71 static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
73 if (etb->cur_scan_chain != new_scan_chain)
75 struct scan_field field;
77 field.num_bits = 5;
78 field.out_value = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
79 buf_set_u32(field.out_value, 0, field.num_bits, new_scan_chain);
81 field.in_value = NULL;
83 /* select INTEST instruction */
84 etb_set_instr(etb, 0x2);
85 jtag_add_dr_scan(etb->tap, 1, &field, jtag_get_end_state());
87 etb->cur_scan_chain = new_scan_chain;
89 free(field.out_value);
92 return ERROR_OK;
95 static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
96 static int etb_set_reg_w_exec(struct reg *, uint8_t *);
/* Schedule a read of @reg with no check value/mask; reg->value is only
 * valid after the JTAG queue has been executed. */
static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}
103 static int etb_get_reg(struct reg *reg)
105 int retval;
107 if ((retval = etb_read_reg(reg)) != ERROR_OK)
109 LOG_ERROR("BUG: error scheduling ETB register read");
110 return retval;
113 if ((retval = jtag_execute_queue()) != ERROR_OK)
115 LOG_ERROR("ETB register read failed");
116 return retval;
119 return ERROR_OK;
/* Accessors shared by every register in the ETB register cache. */
static const struct reg_arch_type etb_reg_type = {
	.get = etb_get_reg,
	.set = etb_set_reg_w_exec,
};
127 struct reg_cache* etb_build_reg_cache(struct etb *etb)
129 struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
130 struct reg *reg_list = NULL;
131 struct etb_reg *arch_info = NULL;
132 int num_regs = 9;
133 int i;
135 /* the actual registers are kept in two arrays */
136 reg_list = calloc(num_regs, sizeof(struct reg));
137 arch_info = calloc(num_regs, sizeof(struct etb_reg));
139 /* fill in values for the reg cache */
140 reg_cache->name = "etb registers";
141 reg_cache->next = NULL;
142 reg_cache->reg_list = reg_list;
143 reg_cache->num_regs = num_regs;
145 /* set up registers */
146 for (i = 0; i < num_regs; i++)
148 reg_list[i].name = etb_reg_list[i];
149 reg_list[i].size = 32;
150 reg_list[i].dirty = 0;
151 reg_list[i].valid = 0;
152 reg_list[i].value = calloc(1, 4);
153 reg_list[i].arch_info = &arch_info[i];
154 reg_list[i].type = &etb_reg_type;
155 reg_list[i].size = 32;
156 arch_info[i].addr = i;
157 arch_info[i].etb = etb;
160 return reg_cache;
/* JTAG post-scan callback: @arg points at a buffer that the scan filled
 * with 32 raw bits; convert them in place into a host-order uint32_t
 * via buf_get_u32().
 *
 * NOTE(review): the cast assumes @arg points at 4 bytes suitably
 * aligned for uint32_t — true for the uint32_t array elements passed
 * from etb_read_ram(), but confirm before reusing elsewhere. */
static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)in) = buf_get_u32(in, 0, 32);
}
171 static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
173 struct scan_field fields[3];
174 int i;
176 jtag_set_end_state(TAP_IDLE);
177 etb_scann(etb, 0x0);
178 etb_set_instr(etb, 0xc);
180 fields[0].num_bits = 32;
181 fields[0].out_value = NULL;
182 fields[0].in_value = NULL;
184 fields[1].num_bits = 7;
185 fields[1].out_value = malloc(1);
186 buf_set_u32(fields[1].out_value, 0, 7, 4);
187 fields[1].in_value = NULL;
189 fields[2].num_bits = 1;
190 fields[2].out_value = malloc(1);
191 buf_set_u32(fields[2].out_value, 0, 1, 0);
192 fields[2].in_value = NULL;
194 jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);
196 for (i = 0; i < num_frames; i++)
198 /* ensure nR/W reamins set to read */
199 buf_set_u32(fields[2].out_value, 0, 1, 0);
201 /* address remains set to 0x4 (RAM data) until we read the last frame */
202 if (i < num_frames - 1)
203 buf_set_u32(fields[1].out_value, 0, 7, 4);
204 else
205 buf_set_u32(fields[1].out_value, 0, 7, 0);
207 fields[0].in_value = (uint8_t *)(data + i);
208 jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);
210 jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
213 jtag_execute_queue();
215 free(fields[1].out_value);
216 free(fields[2].out_value);
218 return ERROR_OK;
/* Queue a read of ETB register @reg.  The register address and nR/W bit
 * are shifted together with the 32 data bits; because the ETB returns
 * the value for the *previous* address on each scan, two scans are
 * needed: the first selects the register, the second captures its value
 * into reg->value (or checks it against @check_value/@check_mask when
 * those are non-NULL).  Only queues work; caller must execute the
 * JTAG queue. */
static int etb_read_reg_w_check(struct reg *reg,
		uint8_t* check_value, uint8_t* check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	jtag_set_end_state(TAP_IDLE);
	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	/* first scan: shift out the target address with nR/W = 0 (read);
	 * whatever comes back during this scan is discarded */
	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	fields[1].num_bits = 7;
	fields[1].out_value = malloc(1);
	buf_set_u32(fields[1].out_value, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	fields[2].num_bits = 1;
	fields[2].out_value = malloc(1);
	buf_set_u32(fields[2].out_value, 0, 1, 0);
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(fields[1].out_value, 0, 7, 0x0);
	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	jtag_add_dr_scan_check(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	free(fields[1].out_value);
	free(fields[2].out_value);

	return ERROR_OK;
}
272 static int etb_write_reg(struct reg *, uint32_t);
274 static int etb_set_reg(struct reg *reg, uint32_t value)
276 int retval;
278 if ((retval = etb_write_reg(reg, value)) != ERROR_OK)
280 LOG_ERROR("BUG: error scheduling ETB register write");
281 return retval;
284 buf_set_u32(reg->value, 0, reg->size, value);
285 reg->valid = 1;
286 reg->dirty = 0;
288 return ERROR_OK;
291 static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
293 int retval;
295 etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));
297 if ((retval = jtag_execute_queue()) != ERROR_OK)
299 LOG_ERROR("ETB: register write failed");
300 return retval;
302 return ERROR_OK;
305 static int etb_write_reg(struct reg *reg, uint32_t value)
307 struct etb_reg *etb_reg = reg->arch_info;
308 uint8_t reg_addr = etb_reg->addr & 0x7f;
309 struct scan_field fields[3];
311 LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);
313 jtag_set_end_state(TAP_IDLE);
314 etb_scann(etb_reg->etb, 0x0);
315 etb_set_instr(etb_reg->etb, 0xc);
317 fields[0].num_bits = 32;
318 fields[0].out_value = malloc(4);
319 buf_set_u32(fields[0].out_value, 0, 32, value);
320 fields[0].in_value = NULL;
322 fields[1].num_bits = 7;
323 fields[1].out_value = malloc(1);
324 buf_set_u32(fields[1].out_value, 0, 7, reg_addr);
325 fields[1].in_value = NULL;
327 fields[2].num_bits = 1;
328 fields[2].out_value = malloc(1);
329 buf_set_u32(fields[2].out_value, 0, 1, 1);
331 fields[2].in_value = NULL;
333 free(fields[0].out_value);
334 free(fields[1].out_value);
335 free(fields[2].out_value);
337 return ERROR_OK;
340 COMMAND_HANDLER(handle_etb_config_command)
342 struct target *target;
343 struct jtag_tap *tap;
344 struct arm *arm;
346 if (CMD_ARGC != 2)
348 return ERROR_COMMAND_SYNTAX_ERROR;
351 target = get_target(CMD_ARGV[0]);
353 if (!target)
355 LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
356 return ERROR_FAIL;
359 arm = target_to_arm(target);
360 if (!is_arm(arm))
362 command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
363 return ERROR_FAIL;
366 tap = jtag_tap_by_string(CMD_ARGV[1]);
367 if (tap == NULL)
369 command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
370 return ERROR_FAIL;
373 if (arm->etm)
375 struct etb *etb = malloc(sizeof(struct etb));
377 arm->etm->capture_driver_priv = etb;
379 etb->tap = tap;
380 etb->cur_scan_chain = 0xffffffff;
381 etb->reg_cache = NULL;
382 etb->ram_width = 0;
383 etb->ram_depth = 0;
385 else
387 LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
388 return ERROR_FAIL;
391 return ERROR_OK;
394 COMMAND_HANDLER(handle_etb_trigger_percent_command)
396 struct target *target;
397 struct arm *arm;
398 struct etm_context *etm;
399 struct etb *etb;
401 target = get_current_target(CMD_CTX);
402 arm = target_to_arm(target);
403 if (!is_arm(arm))
405 command_print(CMD_CTX, "ETB: current target isn't an ARM");
406 return ERROR_FAIL;
409 etm = arm->etm;
410 if (!etm) {
411 command_print(CMD_CTX, "ETB: target has no ETM configured");
412 return ERROR_FAIL;
414 if (etm->capture_driver != &etb_capture_driver) {
415 command_print(CMD_CTX, "ETB: target not using ETB");
416 return ERROR_FAIL;
418 etb = arm->etm->capture_driver_priv;
420 if (CMD_ARGC > 0) {
421 uint32_t new_value;
423 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
424 if ((new_value < 2) || (new_value > 100))
425 command_print(CMD_CTX,
426 "valid percentages are 2%% to 100%%");
427 else
428 etb->trigger_percent = (unsigned) new_value;
431 command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
432 etb->trigger_percent);
434 return ERROR_OK;
/* Subcommands registered under "etb". */
static const struct command_registration etb_config_command_handlers[] = {
	/* NOTE: with ADIv5, ETBs are accessed using DAP operations,
	 * possibly over SWD, not through separate TAPs...
	 */
	{
		.name = "config",
		.handler = handle_etb_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Associate ETB with target and JTAG TAP.",
		.usage = "target tap",
	},
	{
		.name = "trigger_percent",
		.handler = handle_etb_trigger_percent_command,
		.mode = COMMAND_EXEC,
		.help = "Set percent of trace buffer to be filled "
			"after the trigger occurs (2..100).",
		.usage = "[percent]",
	},
	COMMAND_REGISTRATION_DONE
};
458 static const struct command_registration etb_command_handlers[] = {
460 .name = "etb",
461 .mode = COMMAND_ANY,
462 .help = "Emebdded Trace Buffer command group",
463 .chain = etb_config_command_handlers,
465 COMMAND_REGISTRATION_DONE
468 static int etb_init(struct etm_context *etm_ctx)
470 struct etb *etb = etm_ctx->capture_driver_priv;
472 etb->etm_ctx = etm_ctx;
474 /* identify ETB RAM depth and width */
475 etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
476 etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
477 jtag_execute_queue();
479 etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
480 etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);
482 etb->trigger_percent = 50;
484 return ERROR_OK;
487 static trace_status_t etb_status(struct etm_context *etm_ctx)
489 struct etb *etb = etm_ctx->capture_driver_priv;
490 struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
491 struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
492 trace_status_t retval = 0;
493 int etb_timeout = 100;
495 etb->etm_ctx = etm_ctx;
497 /* read control and status registers */
498 etb_read_reg(control);
499 etb_read_reg(status);
500 jtag_execute_queue();
502 /* See if it's (still) active */
503 retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;
505 /* check Full bit to identify wraparound/overflow */
506 if (buf_get_u32(status->value, 0, 1) == 1)
507 retval |= TRACE_OVERFLOWED;
509 /* check Triggered bit to identify trigger condition */
510 if (buf_get_u32(status->value, 1, 1) == 1)
511 retval |= TRACE_TRIGGERED;
513 /* check AcqComp to see if trigger counter dropped to zero */
514 if (buf_get_u32(status->value, 2, 1) == 1) {
515 /* wait for DFEmpty */
516 while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
517 etb_get_reg(status);
519 if (etb_timeout == 0)
520 LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
521 (unsigned) buf_get_u32(status->value, 0, 4));
523 if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
524 LOG_WARNING("ETB: trace complete without triggering?");
526 retval |= TRACE_COMPLETED;
529 /* NOTE: using a trigger is optional; and at least ETB11 has a mode
530 * where it can ignore the trigger counter.
533 /* update recorded state */
534 etm_ctx->capture_status = retval;
536 return retval;
/* Capture-driver read_trace hook: pull the captured frames out of the
 * ETB RAM and unpack them into etm_ctx->trace_data.
 *
 * Depending on the ETM port width, each 32-bit RAM word holds three
 * (4-bit port), two (8-bit port) or one (wider port) trace cycles;
 * each cycle carries a 3-bit pipestat, a packet field, and a
 * trace-sync flag bit, as seen in the bit masks below. */
static int etb_read_trace(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	int first_frame = 0;
	int num_frames = etb->ram_depth;
	uint32_t *trace_data = NULL;
	int i, j;

	etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
	jtag_execute_queue();

	/* check if we overflowed, and adjust first frame of the trace accordingly
	 * if we didn't overflow, read only up to the frame that would be written next,
	 * i.e. don't read invalid entries
	 */
	if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
	{
		first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	}
	else
	{
		num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	}

	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

	/* read data into temporary array for unpacking */
	trace_data = malloc(sizeof(uint32_t) * num_frames);
	etb_read_ram(etb, trace_data, num_frames);

	/* drop any previously-unpacked trace before replacing it */
	if (etm_ctx->trace_depth > 0)
	{
		free(etm_ctx->trace_data);
	}

	if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		etm_ctx->trace_depth = num_frames * 3;
	else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		etm_ctx->trace_depth = num_frames * 2;
	else
		etm_ctx->trace_depth = num_frames;

	etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);

	for (i = 0, j = 0; i < num_frames; i++)
	{
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		{
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80) >> 7)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			/* a trigger cycle stores the real pipestat in the packet
			 * field; recover it and flag the cycle */
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x8000) >> 15)
			{
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 2 */
			etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
			etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
			etm_ctx->trace_data[j + 2].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
			{
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 3;
		}
		else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		{
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x800) >> 11)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
			{
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 2;
		}
		else
		{
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80000) >> 19)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 1;
		}
	}

	free(trace_data);

	return ERROR_OK;
}
689 static int etb_start_capture(struct etm_context *etm_ctx)
691 struct etb *etb = etm_ctx->capture_driver_priv;
692 uint32_t etb_ctrl_value = 0x1;
693 uint32_t trigger_count;
695 if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED)
697 if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT)
699 LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
700 return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
702 etb_ctrl_value |= 0x2;
705 if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
706 LOG_ERROR("ETB: can't run in multiplexed mode");
707 return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
710 trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;
712 etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
713 etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
714 etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
715 jtag_execute_queue();
717 /* we're starting a new trace, initialize capture status */
718 etm_ctx->capture_status = TRACE_RUNNING;
720 return ERROR_OK;
723 static int etb_stop_capture(struct etm_context *etm_ctx)
725 struct etb *etb = etm_ctx->capture_driver_priv;
726 struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];
728 etb_write_reg(etb_ctrl_reg, 0x0);
729 jtag_execute_queue();
731 /* trace stopped, just clear running flag, but preserve others */
732 etm_ctx->capture_status &= ~TRACE_RUNNING;
734 return ERROR_OK;
/* ETM capture-driver vtable exposing the ETB as trace capture hardware;
 * referenced by name ("etb") from the ETM layer. */
struct etm_capture_driver etb_capture_driver =
{
	.name = "etb",
	.commands = etb_command_handlers,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};