1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2019 Google, LLC.
5 * Author: Moritz Fischer <moritzf@google.com>
16 #include <linux/pci.h>
18 #include <jtag/interface.h>
20 #include <jtag/commands.h>
21 #include <helper/replacements.h>
22 #include <helper/bits.h>
/* Available only from kernel v4.10 */
#ifndef PCI_CFG_SPACE_EXP_SIZE
#define PCI_CFG_SPACE_EXP_SIZE 4096
#endif

/* Start of the PCIe extended capability list in config space */
#define PCIE_EXT_CAP_LST 0x100

/* Register offsets, relative to the located XVC vendor-specific capability */
#define XLNX_XVC_EXT_CAP  0x00	/* extended capability header */
#define XLNX_XVC_VSEC_HDR 0x04	/* vendor-specific header */
#define XLNX_XVC_LEN_REG  0x0C	/* number of bits to shift */
#define XLNX_XVC_TMS_REG  0x10	/* TMS bit vector */
#define XLNX_XVC_TDX_REG  0x14	/* TDI on write, TDO on read */

#define XLNX_XVC_CAP_SIZE 0x20	/* expected VSEC length for a match */
#define XLNX_XVC_VSEC_ID  0x8	/* expected VSEC id for a match */
#define XLNX_XVC_MAX_BITS 0x20	/* max bits per hardware transaction */

/* Extract the 3-bit SWD ACK field (bits 11:9 of the shifted-in response) */
#define MASK_ACK(x) (((x) >> 9) & 0x7)
/* Extract the SWD parity bit (bit 0) */
#define MASK_PAR(x) ((int)((x) & 0x1))
/* Driver state for one XVC-over-PCIe adapter.
 * Members reconstructed from usage in this file:
 *  - fd is used with open()/close()/pread()/pwrite() on the sysfs config node
 *  - offset is the config-space offset of the XVC capability (printed "0x%x")
 *  - device is an owned string set via strdup() in the config command
 */
struct xlnx_pcie_xvc {
	int fd;		/* fd of /sys/bus/pci/devices/<dev>/config */
	int offset;	/* offset of the XVC capability in config space */
	char *device;	/* PCI device address string (owned, strdup'd) */
};

static struct xlnx_pcie_xvc xlnx_pcie_xvc_state;
static struct xlnx_pcie_xvc *xlnx_pcie_xvc = &xlnx_pcie_xvc_state;
53 static int xlnx_pcie_xvc_read_reg(const int offset
, uint32_t *val
)
58 /* Note: This should be ok endianness-wise because by going
59 * through sysfs the kernel does the conversion in the config
60 * space accessor functions
62 err
= pread(xlnx_pcie_xvc
->fd
, &res
, sizeof(res
),
63 xlnx_pcie_xvc
->offset
+ offset
);
64 if (err
!= sizeof(res
)) {
65 LOG_ERROR("Failed to read offset %x", offset
);
66 return ERROR_JTAG_DEVICE_ERROR
;
75 static int xlnx_pcie_xvc_write_reg(const int offset
, const uint32_t val
)
79 /* Note: This should be ok endianness-wise because by going
80 * through sysfs the kernel does the conversion in the config
81 * space accessor functions
83 err
= pwrite(xlnx_pcie_xvc
->fd
, &val
, sizeof(val
),
84 xlnx_pcie_xvc
->offset
+ offset
);
85 if (err
!= sizeof(val
)) {
86 LOG_ERROR("Failed to write offset: %x with value: %" PRIx32
,
88 return ERROR_JTAG_DEVICE_ERROR
;
94 static int xlnx_pcie_xvc_transact(size_t num_bits
, uint32_t tms
, uint32_t tdi
,
99 err
= xlnx_pcie_xvc_write_reg(XLNX_XVC_LEN_REG
, num_bits
);
103 err
= xlnx_pcie_xvc_write_reg(XLNX_XVC_TMS_REG
, tms
);
107 err
= xlnx_pcie_xvc_write_reg(XLNX_XVC_TDX_REG
, tdi
);
111 err
= xlnx_pcie_xvc_read_reg(XLNX_XVC_TDX_REG
, tdo
);
116 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32
", tdi: %" PRIx32
", tdo: %" PRIx32
,
117 num_bits
, tms
, tdi
, *tdo
);
119 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32
", tdi: %" PRIx32
", tdo: <null>",
124 static int xlnx_pcie_xvc_execute_stableclocks(struct jtag_command
*cmd
)
126 int tms
= tap_get_state() == TAP_RESET
? 1 : 0;
127 size_t left
= cmd
->cmd
.stableclocks
->num_cycles
;
131 LOG_DEBUG("stableclocks %i cycles", cmd
->cmd
.runtest
->num_cycles
);
134 write
= MIN(XLNX_XVC_MAX_BITS
, left
);
135 err
= xlnx_pcie_xvc_transact(write
, tms
, 0, NULL
);
144 static int xlnx_pcie_xvc_execute_statemove(size_t skip
)
146 uint8_t tms_scan
= tap_get_tms_path(tap_get_state(),
147 tap_get_end_state());
148 int tms_count
= tap_get_tms_path_len(tap_get_state(),
149 tap_get_end_state());
152 LOG_DEBUG("statemove starting at (skip: %zu) %s end in %s", skip
,
153 tap_state_name(tap_get_state()),
154 tap_state_name(tap_get_end_state()));
157 err
= xlnx_pcie_xvc_transact(tms_count
- skip
, tms_scan
>> skip
, 0, NULL
);
161 tap_set_state(tap_get_end_state());
166 static int xlnx_pcie_xvc_execute_runtest(struct jtag_command
*cmd
)
170 LOG_DEBUG("runtest %i cycles, end in %i",
171 cmd
->cmd
.runtest
->num_cycles
,
172 cmd
->cmd
.runtest
->end_state
);
174 tap_state_t tmp_state
= tap_get_end_state();
176 if (tap_get_state() != TAP_IDLE
) {
177 tap_set_end_state(TAP_IDLE
);
178 err
= xlnx_pcie_xvc_execute_statemove(0);
183 size_t left
= cmd
->cmd
.runtest
->num_cycles
;
187 write
= MIN(XLNX_XVC_MAX_BITS
, left
);
188 err
= xlnx_pcie_xvc_transact(write
, 0, 0, NULL
);
194 tap_set_end_state(tmp_state
);
195 if (tap_get_state() != tap_get_end_state())
196 err
= xlnx_pcie_xvc_execute_statemove(0);
201 static int xlnx_pcie_xvc_execute_pathmove(struct jtag_command
*cmd
)
203 size_t num_states
= cmd
->cmd
.pathmove
->num_states
;
204 tap_state_t
*path
= cmd
->cmd
.pathmove
->path
;
208 LOG_DEBUG("pathmove: %i states, end in %i",
209 cmd
->cmd
.pathmove
->num_states
,
210 cmd
->cmd
.pathmove
->path
[cmd
->cmd
.pathmove
->num_states
- 1]);
212 for (i
= 0; i
< num_states
; i
++) {
213 if (path
[i
] == tap_state_transition(tap_get_state(), false)) {
214 err
= xlnx_pcie_xvc_transact(1, 1, 0, NULL
);
215 } else if (path
[i
] == tap_state_transition(tap_get_state(), true)) {
216 err
= xlnx_pcie_xvc_transact(1, 0, 0, NULL
);
218 LOG_ERROR("BUG: %s -> %s isn't a valid TAP transition.",
219 tap_state_name(tap_get_state()),
220 tap_state_name(path
[i
]));
221 err
= ERROR_JTAG_QUEUE_FAILED
;
225 tap_set_state(path
[i
]);
228 tap_set_end_state(tap_get_state());
233 static int xlnx_pcie_xvc_execute_scan(struct jtag_command
*cmd
)
235 enum scan_type type
= jtag_scan_type(cmd
->cmd
.scan
);
236 tap_state_t saved_end_state
= cmd
->cmd
.scan
->end_state
;
237 bool ir_scan
= cmd
->cmd
.scan
->ir_scan
;
238 uint32_t tdi
, tms
, tdo
;
239 uint8_t *buf
, *rd_ptr
;
244 scan_size
= jtag_build_buffer(cmd
->cmd
.scan
, &buf
);
246 LOG_DEBUG("%s scan type %d %d bits; starts in %s end in %s",
247 (cmd
->cmd
.scan
->ir_scan
) ? "IR" : "DR", type
, scan_size
,
248 tap_state_name(tap_get_state()),
249 tap_state_name(cmd
->cmd
.scan
->end_state
));
251 /* If we're in TAP_DR_SHIFT state but need to do a IR_SCAN or
252 * vice-versa, do a statemove to corresponding other state, then restore
255 if (ir_scan
&& tap_get_state() != TAP_IRSHIFT
) {
256 tap_set_end_state(TAP_IRSHIFT
);
257 err
= xlnx_pcie_xvc_execute_statemove(0);
260 tap_set_end_state(saved_end_state
);
261 } else if (!ir_scan
&& (tap_get_state() != TAP_DRSHIFT
)) {
262 tap_set_end_state(TAP_DRSHIFT
);
263 err
= xlnx_pcie_xvc_execute_statemove(0);
266 tap_set_end_state(saved_end_state
);
271 write
= MIN(XLNX_XVC_MAX_BITS
, left
);
272 /* the last TMS should be a 1, to leave the state */
273 tms
= left
<= XLNX_XVC_MAX_BITS
? BIT(write
- 1) : 0;
274 tdi
= (type
!= SCAN_IN
) ? buf_get_u32(rd_ptr
, 0, write
) : 0;
275 err
= xlnx_pcie_xvc_transact(write
, tms
, tdi
, type
!= SCAN_OUT
?
280 if (type
!= SCAN_OUT
)
281 buf_set_u32(rd_ptr
, 0, write
, tdo
);
282 rd_ptr
+= sizeof(uint32_t);
285 err
= jtag_read_buffer(buf
, cmd
->cmd
.scan
);
288 if (tap_get_state() != tap_get_end_state())
289 err
= xlnx_pcie_xvc_execute_statemove(1);
298 static void xlnx_pcie_xvc_execute_reset(struct jtag_command
*cmd
)
300 LOG_DEBUG("reset trst: %i srst: %i", cmd
->cmd
.reset
->trst
,
301 cmd
->cmd
.reset
->srst
);
304 static void xlnx_pcie_xvc_execute_sleep(struct jtag_command
*cmd
)
306 LOG_DEBUG("sleep %" PRIu32
"", cmd
->cmd
.sleep
->us
);
307 usleep(cmd
->cmd
.sleep
->us
);
310 static int xlnx_pcie_xvc_execute_tms(struct jtag_command
*cmd
)
312 const size_t num_bits
= cmd
->cmd
.tms
->num_bits
;
313 const uint8_t *bits
= cmd
->cmd
.tms
->bits
;
318 LOG_DEBUG("execute tms %zu", num_bits
);
322 write
= MIN(XLNX_XVC_MAX_BITS
, left
);
323 tms
= buf_get_u32(bits
, 0, write
);
324 err
= xlnx_pcie_xvc_transact(write
, tms
, 0, NULL
);
334 static int xlnx_pcie_xvc_execute_command(struct jtag_command
*cmd
)
336 LOG_DEBUG("%s: cmd->type: %u", __func__
, cmd
->type
);
338 case JTAG_STABLECLOCKS
:
339 return xlnx_pcie_xvc_execute_stableclocks(cmd
);
341 return xlnx_pcie_xvc_execute_runtest(cmd
);
343 tap_set_end_state(cmd
->cmd
.statemove
->end_state
);
344 return xlnx_pcie_xvc_execute_statemove(0);
346 return xlnx_pcie_xvc_execute_pathmove(cmd
);
348 return xlnx_pcie_xvc_execute_scan(cmd
);
350 xlnx_pcie_xvc_execute_reset(cmd
);
353 xlnx_pcie_xvc_execute_sleep(cmd
);
356 return xlnx_pcie_xvc_execute_tms(cmd
);
358 LOG_ERROR("BUG: Unknown JTAG command type encountered.");
359 return ERROR_JTAG_QUEUE_FAILED
;
365 static int xlnx_pcie_xvc_execute_queue(struct jtag_command
*cmd_queue
)
367 struct jtag_command
*cmd
= cmd_queue
;
371 ret
= xlnx_pcie_xvc_execute_command(cmd
);
383 static int xlnx_pcie_xvc_init(void)
385 char filename
[PATH_MAX
];
389 snprintf(filename
, PATH_MAX
, "/sys/bus/pci/devices/%s/config",
390 xlnx_pcie_xvc
->device
);
391 xlnx_pcie_xvc
->fd
= open(filename
, O_RDWR
| O_SYNC
);
392 if (xlnx_pcie_xvc
->fd
< 0) {
393 LOG_ERROR("Failed to open device: %s", filename
);
394 return ERROR_JTAG_INIT_FAILED
;
397 LOG_INFO("Scanning PCIe device %s's for Xilinx XVC/PCIe ...",
398 xlnx_pcie_xvc
->device
);
399 /* Parse the PCIe extended capability list and try to find
400 * vendor specific header */
401 xlnx_pcie_xvc
->offset
= PCIE_EXT_CAP_LST
;
402 while (xlnx_pcie_xvc
->offset
<= PCI_CFG_SPACE_EXP_SIZE
- sizeof(cap
) &&
403 xlnx_pcie_xvc
->offset
>= PCIE_EXT_CAP_LST
) {
404 err
= xlnx_pcie_xvc_read_reg(XLNX_XVC_EXT_CAP
, &cap
);
407 LOG_DEBUG("Checking capability at 0x%x; id=0x%04" PRIx32
" version=0x%" PRIx32
" next=0x%" PRIx32
,
408 xlnx_pcie_xvc
->offset
,
410 PCI_EXT_CAP_VER(cap
),
411 PCI_EXT_CAP_NEXT(cap
));
412 if (PCI_EXT_CAP_ID(cap
) == PCI_EXT_CAP_ID_VNDR
) {
413 err
= xlnx_pcie_xvc_read_reg(XLNX_XVC_VSEC_HDR
, &vh
);
416 LOG_DEBUG("Checking possible match at 0x%x; id: 0x%" PRIx32
"; rev: 0x%" PRIx32
"; length: 0x%" PRIx32
,
417 xlnx_pcie_xvc
->offset
,
418 PCI_VNDR_HEADER_ID(vh
),
419 PCI_VNDR_HEADER_REV(vh
),
420 PCI_VNDR_HEADER_LEN(vh
));
421 if ((PCI_VNDR_HEADER_ID(vh
) == XLNX_XVC_VSEC_ID
) &&
422 (PCI_VNDR_HEADER_LEN(vh
) == XLNX_XVC_CAP_SIZE
))
425 xlnx_pcie_xvc
->offset
= PCI_EXT_CAP_NEXT(cap
);
427 if ((xlnx_pcie_xvc
->offset
> PCI_CFG_SPACE_EXP_SIZE
- XLNX_XVC_CAP_SIZE
) ||
428 xlnx_pcie_xvc
->offset
< PCIE_EXT_CAP_LST
) {
429 close(xlnx_pcie_xvc
->fd
);
430 return ERROR_JTAG_INIT_FAILED
;
433 LOG_INFO("Found Xilinx XVC/PCIe capability at offset: 0x%x", xlnx_pcie_xvc
->offset
);
438 static int xlnx_pcie_xvc_quit(void)
442 err
= close(xlnx_pcie_xvc
->fd
);
449 COMMAND_HANDLER(xlnx_pcie_xvc_handle_config_command
)
452 return ERROR_COMMAND_SYNTAX_ERROR
;
454 /* we can't really free this in a safe manner, so at least
455 * limit the memory we're leaking by freeing the old one first
456 * before allocating a new one ...
458 free(xlnx_pcie_xvc
->device
);
460 xlnx_pcie_xvc
->device
= strdup(CMD_ARGV
[0]);
464 static const struct command_registration xlnx_pcie_xvc_subcommand_handlers
[] = {
467 .handler
= xlnx_pcie_xvc_handle_config_command
,
468 .mode
= COMMAND_CONFIG
,
469 .help
= "Configure XVC/PCIe JTAG adapter",
472 COMMAND_REGISTRATION_DONE
475 static const struct command_registration xlnx_pcie_xvc_command_handlers
[] = {
477 .name
= "xlnx_pcie_xvc",
479 .help
= "perform xlnx_pcie_xvc management",
480 .chain
= xlnx_pcie_xvc_subcommand_handlers
,
483 COMMAND_REGISTRATION_DONE
486 static struct jtag_interface xlnx_pcie_xvc_jtag_ops
= {
487 .execute_queue
= &xlnx_pcie_xvc_execute_queue
,
490 static int xlnx_pcie_xvc_swd_sequence(const uint8_t *seq
, size_t length
)
498 write
= MIN(XLNX_XVC_MAX_BITS
, left
);
499 send
= buf_get_u32(seq
, 0, write
);
500 err
= xlnx_pcie_xvc_transact(write
, send
, 0, NULL
);
504 seq
+= sizeof(uint32_t);
510 static int xlnx_pcie_xvc_swd_switch_seq(enum swd_special_seq seq
)
514 LOG_DEBUG("SWD line reset");
515 return xlnx_pcie_xvc_swd_sequence(swd_seq_line_reset
,
516 swd_seq_line_reset_len
);
518 LOG_DEBUG("JTAG-to-SWD");
519 return xlnx_pcie_xvc_swd_sequence(swd_seq_jtag_to_swd
,
520 swd_seq_jtag_to_swd_len
);
522 LOG_DEBUG("SWD-to-JTAG");
523 return xlnx_pcie_xvc_swd_sequence(swd_seq_swd_to_jtag
,
524 swd_seq_swd_to_jtag_len
);
526 LOG_ERROR("Sequence %d not supported", seq
);
533 static int queued_retval
;
535 static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd
, uint32_t value
,
536 uint32_t ap_delay_clk
);
538 static void swd_clear_sticky_errors(void)
540 xlnx_pcie_xvc_swd_write_reg(swd_cmd(false, false, DP_ABORT
),
541 STKCMPCLR
| STKERRCLR
| WDERRCLR
| ORUNERRCLR
, 0);
544 static void xlnx_pcie_xvc_swd_read_reg(uint8_t cmd
, uint32_t *value
,
545 uint32_t ap_delay_clk
)
547 uint32_t res
, ack
, rpar
;
550 assert(cmd
& SWD_CMD_RNW
);
552 cmd
|= SWD_CMD_START
| SWD_CMD_PARK
;
554 err
= xlnx_pcie_xvc_transact(12, cmd
, 0, &res
);
561 err
= xlnx_pcie_xvc_transact(32, 0, 0, &res
);
566 err
= xlnx_pcie_xvc_transact(2, 0, 0, &rpar
);
570 LOG_DEBUG("%s %s %s reg %X = %08"PRIx32
,
571 ack
== SWD_ACK_OK
? "OK" : ack
== SWD_ACK_WAIT
?
572 "WAIT" : ack
== SWD_ACK_FAULT
? "FAULT" : "JUNK",
573 cmd
& SWD_CMD_APNDP
? "AP" : "DP",
574 cmd
& SWD_CMD_RNW
? "read" : "write",
575 (cmd
& SWD_CMD_A32
) >> 1,
579 if (MASK_PAR(rpar
) != parity_u32(res
)) {
580 LOG_DEBUG_IO("Wrong parity detected");
581 queued_retval
= ERROR_FAIL
;
586 if (cmd
& SWD_CMD_APNDP
)
587 err
= xlnx_pcie_xvc_transact(ap_delay_clk
, 0, 0, NULL
);
591 LOG_DEBUG_IO("SWD_ACK_WAIT");
592 swd_clear_sticky_errors();
595 LOG_DEBUG_IO("SWD_ACK_FAULT");
599 LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32
, ack
);
607 static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd
, uint32_t value
,
608 uint32_t ap_delay_clk
)
613 assert(!(cmd
& SWD_CMD_RNW
));
615 cmd
|= SWD_CMD_START
| SWD_CMD_PARK
;
616 /* cmd + trn + ack */
617 err
= xlnx_pcie_xvc_transact(13, cmd
, 0, &res
);
624 err
= xlnx_pcie_xvc_transact(32, value
, 0, NULL
);
629 err
= xlnx_pcie_xvc_transact(2, parity_u32(value
), 0, NULL
);
633 LOG_DEBUG("%s %s %s reg %X = %08"PRIx32
,
634 ack
== SWD_ACK_OK
? "OK" : ack
== SWD_ACK_WAIT
?
635 "WAIT" : ack
== SWD_ACK_FAULT
? "FAULT" : "JUNK",
636 cmd
& SWD_CMD_APNDP
? "AP" : "DP",
637 cmd
& SWD_CMD_RNW
? "read" : "write",
638 (cmd
& SWD_CMD_A32
) >> 1,
643 if (cmd
& SWD_CMD_APNDP
)
644 err
= xlnx_pcie_xvc_transact(ap_delay_clk
, 0, 0, NULL
);
648 LOG_DEBUG_IO("SWD_ACK_WAIT");
649 swd_clear_sticky_errors();
652 LOG_DEBUG_IO("SWD_ACK_FAULT");
656 LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32
, ack
);
665 static int xlnx_pcie_xvc_swd_run_queue(void)
669 /* we want at least 8 idle cycles between each transaction */
670 err
= xlnx_pcie_xvc_transact(8, 0, 0, NULL
);
675 queued_retval
= ERROR_OK
;
676 LOG_DEBUG("SWD queue return value: %02x", err
);
681 static int xlnx_pcie_xvc_swd_init(void)
686 static const struct swd_driver xlnx_pcie_xvc_swd_ops
= {
687 .init
= xlnx_pcie_xvc_swd_init
,
688 .switch_seq
= xlnx_pcie_xvc_swd_switch_seq
,
689 .read_reg
= xlnx_pcie_xvc_swd_read_reg
,
690 .write_reg
= xlnx_pcie_xvc_swd_write_reg
,
691 .run
= xlnx_pcie_xvc_swd_run_queue
,
/* Transports this adapter supports; NULL-terminated. */
static const char * const xlnx_pcie_xvc_transports[] = { "jtag", "swd", NULL };
696 struct adapter_driver xlnx_pcie_xvc_adapter_driver
= {
697 .name
= "xlnx_pcie_xvc",
698 .transports
= xlnx_pcie_xvc_transports
,
699 .commands
= xlnx_pcie_xvc_command_handlers
,
701 .init
= &xlnx_pcie_xvc_init
,
702 .quit
= &xlnx_pcie_xvc_quit
,
704 .jtag_ops
= &xlnx_pcie_xvc_jtag_ops
,
705 .swd_ops
= &xlnx_pcie_xvc_swd_ops
,