/* target/hexagon/decode.c */

/*
 * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "iclass.h"
#include "attribs.h"
#include "genptr.h"
#include "decode.h"
#include "insn.h"
#include "printinsn.h"
#include "mmvec/decode_ext_mmvec.h"

#define fZXTN(N, M, VAL) ((VAL) & ((1LL << (N)) - 1))
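
/*
 * Note: fZXTN just keeps the low N bits of VAL; the M (full width) argument
 * is unused in this local definition.  For example, fZXTN(6, 32, 0x1ff)
 * yields 0x3f.
 */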

enum {
    EXT_IDX_noext = 0,
    EXT_IDX_noext_AFTER = 4,
    EXT_IDX_mmvec = 4,
    EXT_IDX_mmvec_AFTER = 8,
    XX_LAST_EXT_IDX
};

/*
 * Certain operand types represent a non-contiguous set of values.
 * For example, the compound compare-and-jump instruction can only access
 * registers R0-R7 and R16-23.
 * This table represents the mapping from the encoding to the actual values.
 */

#define DEF_REGMAP(NAME, ELEMENTS, ...) \
    static const unsigned int DECODE_REGISTER_##NAME[ELEMENTS] = \
    { __VA_ARGS__ };
/*         Name    Num  Table */
DEF_REGMAP(R_16,   16,  0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
DEF_REGMAP(R__8,   8,   0, 2, 4, 6, 16, 18, 20, 22)
DEF_REGMAP(R_8,    8,   0, 1, 2, 3, 4, 5, 6, 7)

#define DECODE_MAPPED_REG(OPNUM, NAME) \
    insn->regno[OPNUM] = DECODE_REGISTER_##NAME[insn->regno[OPNUM]];
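
/*
 * Worked example (illustrative): an R_16 operand encoded as 9 maps to
 * DECODE_REGISTER_R_16[9] == 17, i.e. register R17, while an encoding of 3
 * stays R3.  DECODE_MAPPED_REG rewrites insn->regno[OPNUM] in place with
 * the mapped value.
 */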

/* Helper functions for decode_*_generated.c.inc */
#define DECODE_MAPPED(NAME) \
    static int decode_mapped_reg_##NAME(DisasContext *ctx, int x) \
    { \
        return DECODE_REGISTER_##NAME[x]; \
    }
DECODE_MAPPED(R_16)
DECODE_MAPPED(R_8)
DECODE_MAPPED(R__8)

/* Helper function for decodetree_trans_funcs_generated.c.inc */
static int shift_left(DisasContext *ctx, int x, int n, int immno)
{
    int ret = x;
    Insn *insn = ctx->insn;
    if (!insn->extension_valid ||
        insn->which_extended != immno) {
        ret <<= n;
    }
    return ret;
}
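
/*
 * Behavior sketch (illustrative): a decodetree immediate that is normally
 * scaled left by n bits is left unscaled when this instruction's immediate
 * came from a constant extender (extension_valid set and which_extended ==
 * immno), since the extended value is used as-is.  E.g. with n == 2,
 * x == 3 becomes 12 for a non-extended insn but stays 3 when extended.
 */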

/* Include the generated decoder for 32 bit insn */
#include "decode_normal_generated.c.inc"
#include "decode_hvx_generated.c.inc"

/* Include the generated decoder for 16 bit insn */
#include "decode_subinsn_a_generated.c.inc"
#include "decode_subinsn_l1_generated.c.inc"
#include "decode_subinsn_l2_generated.c.inc"
#include "decode_subinsn_s1_generated.c.inc"
#include "decode_subinsn_s2_generated.c.inc"

/* Include the generated helpers for the decoder */
#include "decodetree_trans_funcs_generated.c.inc"

void decode_send_insn_to(Packet *packet, int start, int newloc)
{
    Insn tmpinsn;
    int direction;
    int i;
    if (start == newloc) {
        return;
    }
    if (start < newloc) {
        /* Move towards end */
        direction = 1;
    } else {
        /* move towards beginning */
        direction = -1;
    }
    for (i = start; i != newloc; i += direction) {
        tmpinsn = packet->insn[i];
        packet->insn[i] = packet->insn[i + direction];
        packet->insn[i + direction] = tmpinsn;
    }
}
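
/*
 * decode_send_insn_to() rotates the instruction at 'start' into 'newloc'
 * by repeated adjacent swaps, preserving the relative order of everything
 * in between.  Illustrative example: with insns [A, B, C, D], moving
 * index 3 to index 0 yields [D, A, B, C].
 */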

/* Fill newvalue registers with the correct regno */
static void
decode_fill_newvalue_regno(Packet *packet)
{
    int i, use_regidx, offset, def_idx, dst_idx;
    uint16_t def_opcode, use_opcode;
    char *dststr;

    for (i = 1; i < packet->num_insns; i++) {
        if (GET_ATTRIB(packet->insn[i].opcode, A_DOTNEWVALUE) &&
            !GET_ATTRIB(packet->insn[i].opcode, A_EXTENSION)) {
            use_opcode = packet->insn[i].opcode;

            /* It's a store, so we're adjusting the Nt field */
            if (GET_ATTRIB(use_opcode, A_STORE)) {
                use_regidx = strchr(opcode_reginfo[use_opcode], 't') -
                    opcode_reginfo[use_opcode];
            } else {    /* It's a Jump, so we're adjusting the Ns field */
                use_regidx = strchr(opcode_reginfo[use_opcode], 's') -
                    opcode_reginfo[use_opcode];
            }

            /*
             * What's encoded at the N-field is the offset to who's producing
             * the value.  Shift off the LSB which indicates odd/even register,
             * then walk backwards and skip over the constant extenders.
             */
            offset = packet->insn[i].regno[use_regidx] >> 1;
            def_idx = i - offset;
            for (int j = 0; j < offset; j++) {
                if (GET_ATTRIB(packet->insn[i - j - 1].opcode, A_IT_EXTENDER)) {
                    def_idx--;
                }
            }

            /*
             * Check for a badly encoded N-field which points to an instruction
             * out-of-range
             */
            g_assert(!((def_idx < 0) || (def_idx > (packet->num_insns - 1))));

            /*
             * packet->insn[def_idx] is the producer
             * Figure out which type of destination it produces
             * and the corresponding index in the reginfo
             */
            def_opcode = packet->insn[def_idx].opcode;
            dststr = strstr(opcode_wregs[def_opcode], "Rd");
            if (dststr) {
                dststr = strchr(opcode_reginfo[def_opcode], 'd');
            } else {
                dststr = strstr(opcode_wregs[def_opcode], "Rx");
                if (dststr) {
                    dststr = strchr(opcode_reginfo[def_opcode], 'x');
                } else {
                    dststr = strstr(opcode_wregs[def_opcode], "Re");
                    if (dststr) {
                        dststr = strchr(opcode_reginfo[def_opcode], 'e');
                    } else {
                        dststr = strstr(opcode_wregs[def_opcode], "Ry");
                        if (dststr) {
                            dststr = strchr(opcode_reginfo[def_opcode], 'y');
                        } else {
                            g_assert_not_reached();
                        }
                    }
                }
            }
            g_assert(dststr != NULL);

            /* Now patch up the consumer with the register number */
            dst_idx = dststr - opcode_reginfo[def_opcode];
            packet->insn[i].regno[use_regidx] =
                packet->insn[def_idx].regno[dst_idx];

            /*
             * We need to remember who produces this value to later
             * check if it was dynamically cancelled
             */
            packet->insn[i].new_value_producer_slot =
                packet->insn[def_idx].slot;
        }
    }
}
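
/*
 * Illustrative example (hypothetical packet): if insn[2] is a .new store
 * whose N-field is 2, then offset = 2 >> 1 = 1 and the producer is
 * insn[1]; had insn[1] been a constant extender, def_idx would step back
 * one more to insn[0].  The consumer's regno is then patched with the
 * producer's destination register number.
 */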

/* Split CJ into a compare and a jump */
static void decode_split_cmpjump(Packet *pkt)
{
    int last, i;
    int numinsns = pkt->num_insns;

    /*
     * First, split all compare-jumps.
     * The compare is sent to the end as a new instruction.
     * Do it this way so we don't reorder dual jumps. Those need to stay in
     * original order.
     */
    for (i = 0; i < numinsns; i++) {
        /* It's a cmp-jump */
        if (GET_ATTRIB(pkt->insn[i].opcode, A_NEWCMPJUMP)) {
            last = pkt->num_insns;
            pkt->insn[last] = pkt->insn[i];    /* copy the instruction */
            pkt->insn[last].part1 = true;      /* last insn does the CMP */
            pkt->insn[i].part1 = false;        /* existing insn does the JUMP */
            pkt->num_insns++;
        }
    }

    /* Now re-shuffle all the compares back to the beginning */
    for (i = 0; i < pkt->num_insns; i++) {
        if (pkt->insn[i].part1) {
            decode_send_insn_to(pkt, i, 0);
        }
    }
}

static bool decode_opcode_can_jump(int opcode)
{
    if ((GET_ATTRIB(opcode, A_JUMP)) ||
        (GET_ATTRIB(opcode, A_CALL)) ||
        (opcode == J2_trap0) ||
        (opcode == J2_pause)) {
        /* Exception to A_JUMP attribute */
        if (opcode == J4_hintjumpr) {
            return false;
        }
        return true;
    }

    return false;
}

static bool decode_opcode_ends_loop(int opcode)
{
    return GET_ATTRIB(opcode, A_HWLOOP0_END) ||
           GET_ATTRIB(opcode, A_HWLOOP1_END);
}

/* Set the is_* fields in each instruction */
static void decode_set_insn_attr_fields(Packet *pkt)
{
    int i;
    int numinsns = pkt->num_insns;
    uint16_t opcode;

    pkt->pkt_has_cof = false;
    pkt->pkt_has_multi_cof = false;
    pkt->pkt_has_endloop = false;
    pkt->pkt_has_dczeroa = false;

    for (i = 0; i < numinsns; i++) {
        opcode = pkt->insn[i].opcode;
        if (pkt->insn[i].part1) {
            continue;    /* Skip compare of cmp-jumps */
        }

        if (GET_ATTRIB(opcode, A_DCZEROA)) {
            pkt->pkt_has_dczeroa = true;
        }

        if (GET_ATTRIB(opcode, A_STORE)) {
            if (GET_ATTRIB(opcode, A_SCALAR_STORE) &&
                !GET_ATTRIB(opcode, A_MEMSIZE_0B)) {
                if (pkt->insn[i].slot == 0) {
                    pkt->pkt_has_store_s0 = true;
                } else {
                    pkt->pkt_has_store_s1 = true;
                }
            }
        }

        if (decode_opcode_can_jump(opcode)) {
            if (pkt->pkt_has_cof) {
                pkt->pkt_has_multi_cof = true;
            }
            pkt->pkt_has_cof = true;
        }

        pkt->insn[i].is_endloop = decode_opcode_ends_loop(opcode);

        pkt->pkt_has_endloop |= pkt->insn[i].is_endloop;

        if (pkt->pkt_has_endloop) {
            if (pkt->pkt_has_cof) {
                pkt->pkt_has_multi_cof = true;
            }
            pkt->pkt_has_cof = true;
        }
    }
}

/*
 * Shuffle for execution
 * Move stores to end (in same order as encoding)
 * Move compares to beginning (for use by .new insns)
 */
static void decode_shuffle_for_execution(Packet *packet)
{
    bool changed = false;
    int i;
    bool flag;    /* flag means we've seen a non-memory instruction */
    int n_mems;
    int last_insn = packet->num_insns - 1;

    /*
     * Skip end loops, somehow an end loop is getting in and messing
     * up the order
     */
    if (decode_opcode_ends_loop(packet->insn[last_insn].opcode)) {
        last_insn--;
    }

    do {
        changed = false;
        /*
         * Stores go last, must not reorder.
         * Cannot shuffle stores past loads, either.
         * Iterate backwards.  If we see a non-memory instruction,
         * then a store, shuffle the store to the front.  Don't shuffle
         * stores wrt each other or a load.
         */
        for (flag = false, n_mems = 0, i = last_insn; i >= 0; i--) {
            int opcode = packet->insn[i].opcode;

            if (flag && GET_ATTRIB(opcode, A_STORE)) {
                decode_send_insn_to(packet, i, last_insn - n_mems);
                n_mems++;
                changed = true;
            } else if (GET_ATTRIB(opcode, A_STORE)) {
                n_mems++;
            } else if (GET_ATTRIB(opcode, A_LOAD)) {
                /*
                 * Don't set flag, since we don't want to shuffle a
                 * store past a load
                 */
                n_mems++;
            } else if (GET_ATTRIB(opcode, A_DOTNEWVALUE)) {
                /*
                 * Don't set flag, since we don't want to shuffle past
                 * a .new value
                 */
            } else {
                flag = true;
            }
        }

        if (changed) {
            continue;
        }
        /* Compares go first, may be reordered wrt each other */
        for (flag = false, i = 0; i < last_insn + 1; i++) {
            int opcode = packet->insn[i].opcode;

            if ((strstr(opcode_wregs[opcode], "Pd4") ||
                 strstr(opcode_wregs[opcode], "Pe4")) &&
                GET_ATTRIB(opcode, A_STORE) == 0) {
                /* This should be a compare (not a store conditional) */
                if (flag) {
                    decode_send_insn_to(packet, i, 0);
                    changed = true;
                    continue;
                }
            } else if (GET_ATTRIB(opcode, A_IMPLICIT_WRITES_P3) &&
                       !decode_opcode_ends_loop(packet->insn[i].opcode)) {
                /*
                 * spNloop instruction
                 * Don't reorder endloops; they are not valid for .new uses,
                 * and we want to match HW
                 */
                if (flag) {
                    decode_send_insn_to(packet, i, 0);
                    changed = true;
                    continue;
                }
            } else if (GET_ATTRIB(opcode, A_IMPLICIT_WRITES_P0) &&
                       !GET_ATTRIB(opcode, A_NEWCMPJUMP)) {
                if (flag) {
                    decode_send_insn_to(packet, i, 0);
                    changed = true;
                    continue;
                }
            } else {
                flag = true;
            }
        }
        if (changed) {
            continue;
        }
    } while (changed);

    /*
     * If we have a .new register compare/branch, move that to the very
     * very end, past stores
     */
    for (i = 0; i < last_insn; i++) {
        if (GET_ATTRIB(packet->insn[i].opcode, A_DOTNEWVALUE)) {
            decode_send_insn_to(packet, i, last_insn);
            break;
        }
    }
}

static void
apply_extender(Packet *pkt, int i, uint32_t extender)
{
    int immed_num;
    uint32_t base_immed;

    immed_num = pkt->insn[i].which_extended;
    base_immed = pkt->insn[i].immed[immed_num];

    pkt->insn[i].immed[immed_num] = extender | fZXTN(6, 32, base_immed);
}
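
/*
 * Illustrative example: the extender word (immed[0] of the preceding
 * A4_ext insn) supplies the upper bits of the final immediate, and only
 * the low 6 bits of the base instruction's immediate are kept.  E.g.
 * extender == 0x1000 and base_immed == 0x7f gives 0x1000 | 0x3f == 0x103f.
 */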

static void decode_apply_extenders(Packet *packet)
{
    int i;
    for (i = 0; i < packet->num_insns; i++) {
        if (GET_ATTRIB(packet->insn[i].opcode, A_IT_EXTENDER)) {
            packet->insn[i + 1].extension_valid = true;
            apply_extender(packet, i + 1, packet->insn[i].immed[0]);
        }
    }
}

static void decode_remove_extenders(Packet *packet)
{
    int i, j;
    for (i = 0; i < packet->num_insns; i++) {
        if (GET_ATTRIB(packet->insn[i].opcode, A_IT_EXTENDER)) {
            /* Remove this one by moving the remaining instructions down */
            for (j = i;
                 (j < packet->num_insns - 1) && (j < INSTRUCTIONS_MAX - 1);
                 j++) {
                packet->insn[j] = packet->insn[j + 1];
            }
            packet->num_insns--;
        }
    }
}

static SlotMask get_valid_slots(const Packet *pkt, unsigned int slot)
{
    if (GET_ATTRIB(pkt->insn[slot].opcode, A_EXTENSION)) {
        return mmvec_ext_decode_find_iclass_slots(pkt->insn[slot].opcode);
    } else {
        return find_iclass_slots(pkt->insn[slot].opcode,
                                 pkt->insn[slot].iclass);
    }
}

/*
 * Section 10.3 of the Hexagon V73 Programmer's Reference Manual
 *
 * A duplex is encoded as a 32-bit instruction with bits [15:14] set to 00.
 * The sub-instructions that comprise a duplex are encoded as 13-bit fields
 * in the duplex.
 *
 * Per table 10-4, the 4-bit duplex iclass is encoded in bits 31:29, 13
 */
static uint32_t get_duplex_iclass(uint32_t encoding)
{
    uint32_t iclass = extract32(encoding, 13, 1);
    iclass = deposit32(iclass, 1, 3, extract32(encoding, 29, 3));
    return iclass;
}
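
/*
 * Illustrative extraction: bit 13 of the encoding becomes bit 0 of the
 * iclass and bits 31:29 become bits 3:1.  So an encoding with bits
 * 31:29 == 0b001 and bit 13 == 1 gives iclass 0b0011 == 0x3, i.e. both
 * sub-instructions come from the A group (see decode_groups below).
 */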

/*
 * Per table 10-5, the duplex ICLASS field values that specify the group of
 * each sub-instruction in a duplex
 *
 * This table points to the decode instruction for each entry in the table
 */
typedef bool (*subinsn_decode_func)(DisasContext *ctx, uint16_t insn);
typedef struct {
    subinsn_decode_func decode_slot0_subinsn;
    subinsn_decode_func decode_slot1_subinsn;
} subinsn_decode_groups;

static const subinsn_decode_groups decode_groups[16] = {
    [0x0] = { decode_subinsn_l1, decode_subinsn_l1 },
    [0x1] = { decode_subinsn_l2, decode_subinsn_l1 },
    [0x2] = { decode_subinsn_l2, decode_subinsn_l2 },
    [0x3] = { decode_subinsn_a,  decode_subinsn_a },
    [0x4] = { decode_subinsn_l1, decode_subinsn_a },
    [0x5] = { decode_subinsn_l2, decode_subinsn_a },
    [0x6] = { decode_subinsn_s1, decode_subinsn_a },
    [0x7] = { decode_subinsn_s2, decode_subinsn_a },
    [0x8] = { decode_subinsn_s1, decode_subinsn_l1 },
    [0x9] = { decode_subinsn_s1, decode_subinsn_l2 },
    [0xa] = { decode_subinsn_s1, decode_subinsn_s1 },
    [0xb] = { decode_subinsn_s2, decode_subinsn_s1 },
    [0xc] = { decode_subinsn_s2, decode_subinsn_l1 },
    [0xd] = { decode_subinsn_s2, decode_subinsn_l2 },
    [0xe] = { decode_subinsn_s2, decode_subinsn_s2 },
    [0xf] = { NULL,              NULL },              /* Reserved */
};

static uint16_t get_slot0_subinsn(uint32_t encoding)
{
    return extract32(encoding, 0, 13);
}

static uint16_t get_slot1_subinsn(uint32_t encoding)
{
    return extract32(encoding, 16, 13);
}
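
/*
 * Putting the pieces together (illustrative): in a duplex word, bits
 * [12:0] hold the slot 0 sub-instruction, bits [28:16] hold the slot 1
 * sub-instruction, and bits [15:14] are the 00 parse bits that mark the
 * word as a duplex.
 */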

static unsigned int
decode_insns(DisasContext *ctx, Insn *insn, uint32_t encoding)
{
    if (parse_bits(encoding) != 0) {
        if (decode_normal(ctx, encoding) ||
            decode_hvx(ctx, encoding)) {
            insn->generate = opcode_genptr[insn->opcode];
            insn->iclass = iclass_bits(encoding);
            return 1;
        }
        g_assert_not_reached();
    } else {
        uint32_t iclass = get_duplex_iclass(encoding);
        unsigned int slot0_subinsn = get_slot0_subinsn(encoding);
        unsigned int slot1_subinsn = get_slot1_subinsn(encoding);
        subinsn_decode_func decode_slot0_subinsn =
            decode_groups[iclass].decode_slot0_subinsn;
        subinsn_decode_func decode_slot1_subinsn =
            decode_groups[iclass].decode_slot1_subinsn;

        /* The slot1 subinsn needs to be in the packet first */
        if (decode_slot1_subinsn(ctx, slot1_subinsn)) {
            insn->generate = opcode_genptr[insn->opcode];
            insn->iclass = iclass_bits(encoding);
            ctx->insn = ++insn;
            if (decode_slot0_subinsn(ctx, slot0_subinsn)) {
                insn->generate = opcode_genptr[insn->opcode];
                insn->iclass = iclass_bits(encoding);
                return 2;
            }
        }
        g_assert_not_reached();
    }
}

static void decode_add_endloop_insn(Insn *insn, int loopnum)
{
    if (loopnum == 10) {
        insn->opcode = J2_endloop01;
        insn->generate = opcode_genptr[J2_endloop01];
    } else if (loopnum == 1) {
        insn->opcode = J2_endloop1;
        insn->generate = opcode_genptr[J2_endloop1];
    } else if (loopnum == 0) {
        insn->opcode = J2_endloop0;
        insn->generate = opcode_genptr[J2_endloop0];
    } else {
        g_assert_not_reached();
    }
}
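
/*
 * Note: loopnum == 10 is a sentinel meaning "both loops end here" (see the
 * :endloop handling in decode_packet), which selects the combined
 * J2_endloop01 opcode.
 */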

static bool decode_parsebits_is_loopend(uint32_t encoding32)
{
    uint32_t bits = parse_bits(encoding32);
    return bits == 0x2;
}
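
/*
 * Parse-bits value 0x2 on a packet word marks a hardware-loop end (see
 * decode_packet below): 0x2 on the first word ends loop 0, and 0x2 on the
 * second word ends loop 1.
 */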

static bool has_valid_slot_assignment(Packet *pkt)
{
    int used_slots = 0;
    for (int i = 0; i < pkt->num_insns; i++) {
        int slot_mask;
        Insn *insn = &pkt->insn[i];
        if (decode_opcode_ends_loop(insn->opcode)) {
            /* We overload slot 0 for endloop. */
            continue;
        }
        slot_mask = 1 << insn->slot;
        if (used_slots & slot_mask) {
            return false;
        }
        used_slots |= slot_mask;
    }
    return true;
}

static bool
decode_set_slot_number(Packet *pkt)
{
    int slot;
    int i;
    bool hit_mem_insn = false;
    bool hit_duplex = false;
    bool slot0_found = false;
    bool slot1_found = false;
    int slot1_iidx = 0;

    /*
     * The slots are encoded in reverse order
     * For each instruction, count down until you find a suitable slot
     */
    for (i = 0, slot = 3; i < pkt->num_insns; i++) {
        SlotMask valid_slots = get_valid_slots(pkt, i);

        while (!(valid_slots & (1 << slot))) {
            slot--;
        }
        pkt->insn[i].slot = slot;
        if (slot) {
            /* I've assigned the slot, now decrement it for the next insn */
            slot--;
        }
    }

    /* Fix the exceptions - mem insns to slot 0,1 */
    for (i = pkt->num_insns - 1; i >= 0; i--) {
        /* First memory instruction always goes to slot 0 */
        if ((GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE) ||
             GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE_PACKET_RULES)) &&
            !hit_mem_insn) {
            hit_mem_insn = true;
            pkt->insn[i].slot = 0;
            continue;
        }

        /* Next memory instruction always goes to slot 1 */
        if ((GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE) ||
             GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE_PACKET_RULES)) &&
            hit_mem_insn) {
            pkt->insn[i].slot = 1;
        }
    }

    /* Fix the exceptions - duplex always slot 0,1 */
    for (i = pkt->num_insns - 1; i >= 0; i--) {
        /* First subinsn always goes to slot 0 */
        if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN) && !hit_duplex) {
            hit_duplex = true;
            pkt->insn[i].slot = 0;
            continue;
        }

        /* Next subinsn always goes to slot 1 */
        if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN) && hit_duplex) {
            pkt->insn[i].slot = 1;
        }
    }

    /* Fix the exceptions - slot 1 is never empty, always aligns to slot 0 */
    for (i = pkt->num_insns - 1; i >= 0; i--) {
        /* Is slot0 used? */
        if (pkt->insn[i].slot == 0) {
            bool is_endloop = (pkt->insn[i].opcode == J2_endloop01);
            is_endloop |= (pkt->insn[i].opcode == J2_endloop0);
            is_endloop |= (pkt->insn[i].opcode == J2_endloop1);

            /*
             * Make sure it's not endloop since, we're overloading
             * slot0 for endloop
             */
            if (!is_endloop) {
                slot0_found = true;
            }
        }
        /* Is slot1 used? */
        if (pkt->insn[i].slot == 1) {
            slot1_found = true;
            slot1_iidx = i;
        }
    }
    /* Is slot0 empty and slot1 used? */
    if ((!slot0_found) && slot1_found) {
        /* Then push it to slot0 */
        pkt->insn[slot1_iidx].slot = 0;
    }

    return has_valid_slot_assignment(pkt);
}
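
/*
 * Illustrative walk-through (hypothetical packet of three insns that can
 * all use any slot): the first pass assigns slots 3, 2, 1 counting down;
 * the fix-up passes then move the first memory insn (scanning from the
 * end) to slot 0 and the next to slot 1, do the same for duplex
 * sub-insns, and finally push a lone slot 1 insn down to slot 0 so that
 * slot 1 is never used while slot 0 is empty.
 */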

/*
 * decode_packet
 * Decodes packet with given words
 * Returns 0 on insufficient words,
 * or number of words used on success
 */

int decode_packet(DisasContext *ctx, int max_words, const uint32_t *words,
                  Packet *pkt, bool disas_only)
{
    int num_insns = 0;
    int words_read = 0;
    bool end_of_packet = false;
    int new_insns = 0;
    int i;
    uint32_t encoding32;

    /* Initialize */
    memset(pkt, 0, sizeof(*pkt));
    /* Try to build packet */
    while (!end_of_packet && (words_read < max_words)) {
        Insn *insn = &pkt->insn[num_insns];
        ctx->insn = insn;
        encoding32 = words[words_read];
        end_of_packet = is_packet_end(encoding32);
        new_insns = decode_insns(ctx, insn, encoding32);
        g_assert(new_insns > 0);
        /*
         * If we saw an extender, mark next word extended so immediate
         * decode works
         */
        if (pkt->insn[num_insns].opcode == A4_ext) {
            pkt->insn[num_insns + 1].extension_valid = true;
        }
        num_insns += new_insns;
        words_read++;
    }

    pkt->num_insns = num_insns;
    if (!end_of_packet) {
        /* Ran out of words! */
        return 0;
    }
    pkt->encod_pkt_size_in_bytes = words_read * 4;
    pkt->pkt_has_hvx = false;
    for (i = 0; i < num_insns; i++) {
        pkt->pkt_has_hvx |=
            GET_ATTRIB(pkt->insn[i].opcode, A_CVI);
    }

    /*
     * Check for :endloop in the parse bits
     * Section 10.6 of the Programmer's Reference describes the encoding
     *     The end of hardware loop 0 can be encoded with 2 words
     *     The end of hardware loop 1 needs 3 words
     */
    if ((words_read == 2) && (decode_parsebits_is_loopend(words[0]))) {
        decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 0);
    }
    if (words_read >= 3) {
        bool has_loop0, has_loop1;
        has_loop0 = decode_parsebits_is_loopend(words[0]);
        has_loop1 = decode_parsebits_is_loopend(words[1]);
        if (has_loop0 && has_loop1) {
            decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 10);
        } else if (has_loop1) {
            decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 1);
        } else if (has_loop0) {
            decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 0);
        }
    }

    decode_apply_extenders(pkt);
    if (!disas_only) {
        decode_remove_extenders(pkt);
        if (!decode_set_slot_number(pkt)) {
            /* Invalid packet */
            return 0;
        }
    }
    decode_fill_newvalue_regno(pkt);

    if (pkt->pkt_has_hvx) {
        mmvec_ext_decode_checks(pkt, disas_only);
    }

    if (!disas_only) {
        decode_shuffle_for_execution(pkt);
        decode_split_cmpjump(pkt);
        decode_set_insn_attr_fields(pkt);
    }

    return words_read;
}

/* Used for "-d in_asm" logging */
int disassemble_hexagon(uint32_t *words, int nwords, bfd_vma pc,
                        GString *buf)
{
    DisasContext ctx;
    Packet pkt;

    memset(&ctx, 0, sizeof(DisasContext));
    ctx.pkt = &pkt;

    if (decode_packet(&ctx, nwords, words, &pkt, true) > 0) {
        snprint_a_pkt_disas(buf, &pkt, words, pc);
        return pkt.encod_pkt_size_in_bytes;
    } else {
        g_string_assign(buf, "<invalid>");
        return 0;
    }
}