tcg/optimize: prefer the "op a, a, b" form for commutative ops
/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config.h"

#include <stdlib.h>
#include <stdio.h>

#include "qemu-common.h"
#include "tcg-op.h"
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
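
/* For example, CASE_OP_32_64(add) expands to the two labels
   "case INDEX_op_add_i32:" and "case INDEX_op_add_i64:", so a single
   switch arm below handles both widths of an operation. */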
typedef enum {
    TCG_TEMP_UNDEF = 0,
    TCG_TEMP_CONST,
    TCG_TEMP_COPY,
} tcg_temp_state;
struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
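
/* temps[] is indexed by temp number, in parallel with TCGContext's own
   temp array.  For a TCG_TEMP_CONST entry, val holds the known constant
   value.  For a TCG_TEMP_COPY entry, prev_copy/next_copy link all temps
   known to hold the same value into a doubly linked circular list (see
   the comment in tcg_constant_folding below). */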
/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP's copy list had only one
   other member, that member is no longer a copy of anything, so its state
   is cleared as well. */
static void reset_temp(TCGArg temp)
{
    if (temps[temp].state == TCG_TEMP_COPY) {
        if (temps[temp].prev_copy == temps[temp].next_copy) {
            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
        } else {
            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        }
    }
    temps[temp].state = TCG_TEMP_UNDEF;
}
static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}
static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
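
/* find_better_copy: among all known copies of TEMP, prefer a global,
   then (if TEMP is not itself a temp local) a temp local.  The rationale,
   inferred from the search order below rather than stated in the original,
   is that longer-lived temps make better canonical names for a value. */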
static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* No better representation found: return the same temp. */
    return temp;
}
static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (arg1 == arg2) {
        return true;
    }

    if (temps[arg1].state != TCG_TEMP_COPY
        || temps[arg2].state != TCG_TEMP_COPY) {
        return false;
    }

    for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}
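
/* tcg_opt_gen_mov below records DST as a copy of SRC by splicing DST into
   SRC's circular copy list, but only when the two temps have the same type;
   a mixed i32/i64 pair is emitted as a mov without being remembered.
   Constants never reach here (hence the assert): they are propagated with
   tcg_opt_gen_movi instead. */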
static void tcg_opt_gen_mov(TCGContext *s, TCGArg *gen_args,
                            TCGArg dst, TCGArg src)
{
    reset_temp(dst);
    assert(temps[src].state != TCG_TEMP_CONST);

    if (s->temps[src].type == s->temps[dst].type) {
        if (temps[src].state != TCG_TEMP_COPY) {
            temps[src].state = TCG_TEMP_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }

    gen_args[0] = dst;
    gen_args[1] = src;
}
static void tcg_opt_gen_movi(TCGArg *gen_args, TCGArg dst, TCGArg val)
{
    reset_temp(dst);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    gen_args[0] = dst;
    gen_args[1] = val;
}
static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (uint32_t)y;

    case INDEX_op_shl_i64:
        return (uint64_t)x << (uint64_t)y;

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (uint32_t)y;

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (uint64_t)y;

    case INDEX_op_sar_i32:
        return (int32_t)x >> (int32_t)y;

    case INDEX_op_sar_i64:
        return (int64_t)x >> (int64_t)y;

    case INDEX_op_rotr_i32:
        x = ((uint32_t)x << (32 - y)) | ((uint32_t)x >> y);
        return x;

    case INDEX_op_rotr_i64:
        x = ((uint64_t)x << (64 - y)) | ((uint64_t)x >> y);
        return x;

    case INDEX_op_rotl_i32:
        x = ((uint32_t)x << y) | ((uint32_t)x >> (32 - y));
        return x;

    case INDEX_op_rotl_i64:
        x = ((uint64_t)x << y) | ((uint64_t)x >> (64 - y));
        return x;

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
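
/* do_constant_folding_2 computes at full tcg_target_ulong width; the
   wrapper below masks the result to 32 bits for 32-bit ops, so that e.g.
   an add_i32 of 0xffffffff and 1 folds to 0 rather than 0x100000000 on a
   64-bit host. */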
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
        res &= 0xffffffff;
    }
    return res;
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
        switch (op_bits(op)) {
        case 32:
            switch (c) {
            case TCG_COND_EQ:
                return (uint32_t)temps[x].val == (uint32_t)temps[y].val;
            case TCG_COND_NE:
                return (uint32_t)temps[x].val != (uint32_t)temps[y].val;
            case TCG_COND_LT:
                return (int32_t)temps[x].val < (int32_t)temps[y].val;
            case TCG_COND_GE:
                return (int32_t)temps[x].val >= (int32_t)temps[y].val;
            case TCG_COND_LE:
                return (int32_t)temps[x].val <= (int32_t)temps[y].val;
            case TCG_COND_GT:
                return (int32_t)temps[x].val > (int32_t)temps[y].val;
            case TCG_COND_LTU:
                return (uint32_t)temps[x].val < (uint32_t)temps[y].val;
            case TCG_COND_GEU:
                return (uint32_t)temps[x].val >= (uint32_t)temps[y].val;
            case TCG_COND_LEU:
                return (uint32_t)temps[x].val <= (uint32_t)temps[y].val;
            case TCG_COND_GTU:
                return (uint32_t)temps[x].val > (uint32_t)temps[y].val;
            }
            break;

        case 64:
            switch (c) {
            case TCG_COND_EQ:
                return (uint64_t)temps[x].val == (uint64_t)temps[y].val;
            case TCG_COND_NE:
                return (uint64_t)temps[x].val != (uint64_t)temps[y].val;
            case TCG_COND_LT:
                return (int64_t)temps[x].val < (int64_t)temps[y].val;
            case TCG_COND_GE:
                return (int64_t)temps[x].val >= (int64_t)temps[y].val;
            case TCG_COND_LE:
                return (int64_t)temps[x].val <= (int64_t)temps[y].val;
            case TCG_COND_GT:
                return (int64_t)temps[x].val > (int64_t)temps[y].val;
            case TCG_COND_LTU:
                return (uint64_t)temps[x].val < (uint64_t)temps[y].val;
            case TCG_COND_GEU:
                return (uint64_t)temps[x].val >= (uint64_t)temps[y].val;
            case TCG_COND_LEU:
                return (uint64_t)temps[x].val <= (uint64_t)temps[y].val;
            case TCG_COND_GTU:
                return (uint64_t)temps[x].val > (uint64_t)temps[y].val;
            }
            break;
        }
    } else if (temps_are_copies(x, y)) {
        switch (c) {
        case TCG_COND_GT:
        case TCG_COND_LTU:
        case TCG_COND_LT:
        case TCG_COND_GTU:
        case TCG_COND_NE:
            return 0;
        case TCG_COND_GE:
        case TCG_COND_GEU:
        case TCG_COND_LE:
        case TCG_COND_LEU:
        case TCG_COND_EQ:
            return 1;
        }
    } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    } else {
        return 2;
    }

    fprintf(stderr,
            "Unrecognized bitness %d or condition %d in "
            "do_constant_folding_cond.\n", op_bits(op), c);
    tcg_abort();
}
/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int i, nb_ops, op_index, nb_temps, nb_globals, nb_call_args;
    TCGOpcode op;
    const TCGOpDef *def;
    TCGArg *gen_args;
    TCGArg tmp;
    TCGCond cond;

    /* Array TEMPS has an element for each temp.
       If a temp holds a constant then its value is kept in that element.
       If a temp is a copy of other temps then the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));

    nb_ops = tcg_opc_ptr - gen_opc_buf;
    gen_args = args;
    for (op_index = 0; op_index < nb_ops; op_index++) {
        op = gen_opc_buf[op_index];
        def = &tcg_op_defs[op];
        /* Do copy propagation */
        if (op == INDEX_op_call) {
            int nb_oargs = args[0] >> 16;
            int nb_iargs = args[0] & 0xffff;
            for (i = nb_oargs + 1; i < nb_oargs + nb_iargs + 1; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = find_better_copy(s, args[i]);
                }
            }
        } else {
            for (i = def->nb_oargs; i < def->nb_oargs + def->nb_iargs; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = find_better_copy(s, args[i]);
                }
            }
        }
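
        /* Every input argument has now been replaced by the preferred
           member of its copy equivalence class, so the pattern matching
           below sees canonical operands. */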
        /* For commutative operations make constant second argument */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
            /* Prefer the constant in second argument, and then the form
               op a, a, b, which is better handled on non-RISC hosts. */
            if (temps[args[1]].state == TCG_TEMP_CONST || (args[0] == args[2]
                && temps[args[2]].state != TCG_TEMP_CONST)) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
            }
            break;
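        /* Example of the rule above: "add_i32 t0, t1, t0" becomes
           "add_i32 t0, t0, t1", matching the two-operand form that
           non-RISC hosts such as x86 implement directly. */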
        CASE_OP_32_64(brcond):
            if (temps[args[0]].state == TCG_TEMP_CONST
                && temps[args[1]].state != TCG_TEMP_CONST) {
                tmp = args[0];
                args[0] = args[1];
                args[1] = tmp;
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state != TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            cond = args[5];
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state != TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
                cond = tcg_swap_cond(cond);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation.  */
            if (args[0] == args[3]) {
                tmp = args[3];
                args[3] = args[4];
                args[4] = tmp;
                cond = tcg_invert_cond(cond);
            }
            args[5] = cond;
            break;
        default:
            break;
        }
        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0" */
        switch (op) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify expression for "op r, a, 0 => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                /* Proceed with possible constant folding. */
                break;
            }
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                if (temps_are_copies(args[0], args[1])) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(and):
        CASE_OP_32_64(mul):
            if ((temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0)) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (temps_are_copies(args[1], args[2])) {
                if (temps_are_copies(args[0], args[1])) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(sub):
        CASE_OP_32_64(xor):
            if (temps_are_copies(args[1], args[2])) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                gen_args += 2;
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }
        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted into arguments by the
           register allocator where needed and possible.  Also detect
           copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if (temps_are_copies(args[0], args[1])) {
                args += 2;
                gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                gen_args += 2;
                args += 2;
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let movi case handle it. */
            op = op_to_movi(op);
            gen_opc_buf[op_index] = op;
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(gen_args, args[0], args[1]);
            gen_args += 2;
            args += 2;
            break;
        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
            }
            gen_args += 2;
            args += 2;
            break;
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args += 3;
            }
            args += 3;
            break;
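        /* In the non-constant paths above, reset_temp(args[0]) discards any
           constant/copy knowledge previously recorded for the destination:
           after the op executes, its value is no longer known statically. */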
        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
            if (tmp != 2) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args += 4;
            }
            args += 4;
            break;
        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
                    gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[3];
                    gen_args += 1;
                } else {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                }
            } else {
                memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args += 4;
            }
            args += 4;
            break;
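        /* For brcond, temps[] is wiped whenever the branch may be taken
           (the label can also be reached from elsewhere, so no knowledge
           survives across it); only a branch folded to "never taken"
           preserves the current state. */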
        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
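            /* When the condition folds to a constant, tmp is 0 or 1 and
               args[4 - tmp] selects the surviving input: args[3] (the
               "true" value) when tmp == 1, args[4] (the "false" value)
               when tmp == 0. */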
            if (tmp != 2) {
                if (temps_are_copies(args[0], args[4-tmp])) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
                    gen_opc_buf[op_index] = op_to_movi(op);
                    tcg_opt_gen_movi(gen_args, args[0], temps[args[4-tmp]].val);
                    gen_args += 2;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[4-tmp]);
                    gen_args += 2;
                }
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args[4] = args[4];
                gen_args[5] = args[5];
                gen_args += 6;
            }
            args += 6;
            break;
        case INDEX_op_call:
            nb_call_args = (args[0] >> 16) + (args[0] & 0xffff);
            if (!(args[nb_call_args + 1] & (TCG_CALL_CONST | TCG_CALL_PURE))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i);
                }
            }
            for (i = 0; i < (args[0] >> 16); i++) {
                reset_temp(args[i + 1]);
            }
            i = nb_call_args + 3;
            while (i) {
                *gen_args = *args;
                args++;
                gen_args++;
                i--;
            }
            break;
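        /* A call flagged neither TCG_CALL_CONST nor TCG_CALL_PURE may
           clobber globals, so every global's recorded state is reset; the
           call's output temps are reset unconditionally.  The copy count
           of nb_call_args + 3 presumably covers the argument-count word
           and the trailing function/flags words of this TCG version's
           call layout (an assumption; the layout is defined in tcg.c). */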
        default:
            /* Default case: we know nothing about the operation, so no
               propagation is done.  We trash everything if the operation
               is the end of a basic block, otherwise we only trash the
               output args. */
            if (def->flags & TCG_OPF_BB_END) {
                memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
            } else {
                for (i = 0; i < def->nb_oargs; i++) {
                    reset_temp(args[i]);
                }
            }
            for (i = 0; i < def->nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += def->nb_args;
            gen_args += def->nb_args;
            break;
        }
    }
    return gen_args;
}
TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
                     TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}
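
/* tcg_optimize() is the pass entry point.  In this era of TCG it is called
   from tcg.c's code generation path (guarded by USE_TCG_OPTIMIZATIONS)
   before liveness analysis, rewriting gen_opc_buf in place and returning
   the new end of the argument buffer (caller and guard details assumed
   from the contemporary tcg.c, not shown here). */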