From dba2ece2a4f1734c763e4c2f0f92ba27ff8eb941 Mon Sep 17 00:00:00 2001
From: jakub
Date: Fri, 6 Jul 2018 21:42:41 +0000
Subject: [PATCH] PR tree-optimization/86401

	* fold-const.c (fold_binary_loc) <case BIT_AND_EXPR>: Move the
	((A & N) + B) & M -> (A + B) & M etc. optimization into ...
	(fold_bit_and_mask): ... here.  New helper function for match.pd.
	* fold-const.h (fold_bit_and_mask): Declare.
	* match.pd (((A & N) + B) & M -> (A + B) & M): New optimization.

	* gcc.dg/tree-ssa/pr86401-1.c: New test.
	* gcc.dg/tree-ssa/pr86401-2.c: New test.
	* c-c++-common/rotate-9.c: New test.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@262485 138bc75d-0d04-0410-961f-82ee72b054a4
---
 gcc/ChangeLog                             |   9 ++
 gcc/fold-const.c                          | 209 ++++++++++++++----------
 gcc/fold-const.h                          |   3 +
 gcc/match.pd                              |  54 ++++++++
 gcc/testsuite/ChangeLog                   |   7 +
 gcc/testsuite/c-c++-common/rotate-9.c     |  19 +++
 gcc/testsuite/gcc.dg/tree-ssa/pr86401-1.c |  48 +++++++
 gcc/testsuite/gcc.dg/tree-ssa/pr86401-2.c |  48 +++++++
 8 files changed, 282 insertions(+), 115 deletions(-)
 create mode 100644 gcc/testsuite/c-c++-common/rotate-9.c
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/pr86401-1.c
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/pr86401-2.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c4ac3ea9ef5..7a978971066 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,12 @@
+2018-07-06  Jakub Jelinek
+
+	PR tree-optimization/86401
+	* fold-const.c (fold_binary_loc) <case BIT_AND_EXPR>: Move the
+	((A & N) + B) & M -> (A + B) & M etc. optimization into ...
+	(fold_bit_and_mask): ... here.  New helper function for match.pd.
+	* fold-const.h (fold_bit_and_mask): Declare.
+	* match.pd (((A & N) + B) & M -> (A + B) & M): New optimization.
+
 2018-07-06  Peter Bergner
 
 	PR target/86324
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 8476c223e4f..6d5c8f529fb 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -10236,121 +10236,6 @@ fold_binary_loc (location_t loc, enum tree_code code, tree type,
 	    }
 	}
 
-      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
-	 ((A & N) + B) & M -> (A + B) & M
-	 Similarly if (N & M) == 0,
-	 ((A | N) + B) & M -> (A + B) & M
-	 and for - instead of + (or unary - instead of +)
-	 and/or ^ instead of |.
-	 If B is constant and (B & M) == 0, fold into A & M.  */
-      if (TREE_CODE (arg1) == INTEGER_CST)
-	{
-	  wi::tree_to_wide_ref cst1 = wi::to_wide (arg1);
-	  if ((~cst1 != 0) && (cst1 & (cst1 + 1)) == 0
-	      && INTEGRAL_TYPE_P (TREE_TYPE (arg0))
-	      && (TREE_CODE (arg0) == PLUS_EXPR
-		  || TREE_CODE (arg0) == MINUS_EXPR
-		  || TREE_CODE (arg0) == NEGATE_EXPR)
-	      && (TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
-		  || TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE))
-	    {
-	      tree pmop[2];
-	      int which = 0;
-	      wide_int cst0;
-
-	      /* Now we know that arg0 is (C + D) or (C - D) or
-		 -C and arg1 (M) is == (1LL << cst) - 1.
-		 Store C into PMOP[0] and D into PMOP[1].  */
-	      pmop[0] = TREE_OPERAND (arg0, 0);
-	      pmop[1] = NULL;
-	      if (TREE_CODE (arg0) != NEGATE_EXPR)
-		{
-		  pmop[1] = TREE_OPERAND (arg0, 1);
-		  which = 1;
-		}
-
-	      if ((wi::max_value (TREE_TYPE (arg0)) & cst1) != cst1)
-		which = -1;
-
-	      for (; which >= 0; which--)
-		switch (TREE_CODE (pmop[which]))
-		  {
-		  case BIT_AND_EXPR:
-		  case BIT_IOR_EXPR:
-		  case BIT_XOR_EXPR:
-		    if (TREE_CODE (TREE_OPERAND (pmop[which], 1))
-			!= INTEGER_CST)
-		      break;
-		    cst0 = wi::to_wide (TREE_OPERAND (pmop[which], 1)) & cst1;
-		    if (TREE_CODE (pmop[which]) == BIT_AND_EXPR)
-		      {
-			if (cst0 != cst1)
-			  break;
-		      }
-		    else if (cst0 != 0)
-		      break;
-		    /* If C or D is of the form (A & N) where
-		       (N & M) == M, or of the form (A | N) or
-		       (A ^ N) where (N & M) == 0, replace it with A.  */
-		    pmop[which] = TREE_OPERAND (pmop[which], 0);
-		    break;
-		  case INTEGER_CST:
-		    /* If C or D is a N where (N & M) == 0, it can be
-		       omitted (assumed 0).  */
-		    if ((TREE_CODE (arg0) == PLUS_EXPR
-			 || (TREE_CODE (arg0) == MINUS_EXPR && which == 0))
-			&& (cst1 & wi::to_wide (pmop[which])) == 0)
-		      pmop[which] = NULL;
-		    break;
-		  default:
-		    break;
-		  }
-
-	      /* Only build anything new if we optimized one or both arguments
-		 above.  */
-	      if (pmop[0] != TREE_OPERAND (arg0, 0)
-		  || (TREE_CODE (arg0) != NEGATE_EXPR
-		      && pmop[1] != TREE_OPERAND (arg0, 1)))
-		{
-		  tree utype = TREE_TYPE (arg0);
-		  if (! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0)))
-		    {
-		      /* Perform the operations in a type that has defined
-			 overflow behavior.  */
-		      utype = unsigned_type_for (TREE_TYPE (arg0));
-		      if (pmop[0] != NULL)
-			pmop[0] = fold_convert_loc (loc, utype, pmop[0]);
-		      if (pmop[1] != NULL)
-			pmop[1] = fold_convert_loc (loc, utype, pmop[1]);
-		    }
-
-		  if (TREE_CODE (arg0) == NEGATE_EXPR)
-		    tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[0]);
-		  else if (TREE_CODE (arg0) == PLUS_EXPR)
-		    {
-		      if (pmop[0] != NULL && pmop[1] != NULL)
-			tem = fold_build2_loc (loc, PLUS_EXPR, utype,
-					       pmop[0], pmop[1]);
-		      else if (pmop[0] != NULL)
-			tem = pmop[0];
-		      else if (pmop[1] != NULL)
-			tem = pmop[1];
-		      else
-			return build_int_cst (type, 0);
-		    }
-		  else if (pmop[0] == NULL)
-		    tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[1]);
-		  else
-		    tem = fold_build2_loc (loc, MINUS_EXPR, utype,
-					   pmop[0], pmop[1]);
-		  /* TEM is now the new binary +, - or unary - replacement.  */
-		  tem = fold_build2_loc (loc, BIT_AND_EXPR, utype, tem,
-					 fold_convert_loc (loc, utype, arg1));
-		  return fold_convert_loc (loc, type, tem);
-		}
-	    }
-	}
-
       /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char.  */
       if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
 	  && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
@@ -11264,6 +11149,100 @@ fold_binary_loc (location_t loc, enum tree_code code, tree type,
     } /* switch (code) */
 }
 
+/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
+   ((A & N) + B) & M -> (A + B) & M
+   Similarly if (N & M) == 0,
+   ((A | N) + B) & M -> (A + B) & M
+   and for - instead of + (or unary - instead of +)
+   and/or ^ instead of |.
+   If B is constant and (B & M) == 0, fold into A & M.
+
+   This function is a helper for match.pd patterns.  Return non-NULL
+   type in which the simplified operation should be performed only
+   if any optimization is possible.
+
+   ARG1 is M above, ARG00 is left operand of +/-, if CODE00 is BIT_*_EXPR,
+   then ARG00{0,1} are operands of that bitop, otherwise CODE00 is ERROR_MARK.
+   Similarly for ARG01, CODE01 and ARG01{0,1}, just for the right operand of
+   +/-.  */
+tree
+fold_bit_and_mask (tree type, tree arg1, enum tree_code code,
+		   tree arg00, enum tree_code code00, tree arg000, tree arg001,
+		   tree arg01, enum tree_code code01, tree arg010, tree arg011,
+		   tree *pmop)
+{
+  gcc_assert (TREE_CODE (arg1) == INTEGER_CST);
+  gcc_assert (code == PLUS_EXPR || code == MINUS_EXPR || code == NEGATE_EXPR);
+  wi::tree_to_wide_ref cst1 = wi::to_wide (arg1);
+  if (~cst1 == 0
+      || (cst1 & (cst1 + 1)) != 0
+      || !INTEGRAL_TYPE_P (type)
+      || (!TYPE_OVERFLOW_WRAPS (type)
+	  && TREE_CODE (type) != INTEGER_TYPE)
+      || (wi::max_value (type) & cst1) != cst1)
+    return NULL_TREE;
+
+  enum tree_code codes[2] = { code00, code01 };
+  tree arg0xx[4] = { arg000, arg001, arg010, arg011 };
+  int which = 0;
+  wide_int cst0;
+
+  /* Now we know that arg0 is (C + D) or (C - D) or -C and
+     arg1 (M) is == (1LL << cst) - 1.
+     Store C into PMOP[0] and D into PMOP[1].  */
+  pmop[0] = arg00;
+  pmop[1] = arg01;
+  which = code != NEGATE_EXPR;
+
+  for (; which >= 0; which--)
+    switch (codes[which])
+      {
+      case BIT_AND_EXPR:
+      case BIT_IOR_EXPR:
+      case BIT_XOR_EXPR:
+	gcc_assert (TREE_CODE (arg0xx[2 * which + 1]) == INTEGER_CST);
+	cst0 = wi::to_wide (arg0xx[2 * which + 1]) & cst1;
+	if (codes[which] == BIT_AND_EXPR)
+	  {
+	    if (cst0 != cst1)
+	      break;
+	  }
+	else if (cst0 != 0)
+	  break;
+	/* If C or D is of the form (A & N) where
+	   (N & M) == M, or of the form (A | N) or
+	   (A ^ N) where (N & M) == 0, replace it with A.  */
+	pmop[which] = arg0xx[2 * which];
+	break;
+      case ERROR_MARK:
+	if (TREE_CODE (pmop[which]) != INTEGER_CST)
+	  break;
+	/* If C or D is a N where (N & M) == 0, it can be
+	   omitted (replaced with 0).  */
+	if ((code == PLUS_EXPR
+	     || (code == MINUS_EXPR && which == 0))
+	    && (cst1 & wi::to_wide (pmop[which])) == 0)
+	  pmop[which] = build_int_cst (type, 0);
+	/* Similarly, with C - N where (-N & M) == 0.  */
+	if (code == MINUS_EXPR
+	    && which == 1
+	    && (cst1 & -wi::to_wide (pmop[which])) == 0)
+	  pmop[which] = build_int_cst (type, 0);
+	break;
+      default:
+	gcc_unreachable ();
+      }
+
+  /* Only build anything new if we optimized one or both arguments above.  */
+  if (pmop[0] == arg00 && pmop[1] == arg01)
+    return NULL_TREE;
+
+  if (TYPE_OVERFLOW_WRAPS (type))
+    return type;
+  else
+    return unsigned_type_for (type);
+}
+
 /* Used by contains_label_[p1].  */
 
 struct contains_label_data
diff --git a/gcc/fold-const.h b/gcc/fold-const.h
index 337818a3319..c64b8d0ecf7 100644
--- a/gcc/fold-const.h
+++ b/gcc/fold-const.h
@@ -96,6 +96,9 @@ extern tree omit_two_operands_loc (location_t, tree, tree, tree, tree);
 extern tree invert_truthvalue_loc (location_t, tree);
 extern tree fold_unary_to_constant (enum tree_code, tree, tree);
 extern tree fold_binary_to_constant (enum tree_code, tree, tree, tree);
+extern tree fold_bit_and_mask (tree, tree, enum tree_code,
+			       tree, enum tree_code, tree, tree,
+			       tree, enum tree_code, tree, tree, tree *);
 extern tree fold_read_from_constant_string (tree);
 extern tree int_const_binop (enum tree_code, const_tree, const_tree);
 #define build_fold_addr_expr(T)\
diff --git a/gcc/match.pd b/gcc/match.pd
index c1e0963da9a..3b02db35a28 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -779,6 +779,60 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
    (bit_xor @0 @1)))
 #endif
 
+/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
+   ((A & N) + B) & M -> (A + B) & M
+   Similarly if (N & M) == 0,
+   ((A | N) + B) & M -> (A + B) & M
+   and for - instead of + (or unary - instead of +)
+   and/or ^ instead of |.
+   If B is constant and (B & M) == 0, fold into A & M.  */
+(for op (plus minus)
+ (for bitop (bit_and bit_ior bit_xor)
+  (simplify
+   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
+    (with
+     { tree pmop[2];
+       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
+				       @3, @4, @1, ERROR_MARK, NULL_TREE,
+				       NULL_TREE, pmop); }
+     (if (utype)
+      (convert (bit_and (op (convert:utype { pmop[0]; })
+			    (convert:utype { pmop[1]; }))
+			(convert:utype @2))))))
+  (simplify
+   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
+    (with
+     { tree pmop[2];
+       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
+				       NULL_TREE, NULL_TREE, @1, bitop, @3,
+				       @4, pmop); }
+     (if (utype)
+      (convert (bit_and (op (convert:utype { pmop[0]; })
+			    (convert:utype { pmop[1]; }))
+			(convert:utype @2)))))))
+ (simplify
+  (bit_and (op:s @0 @1) INTEGER_CST@2)
+   (with
+    { tree pmop[2];
+      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
+				      NULL_TREE, NULL_TREE, @1, ERROR_MARK,
+				      NULL_TREE, NULL_TREE, pmop); }
+    (if (utype)
+     (convert (bit_and (op (convert:utype { pmop[0]; })
+			   (convert:utype { pmop[1]; }))
+		       (convert:utype @2)))))))
+(for bitop (bit_and bit_ior bit_xor)
+ (simplify
+  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
+   (with
+    { tree pmop[2];
+      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
+				      bitop, @2, @3, NULL_TREE, ERROR_MARK,
+				      NULL_TREE, NULL_TREE, pmop); }
+    (if (utype)
+     (convert (bit_and (negate (convert:utype { pmop[0]; }))
+		       (convert:utype @1)))))))
+
 /* X % Y is smaller than Y.  */
 (for cmp (lt ge)
  (simplify
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index fd01045f3e2..4242c233961 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,10 @@
+2018-07-06  Jakub Jelinek
+
+	PR tree-optimization/86401
+	* gcc.dg/tree-ssa/pr86401-1.c: New test.
+	* gcc.dg/tree-ssa/pr86401-2.c: New test.
+	* c-c++-common/rotate-9.c: New test.
+
 2018-07-06  Peter Bergner
 
 	PR target/86324
diff --git a/gcc/testsuite/c-c++-common/rotate-9.c b/gcc/testsuite/c-c++-common/rotate-9.c
new file mode 100644
index 00000000000..b1a1d052e43
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/rotate-9.c
@@ -0,0 +1,19 @@
+/* PR tree-optimization/86401 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-ipa-icf -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump-times "r\[<>]\[<>]" 2 "optimized" } } */
+
+unsigned int
+f1 (unsigned int x, unsigned int s)
+{
+  unsigned int t = s % (__CHAR_BIT__ * __SIZEOF_INT__);
+  return (x << t) | (x >> (((__CHAR_BIT__ * __SIZEOF_INT__) - t) % (__CHAR_BIT__ * __SIZEOF_INT__)));
+}
+
+unsigned int
+f2 (unsigned int x, unsigned int s)
+{
+  int n = __CHAR_BIT__ * __SIZEOF_INT__;
+  unsigned int t = s % n;
+  return (x << t) | (x >> ((n - t) % n));
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr86401-1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr86401-1.c
new file mode 100644
index 00000000000..3bf56cc34d7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr86401-1.c
@@ -0,0 +1,48 @@
+/* PR tree-optimization/86401 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump-not " \\+ 64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64 - " "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\+ 4294967232" "optimized" } } */
+/* { dg-final { scan-tree-dump-not " & 319" "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\| 4294967168" "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\^ 4294966912" "optimized" } } */
+
+unsigned f1 (unsigned x) { unsigned m = 64; return (m + x) & (m - 1); }
+unsigned f2 (unsigned x) { return (64 + x) & 63; }
+unsigned f3 (unsigned x) { unsigned m = 64; return (x + m) & (m - 1); }
+unsigned f4 (unsigned x) { return (x + 64) & 63; }
+unsigned f5 (unsigned x) { unsigned m = 64; return (m - x) & (m - 1); }
+unsigned f6 (unsigned x) { return (64 - x) & 63; }
+unsigned f7 (unsigned x) { unsigned m = 64; return (x - m) & (m - 1); }
+unsigned f8 (unsigned x) { return (x - 64) & 63; }
+unsigned f9 (unsigned x, unsigned y) { unsigned m = 64, n = 256 | (m - 1); return ((x & n) + y) & (m - 1); }
+unsigned f10 (unsigned x, unsigned y) { return ((x & 319) + y) & 63; }
+unsigned f11 (unsigned x, unsigned y) { unsigned m = 64, n = -128; return ((x | n) + y) & (m - 1); }
+unsigned f12 (unsigned x, unsigned y) { return ((x | -128) + y) & 63; }
+unsigned f13 (unsigned x, unsigned y) { unsigned m = 64, n = -384; return ((x ^ n) + y) & (m - 1); }
+unsigned f14 (unsigned x, unsigned y) { return ((x ^ -384) + y) & 63; }
+unsigned f15 (unsigned x, unsigned y) { unsigned m = 64, n = 256 | (m - 1); return (y + (x & n)) & (m - 1); }
+unsigned f16 (unsigned x, unsigned y) { return (y + (x & 319)) & 63; }
+unsigned f17 (unsigned x, unsigned y) { unsigned m = 64, n = -128; return (y + (x | n)) & (m - 1); }
+unsigned f18 (unsigned x, unsigned y) { return (y + (x | -128)) & 63; }
+unsigned f19 (unsigned x, unsigned y) { unsigned m = 64, n = -384; return (y + (x ^ n)) & (m - 1); }
+unsigned f20 (unsigned x, unsigned y) { return (y + (x ^ -384)) & 63; }
+unsigned f21 (unsigned x, unsigned y) { unsigned m = 64, n = 256 | (m - 1); return ((x & n) - y) & (m - 1); }
+unsigned f22 (unsigned x, unsigned y) { return ((x & 319) - y) & 63; }
+unsigned f23 (unsigned x, unsigned y) { unsigned m = 64, n = -128; return ((x | n) - y) & (m - 1); }
+unsigned f24 (unsigned x, unsigned y) { return ((x | -128) - y) & 63; }
+unsigned f25 (unsigned x, unsigned y) { unsigned m = 64, n = -384; return ((x ^ n) - y) & (m - 1); }
+unsigned f26 (unsigned x, unsigned y) { return ((x ^ -384) - y) & 63; }
+unsigned f27 (unsigned x, unsigned y) { unsigned m = 64, n = 256 | (m - 1); return (y - (x & n)) & (m - 1); }
+unsigned f28 (unsigned x, unsigned y) { return (y - (x & 319)) & 63; }
+unsigned f29 (unsigned x, unsigned y) { unsigned m = 64, n = -128; return (y - (x | n)) & (m - 1); }
+unsigned f30 (unsigned x, unsigned y) { return (y - (x | -128)) & 63; }
+unsigned f31 (unsigned x, unsigned y) { unsigned m = 64, n = -384; return (y - (x ^ n)) & (m - 1); }
+unsigned f32 (unsigned x, unsigned y) { return (y - (x ^ -384)) & 63; }
+unsigned f33 (unsigned x) { unsigned m = 64, n = 256 | (m - 1); return (-(x & n)) & (m - 1); }
+unsigned f34 (unsigned x) { return (-(x & 319)) & 63; }
+unsigned f35 (unsigned x) { unsigned m = 64, n = -128; return (-(x | n)) & (m - 1); }
+unsigned f36 (unsigned x) { return (-(x | -128)) & 63; }
+unsigned f37 (unsigned x) { unsigned m = 64, n = -384; return (-(x ^ n)) & (m - 1); }
+unsigned f38 (unsigned x) { return (-(x ^ -384)) & 63; }
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr86401-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr86401-2.c
new file mode 100644
index 00000000000..0375d8a3873
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr86401-2.c
@@ -0,0 +1,48 @@
+/* PR tree-optimization/86401 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump-not " \\+ 64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "64 - " "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\+ -64" "optimized" } } */
+/* { dg-final { scan-tree-dump-not " & 319" "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\| -128" "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\^ -384" "optimized" } } */
+
+int f1 (int x) { int m = 64; return (m + x) & (m - 1); }
+int f2 (int x) { return (64 + x) & 63; }
+int f3 (int x) { int m = 64; return (x + m) & (m - 1); }
+int f4 (int x) { return (x + 64) & 63; }
+int f5 (int x) { int m = 64; return (m - x) & (m - 1); }
+int f6 (int x) { return (64 - x) & 63; }
+int f7 (int x) { int m = 64; return (x - m) & (m - 1); }
+int f8 (int x) { return (x - 64) & 63; }
+int f9 (int x, int y) { int m = 64, n = 256 | (m - 1); return ((x & n) + y) & (m - 1); }
+int f10 (int x, int y) { return ((x & 319) + y) & 63; }
+int f11 (int x, int y) { int m = 64, n = -128; return ((x | n) + y) & (m - 1); }
+int f12 (int x, int y) { return ((x | -128) + y) & 63; }
+int f13 (int x, int y) { int m = 64, n = -384; return ((x ^ n) + y) & (m - 1); }
+int f14 (int x, int y) { return ((x ^ -384) + y) & 63; }
+int f15 (int x, int y) { int m = 64, n = 256 | (m - 1); return (y + (x & n)) & (m - 1); }
+int f16 (int x, int y) { return (y + (x & 319)) & 63; }
+int f17 (int x, int y) { int m = 64, n = -128; return (y + (x | n)) & (m - 1); }
+int f18 (int x, int y) { return (y + (x | -128)) & 63; }
+int f19 (int x, int y) { int m = 64, n = -384; return (y + (x ^ n)) & (m - 1); }
+int f20 (int x, int y) { return (y + (x ^ -384)) & 63; }
+int f21 (int x, int y) { int m = 64, n = 256 | (m - 1); return ((x & n) - y) & (m - 1); }
+int f22 (int x, int y) { return ((x & 319) - y) & 63; }
+int f23 (int x, int y) { int m = 64, n = -128; return ((x | n) - y) & (m - 1); }
+int f24 (int x, int y) { return ((x | -128) - y) & 63; }
+int f25 (int x, int y) { int m = 64, n = -384; return ((x ^ n) - y) & (m - 1); }
+int f26 (int x, int y) { return ((x ^ -384) - y) & 63; }
+int f27 (int x, int y) { int m = 64, n = 256 | (m - 1); return (y - (x & n)) & (m - 1); }
+int f28 (int x, int y) { return (y - (x & 319)) & 63; }
+int f29 (int x, int y) { int m = 64, n = -128; return (y - (x | n)) & (m - 1); }
+int f30 (int x, int y) { return (y - (x | -128)) & 63; }
+int f31 (int x, int y) { int m = 64, n = -384; return (y - (x ^ n)) & (m - 1); }
+int f32 (int x, int y) { return (y - (x ^ -384)) & 63; }
+int f33 (int x) { int m = 64, n = 256 | (m - 1); return (-(x & n)) & (m - 1); }
+int f34 (int x) { return (-(x & 319)) & 63; }
+int f35 (int x) { int m = 64, n = -128; return (-(x | n)) & (m - 1); }
+int f36 (int x) { return (-(x | -128)) & 63; }
+int f37 (int x) { int m = 64, n = -384; return (-(x ^ n)) & (m - 1); }
+int f38 (int x) { return (-(x ^ -384)) & 63; }
-- 
2.11.4.GIT
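
Illustration (not part of the patch): the sketch below uses made-up function
names to show, in plain C, the ((A & N) + B) & M -> (A + B) & M family of
simplifications that fold_bit_and_mask and the new match.pd patterns are meant
to perform, plus the rotate idiom from c-c++-common/rotate-9.c that becomes
recognizable once the mask arithmetic is cleaned up.  Assuming -O2 and a
32-bit unsigned int, the inner bit operations are expected to disappear from
the optimized dump, which is what the new tests scan for.

/* Illustration only; function names are hypothetical, not from the patch.  */

unsigned
masked_add (unsigned a, unsigned b)
{
  /* 319 == 256 | 63 and (319 & 63) == 63, so the inner & 319 cannot change
     the low six bits; expected to simplify to (a + b) & 63.  */
  return ((a & 319) + b) & 63;
}

unsigned
masked_sub (unsigned a, unsigned b)
{
  /* (-128 & 63) == 0, so the | -128 only sets bits the outer mask discards;
     expected to simplify to (a - b) & 63.  */
  return ((a | -128) - b) & 63;
}

unsigned
rotate_left (unsigned x, unsigned s)
{
  /* Once (n - t) % n is simplified under the implied mask, the whole
     expression matches GCC's rotate pattern (compare f2 in rotate-9.c).  */
  unsigned n = __CHAR_BIT__ * __SIZEOF_INT__;
  unsigned t = s % n;
  return (x << t) | (x >> ((n - t) % n));
}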