From a68b3ea1194ee1fdc86dd47909bb8e2eb7bf0857 Mon Sep 17 00:00:00 2001 From: edlinger Date: Mon, 17 Oct 2016 17:46:59 +0000 Subject: [PATCH] 2016-10-17 Bernd Edlinger PR target/77308 * config/arm/arm.c (arm_emit_coreregs_64bit_shift): Clear the result register explicitly. * config/arm/arm.md (ashldi3, ashrdi3, lshrdi3): Don't FAIL if optimizing for size. testsuite: 2016-10-17 Bernd Edlinger PR target/77308 * gcc.target/arm/pr77308.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@241273 138bc75d-0d04-0410-961f-82ee72b054a4 --- gcc/ChangeLog | 8 ++ gcc/config/arm/arm.c | 7 +- gcc/config/arm/arm.md | 12 --- gcc/testsuite/ChangeLog | 5 + gcc/testsuite/gcc.target/arm/pr77308.c | 164 +++++++++++++++++++++++++++++++++ 5 files changed, 182 insertions(+), 14 deletions(-) create mode 100644 gcc/testsuite/gcc.target/arm/pr77308.c diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 92d06ce2766..3cad7bb512c 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,11 @@ +2016-10-17 Bernd Edlinger + + PR target/77308 + * config/arm/arm.c (arm_emit_coreregs_64bit_shift): Clear the result + register explicitly. + * config/arm/arm.md (ashldi3, ashrdi3, lshrdi3): Don't FAIL if + optimizing for size. + 2016-10-17 Kyrylo Tkachov * config/aarch64/aarch64.c: Delete inclusion of diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index 39e3aa85c0c..a7588393a01 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -29216,6 +29216,10 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in, /* Shifts by a constant less than 32. */ rtx reverse_amount = GEN_INT (32 - INTVAL (amount)); + /* Clearing the out register in DImode first avoids lots + of spilling and results in less stack usage. + Later this redundant insn is completely removed. 
*/ + emit_insn (SET (out, const0_rtx)); emit_insn (SET (out_down, LSHIFT (code, in_down, amount))); emit_insn (SET (out_down, ORR (REV_LSHIFT (code, in_up, reverse_amount), @@ -29227,12 +29231,11 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in, /* Shifts by a constant greater than 31. */ rtx adj_amount = GEN_INT (INTVAL (amount) - 32); + emit_insn (SET (out, const0_rtx)); emit_insn (SET (out_down, SHIFT (code, in_up, adj_amount))); if (code == ASHIFTRT) emit_insn (gen_ashrsi3 (out_up, in_up, GEN_INT (31))); - else - emit_insn (SET (out_up, const0_rtx)); } } else diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md index 374b54e3c0e..8393f65bcf4 100644 --- a/gcc/config/arm/arm.md +++ b/gcc/config/arm/arm.md @@ -4016,10 +4016,6 @@ cheaper to have the alternate code being generated than moving values to iwmmxt regs and back. */ - /* If we're optimizing for size, we prefer the libgcc calls. */ - if (optimize_function_for_size_p (cfun)) - FAIL; - /* Expand operation using core-registers. 'FAIL' would achieve the same thing, but this is a bit smarter. */ scratch1 = gen_reg_rtx (SImode); @@ -4089,10 +4085,6 @@ cheaper to have the alternate code being generated than moving values to iwmmxt regs and back. */ - /* If we're optimizing for size, we prefer the libgcc calls. */ - if (optimize_function_for_size_p (cfun)) - FAIL; - /* Expand operation using core-registers. 'FAIL' would achieve the same thing, but this is a bit smarter. */ scratch1 = gen_reg_rtx (SImode); @@ -4159,10 +4151,6 @@ cheaper to have the alternate code being generated than moving values to iwmmxt regs and back. */ - /* If we're optimizing for size, we prefer the libgcc calls. */ - if (optimize_function_for_size_p (cfun)) - FAIL; - /* Expand operation using core-registers. 'FAIL' would achieve the same thing, but this is a bit smarter. 
*/ scratch1 = gen_reg_rtx (SImode); diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 4c5b0d4180f..595bd4c8c82 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,8 @@ +2016-10-17 Bernd Edlinger + + PR target/77308 + * gcc.target/arm/pr77308.c: New test. + 2016-10-17 David Edelsohn * lib/target-supports.exp diff --git a/gcc/testsuite/gcc.target/arm/pr77308.c b/gcc/testsuite/gcc.target/arm/pr77308.c new file mode 100644 index 00000000000..01da80c8123 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/pr77308.c @@ -0,0 +1,164 @@ +/* { dg-do compile } */ +/* { dg-options "-Os -Wstack-usage=2500" } */ + +#define SHA_LONG64 unsigned long long +#define U64(C) C##ULL + +#define SHA_LBLOCK 16 +#define SHA512_CBLOCK (SHA_LBLOCK*8) + +typedef struct SHA512state_st { + SHA_LONG64 h[8]; + SHA_LONG64 Nl, Nh; + union { + SHA_LONG64 d[SHA_LBLOCK]; + unsigned char p[SHA512_CBLOCK]; + } u; + unsigned int num, md_len; +} SHA512_CTX; + +static const SHA_LONG64 K512[80] = { + U64(0x428a2f98d728ae22), U64(0x7137449123ef65cd), + U64(0xb5c0fbcfec4d3b2f), U64(0xe9b5dba58189dbbc), + U64(0x3956c25bf348b538), U64(0x59f111f1b605d019), + U64(0x923f82a4af194f9b), U64(0xab1c5ed5da6d8118), + U64(0xd807aa98a3030242), U64(0x12835b0145706fbe), + U64(0x243185be4ee4b28c), U64(0x550c7dc3d5ffb4e2), + U64(0x72be5d74f27b896f), U64(0x80deb1fe3b1696b1), + U64(0x9bdc06a725c71235), U64(0xc19bf174cf692694), + U64(0xe49b69c19ef14ad2), U64(0xefbe4786384f25e3), + U64(0x0fc19dc68b8cd5b5), U64(0x240ca1cc77ac9c65), + U64(0x2de92c6f592b0275), U64(0x4a7484aa6ea6e483), + U64(0x5cb0a9dcbd41fbd4), U64(0x76f988da831153b5), + U64(0x983e5152ee66dfab), U64(0xa831c66d2db43210), + U64(0xb00327c898fb213f), U64(0xbf597fc7beef0ee4), + U64(0xc6e00bf33da88fc2), U64(0xd5a79147930aa725), + U64(0x06ca6351e003826f), U64(0x142929670a0e6e70), + U64(0x27b70a8546d22ffc), U64(0x2e1b21385c26c926), + U64(0x4d2c6dfc5ac42aed), U64(0x53380d139d95b3df), + U64(0x650a73548baf63de), U64(0x766a0abb3c77b2a8),
+ U64(0x81c2c92e47edaee6), U64(0x92722c851482353b), + U64(0xa2bfe8a14cf10364), U64(0xa81a664bbc423001), + U64(0xc24b8b70d0f89791), U64(0xc76c51a30654be30), + U64(0xd192e819d6ef5218), U64(0xd69906245565a910), + U64(0xf40e35855771202a), U64(0x106aa07032bbd1b8), + U64(0x19a4c116b8d2d0c8), U64(0x1e376c085141ab53), + U64(0x2748774cdf8eeb99), U64(0x34b0bcb5e19b48a8), + U64(0x391c0cb3c5c95a63), U64(0x4ed8aa4ae3418acb), + U64(0x5b9cca4f7763e373), U64(0x682e6ff3d6b2b8a3), + U64(0x748f82ee5defb2fc), U64(0x78a5636f43172f60), + U64(0x84c87814a1f0ab72), U64(0x8cc702081a6439ec), + U64(0x90befffa23631e28), U64(0xa4506cebde82bde9), + U64(0xbef9a3f7b2c67915), U64(0xc67178f2e372532b), + U64(0xca273eceea26619c), U64(0xd186b8c721c0c207), + U64(0xeada7dd6cde0eb1e), U64(0xf57d4f7fee6ed178), + U64(0x06f067aa72176fba), U64(0x0a637dc5a2c898a6), + U64(0x113f9804bef90dae), U64(0x1b710b35131c471b), + U64(0x28db77f523047d84), U64(0x32caab7b40c72493), + U64(0x3c9ebe0a15c9bebc), U64(0x431d67c49c100d4c), + U64(0x4cc5d4becb3e42b6), U64(0x597f299cfc657e2a), + U64(0x5fcb6fab3ad6faec), U64(0x6c44198c4a475817) +}; + +#define B(x,j) (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8)) +#define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7)) +#define ROTR(x,s) (((x)>>s) | (x)<<(64-s)) +#define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) +#define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) +#define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) +#define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) +#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z))) +#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) + +#define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \ + T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; \ + h = Sigma0(a) + Maj(a,b,c); \ + d += T1; h += T1; } while (0) +#define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do { \ + s0 = X[(j+1)&0x0f]; s0 = sigma0(s0); \ + s1 = X[(j+14)&0x0f]; s1 = sigma1(s1); \ + T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f]; \ 
+ ROUND_00_15(i+j,a,b,c,d,e,f,g,h); } while (0) +void sha512_block_data_order(SHA512_CTX *ctx, const void *in, + unsigned int num) +{ + const SHA_LONG64 *W = in; + SHA_LONG64 a, b, c, d, e, f, g, h, s0, s1, T1; + SHA_LONG64 X[16]; + int i; + + while (num--) { + + a = ctx->h[0]; + b = ctx->h[1]; + c = ctx->h[2]; + d = ctx->h[3]; + e = ctx->h[4]; + f = ctx->h[5]; + g = ctx->h[6]; + h = ctx->h[7]; + + T1 = X[0] = PULL64(W[0]); + ROUND_00_15(0, a, b, c, d, e, f, g, h); + T1 = X[1] = PULL64(W[1]); + ROUND_00_15(1, h, a, b, c, d, e, f, g); + T1 = X[2] = PULL64(W[2]); + ROUND_00_15(2, g, h, a, b, c, d, e, f); + T1 = X[3] = PULL64(W[3]); + ROUND_00_15(3, f, g, h, a, b, c, d, e); + T1 = X[4] = PULL64(W[4]); + ROUND_00_15(4, e, f, g, h, a, b, c, d); + T1 = X[5] = PULL64(W[5]); + ROUND_00_15(5, d, e, f, g, h, a, b, c); + T1 = X[6] = PULL64(W[6]); + ROUND_00_15(6, c, d, e, f, g, h, a, b); + T1 = X[7] = PULL64(W[7]); + ROUND_00_15(7, b, c, d, e, f, g, h, a); + T1 = X[8] = PULL64(W[8]); + ROUND_00_15(8, a, b, c, d, e, f, g, h); + T1 = X[9] = PULL64(W[9]); + ROUND_00_15(9, h, a, b, c, d, e, f, g); + T1 = X[10] = PULL64(W[10]); + ROUND_00_15(10, g, h, a, b, c, d, e, f); + T1 = X[11] = PULL64(W[11]); + ROUND_00_15(11, f, g, h, a, b, c, d, e); + T1 = X[12] = PULL64(W[12]); + ROUND_00_15(12, e, f, g, h, a, b, c, d); + T1 = X[13] = PULL64(W[13]); + ROUND_00_15(13, d, e, f, g, h, a, b, c); + T1 = X[14] = PULL64(W[14]); + ROUND_00_15(14, c, d, e, f, g, h, a, b); + T1 = X[15] = PULL64(W[15]); + ROUND_00_15(15, b, c, d, e, f, g, h, a); + + for (i = 16; i < 80; i += 16) { + ROUND_16_80(i, 0, a, b, c, d, e, f, g, h, X); + ROUND_16_80(i, 1, h, a, b, c, d, e, f, g, X); + ROUND_16_80(i, 2, g, h, a, b, c, d, e, f, X); + ROUND_16_80(i, 3, f, g, h, a, b, c, d, e, X); + ROUND_16_80(i, 4, e, f, g, h, a, b, c, d, X); + ROUND_16_80(i, 5, d, e, f, g, h, a, b, c, X); + ROUND_16_80(i, 6, c, d, e, f, g, h, a, b, X); + ROUND_16_80(i, 7, b, c, d, e, f, g, h, a, X); + ROUND_16_80(i, 8, a, b, c, d, e, f, g, 
h, X); + ROUND_16_80(i, 9, h, a, b, c, d, e, f, g, X); + ROUND_16_80(i, 10, g, h, a, b, c, d, e, f, X); + ROUND_16_80(i, 11, f, g, h, a, b, c, d, e, X); + ROUND_16_80(i, 12, e, f, g, h, a, b, c, d, X); + ROUND_16_80(i, 13, d, e, f, g, h, a, b, c, X); + ROUND_16_80(i, 14, c, d, e, f, g, h, a, b, X); + ROUND_16_80(i, 15, b, c, d, e, f, g, h, a, X); + } + + ctx->h[0] += a; + ctx->h[1] += b; + ctx->h[2] += c; + ctx->h[3] += d; + ctx->h[4] += e; + ctx->h[5] += f; + ctx->h[6] += g; + ctx->h[7] += h; + + W += SHA_LBLOCK; + } +} -- 2.11.4.GIT