// [aarch64][libstdc++] Use __ARM_BIG_ENDIAN instead of __AARCH64EB__ in opt_random.h
// official-gcc.git / libstdc++-v3 / config / cpu / aarch64 / opt / ext / opt_random.h
// blob 7c824809b78073a435a772e7336750d39f79ac70
// Optimizations for random number extensions, aarch64 version -*- C++ -*-

// Copyright (C) 2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/random.tcc
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{ext/random}
 */
30 #ifndef _EXT_OPT_RANDOM_H
31 #define _EXT_OPT_RANDOM_H 1
33 #pragma GCC system_header
35 #ifdef __ARM_NEON
37 #ifdef __ARM_BIG_ENDIAN
38 # define __VEXT(_A,_B,_C) __builtin_shuffle (_A, _B, (__Uint8x16_t) \
39 {16-_C, 17-_C, 18-_C, 19-_C, 20-_C, 21-_C, 22-_C, 23-_C, \
40 24-_C, 25-_C, 26-_C, 27-_C, 28-_C, 29-_C, 30-_C, 31-_C})
41 #else
42 # define __VEXT(_A,_B,_C) __builtin_shuffle (_B, _A, (__Uint8x16_t) \
43 {_C, _C+1, _C+2, _C+3, _C+4, _C+5, _C+6, _C+7, \
44 _C+8, _C+9, _C+10, _C+11, _C+12, _C+13, _C+14, _C+15})
45 #endif
47 namespace __gnu_cxx _GLIBCXX_VISIBILITY (default)
49 _GLIBCXX_BEGIN_NAMESPACE_VERSION
51 namespace {
52 // Logical Shift right 128-bits by c * 8 bits
54 __extension__ extern __inline __Uint32x4_t
55 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
56 __aarch64_lsr_128 (__Uint8x16_t __a, __const int __c)
58 const __Uint8x16_t __zero = {0, 0, 0, 0, 0, 0, 0, 0,
59 0, 0, 0, 0, 0, 0, 0, 0};
61 return (__Uint32x4_t) __VEXT (__zero, __a, __c);
64 // Logical Shift left 128-bits by c * 8 bits
66 __extension__ extern __inline __Uint32x4_t
67 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
68 __aarch64_lsl_128 (__Uint8x16_t __a, __const int __c)
70 const __Uint8x16_t __zero = {0, 0, 0, 0, 0, 0, 0, 0,
71 0, 0, 0, 0, 0, 0, 0, 0};
73 return (__Uint32x4_t) __VEXT (__a, __zero, 16 - __c);
76 template<size_t __sl1, size_t __sl2, size_t __sr1, size_t __sr2>
77 inline __Uint32x4_t __aarch64_recursion (__Uint32x4_t __a,
78 __Uint32x4_t __b,
79 __Uint32x4_t __c,
80 __Uint32x4_t __d,
81 __Uint32x4_t __e)
83 __Uint32x4_t __y = (__b >> __sr1);
84 __Uint32x4_t __z = __aarch64_lsr_128 ((__Uint8x16_t) __c, __sr2);
86 __Uint32x4_t __v = __d << __sl1;
88 __z = __z ^ __a;
89 __z = __z ^ __v;
91 __Uint32x4_t __x = __aarch64_lsl_128 ((__Uint8x16_t) __a, __sl2);
93 __y = __y & __e;
94 __z = __z ^ __x;
95 return __z ^ __y;
99 #define _GLIBCXX_OPT_HAVE_RANDOM_SFMT_GEN_READ 1
100 template<typename _UIntType, size_t __m,
101 size_t __pos1, size_t __sl1, size_t __sl2,
102 size_t __sr1, size_t __sr2,
103 uint32_t __msk1, uint32_t __msk2,
104 uint32_t __msk3, uint32_t __msk4,
105 uint32_t __parity1, uint32_t __parity2,
106 uint32_t __parity3, uint32_t __parity4>
107 void simd_fast_mersenne_twister_engine<_UIntType, __m,
108 __pos1, __sl1, __sl2, __sr1, __sr2,
109 __msk1, __msk2, __msk3, __msk4,
110 __parity1, __parity2, __parity3,
111 __parity4>::
112 _M_gen_rand (void)
114 __Uint32x4_t __r1 = _M_state[_M_nstate - 2];
115 __Uint32x4_t __r2 = _M_state[_M_nstate - 1];
117 __Uint32x4_t __aData = {__msk1, __msk2, __msk3, __msk4};
119 size_t __i;
120 for (__i = 0; __i < _M_nstate - __pos1; ++__i)
122 __Uint32x4_t __r = __aarch64_recursion<__sl1, __sl2, __sr1, __sr2>
123 (_M_state[__i], _M_state[__i + __pos1], __r1, __r2, __aData);
125 _M_state[__i] = __r;
127 __r1 = __r2;
128 __r2 = __r;
130 for (; __i < _M_nstate; ++__i)
132 __Uint32x4_t __r = __aarch64_recursion<__sl1, __sl2, __sr1, __sr2>
133 (_M_state[__i], _M_state[__i + __pos1 - _M_nstate], __r1, __r2,
134 __aData);
136 _M_state[__i] = __r;
138 __r1 = __r2;
139 __r2 = __r;
142 _M_pos = 0;
146 #define _GLIBCXX_OPT_HAVE_RANDOM_SFMT_OPERATOREQUAL 1
147 template<typename _UIntType, size_t __m,
148 size_t __pos1, size_t __sl1, size_t __sl2,
149 size_t __sr1, size_t __sr2,
150 uint32_t __msk1, uint32_t __msk2,
151 uint32_t __msk3, uint32_t __msk4,
152 uint32_t __parity1, uint32_t __parity2,
153 uint32_t __parity3, uint32_t __parity4>
154 bool
155 operator==(const __gnu_cxx::simd_fast_mersenne_twister_engine<_UIntType,
156 __m, __pos1, __sl1, __sl2, __sr1, __sr2,
157 __msk1, __msk2, __msk3, __msk4,
158 __parity1, __parity2, __parity3, __parity4>& __lhs,
159 const __gnu_cxx::simd_fast_mersenne_twister_engine<_UIntType,
160 __m, __pos1, __sl1, __sl2, __sr1, __sr2,
161 __msk1, __msk2, __msk3, __msk4,
162 __parity1, __parity2, __parity3, __parity4>& __rhs)
164 if (__lhs._M_pos != __rhs._M_pos)
165 return false;
167 __Uint32x4_t __res = __lhs._M_state[0] ^ __rhs._M_state[0];
169 for (size_t __i = 1; __i < __lhs._M_nstate; ++__i)
170 __res |= __lhs._M_state[__i] ^ __rhs._M_state[__i];
172 return (__int128) __res == 0;
175 _GLIBCXX_END_NAMESPACE_VERSION
176 } // namespace
178 #endif // __ARM_NEON
180 #endif // _EXT_OPT_RANDOM_H