1 // The template and inlines for the -*- C++ -*- internal _Array helper class.
3 // Copyright (C) 1997-2024 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
/** @file bits/valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{valarray}
 */
30 // Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>
32 #ifndef _VALARRAY_ARRAY_H
33 #define _VALARRAY_ARRAY_H 1
35 #pragma GCC system_header
37 #include <bits/c++config.h>
38 #include <bits/cpp_type_traits.h>
42 namespace std
_GLIBCXX_VISIBILITY(default)
44 _GLIBCXX_BEGIN_NAMESPACE_VERSION
47 // Helper functions on raw pointers
50 // We get memory the old fashioned way
51 template<typename _Tp
>
53 __valarray_get_storage(size_t) __attribute__((__malloc__
));
55 template<typename _Tp
>
57 __valarray_get_storage(size_t __n
)
58 { return static_cast<_Tp
*>(operator new(__n
* sizeof(_Tp
))); }
60 // Return memory to the system
62 __valarray_release_memory(void* __p
)
63 { operator delete(__p
); }
65 // Turn raw-memory into an array of _Tp filled with _Tp().
66 // This is used in `valarray<T> v(n);` and in `valarray<T>::shift(n)`.
67 template<typename _Tp
>
69 __valarray_default_construct(_Tp
* __b
, _Tp
* __e
)
71 if _GLIBCXX17_CONSTEXPR (__is_trivial(_Tp
))
72 __builtin_memset(__b
, 0, (__e
- __b
) * sizeof(_Tp
));
75 ::new(static_cast<void*>(__b
++)) _Tp();
78 // Turn a raw-memory into an array of _Tp filled with __t
79 // This is the required in valarray<T> v(n, t). Also
80 // used in valarray<>::resize().
81 template<typename _Tp
>
83 __valarray_fill_construct(_Tp
* __b
, _Tp
* __e
, const _Tp __t
)
86 ::new(static_cast<void*>(__b
++)) _Tp(__t
);
89 // copy-construct raw array [__o, *) from plain array [__b, __e)
90 template<typename _Tp
>
92 __valarray_copy_construct(const _Tp
* __b
, const _Tp
* __e
,
93 _Tp
* __restrict__ __o
)
95 if _GLIBCXX17_CONSTEXPR (__is_trivial(_Tp
))
98 __builtin_memcpy(__o
, __b
, (__e
- __b
) * sizeof(_Tp
));
102 ::new(static_cast<void*>(__o
++)) _Tp(*__b
++);
105 // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
106 template<typename _Tp
>
108 __valarray_copy_construct (const _Tp
* __restrict__ __a
, size_t __n
,
109 size_t __s
, _Tp
* __restrict__ __o
)
111 if _GLIBCXX17_CONSTEXPR (__is_trivial(_Tp
))
120 new(__o
++) _Tp(*__a
);
125 // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
126 template<typename _Tp
>
128 __valarray_copy_construct (const _Tp
* __restrict__ __a
,
129 const size_t* __restrict__ __i
,
130 _Tp
* __restrict__ __o
, size_t __n
)
132 if (__is_trivial(_Tp
))
134 *__o
++ = __a
[*__i
++];
137 new (__o
++) _Tp(__a
[*__i
++]);
140 // Do the necessary cleanup when we're done with arrays.
141 template<typename _Tp
>
143 __valarray_destroy_elements(_Tp
* __b
, _Tp
* __e
)
145 if (!__is_trivial(_Tp
))
153 // Fill a plain array __a[<__n>] with __t
154 template<typename _Tp
>
156 __valarray_fill(_Tp
* __restrict__ __a
, size_t __n
, const _Tp
& __t
)
162 // fill strided array __a[<__n-1 : __s>] with __t
163 template<typename _Tp
>
165 __valarray_fill(_Tp
* __restrict__ __a
, size_t __n
,
166 size_t __s
, const _Tp
& __t
)
168 for (size_t __i
= 0; __i
< __n
; ++__i
, __a
+= __s
)
172 // fill indirect array __a[__i[<__n>]] with __i
173 template<typename _Tp
>
175 __valarray_fill(_Tp
* __restrict__ __a
, const size_t* __restrict__ __i
,
176 size_t __n
, const _Tp
& __t
)
178 for (size_t __j
= 0; __j
< __n
; ++__j
, ++__i
)
182 // copy plain array __a[<__n>] in __b[<__n>]
183 // For non-fundamental types, it is wrong to say 'memcpy()'
184 template<typename _Tp
, bool>
188 _S_do_it(const _Tp
* __restrict__ __a
, size_t __n
, _Tp
* __restrict__ __b
)
195 template<typename _Tp
>
196 struct _Array_copier
<_Tp
, true>
199 _S_do_it(const _Tp
* __restrict__ __a
, size_t __n
, _Tp
* __restrict__ __b
)
202 __builtin_memcpy(__b
, __a
, __n
* sizeof (_Tp
));
206 // Copy a plain array __a[<__n>] into a play array __b[<>]
207 template<typename _Tp
>
209 __valarray_copy(const _Tp
* __restrict__ __a
, size_t __n
,
210 _Tp
* __restrict__ __b
)
212 _Array_copier
<_Tp
, __is_trivial(_Tp
)>::_S_do_it(__a
, __n
, __b
);
215 // Copy strided array __a[<__n : __s>] in plain __b[<__n>]
216 template<typename _Tp
>
218 __valarray_copy(const _Tp
* __restrict__ __a
, size_t __n
, size_t __s
,
219 _Tp
* __restrict__ __b
)
221 for (size_t __i
= 0; __i
< __n
; ++__i
, ++__b
, __a
+= __s
)
225 // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
226 template<typename _Tp
>
228 __valarray_copy(const _Tp
* __restrict__ __a
, _Tp
* __restrict__ __b
,
229 size_t __n
, size_t __s
)
231 for (size_t __i
= 0; __i
< __n
; ++__i
, ++__a
, __b
+= __s
)
235 // Copy strided array __src[<__n : __s1>] into another
236 // strided array __dst[< : __s2>]. Their sizes must match.
237 template<typename _Tp
>
239 __valarray_copy(const _Tp
* __restrict__ __src
, size_t __n
, size_t __s1
,
240 _Tp
* __restrict__ __dst
, size_t __s2
)
242 for (size_t __i
= 0; __i
< __n
; ++__i
)
243 __dst
[__i
* __s2
] = __src
[__i
* __s1
];
246 // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]
247 template<typename _Tp
>
249 __valarray_copy(const _Tp
* __restrict__ __a
,
250 const size_t* __restrict__ __i
,
251 _Tp
* __restrict__ __b
, size_t __n
)
253 for (size_t __j
= 0; __j
< __n
; ++__j
, ++__b
, ++__i
)
257 // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]
258 template<typename _Tp
>
260 __valarray_copy(const _Tp
* __restrict__ __a
, size_t __n
,
261 _Tp
* __restrict__ __b
, const size_t* __restrict__ __i
)
263 for (size_t __j
= 0; __j
< __n
; ++__j
, ++__a
, ++__i
)
267 // Copy the __n first elements of an indexed array __src[<__i>] into
268 // another indexed array __dst[<__j>].
269 template<typename _Tp
>
271 __valarray_copy(const _Tp
* __restrict__ __src
, size_t __n
,
272 const size_t* __restrict__ __i
,
273 _Tp
* __restrict__ __dst
, const size_t* __restrict__ __j
)
275 for (size_t __k
= 0; __k
< __n
; ++__k
)
276 __dst
[*__j
++] = __src
[*__i
++];
280 // Compute the sum of elements in range [__f, __l) which must not be empty.
281 // This is a naive algorithm. It suffers from cancelling.
282 // In the future try to specialize for _Tp = float, double, long double
283 // using a more accurate algorithm.
285 template<typename _Tp
>
287 __valarray_sum(const _Tp
* __f
, const _Tp
* __l
)
295 // Compute the min/max of an array-expression
296 template<typename _Ta
>
297 inline typename
_Ta::value_type
298 __valarray_min(const _Ta
& __a
)
300 size_t __s
= __a
.size();
301 typedef typename
_Ta::value_type _Value_type
;
302 _Value_type __r
= __s
== 0 ? _Value_type() : __a
[0];
303 for (size_t __i
= 1; __i
< __s
; ++__i
)
305 _Value_type __t
= __a
[__i
];
312 template<typename _Ta
>
313 inline typename
_Ta::value_type
314 __valarray_max(const _Ta
& __a
)
316 size_t __s
= __a
.size();
317 typedef typename
_Ta::value_type _Value_type
;
318 _Value_type __r
= __s
== 0 ? _Value_type() : __a
[0];
319 for (size_t __i
= 1; __i
< __s
; ++__i
)
321 _Value_type __t
= __a
[__i
];
329 // Helper class _Array, first layer of valarray abstraction.
330 // All operations on valarray should be forwarded to this class
331 // whenever possible. -- gdr
334 template<typename _Tp
>
337 explicit _Array(_Tp
* const __restrict__
);
338 explicit _Array(const valarray
<_Tp
>&);
339 _Array(const _Tp
* __restrict__
, size_t);
343 _Tp
* const __restrict__ _M_data
;
347 // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]]
348 template<typename _Tp
>
350 __valarray_copy_construct(_Array
<_Tp
> __a
, _Array
<size_t> __i
,
351 _Array
<_Tp
> __b
, size_t __n
)
352 { std::__valarray_copy_construct(__a
._M_data
, __i
._M_data
,
355 // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>]
356 template<typename _Tp
>
358 __valarray_copy_construct(_Array
<_Tp
> __a
, size_t __n
, size_t __s
,
360 { std::__valarray_copy_construct(__a
._M_data
, __n
, __s
, __b
._M_data
); }
362 template<typename _Tp
>
364 __valarray_fill (_Array
<_Tp
> __a
, size_t __n
, const _Tp
& __t
)
365 { std::__valarray_fill(__a
._M_data
, __n
, __t
); }
367 template<typename _Tp
>
369 __valarray_fill(_Array
<_Tp
> __a
, size_t __n
, size_t __s
, const _Tp
& __t
)
370 { std::__valarray_fill(__a
._M_data
, __n
, __s
, __t
); }
372 template<typename _Tp
>
374 __valarray_fill(_Array
<_Tp
> __a
, _Array
<size_t> __i
,
375 size_t __n
, const _Tp
& __t
)
376 { std::__valarray_fill(__a
._M_data
, __i
._M_data
, __n
, __t
); }
378 // Copy a plain array __a[<__n>] into a play array __b[<>]
379 template<typename _Tp
>
381 __valarray_copy(_Array
<_Tp
> __a
, size_t __n
, _Array
<_Tp
> __b
)
382 { std::__valarray_copy(__a
._M_data
, __n
, __b
._M_data
); }
384 // Copy strided array __a[<__n : __s>] in plain __b[<__n>]
385 template<typename _Tp
>
387 __valarray_copy(_Array
<_Tp
> __a
, size_t __n
, size_t __s
, _Array
<_Tp
> __b
)
388 { std::__valarray_copy(__a
._M_data
, __n
, __s
, __b
._M_data
); }
390 // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
391 template<typename _Tp
>
393 __valarray_copy(_Array
<_Tp
> __a
, _Array
<_Tp
> __b
, size_t __n
, size_t __s
)
394 { __valarray_copy(__a
._M_data
, __b
._M_data
, __n
, __s
); }
396 // Copy strided array __src[<__n : __s1>] into another
397 // strided array __dst[< : __s2>]. Their sizes must match.
398 template<typename _Tp
>
400 __valarray_copy(_Array
<_Tp
> __a
, size_t __n
, size_t __s1
,
401 _Array
<_Tp
> __b
, size_t __s2
)
402 { std::__valarray_copy(__a
._M_data
, __n
, __s1
, __b
._M_data
, __s2
); }
404 // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]
405 template<typename _Tp
>
407 __valarray_copy(_Array
<_Tp
> __a
, _Array
<size_t> __i
,
408 _Array
<_Tp
> __b
, size_t __n
)
409 { std::__valarray_copy(__a
._M_data
, __i
._M_data
, __b
._M_data
, __n
); }
411 // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]
412 template<typename _Tp
>
414 __valarray_copy(_Array
<_Tp
> __a
, size_t __n
, _Array
<_Tp
> __b
,
416 { std::__valarray_copy(__a
._M_data
, __n
, __b
._M_data
, __i
._M_data
); }
418 // Copy the __n first elements of an indexed array __src[<__i>] into
419 // another indexed array __dst[<__j>].
420 template<typename _Tp
>
422 __valarray_copy(_Array
<_Tp
> __src
, size_t __n
, _Array
<size_t> __i
,
423 _Array
<_Tp
> __dst
, _Array
<size_t> __j
)
425 std::__valarray_copy(__src
._M_data
, __n
, __i
._M_data
,
426 __dst
._M_data
, __j
._M_data
);
429 template<typename _Tp
>
431 _Array
<_Tp
>::_Array(_Tp
* const __restrict__ __p
)
434 template<typename _Tp
>
436 _Array
<_Tp
>::_Array(const valarray
<_Tp
>& __v
)
437 : _M_data (__v
._M_data
) {}
439 template<typename _Tp
>
441 _Array
<_Tp
>::_Array(const _Tp
* __restrict__ __b
, size_t __s
)
442 : _M_data(__valarray_get_storage
<_Tp
>(__s
))
443 { std::__valarray_copy_construct(__b
, __s
, _M_data
); }
445 template<typename _Tp
>
447 _Array
<_Tp
>::begin () const
450 #define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
451 template<typename _Tp> \
453 _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
455 for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
459 template<typename _Tp> \
461 _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
463 _Tp* __p = __a._M_data; \
464 for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
468 template<typename _Tp, class _Dom> \
470 _Array_augmented_##_Name(_Array<_Tp> __a, \
471 const _Expr<_Dom, _Tp>& __e, size_t __n) \
473 _Tp* __p(__a._M_data); \
474 for (size_t __i = 0; __i < __n; ++__i, ++__p) \
475 *__p _Op##= __e[__i]; \
478 template<typename _Tp> \
480 _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
483 _Tp* __q(__b._M_data); \
484 for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
489 template<typename _Tp> \
491 _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
492 size_t __n, size_t __s) \
494 _Tp* __q(__b._M_data); \
495 for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
500 template<typename _Tp, class _Dom> \
502 _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
503 const _Expr<_Dom, _Tp>& __e, size_t __n) \
505 _Tp* __p(__a._M_data); \
506 for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
507 *__p _Op##= __e[__i]; \
510 template<typename _Tp> \
512 _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
513 _Array<_Tp> __b, size_t __n) \
515 _Tp* __q(__b._M_data); \
516 for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
518 __a._M_data[*__j] _Op##= *__q; \
521 template<typename _Tp> \
523 _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
524 _Array<_Tp> __b, _Array<size_t> __i) \
526 _Tp* __p(__a._M_data); \
527 for (size_t* __j = __i._M_data; __j<__i._M_data + __n; \
529 *__p _Op##= __b._M_data[*__j]; \
532 template<typename _Tp, class _Dom> \
534 _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
535 const _Expr<_Dom, _Tp>& __e, size_t __n) \
537 size_t* __j(__i._M_data); \
538 for (size_t __k = 0; __k<__n; ++__k, ++__j) \
539 __a._M_data[*__j] _Op##= __e[__k]; \
542 template<typename _Tp> \
544 _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
545 _Array<_Tp> __b, size_t __n) \
547 bool* __ok(__m._M_data); \
548 _Tp* __p(__a._M_data); \
549 for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
550 ++__q, ++__ok, ++__p) \
561 template<typename _Tp> \
563 _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
564 _Array<_Tp> __b, _Array<bool> __m) \
566 bool* __ok(__m._M_data); \
567 _Tp* __q(__b._M_data); \
568 for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
569 ++__p, ++__ok, ++__q) \
580 template<typename _Tp, class _Dom> \
582 _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
583 const _Expr<_Dom, _Tp>& __e, size_t __n) \
585 bool* __ok(__m._M_data); \
586 _Tp* __p(__a._M_data); \
587 for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
594 *__p _Op##= __e[__i]; \
598 _DEFINE_ARRAY_FUNCTION(+, __plus
)
599 _DEFINE_ARRAY_FUNCTION(-, __minus
)
600 _DEFINE_ARRAY_FUNCTION(*, __multiplies
)
601 _DEFINE_ARRAY_FUNCTION(/, __divides
)
602 _DEFINE_ARRAY_FUNCTION(%, __modulus
)
603 _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor
)
604 _DEFINE_ARRAY_FUNCTION(|, __bitwise_or
)
605 _DEFINE_ARRAY_FUNCTION(&, __bitwise_and
)
606 _DEFINE_ARRAY_FUNCTION(<<, __shift_left
)
607 _DEFINE_ARRAY_FUNCTION(>>, __shift_right
)
609 #undef _DEFINE_ARRAY_FUNCTION
611 _GLIBCXX_END_NAMESPACE_VERSION
614 # include <bits/valarray_array.tcc>
616 #endif /* _ARRAY_H */