libgfortran/intrinsics/unpack_generic.c
/* Generic implementation of the UNPACK intrinsic
   Copyright (C) 2002-2018 Free Software Foundation, Inc.
   Contributed by Paul Brook <paul@nowt.org>

This file is part of the GNU Fortran runtime library (libgfortran).

Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.

Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#include "libgfortran.h"
#include <assert.h>
#include <string.h>
/* All the bounds checking for unpack in one function.  If field is NULL,
   we don't check it, for the unpack0 functions.  */

static void
unpack_bounds (gfc_array_char *ret, const gfc_array_char *vector,
               const gfc_array_l1 *mask, const gfc_array_char *field)
{
  index_type vec_size, mask_count;
  vec_size = size0 ((array_t *) vector);
  mask_count = count_0 (mask);
  if (vec_size < mask_count)
    runtime_error ("Incorrect size of return value in UNPACK"
                   " intrinsic: should be at least %ld, is"
                   " %ld", (long int) mask_count,
                   (long int) vec_size);

  if (field != NULL)
    bounds_equal_extents ((array_t *) field, (array_t *) mask,
                          "FIELD", "UNPACK");

  if (ret->base_addr != NULL)
    bounds_equal_extents ((array_t *) ret, (array_t *) mask,
                          "return value", "UNPACK");
}
static void
unpack_internal (gfc_array_char *ret, const gfc_array_char *vector,
                 const gfc_array_l1 *mask, const gfc_array_char *field,
                 index_type size)
{
  /* r.* indicates the return array.  */
  index_type rstride[GFC_MAX_DIMENSIONS];
  index_type rstride0;
  index_type rs;
  char * restrict rptr;
  /* v.* indicates the vector array.  */
  index_type vstride0;
  char *vptr;
  /* f.* indicates the field array.  */
  index_type fstride[GFC_MAX_DIMENSIONS];
  index_type fstride0;
  const char *fptr;
  /* m.* indicates the mask array.  */
  index_type mstride[GFC_MAX_DIMENSIONS];
  index_type mstride0;
  const GFC_LOGICAL_1 *mptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type n;
  index_type dim;

  int empty;
  int mask_kind;

  empty = 0;

  mptr = mask->base_addr;

  /* Use the same loop for all logical types, by using GFC_LOGICAL_1
     and using shifting to address size and endian issues.  */

  mask_kind = GFC_DESCRIPTOR_SIZE (mask);

  if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
      || mask_kind == 16
#endif
      )
    {
      /* Don't convert a NULL pointer as we use test for NULL below.  */
      if (mptr)
        mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
    }
  else
    runtime_error ("Funny sized logical array");
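
  /* At this point mptr addresses the significant byte of each mask element:
     a LOGICAL of kind K stores 0 or 1 in a K-byte integer, so that byte sits
     at offset 0 on little-endian targets and at offset K-1 on big-endian
     ones, which is in effect the adjustment GFOR_POINTER_TO_L1 applies.
     Together with mask strides measured in bytes, this lets the copy loop
     below test *mptr for every logical kind.  */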
  if (ret->base_addr == NULL)
    {
      /* The front end has signalled that we need to populate the
         return array descriptor.  */
      dim = GFC_DESCRIPTOR_RANK (mask);
      rs = 1;
      for (n = 0; n < dim; n++)
        {
          count[n] = 0;
          GFC_DIMENSION_SET(ret->dim[n], 0,
                            GFC_DESCRIPTOR_EXTENT(mask,n) - 1, rs);
          extent[n] = GFC_DESCRIPTOR_EXTENT(ret,n);
          empty = empty || extent[n] <= 0;
          rstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(ret, n);
          fstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(field, n);
          mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask, n);
          rs *= extent[n];
        }
      ret->offset = 0;
      ret->base_addr = xmallocarray (rs, size);
    }
  else
    {
      dim = GFC_DESCRIPTOR_RANK (ret);
      for (n = 0; n < dim; n++)
        {
          count[n] = 0;
          extent[n] = GFC_DESCRIPTOR_EXTENT(ret,n);
          empty = empty || extent[n] <= 0;
          rstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(ret, n);
          fstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(field, n);
          mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask, n);
        }
    }
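
  /* Worked example of the allocation branch above (illustration only,
     assuming the element size recorded in the return descriptor equals
     SIZE): for a 2x3 MASK the loop stores element strides 1 and 2, so the
     byte strides become SIZE and 2*SIZE, and rs ends up as 6, i.e. six
     elements of SIZE bytes are allocated for the result.  */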
  if (empty)
    return;

  /* This assert makes sure GCC knows we can access *stride[0] later.  */
  assert (dim > 0);

  vstride0 = GFC_DESCRIPTOR_STRIDE_BYTES(vector,0);
  rstride0 = rstride[0];
  fstride0 = fstride[0];
  mstride0 = mstride[0];
  rptr = ret->base_addr;
  fptr = field->base_addr;
  vptr = vector->base_addr;
  while (rptr)
    {
      if (*mptr)
        {
          /* From vector.  */
          memcpy (rptr, vptr, size);
          vptr += vstride0;
        }
      else
        {
          /* From field.  */
          memcpy (rptr, fptr, size);
        }
      /* Advance to the next element.  */
      rptr += rstride0;
      fptr += fstride0;
      mptr += mstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          rptr -= rstride[n] * extent[n];
          fptr -= fstride[n] * extent[n];
          mptr -= mstride[n] * extent[n];
          n++;
          if (n >= dim)
            {
              /* Break out of the loop.  */
              rptr = NULL;
              break;
            }
          else
            {
              count[n]++;
              rptr += rstride[n];
              fptr += fstride[n];
              mptr += mstride[n];
            }
        }
    }
}
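
/* The count[]/stride bookkeeping above is an "odometer" walk over an
   n-dimensional array.  The stand-alone sketch below (illustrative only,
   never compiled, all names hypothetical) shows the same pattern stripped of
   the vector/field/mask details: advance along dimension 0, and on overflow
   rewind that dimension and carry into the next one.  */
#if 0
static void
example_odometer_walk (char *base, const index_type *stride,
                       const index_type *extent, int rank,
                       void (*visit) (char *))
{
  index_type count[GFC_MAX_DIMENSIONS];
  char *p = base;
  int n;

  for (n = 0; n < rank; n++)
    count[n] = 0;

  while (p)
    {
      visit (p);
      /* Step along the innermost dimension.  */
      p += stride[0];
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* Rewind this dimension and carry into the next.  */
          count[n] = 0;
          p -= stride[n] * extent[n];
          n++;
          if (n >= rank)
            {
              p = NULL;  /* Every element has been visited.  */
              break;
            }
          count[n]++;
          p += stride[n];
        }
    }
}
#endif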
extern void unpack1 (gfc_array_char *, const gfc_array_char *,
                     const gfc_array_l1 *, const gfc_array_char *);
export_proto(unpack1);
void
unpack1 (gfc_array_char *ret, const gfc_array_char *vector,
         const gfc_array_l1 *mask, const gfc_array_char *field)
{
  index_type type_size;
  index_type size;

  if (unlikely(compile_options.bounds_check))
    unpack_bounds (ret, vector, mask, field);

  type_size = GFC_DTYPE_TYPE_SIZE (vector);
  size = GFC_DESCRIPTOR_SIZE (vector);
  switch (type_size)
    {
    case GFC_DTYPE_LOGICAL_1:
    case GFC_DTYPE_INTEGER_1:
      unpack1_i1 ((gfc_array_i1 *) ret, (gfc_array_i1 *) vector,
                  mask, (gfc_array_i1 *) field);
      return;

    case GFC_DTYPE_LOGICAL_2:
    case GFC_DTYPE_INTEGER_2:
      unpack1_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) vector,
                  mask, (gfc_array_i2 *) field);
      return;

    case GFC_DTYPE_LOGICAL_4:
    case GFC_DTYPE_INTEGER_4:
      unpack1_i4 ((gfc_array_i4 *) ret, (gfc_array_i4 *) vector,
                  mask, (gfc_array_i4 *) field);
      return;

    case GFC_DTYPE_LOGICAL_8:
    case GFC_DTYPE_INTEGER_8:
      unpack1_i8 ((gfc_array_i8 *) ret, (gfc_array_i8 *) vector,
                  mask, (gfc_array_i8 *) field);
      return;

#ifdef HAVE_GFC_INTEGER_16
    case GFC_DTYPE_LOGICAL_16:
    case GFC_DTYPE_INTEGER_16:
      unpack1_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) vector,
                   mask, (gfc_array_i16 *) field);
      return;
#endif

    case GFC_DTYPE_REAL_4:
      unpack1_r4 ((gfc_array_r4 *) ret, (gfc_array_r4 *) vector,
                  mask, (gfc_array_r4 *) field);
      return;

    case GFC_DTYPE_REAL_8:
      unpack1_r8 ((gfc_array_r8 *) ret, (gfc_array_r8 *) vector,
                  mask, (gfc_array_r8 *) field);
      return;
    /* FIXME: This is a hack, which will have to be removed when the
       array descriptor is reworked.  Currently, we don't store the
       kind value for the type, but only the size.  Because on targets with
       __float128, we have sizeof(long double) == sizeof(__float128),
       we cannot discriminate here and have to fall back to the generic
       handling (which is suboptimal).  */
#if !defined(GFC_REAL_16_IS_FLOAT128)
# ifdef HAVE_GFC_REAL_10
    case GFC_DTYPE_REAL_10:
      unpack1_r10 ((gfc_array_r10 *) ret, (gfc_array_r10 *) vector,
                   mask, (gfc_array_r10 *) field);
      return;
# endif

# ifdef HAVE_GFC_REAL_16
    case GFC_DTYPE_REAL_16:
      unpack1_r16 ((gfc_array_r16 *) ret, (gfc_array_r16 *) vector,
                   mask, (gfc_array_r16 *) field);
      return;
# endif
#endif
    case GFC_DTYPE_COMPLEX_4:
      unpack1_c4 ((gfc_array_c4 *) ret, (gfc_array_c4 *) vector,
                  mask, (gfc_array_c4 *) field);
      return;

    case GFC_DTYPE_COMPLEX_8:
      unpack1_c8 ((gfc_array_c8 *) ret, (gfc_array_c8 *) vector,
                  mask, (gfc_array_c8 *) field);
      return;
    /* FIXME: This is a hack, which will have to be removed when the
       array descriptor is reworked.  Currently, we don't store the
       kind value for the type, but only the size.  Because on targets with
       __float128, we have sizeof(long double) == sizeof(__float128),
       we cannot discriminate here and have to fall back to the generic
       handling (which is suboptimal).  */
#if !defined(GFC_REAL_16_IS_FLOAT128)
# ifdef HAVE_GFC_COMPLEX_10
    case GFC_DTYPE_COMPLEX_10:
      unpack1_c10 ((gfc_array_c10 *) ret, (gfc_array_c10 *) vector,
                   mask, (gfc_array_c10 *) field);
      return;
# endif

# ifdef HAVE_GFC_COMPLEX_16
    case GFC_DTYPE_COMPLEX_16:
      unpack1_c16 ((gfc_array_c16 *) ret, (gfc_array_c16 *) vector,
                   mask, (gfc_array_c16 *) field);
      return;
# endif
#endif
    }
  switch (GFC_DESCRIPTOR_SIZE (ret))
    {
    case 1:
      unpack1_i1 ((gfc_array_i1 *) ret, (gfc_array_i1 *) vector,
                  mask, (gfc_array_i1 *) field);
      return;

    case 2:
      if (GFC_UNALIGNED_2(ret->base_addr) || GFC_UNALIGNED_2(vector->base_addr)
          || GFC_UNALIGNED_2(field->base_addr))
        break;
      else
        {
          unpack1_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) vector,
                      mask, (gfc_array_i2 *) field);
          return;
        }

    case 4:
      if (GFC_UNALIGNED_4(ret->base_addr) || GFC_UNALIGNED_4(vector->base_addr)
          || GFC_UNALIGNED_4(field->base_addr))
        break;
      else
        {
          unpack1_i4 ((gfc_array_i4 *) ret, (gfc_array_i4 *) vector,
                      mask, (gfc_array_i4 *) field);
          return;
        }

    case 8:
      if (GFC_UNALIGNED_8(ret->base_addr) || GFC_UNALIGNED_8(vector->base_addr)
          || GFC_UNALIGNED_8(field->base_addr))
        break;
      else
        {
          unpack1_i8 ((gfc_array_i8 *) ret, (gfc_array_i8 *) vector,
                      mask, (gfc_array_i8 *) field);
          return;
        }

#ifdef HAVE_GFC_INTEGER_16
    case 16:
      if (GFC_UNALIGNED_16(ret->base_addr)
          || GFC_UNALIGNED_16(vector->base_addr)
          || GFC_UNALIGNED_16(field->base_addr))
        break;
      else
        {
          unpack1_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) vector,
                       mask, (gfc_array_i16 *) field);
          return;
        }
#endif

    default:
      break;
    }

  unpack_internal (ret, vector, mask, field, size);
}
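
/* unpack1 thus tries a routine specialized on the descriptor's type and
   element size first, then a plain size-based match when the data pointers
   are suitably aligned, and only falls back to the byte-wise unpack_internal
   when neither applies.  The GFC_UNALIGNED_* tests above amount to checking
   the low address bits, roughly as in the sketch below (illustrative only,
   never compiled; assumes uintptr_t is available and a power-of-two
   alignment).  */
#if 0
static int
example_unaligned (const void *p, uintptr_t align)
{
  return ((uintptr_t) p & (align - 1)) != 0;
}
#endif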
extern void unpack1_char (gfc_array_char *, GFC_INTEGER_4,
                          const gfc_array_char *, const gfc_array_l1 *,
                          const gfc_array_char *, GFC_INTEGER_4,
                          GFC_INTEGER_4);
export_proto(unpack1_char);
void
unpack1_char (gfc_array_char *ret,
              GFC_INTEGER_4 ret_length __attribute__((unused)),
              const gfc_array_char *vector, const gfc_array_l1 *mask,
              const gfc_array_char *field, GFC_INTEGER_4 vector_length,
              GFC_INTEGER_4 field_length __attribute__((unused)))
{
  if (unlikely(compile_options.bounds_check))
    unpack_bounds (ret, vector, mask, field);

  unpack_internal (ret, vector, mask, field, vector_length);
}
extern void unpack1_char4 (gfc_array_char *, GFC_INTEGER_4,
                           const gfc_array_char *, const gfc_array_l1 *,
                           const gfc_array_char *, GFC_INTEGER_4,
                           GFC_INTEGER_4);
export_proto(unpack1_char4);
void
unpack1_char4 (gfc_array_char *ret,
               GFC_INTEGER_4 ret_length __attribute__((unused)),
               const gfc_array_char *vector, const gfc_array_l1 *mask,
               const gfc_array_char *field, GFC_INTEGER_4 vector_length,
               GFC_INTEGER_4 field_length __attribute__((unused)))
{
  if (unlikely(compile_options.bounds_check))
    unpack_bounds (ret, vector, mask, field);

  unpack_internal (ret, vector, mask, field,
                   vector_length * sizeof (gfc_char4_t));
}
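
/* For the character variants the element size is the string length:
   unpack1_char receives it directly in bytes, while unpack1_char4 scales it
   by sizeof (gfc_char4_t) because CHARACTER(KIND=4) elements use 4-byte
   code units.  */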
extern void unpack0 (gfc_array_char *, const gfc_array_char *,
                     const gfc_array_l1 *, char *);
export_proto(unpack0);
void
unpack0 (gfc_array_char *ret, const gfc_array_char *vector,
         const gfc_array_l1 *mask, char *field)
{
  gfc_array_char tmp;

  index_type type_size;

  if (unlikely(compile_options.bounds_check))
    unpack_bounds (ret, vector, mask, NULL);

  type_size = GFC_DTYPE_TYPE_SIZE (vector);

  switch (type_size)
    {
    case GFC_DTYPE_LOGICAL_1:
    case GFC_DTYPE_INTEGER_1:
      unpack0_i1 ((gfc_array_i1 *) ret, (gfc_array_i1 *) vector,
                  mask, (GFC_INTEGER_1 *) field);
      return;

    case GFC_DTYPE_LOGICAL_2:
    case GFC_DTYPE_INTEGER_2:
      unpack0_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) vector,
                  mask, (GFC_INTEGER_2 *) field);
      return;

    case GFC_DTYPE_LOGICAL_4:
    case GFC_DTYPE_INTEGER_4:
      unpack0_i4 ((gfc_array_i4 *) ret, (gfc_array_i4 *) vector,
                  mask, (GFC_INTEGER_4 *) field);
      return;

    case GFC_DTYPE_LOGICAL_8:
    case GFC_DTYPE_INTEGER_8:
      unpack0_i8 ((gfc_array_i8 *) ret, (gfc_array_i8 *) vector,
                  mask, (GFC_INTEGER_8 *) field);
      return;

#ifdef HAVE_GFC_INTEGER_16
    case GFC_DTYPE_LOGICAL_16:
    case GFC_DTYPE_INTEGER_16:
      unpack0_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) vector,
                   mask, (GFC_INTEGER_16 *) field);
      return;
#endif

    case GFC_DTYPE_REAL_4:
      unpack0_r4 ((gfc_array_r4 *) ret, (gfc_array_r4 *) vector,
                  mask, (GFC_REAL_4 *) field);
      return;

    case GFC_DTYPE_REAL_8:
      unpack0_r8 ((gfc_array_r8 *) ret, (gfc_array_r8 *) vector,
                  mask, (GFC_REAL_8 *) field);
      return;
    /* FIXME: This is a hack, which will have to be removed when the
       array descriptor is reworked.  Currently, we don't store the
       kind value for the type, but only the size.  Because on targets with
       __float128, we have sizeof(long double) == sizeof(__float128),
       we cannot discriminate here and have to fall back to the generic
       handling (which is suboptimal).  */
#if !defined(GFC_REAL_16_IS_FLOAT128)
# ifdef HAVE_GFC_REAL_10
    case GFC_DTYPE_REAL_10:
      unpack0_r10 ((gfc_array_r10 *) ret, (gfc_array_r10 *) vector,
                   mask, (GFC_REAL_10 *) field);
      return;
# endif

# ifdef HAVE_GFC_REAL_16
    case GFC_DTYPE_REAL_16:
      unpack0_r16 ((gfc_array_r16 *) ret, (gfc_array_r16 *) vector,
                   mask, (GFC_REAL_16 *) field);
      return;
# endif
#endif
    case GFC_DTYPE_COMPLEX_4:
      unpack0_c4 ((gfc_array_c4 *) ret, (gfc_array_c4 *) vector,
                  mask, (GFC_COMPLEX_4 *) field);
      return;

    case GFC_DTYPE_COMPLEX_8:
      unpack0_c8 ((gfc_array_c8 *) ret, (gfc_array_c8 *) vector,
                  mask, (GFC_COMPLEX_8 *) field);
      return;
    /* FIXME: This is a hack, which will have to be removed when the
       array descriptor is reworked.  Currently, we don't store the
       kind value for the type, but only the size.  Because on targets with
       __float128, we have sizeof(long double) == sizeof(__float128),
       we cannot discriminate here and have to fall back to the generic
       handling (which is suboptimal).  */
#if !defined(GFC_REAL_16_IS_FLOAT128)
# ifdef HAVE_GFC_COMPLEX_10
    case GFC_DTYPE_COMPLEX_10:
      unpack0_c10 ((gfc_array_c10 *) ret, (gfc_array_c10 *) vector,
                   mask, (GFC_COMPLEX_10 *) field);
      return;
# endif

# ifdef HAVE_GFC_COMPLEX_16
    case GFC_DTYPE_COMPLEX_16:
      unpack0_c16 ((gfc_array_c16 *) ret, (gfc_array_c16 *) vector,
                   mask, (GFC_COMPLEX_16 *) field);
      return;
# endif
#endif
    }
  switch (GFC_DESCRIPTOR_SIZE (ret))
    {
    case 1:
      unpack0_i1 ((gfc_array_i1 *) ret, (gfc_array_i1 *) vector,
                  mask, (GFC_INTEGER_1 *) field);
      return;

    case 2:
      if (GFC_UNALIGNED_2(ret->base_addr) || GFC_UNALIGNED_2(vector->base_addr)
          || GFC_UNALIGNED_2(field))
        break;
      else
        {
          unpack0_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) vector,
                      mask, (GFC_INTEGER_2 *) field);
          return;
        }

    case 4:
      if (GFC_UNALIGNED_4(ret->base_addr) || GFC_UNALIGNED_4(vector->base_addr)
          || GFC_UNALIGNED_4(field))
        break;
      else
        {
          unpack0_i4 ((gfc_array_i4 *) ret, (gfc_array_i4 *) vector,
                      mask, (GFC_INTEGER_4 *) field);
          return;
        }

    case 8:
      if (GFC_UNALIGNED_8(ret->base_addr) || GFC_UNALIGNED_8(vector->base_addr)
          || GFC_UNALIGNED_8(field))
        break;
      else
        {
          unpack0_i8 ((gfc_array_i8 *) ret, (gfc_array_i8 *) vector,
                      mask, (GFC_INTEGER_8 *) field);
          return;
        }

#ifdef HAVE_GFC_INTEGER_16
    case 16:
      if (GFC_UNALIGNED_16(ret->base_addr)
          || GFC_UNALIGNED_16(vector->base_addr)
          || GFC_UNALIGNED_16(field))
        break;
      else
        {
          unpack0_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) vector,
                       mask, (GFC_INTEGER_16 *) field);
          return;
        }
#endif
    }

  memset (&tmp, 0, sizeof (tmp));
  GFC_DTYPE_CLEAR(&tmp);
  tmp.base_addr = field;
  unpack_internal (ret, vector, mask, &tmp, GFC_DESCRIPTOR_SIZE (vector));
}
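
/* In unpack0 above, memset leaves every stride of tmp at zero, so
   unpack_internal re-reads the same scalar FIELD value wherever MASK is
   false; unpack0_char and unpack0_char4 below rely on the same zero-stride
   trick.  */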
extern void unpack0_char (gfc_array_char *, GFC_INTEGER_4,
                          const gfc_array_char *, const gfc_array_l1 *,
                          char *, GFC_INTEGER_4, GFC_INTEGER_4);
export_proto(unpack0_char);
void
unpack0_char (gfc_array_char *ret,
              GFC_INTEGER_4 ret_length __attribute__((unused)),
              const gfc_array_char *vector, const gfc_array_l1 *mask,
              char *field, GFC_INTEGER_4 vector_length,
              GFC_INTEGER_4 field_length __attribute__((unused)))
{
  gfc_array_char tmp;

  if (unlikely(compile_options.bounds_check))
    unpack_bounds (ret, vector, mask, NULL);

  memset (&tmp, 0, sizeof (tmp));
  GFC_DTYPE_CLEAR(&tmp);
  tmp.base_addr = field;
  unpack_internal (ret, vector, mask, &tmp, vector_length);
}
extern void unpack0_char4 (gfc_array_char *, GFC_INTEGER_4,
                           const gfc_array_char *, const gfc_array_l1 *,
                           char *, GFC_INTEGER_4, GFC_INTEGER_4);
export_proto(unpack0_char4);
void
unpack0_char4 (gfc_array_char *ret,
               GFC_INTEGER_4 ret_length __attribute__((unused)),
               const gfc_array_char *vector, const gfc_array_l1 *mask,
               char *field, GFC_INTEGER_4 vector_length,
               GFC_INTEGER_4 field_length __attribute__((unused)))
{
  gfc_array_char tmp;

  if (unlikely(compile_options.bounds_check))
    unpack_bounds (ret, vector, mask, NULL);

  memset (&tmp, 0, sizeof (tmp));
  GFC_DTYPE_CLEAR(&tmp);
  tmp.base_addr = field;
  unpack_internal (ret, vector, mask, &tmp,
                   vector_length * sizeof (gfc_char4_t));
}