1 /* Copyright (C) 2020-2023 Free Software Foundation, Inc.
2 Contributed by Jakub Jelinek <jakub@redhat.com>.
4 This file is part of the GNU Offloading and Multi Processing Library
7 Libgomp is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 /* This file contains wrappers for the system allocation routines. Most
27 places in the OpenMP API do not make any provision for failure, so in
28 general we cannot allow memory allocation to fail. */
#ifdef LIBGOMP_USE_MEMKIND
#include <dlfcn.h>
#endif

#define omp_max_predefined_alloc omp_thread_mem_alloc

#ifdef LIBGOMP_USE_MEMKIND
/* Enumeration of the memkind allocation kinds looked up at runtime from
   libmemkind.  The GOMP_MEMKIND_KINDS X-macro list is expanded twice:
   here to build the enumerators and in gomp_init_memkind to build the
   matching table of dlsym symbol names.  */
enum gomp_memkind_kind
{
  GOMP_MEMKIND_NONE = 0,
#define GOMP_MEMKIND_KINDS \
  GOMP_MEMKIND_KIND (HBW_INTERLEAVE),	\
  GOMP_MEMKIND_KIND (HBW_PREFERRED),	\
  GOMP_MEMKIND_KIND (DAX_KMEM_ALL),	\
  GOMP_MEMKIND_KIND (DAX_KMEM),		\
  GOMP_MEMKIND_KIND (INTERLEAVE),	\
  GOMP_MEMKIND_KIND (DEFAULT)
#define GOMP_MEMKIND_KIND(kind) GOMP_MEMKIND_##kind
  GOMP_MEMKIND_KINDS,
#undef GOMP_MEMKIND_KIND
  GOMP_MEMKIND_COUNT
};
#endif
56 struct omp_allocator_data
58 omp_memspace_handle_t memspace
;
59 omp_uintptr_t alignment
;
60 omp_uintptr_t pool_size
;
61 omp_uintptr_t used_pool_size
;
62 omp_allocator_handle_t fb_data
;
63 unsigned int sync_hint
: 8;
64 unsigned int access
: 8;
65 unsigned int fallback
: 8;
66 unsigned int pinned
: 1;
67 unsigned int partition
: 7;
68 #ifdef LIBGOMP_USE_MEMKIND
69 unsigned int memkind
: 8;
71 #ifndef HAVE_SYNC_BUILTINS
80 omp_allocator_handle_t allocator
;
#ifdef LIBGOMP_USE_MEMKIND
/* Function pointers and kind objects dlsym'd from libmemkind.so.0.  */
struct gomp_memkind_data
{
  void *memkind_handle;
  void *(*memkind_malloc) (void *, size_t);
  void *(*memkind_calloc) (void *, size_t, size_t);
  void *(*memkind_realloc) (void *, void *, size_t);
  void (*memkind_free) (void *, void *);
  int (*memkind_check_available) (void *);
  /* Pointers to the exported memkind kind objects, NULL when the kind
     is unavailable; index 0 (GOMP_MEMKIND_NONE) is never used.  */
  void **kinds[GOMP_MEMKIND_COUNT];
};

/* Lazily initialized by gomp_init_memkind, published with a release
   store and read with acquire loads.  */
static struct gomp_memkind_data *memkind_data;
static pthread_once_t memkind_data_once = PTHREAD_ONCE_INIT;
#endif
#ifdef LIBGOMP_USE_MEMKIND
/* One-time initializer (run via pthread_once) that dlopens libmemkind
   and resolves its entry points and kind objects.  On any failure a
   zeroed gomp_memkind_data is still published, so callers simply see
   every kind as unavailable.  */
static void
gomp_init_memkind (void)
{
  void *handle = dlopen ("libmemkind.so.0", RTLD_LAZY);
  struct gomp_memkind_data *data;
  int i;
  static const char *kinds[] = {
    NULL,
#define GOMP_MEMKIND_KIND(kind) "MEMKIND_" #kind
    GOMP_MEMKIND_KINDS
#undef GOMP_MEMKIND_KIND
  };

  data = calloc (1, sizeof (struct gomp_memkind_data));
  if (data == NULL)
    {
      if (handle)
	dlclose (handle);
      return;
    }
  if (!handle)
    {
      /* Library not present: publish the all-NULL table.  */
      __atomic_store_n (&memkind_data, data, MEMMODEL_RELEASE);
      return;
    }
  data->memkind_handle = handle;
  data->memkind_malloc
    = (__typeof (data->memkind_malloc)) dlsym (handle, "memkind_malloc");
  data->memkind_calloc
    = (__typeof (data->memkind_calloc)) dlsym (handle, "memkind_calloc");
  data->memkind_realloc
    = (__typeof (data->memkind_realloc)) dlsym (handle, "memkind_realloc");
  data->memkind_free
    = (__typeof (data->memkind_free)) dlsym (handle, "memkind_free");
  data->memkind_check_available
    = (__typeof (data->memkind_check_available))
      dlsym (handle, "memkind_check_available");
  if (data->memkind_malloc
      && data->memkind_calloc
      && data->memkind_realloc
      && data->memkind_free
      && data->memkind_check_available)
    for (i = 1; i < GOMP_MEMKIND_COUNT; ++i)
      {
	data->kinds[i] = (void **) dlsym (handle, kinds[i]);
	/* memkind_check_available returns nonzero on failure; drop
	   kinds that resolved but are not usable on this system.  */
	if (data->kinds[i] && data->memkind_check_available (*data->kinds[i]))
	  data->kinds[i] = NULL;
      }
  __atomic_store_n (&memkind_data, data, MEMMODEL_RELEASE);
}
#endif
#ifdef LIBGOMP_USE_MEMKIND
/* Return the process-wide memkind table, initializing it on first use.
   Never returns NULL once gomp_init_memkind has run.  */
static struct gomp_memkind_data *
gomp_get_memkind (void)
{
  struct gomp_memkind_data *data
    = __atomic_load_n (&memkind_data, MEMMODEL_ACQUIRE);
  if (data)
    return data;
  pthread_once (&memkind_data_once, gomp_init_memkind);
  return __atomic_load_n (&memkind_data, MEMMODEL_ACQUIRE);
}
#endif
162 omp_allocator_handle_t
163 omp_init_allocator (omp_memspace_handle_t memspace
, int ntraits
,
164 const omp_alloctrait_t traits
[])
166 struct omp_allocator_data data
167 = { memspace
, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended
, omp_atv_all
,
168 omp_atv_default_mem_fb
, omp_atv_false
, omp_atv_environment
,
169 #ifdef LIBGOMP_USE_MEMKIND
173 struct omp_allocator_data
*ret
;
176 if (memspace
> omp_low_lat_mem_space
)
177 return omp_null_allocator
;
178 for (i
= 0; i
< ntraits
; i
++)
179 switch (traits
[i
].key
)
181 case omp_atk_sync_hint
:
182 switch (traits
[i
].value
)
184 case omp_atv_default
:
185 data
.sync_hint
= omp_atv_contended
;
187 case omp_atv_contended
:
188 case omp_atv_uncontended
:
189 case omp_atv_serialized
:
190 case omp_atv_private
:
191 data
.sync_hint
= traits
[i
].value
;
194 return omp_null_allocator
;
197 case omp_atk_alignment
:
198 if (traits
[i
].value
== omp_atv_default
)
203 if ((traits
[i
].value
& (traits
[i
].value
- 1)) != 0
205 return omp_null_allocator
;
206 data
.alignment
= traits
[i
].value
;
209 switch (traits
[i
].value
)
211 case omp_atv_default
:
212 data
.access
= omp_atv_all
;
218 data
.access
= traits
[i
].value
;
221 return omp_null_allocator
;
224 case omp_atk_pool_size
:
225 if (traits
[i
].value
== omp_atv_default
)
226 data
.pool_size
= ~(uintptr_t) 0;
228 data
.pool_size
= traits
[i
].value
;
230 case omp_atk_fallback
:
231 switch (traits
[i
].value
)
233 case omp_atv_default
:
234 data
.fallback
= omp_atv_default_mem_fb
;
236 case omp_atv_default_mem_fb
:
237 case omp_atv_null_fb
:
238 case omp_atv_abort_fb
:
239 case omp_atv_allocator_fb
:
240 data
.fallback
= traits
[i
].value
;
243 return omp_null_allocator
;
246 case omp_atk_fb_data
:
247 data
.fb_data
= traits
[i
].value
;
250 switch (traits
[i
].value
)
252 case omp_atv_default
:
254 data
.pinned
= omp_atv_false
;
257 data
.pinned
= omp_atv_true
;
260 return omp_null_allocator
;
263 case omp_atk_partition
:
264 switch (traits
[i
].value
)
266 case omp_atv_default
:
267 data
.partition
= omp_atv_environment
;
269 case omp_atv_environment
:
270 case omp_atv_nearest
:
271 case omp_atv_blocked
:
272 case omp_atv_interleaved
:
273 data
.partition
= traits
[i
].value
;
276 return omp_null_allocator
;
280 return omp_null_allocator
;
283 if (data
.alignment
< sizeof (void *))
284 data
.alignment
= sizeof (void *);
288 case omp_high_bw_mem_space
:
289 #ifdef LIBGOMP_USE_MEMKIND
290 struct gomp_memkind_data
*memkind_data
;
291 memkind_data
= gomp_get_memkind ();
292 if (data
.partition
== omp_atv_interleaved
293 && memkind_data
->kinds
[GOMP_MEMKIND_HBW_INTERLEAVE
])
295 data
.memkind
= GOMP_MEMKIND_HBW_INTERLEAVE
;
298 else if (memkind_data
->kinds
[GOMP_MEMKIND_HBW_PREFERRED
])
300 data
.memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
304 return omp_null_allocator
;
305 case omp_large_cap_mem_space
:
306 #ifdef LIBGOMP_USE_MEMKIND
307 memkind_data
= gomp_get_memkind ();
308 if (memkind_data
->kinds
[GOMP_MEMKIND_DAX_KMEM_ALL
])
309 data
.memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
310 else if (memkind_data
->kinds
[GOMP_MEMKIND_DAX_KMEM
])
311 data
.memkind
= GOMP_MEMKIND_DAX_KMEM
;
315 #ifdef LIBGOMP_USE_MEMKIND
316 if (data
.partition
== omp_atv_interleaved
)
318 memkind_data
= gomp_get_memkind ();
319 if (memkind_data
->kinds
[GOMP_MEMKIND_INTERLEAVE
])
320 data
.memkind
= GOMP_MEMKIND_INTERLEAVE
;
326 /* No support for this so far. */
328 return omp_null_allocator
;
330 ret
= gomp_malloc (sizeof (struct omp_allocator_data
));
332 #ifndef HAVE_SYNC_BUILTINS
333 gomp_mutex_init (&ret
->lock
);
335 return (omp_allocator_handle_t
) ret
;
339 omp_destroy_allocator (omp_allocator_handle_t allocator
)
341 if (allocator
!= omp_null_allocator
)
343 #ifndef HAVE_SYNC_BUILTINS
344 gomp_mutex_destroy (&((struct omp_allocator_data
*) allocator
)->lock
);
346 free ((void *) allocator
);
350 ialias (omp_init_allocator
)
351 ialias (omp_destroy_allocator
)
354 omp_aligned_alloc (size_t alignment
, size_t size
,
355 omp_allocator_handle_t allocator
)
357 struct omp_allocator_data
*allocator_data
;
358 size_t new_size
, new_alignment
;
360 #ifdef LIBGOMP_USE_MEMKIND
361 enum gomp_memkind_kind memkind
;
364 if (__builtin_expect (size
== 0, 0))
368 new_alignment
= alignment
;
369 if (allocator
== omp_null_allocator
)
371 struct gomp_thread
*thr
= gomp_thread ();
372 if (thr
->ts
.def_allocator
== omp_null_allocator
)
373 thr
->ts
.def_allocator
= gomp_def_allocator
;
374 allocator
= (omp_allocator_handle_t
) thr
->ts
.def_allocator
;
377 if (allocator
> omp_max_predefined_alloc
)
379 allocator_data
= (struct omp_allocator_data
*) allocator
;
380 if (new_alignment
< allocator_data
->alignment
)
381 new_alignment
= allocator_data
->alignment
;
382 #ifdef LIBGOMP_USE_MEMKIND
383 memkind
= allocator_data
->memkind
;
388 allocator_data
= NULL
;
389 if (new_alignment
< sizeof (void *))
390 new_alignment
= sizeof (void *);
391 #ifdef LIBGOMP_USE_MEMKIND
392 memkind
= GOMP_MEMKIND_NONE
;
393 if (allocator
== omp_high_bw_mem_alloc
)
394 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
395 else if (allocator
== omp_large_cap_mem_alloc
)
396 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
399 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
400 if (!memkind_data
->kinds
[memkind
])
401 memkind
= GOMP_MEMKIND_NONE
;
406 new_size
= sizeof (struct omp_mem_header
);
407 if (new_alignment
> sizeof (void *))
408 new_size
+= new_alignment
- sizeof (void *);
409 if (__builtin_add_overflow (size
, new_size
, &new_size
))
412 if (__builtin_expect (allocator_data
413 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
415 uintptr_t used_pool_size
;
416 if (new_size
> allocator_data
->pool_size
)
418 #ifdef HAVE_SYNC_BUILTINS
419 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
423 uintptr_t new_pool_size
;
424 if (__builtin_add_overflow (used_pool_size
, new_size
,
426 || new_pool_size
> allocator_data
->pool_size
)
428 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
429 &used_pool_size
, new_pool_size
,
430 true, MEMMODEL_RELAXED
,
436 gomp_mutex_lock (&allocator_data
->lock
);
437 if (__builtin_add_overflow (allocator_data
->used_pool_size
, new_size
,
439 || used_pool_size
> allocator_data
->pool_size
)
441 gomp_mutex_unlock (&allocator_data
->lock
);
444 allocator_data
->used_pool_size
= used_pool_size
;
445 gomp_mutex_unlock (&allocator_data
->lock
);
447 #ifdef LIBGOMP_USE_MEMKIND
450 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
451 void *kind
= *memkind_data
->kinds
[memkind
];
452 ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
456 ptr
= malloc (new_size
);
459 #ifdef HAVE_SYNC_BUILTINS
460 __atomic_add_fetch (&allocator_data
->used_pool_size
, -new_size
,
463 gomp_mutex_lock (&allocator_data
->lock
);
464 allocator_data
->used_pool_size
-= new_size
;
465 gomp_mutex_unlock (&allocator_data
->lock
);
472 #ifdef LIBGOMP_USE_MEMKIND
475 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
476 void *kind
= *memkind_data
->kinds
[memkind
];
477 ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
481 ptr
= malloc (new_size
);
486 if (new_alignment
> sizeof (void *))
487 ret
= (void *) (((uintptr_t) ptr
488 + sizeof (struct omp_mem_header
)
489 + new_alignment
- sizeof (void *))
490 & ~(new_alignment
- 1));
492 ret
= (char *) ptr
+ sizeof (struct omp_mem_header
);
493 ((struct omp_mem_header
*) ret
)[-1].ptr
= ptr
;
494 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
495 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
501 switch (allocator_data
->fallback
)
503 case omp_atv_default_mem_fb
:
504 if ((new_alignment
> sizeof (void *) && new_alignment
> alignment
)
505 #ifdef LIBGOMP_USE_MEMKIND
509 && allocator_data
->pool_size
< ~(uintptr_t) 0))
511 allocator
= omp_default_mem_alloc
;
514 /* Otherwise, we've already performed default mem allocation
515 and if that failed, it won't succeed again (unless it was
516 intermittent. Return NULL then, as that is the fallback. */
518 case omp_atv_null_fb
:
521 case omp_atv_abort_fb
:
522 gomp_fatal ("Out of memory allocating %lu bytes",
523 (unsigned long) size
);
524 case omp_atv_allocator_fb
:
525 allocator
= allocator_data
->fb_data
;
532 ialias (omp_aligned_alloc
)
535 omp_alloc (size_t size
, omp_allocator_handle_t allocator
)
537 return ialias_call (omp_aligned_alloc
) (1, size
, allocator
);
540 /* Like omp_aligned_alloc, but apply on top of that:
541 "For allocations that arise from this ... the null_fb value of the
542 fallback allocator trait behaves as if the abort_fb had been specified." */
545 GOMP_alloc (size_t alignment
, size_t size
, uintptr_t allocator
)
548 = ialias_call (omp_aligned_alloc
) (alignment
, size
,
549 (omp_allocator_handle_t
) allocator
);
550 if (__builtin_expect (ret
== NULL
, 0) && size
)
551 gomp_fatal ("Out of memory allocating %lu bytes",
552 (unsigned long) size
);
557 omp_free (void *ptr
, omp_allocator_handle_t allocator
)
559 struct omp_mem_header
*data
;
564 data
= &((struct omp_mem_header
*) ptr
)[-1];
565 if (data
->allocator
> omp_max_predefined_alloc
)
567 struct omp_allocator_data
*allocator_data
568 = (struct omp_allocator_data
*) (data
->allocator
);
569 if (allocator_data
->pool_size
< ~(uintptr_t) 0)
571 #ifdef HAVE_SYNC_BUILTINS
572 __atomic_add_fetch (&allocator_data
->used_pool_size
, -data
->size
,
575 gomp_mutex_lock (&allocator_data
->lock
);
576 allocator_data
->used_pool_size
-= data
->size
;
577 gomp_mutex_unlock (&allocator_data
->lock
);
580 #ifdef LIBGOMP_USE_MEMKIND
581 if (allocator_data
->memkind
)
583 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
584 void *kind
= *memkind_data
->kinds
[allocator_data
->memkind
];
585 memkind_data
->memkind_free (kind
, data
->ptr
);
590 #ifdef LIBGOMP_USE_MEMKIND
593 enum gomp_memkind_kind memkind
= GOMP_MEMKIND_NONE
;
594 if (data
->allocator
== omp_high_bw_mem_alloc
)
595 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
596 else if (data
->allocator
== omp_large_cap_mem_alloc
)
597 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
600 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
601 if (memkind_data
->kinds
[memkind
])
603 void *kind
= *memkind_data
->kinds
[memkind
];
604 memkind_data
->memkind_free (kind
, data
->ptr
);
616 GOMP_free (void *ptr
, uintptr_t allocator
)
618 return ialias_call (omp_free
) (ptr
, (omp_allocator_handle_t
) allocator
);
622 omp_aligned_calloc (size_t alignment
, size_t nmemb
, size_t size
,
623 omp_allocator_handle_t allocator
)
625 struct omp_allocator_data
*allocator_data
;
626 size_t new_size
, size_temp
, new_alignment
;
628 #ifdef LIBGOMP_USE_MEMKIND
629 enum gomp_memkind_kind memkind
;
632 if (__builtin_expect (size
== 0 || nmemb
== 0, 0))
636 new_alignment
= alignment
;
637 if (allocator
== omp_null_allocator
)
639 struct gomp_thread
*thr
= gomp_thread ();
640 if (thr
->ts
.def_allocator
== omp_null_allocator
)
641 thr
->ts
.def_allocator
= gomp_def_allocator
;
642 allocator
= (omp_allocator_handle_t
) thr
->ts
.def_allocator
;
645 if (allocator
> omp_max_predefined_alloc
)
647 allocator_data
= (struct omp_allocator_data
*) allocator
;
648 if (new_alignment
< allocator_data
->alignment
)
649 new_alignment
= allocator_data
->alignment
;
650 #ifdef LIBGOMP_USE_MEMKIND
651 memkind
= allocator_data
->memkind
;
656 allocator_data
= NULL
;
657 if (new_alignment
< sizeof (void *))
658 new_alignment
= sizeof (void *);
659 #ifdef LIBGOMP_USE_MEMKIND
660 memkind
= GOMP_MEMKIND_NONE
;
661 if (allocator
== omp_high_bw_mem_alloc
)
662 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
663 else if (allocator
== omp_large_cap_mem_alloc
)
664 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
667 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
668 if (!memkind_data
->kinds
[memkind
])
669 memkind
= GOMP_MEMKIND_NONE
;
674 new_size
= sizeof (struct omp_mem_header
);
675 if (new_alignment
> sizeof (void *))
676 new_size
+= new_alignment
- sizeof (void *);
677 if (__builtin_mul_overflow (size
, nmemb
, &size_temp
))
679 if (__builtin_add_overflow (size_temp
, new_size
, &new_size
))
682 if (__builtin_expect (allocator_data
683 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
685 uintptr_t used_pool_size
;
686 if (new_size
> allocator_data
->pool_size
)
688 #ifdef HAVE_SYNC_BUILTINS
689 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
693 uintptr_t new_pool_size
;
694 if (__builtin_add_overflow (used_pool_size
, new_size
,
696 || new_pool_size
> allocator_data
->pool_size
)
698 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
699 &used_pool_size
, new_pool_size
,
700 true, MEMMODEL_RELAXED
,
706 gomp_mutex_lock (&allocator_data
->lock
);
707 if (__builtin_add_overflow (allocator_data
->used_pool_size
, new_size
,
709 || used_pool_size
> allocator_data
->pool_size
)
711 gomp_mutex_unlock (&allocator_data
->lock
);
714 allocator_data
->used_pool_size
= used_pool_size
;
715 gomp_mutex_unlock (&allocator_data
->lock
);
717 #ifdef LIBGOMP_USE_MEMKIND
720 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
721 void *kind
= *memkind_data
->kinds
[memkind
];
722 ptr
= memkind_data
->memkind_calloc (kind
, 1, new_size
);
726 ptr
= calloc (1, new_size
);
729 #ifdef HAVE_SYNC_BUILTINS
730 __atomic_add_fetch (&allocator_data
->used_pool_size
, -new_size
,
733 gomp_mutex_lock (&allocator_data
->lock
);
734 allocator_data
->used_pool_size
-= new_size
;
735 gomp_mutex_unlock (&allocator_data
->lock
);
742 #ifdef LIBGOMP_USE_MEMKIND
745 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
746 void *kind
= *memkind_data
->kinds
[memkind
];
747 ptr
= memkind_data
->memkind_calloc (kind
, 1, new_size
);
751 ptr
= calloc (1, new_size
);
756 if (new_alignment
> sizeof (void *))
757 ret
= (void *) (((uintptr_t) ptr
758 + sizeof (struct omp_mem_header
)
759 + new_alignment
- sizeof (void *))
760 & ~(new_alignment
- 1));
762 ret
= (char *) ptr
+ sizeof (struct omp_mem_header
);
763 ((struct omp_mem_header
*) ret
)[-1].ptr
= ptr
;
764 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
765 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
771 switch (allocator_data
->fallback
)
773 case omp_atv_default_mem_fb
:
774 if ((new_alignment
> sizeof (void *) && new_alignment
> alignment
)
775 #ifdef LIBGOMP_USE_MEMKIND
779 && allocator_data
->pool_size
< ~(uintptr_t) 0))
781 allocator
= omp_default_mem_alloc
;
784 /* Otherwise, we've already performed default mem allocation
785 and if that failed, it won't succeed again (unless it was
786 intermittent. Return NULL then, as that is the fallback. */
788 case omp_atv_null_fb
:
791 case omp_atv_abort_fb
:
792 gomp_fatal ("Out of memory allocating %lu bytes",
793 (unsigned long) (size
* nmemb
));
794 case omp_atv_allocator_fb
:
795 allocator
= allocator_data
->fb_data
;
802 ialias (omp_aligned_calloc
)
805 omp_calloc (size_t nmemb
, size_t size
, omp_allocator_handle_t allocator
)
807 return ialias_call (omp_aligned_calloc
) (1, nmemb
, size
, allocator
);
811 omp_realloc (void *ptr
, size_t size
, omp_allocator_handle_t allocator
,
812 omp_allocator_handle_t free_allocator
)
814 struct omp_allocator_data
*allocator_data
, *free_allocator_data
;
815 size_t new_size
, old_size
, new_alignment
, old_alignment
;
817 struct omp_mem_header
*data
;
818 #ifdef LIBGOMP_USE_MEMKIND
819 enum gomp_memkind_kind memkind
, free_memkind
;
822 if (__builtin_expect (ptr
== NULL
, 0))
823 return ialias_call (omp_aligned_alloc
) (1, size
, allocator
);
825 if (__builtin_expect (size
== 0, 0))
827 ialias_call (omp_free
) (ptr
, free_allocator
);
831 data
= &((struct omp_mem_header
*) ptr
)[-1];
832 free_allocator
= data
->allocator
;
835 new_alignment
= sizeof (void *);
836 if (allocator
== omp_null_allocator
)
837 allocator
= free_allocator
;
839 if (allocator
> omp_max_predefined_alloc
)
841 allocator_data
= (struct omp_allocator_data
*) allocator
;
842 if (new_alignment
< allocator_data
->alignment
)
843 new_alignment
= allocator_data
->alignment
;
844 #ifdef LIBGOMP_USE_MEMKIND
845 memkind
= allocator_data
->memkind
;
850 allocator_data
= NULL
;
851 #ifdef LIBGOMP_USE_MEMKIND
852 memkind
= GOMP_MEMKIND_NONE
;
853 if (allocator
== omp_high_bw_mem_alloc
)
854 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
855 else if (allocator
== omp_large_cap_mem_alloc
)
856 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
859 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
860 if (!memkind_data
->kinds
[memkind
])
861 memkind
= GOMP_MEMKIND_NONE
;
865 if (free_allocator
> omp_max_predefined_alloc
)
867 free_allocator_data
= (struct omp_allocator_data
*) free_allocator
;
868 #ifdef LIBGOMP_USE_MEMKIND
869 free_memkind
= free_allocator_data
->memkind
;
874 free_allocator_data
= NULL
;
875 #ifdef LIBGOMP_USE_MEMKIND
876 free_memkind
= GOMP_MEMKIND_NONE
;
877 if (free_allocator
== omp_high_bw_mem_alloc
)
878 free_memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
879 else if (free_allocator
== omp_large_cap_mem_alloc
)
880 free_memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
883 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
884 if (!memkind_data
->kinds
[free_memkind
])
885 free_memkind
= GOMP_MEMKIND_NONE
;
889 old_alignment
= (uintptr_t) ptr
- (uintptr_t) (data
->ptr
);
891 new_size
= sizeof (struct omp_mem_header
);
892 if (new_alignment
> sizeof (void *))
893 new_size
+= new_alignment
- sizeof (void *);
894 if (__builtin_add_overflow (size
, new_size
, &new_size
))
896 old_size
= data
->size
;
898 if (__builtin_expect (allocator_data
899 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
901 uintptr_t used_pool_size
;
902 size_t prev_size
= 0;
903 /* Check if we can use realloc. Don't use it if extra alignment
904 was used previously or newly, because realloc might return a pointer
905 with different alignment and then we'd need to memmove the data
907 if (free_allocator_data
908 && free_allocator_data
== allocator_data
909 && new_alignment
== sizeof (void *)
910 && old_alignment
== sizeof (struct omp_mem_header
))
911 prev_size
= old_size
;
912 if (new_size
> prev_size
913 && new_size
- prev_size
> allocator_data
->pool_size
)
915 #ifdef HAVE_SYNC_BUILTINS
916 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
920 uintptr_t new_pool_size
;
921 if (new_size
> prev_size
)
923 if (__builtin_add_overflow (used_pool_size
, new_size
- prev_size
,
925 || new_pool_size
> allocator_data
->pool_size
)
929 new_pool_size
= used_pool_size
+ new_size
- prev_size
;
930 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
931 &used_pool_size
, new_pool_size
,
932 true, MEMMODEL_RELAXED
,
938 gomp_mutex_lock (&allocator_data
->lock
);
939 if (new_size
> prev_size
)
941 if (__builtin_add_overflow (allocator_data
->used_pool_size
,
942 new_size
- prev_size
,
944 || used_pool_size
> allocator_data
->pool_size
)
946 gomp_mutex_unlock (&allocator_data
->lock
);
951 used_pool_size
= (allocator_data
->used_pool_size
952 + new_size
- prev_size
);
953 allocator_data
->used_pool_size
= used_pool_size
;
954 gomp_mutex_unlock (&allocator_data
->lock
);
956 #ifdef LIBGOMP_USE_MEMKIND
959 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
960 void *kind
= *memkind_data
->kinds
[memkind
];
962 new_ptr
= memkind_data
->memkind_realloc (kind
, data
->ptr
,
965 new_ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
970 new_ptr
= realloc (data
->ptr
, new_size
);
972 new_ptr
= malloc (new_size
);
975 #ifdef HAVE_SYNC_BUILTINS
976 __atomic_add_fetch (&allocator_data
->used_pool_size
,
977 prev_size
- new_size
,
980 gomp_mutex_lock (&allocator_data
->lock
);
981 allocator_data
->used_pool_size
-= new_size
- prev_size
;
982 gomp_mutex_unlock (&allocator_data
->lock
);
988 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
989 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
990 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
991 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
995 else if (new_alignment
== sizeof (void *)
996 && old_alignment
== sizeof (struct omp_mem_header
)
997 #ifdef LIBGOMP_USE_MEMKIND
998 && memkind
== free_memkind
1000 && (free_allocator_data
== NULL
1001 || free_allocator_data
->pool_size
== ~(uintptr_t) 0))
1003 #ifdef LIBGOMP_USE_MEMKIND
1006 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1007 void *kind
= *memkind_data
->kinds
[memkind
];
1008 new_ptr
= memkind_data
->memkind_realloc (kind
, data
->ptr
,
1013 new_ptr
= realloc (data
->ptr
, new_size
);
1014 if (new_ptr
== NULL
)
1016 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
1017 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
1018 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1019 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1024 #ifdef LIBGOMP_USE_MEMKIND
1027 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1028 void *kind
= *memkind_data
->kinds
[memkind
];
1029 new_ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
1033 new_ptr
= malloc (new_size
);
1034 if (new_ptr
== NULL
)
1038 if (new_alignment
> sizeof (void *))
1039 ret
= (void *) (((uintptr_t) new_ptr
1040 + sizeof (struct omp_mem_header
)
1041 + new_alignment
- sizeof (void *))
1042 & ~(new_alignment
- 1));
1044 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
1045 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
1046 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1047 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1048 if (old_size
- old_alignment
< size
)
1049 size
= old_size
- old_alignment
;
1050 memcpy (ret
, ptr
, size
);
1051 if (__builtin_expect (free_allocator_data
1052 && free_allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
1054 #ifdef HAVE_SYNC_BUILTINS
1055 __atomic_add_fetch (&free_allocator_data
->used_pool_size
, -data
->size
,
1058 gomp_mutex_lock (&free_allocator_data
->lock
);
1059 free_allocator_data
->used_pool_size
-= data
->size
;
1060 gomp_mutex_unlock (&free_allocator_data
->lock
);
1063 #ifdef LIBGOMP_USE_MEMKIND
1066 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1067 void *kind
= *memkind_data
->kinds
[free_memkind
];
1068 memkind_data
->memkind_free (kind
, data
->ptr
);
1078 switch (allocator_data
->fallback
)
1080 case omp_atv_default_mem_fb
:
1081 if (new_alignment
> sizeof (void *)
1082 #ifdef LIBGOMP_USE_MEMKIND
1086 && allocator_data
->pool_size
< ~(uintptr_t) 0))
1088 allocator
= omp_default_mem_alloc
;
1091 /* Otherwise, we've already performed default mem allocation
1092 and if that failed, it won't succeed again (unless it was
1093 intermittent. Return NULL then, as that is the fallback. */
1095 case omp_atv_null_fb
:
1098 case omp_atv_abort_fb
:
1099 gomp_fatal ("Out of memory allocating %lu bytes",
1100 (unsigned long) size
);
1101 case omp_atv_allocator_fb
:
1102 allocator
= allocator_data
->fb_data
;