/* Copyright (C) 2020-2021 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains wrappers for the system allocation routines.  Most
   places in the OpenMP API do not make any provision for failure, so in
   general we cannot allow memory allocation to fail.  */
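
/* A rough usage sketch (illustrative only, not part of this file): user
   code builds an allocator from traits, allocates from it, and tears it
   down again.  All identifiers below are the public OpenMP 5.x API
   declared in omp.h; the pool size of 65536 is an arbitrary example:

     omp_alloctrait_t traits[2]
       = { { omp_atk_pool_size, 65536 },
           { omp_atk_fallback, omp_atv_null_fb } };
     omp_allocator_handle_t a
       = omp_init_allocator (omp_default_mem_space, 2, traits);
     void *p = omp_alloc (1024, a);  /+ NULL once the pool is exhausted +/
     omp_free (p, a);
     omp_destroy_allocator (a);  */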

#define _GNU_SOURCE
#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

#define omp_max_predefined_alloc omp_thread_mem_alloc
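
/* Predefined allocator handles are small integral enum values up to
   omp_thread_mem_alloc; handles returned by omp_init_allocator are
   pointers to struct omp_allocator_data cast to the handle type.
   Comparing a handle against omp_max_predefined_alloc therefore
   distinguishes the two, assuming (as this file does throughout) that a
   heap pointer cast to the handle type is numerically larger than the
   predefined enum values.  */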

struct omp_allocator_data
{
  omp_memspace_handle_t memspace;
  omp_uintptr_t alignment;
  omp_uintptr_t pool_size;
  omp_uintptr_t used_pool_size;
  omp_allocator_handle_t fb_data;
  unsigned int sync_hint : 8;
  unsigned int access : 8;
  unsigned int fallback : 8;
  unsigned int pinned : 1;
  unsigned int partition : 7;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_t lock;
#endif
};
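
/* Each allocation is preceded by this bookkeeping header, placed
   immediately below the pointer handed back to the user; omp_free and
   omp_realloc read it via a [-1] index to recover the raw malloc'ed
   block, its total size and the owning allocator.  */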
struct omp_mem_header
{
  void *ptr;
  size_t size;
  omp_allocator_handle_t allocator;
  void *pad;
};
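
/* Resulting layout of a successful allocation, sketched for the general
   case (ptr is what malloc returned, ret what the user sees; the pad
   region is empty when no extra alignment is requested):

     ptr                             ret
     |                               |
     v                               v
     +---------------+---------------+------------------------+
     | alignment pad | omp_mem_header| user data (size bytes) |
     +---------------+---------------+------------------------+
     |<--------------------- new_size ----------------------->|

   The header always ends exactly at ret, so the [-1] accesses in the
   functions below find it.  */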

omp_allocator_handle_t
omp_init_allocator (omp_memspace_handle_t memspace, int ntraits,
                    const omp_alloctrait_t traits[])
{
  struct omp_allocator_data data
    = { memspace, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended, omp_atv_all,
        omp_atv_default_mem_fb, omp_atv_false, omp_atv_environment };
  struct omp_allocator_data *ret;
  int i;

  if (memspace > omp_low_lat_mem_space)
    return omp_null_allocator;
  for (i = 0; i < ntraits; i++)
    switch (traits[i].key)
      {
      case omp_atk_sync_hint:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.sync_hint = omp_atv_contended;
            break;
          case omp_atv_contended:
          case omp_atv_uncontended:
          case omp_atv_serialized:
          case omp_atv_private:
            data.sync_hint = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_alignment:
        if (traits[i].value == omp_atv_default)
          {
            data.alignment = 1;
            break;
          }
        if ((traits[i].value & (traits[i].value - 1)) != 0
            || !traits[i].value)
          return omp_null_allocator;
        data.alignment = traits[i].value;
        break;
      case omp_atk_access:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.access = omp_atv_all;
            break;
          case omp_atv_all:
          case omp_atv_cgroup:
          case omp_atv_pteam:
          case omp_atv_thread:
            data.access = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_pool_size:
        if (traits[i].value == omp_atv_default)
          data.pool_size = ~(uintptr_t) 0;
        else
          data.pool_size = traits[i].value;
        break;
      case omp_atk_fallback:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.fallback = omp_atv_default_mem_fb;
            break;
          case omp_atv_default_mem_fb:
          case omp_atv_null_fb:
          case omp_atv_abort_fb:
          case omp_atv_allocator_fb:
            data.fallback = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_fb_data:
        data.fb_data = traits[i].value;
        break;
      case omp_atk_pinned:
        switch (traits[i].value)
          {
          case omp_atv_default:
          case omp_atv_false:
            data.pinned = omp_atv_false;
            break;
          case omp_atv_true:
            data.pinned = omp_atv_true;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_partition:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.partition = omp_atv_environment;
            break;
          case omp_atv_environment:
          case omp_atv_nearest:
          case omp_atv_blocked:
          case omp_atv_interleaved:
            data.partition = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      default:
        return omp_null_allocator;
      }

  if (data.alignment < sizeof (void *))
    data.alignment = sizeof (void *);

  /* No support for these so far (for hbw will use memkind).  */
  if (data.pinned || data.memspace == omp_high_bw_mem_space)
    return omp_null_allocator;

  ret = gomp_malloc (sizeof (struct omp_allocator_data));
  *ret = data;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&ret->lock);
#endif
  return (omp_allocator_handle_t) ret;
}

void
omp_destroy_allocator (omp_allocator_handle_t allocator)
{
  if (allocator != omp_null_allocator)
    {
#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_destroy (&((struct omp_allocator_data *) allocator)->lock);
#endif
      free ((void *) allocator);
    }
}
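
/* ialias emits the gomp_ialias_* internal alias for each public entry
   point, so that calls made from within libgomp itself (via ialias_call
   below) do not have to go through the PLT.  */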
ialias (omp_init_allocator)
ialias (omp_destroy_allocator)

void *
omp_aligned_alloc (size_t alignment, size_t size,
                   omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, new_alignment;
  void *ptr, *ret;

  if (__builtin_expect (size == 0, 0))
    return NULL;

retry:
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
        thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
    }
  else
    {
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
        new_alignment = sizeof (void *);
    }

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (__builtin_add_overflow (used_pool_size, new_size,
                                      &new_pool_size)
              || new_pool_size > allocator_data->pool_size)
            goto fail;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
                                  &used_pool_size)
          || used_pool_size > allocator_data->pool_size)
        {
          gomp_mutex_unlock (&allocator_data->lock);
          goto fail;
        }
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      ptr = malloc (new_size);
      if (ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
    }
  else
    {
      ptr = malloc (new_size);
      if (ptr == NULL)
        goto fail;
    }

  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;
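
/* The allocation failed (or would have overflowed the pool); what
   happens next is governed by the allocator's fallback trait: retry
   with the default allocator, return NULL, abort, or retry with a
   user-designated fallback allocator.  */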
fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if ((new_alignment > sizeof (void *) && new_alignment > alignment)
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) size);
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}

ialias (omp_aligned_alloc)

void *
omp_alloc (size_t size, omp_allocator_handle_t allocator)
{
  return ialias_call (omp_aligned_alloc) (1, size, allocator);
}

/* Like omp_aligned_alloc, but apply on top of that:
   "For allocations that arise from this ... the null_fb value of the
   fallback allocator trait behaves as if the abort_fb had been specified."  */

void *
GOMP_alloc (size_t alignment, size_t size, uintptr_t allocator)
{
  void *ret
    = ialias_call (omp_aligned_alloc) (alignment, size,
                                       (omp_allocator_handle_t) allocator);
  if (__builtin_expect (ret == NULL, 0) && size)
    gomp_fatal ("Out of memory allocating %lu bytes",
                (unsigned long) size);
  return ret;
}
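
/* Note that the ALLOCATOR argument is effectively ignored here: the
   allocator that actually performed the allocation was recorded in the
   omp_mem_header and is the one whose pool accounting must be undone.  */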
void
omp_free (void *ptr, omp_allocator_handle_t allocator)
{
  struct omp_mem_header *data;

  if (ptr == NULL)
    return;
  (void) allocator;
  data = &((struct omp_mem_header *) ptr)[-1];
  if (data->allocator > omp_max_predefined_alloc)
    {
      struct omp_allocator_data *allocator_data
        = (struct omp_allocator_data *) (data->allocator);
      if (allocator_data->pool_size < ~(uintptr_t) 0)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -data->size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= data->size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
        }
    }
  free (data->ptr);
}

ialias (omp_free)

void
GOMP_free (void *ptr, uintptr_t allocator)
{
  return ialias_call (omp_free) (ptr, (omp_allocator_handle_t) allocator);
}
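
/* omp_aligned_calloc mirrors omp_aligned_alloc above; the differences
   are the overflow-checked nmemb * size multiplication and the use of
   calloc so the user-visible bytes are zeroed.  */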
void *
omp_aligned_calloc (size_t alignment, size_t nmemb, size_t size,
                    omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, size_temp, new_alignment;
  void *ptr, *ret;

  if (__builtin_expect (size == 0 || nmemb == 0, 0))
    return NULL;

retry:
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
        thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
    }
  else
    {
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
        new_alignment = sizeof (void *);
    }

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_mul_overflow (size, nmemb, &size_temp))
    goto fail;
  if (__builtin_add_overflow (size_temp, new_size, &new_size))
    goto fail;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (__builtin_add_overflow (used_pool_size, new_size,
                                      &new_pool_size)
              || new_pool_size > allocator_data->pool_size)
            goto fail;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
                                  &used_pool_size)
          || used_pool_size > allocator_data->pool_size)
        {
          gomp_mutex_unlock (&allocator_data->lock);
          goto fail;
        }
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      ptr = calloc (1, new_size);
      if (ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
    }
  else
    {
      ptr = calloc (1, new_size);
      if (ptr == NULL)
        goto fail;
    }

  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;
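
/* The fallback handling below mirrors omp_aligned_alloc.  */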
fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if ((new_alignment > sizeof (void *) && new_alignment > alignment)
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) (size * nmemb));
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}

ialias (omp_aligned_calloc)

void *
omp_calloc (size_t nmemb, size_t size, omp_allocator_handle_t allocator)
{
  return ialias_call (omp_aligned_calloc) (1, nmemb, size, allocator);
}
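
/* omp_realloc: a NULL PTR degenerates to an allocation and a zero SIZE
   to omp_free.  Otherwise the allocator recorded in the header
   overrides the FREE_ALLOCATOR argument, and the underlying realloc is
   used only when neither the old nor the new block needs extra
   alignment; in all other cases a fresh block is allocated and the
   data copied over with memcpy.  */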
void *
omp_realloc (void *ptr, size_t size, omp_allocator_handle_t allocator,
             omp_allocator_handle_t free_allocator)
{
  struct omp_allocator_data *allocator_data, *free_allocator_data;
  size_t new_size, old_size, new_alignment, old_alignment;
  void *new_ptr, *ret;
  struct omp_mem_header *data;

  if (__builtin_expect (ptr == NULL, 0))
    return ialias_call (omp_aligned_alloc) (1, size, allocator);

  if (__builtin_expect (size == 0, 0))
    {
      ialias_call (omp_free) (ptr, free_allocator);
      return NULL;
    }

  data = &((struct omp_mem_header *) ptr)[-1];
  free_allocator = data->allocator;

retry:
  new_alignment = sizeof (void *);
  if (allocator == omp_null_allocator)
    allocator = free_allocator;

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
    }
  else
    allocator_data = NULL;
  if (free_allocator > omp_max_predefined_alloc)
    free_allocator_data = (struct omp_allocator_data *) free_allocator;
  else
    free_allocator_data = NULL;
  old_alignment = (uintptr_t) ptr - (uintptr_t) (data->ptr);

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;
  old_size = data->size;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      size_t prev_size = 0;
      /* Check if we can use realloc.  Don't use it if extra alignment
         was used previously or newly, because realloc might return a pointer
         with different alignment and then we'd need to memmove the data
         again.  */
      if (free_allocator_data
          && free_allocator_data == allocator_data
          && new_alignment == sizeof (void *)
          && old_alignment == sizeof (struct omp_mem_header))
        prev_size = old_size;
      if (new_size > prev_size
          && new_size - prev_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (new_size > prev_size)
            {
              if (__builtin_add_overflow (used_pool_size,
                                          new_size - prev_size,
                                          &new_pool_size)
                  || new_pool_size > allocator_data->pool_size)
                goto fail;
            }
          else
            new_pool_size = used_pool_size + new_size - prev_size;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (new_size > prev_size)
        {
          if (__builtin_add_overflow (allocator_data->used_pool_size,
                                      new_size - prev_size,
                                      &used_pool_size)
              || used_pool_size > allocator_data->pool_size)
            {
              gomp_mutex_unlock (&allocator_data->lock);
              goto fail;
            }
        }
      else
        used_pool_size = (allocator_data->used_pool_size
                          + new_size - prev_size);
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      if (prev_size)
        new_ptr = realloc (data->ptr, new_size);
      else
        new_ptr = malloc (new_size);
      if (new_ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size,
                              prev_size - new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size - prev_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
      else if (prev_size)
        {
          ret = (char *) new_ptr + sizeof (struct omp_mem_header);
          ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
          ((struct omp_mem_header *) ret)[-1].size = new_size;
          ((struct omp_mem_header *) ret)[-1].allocator = allocator;
          return ret;
        }
    }
  else if (new_alignment == sizeof (void *)
           && old_alignment == sizeof (struct omp_mem_header)
           && (free_allocator_data == NULL
               || free_allocator_data->pool_size == ~(uintptr_t) 0))
    {
      new_ptr = realloc (data->ptr, new_size);
      if (new_ptr == NULL)
        goto fail;
      ret = (char *) new_ptr + sizeof (struct omp_mem_header);
      ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
      ((struct omp_mem_header *) ret)[-1].size = new_size;
      ((struct omp_mem_header *) ret)[-1].allocator = allocator;
      return ret;
    }
  else
    {
      new_ptr = malloc (new_size);
      if (new_ptr == NULL)
        goto fail;
    }

  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) new_ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) new_ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  if (old_size - old_alignment < size)
    size = old_size - old_alignment;
  memcpy (ret, ptr, size);
  if (__builtin_expect (free_allocator_data
                        && free_allocator_data->pool_size < ~(uintptr_t) 0,
                        0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __atomic_add_fetch (&free_allocator_data->used_pool_size, -data->size,
                          MEMMODEL_RELAXED);
#else
      gomp_mutex_lock (&free_allocator_data->lock);
      free_allocator_data->used_pool_size -= data->size;
      gomp_mutex_unlock (&free_allocator_data->lock);
#endif
    }
  free (data->ptr);
  return ret;
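
/* The fallback handling below mirrors omp_aligned_alloc.  */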
fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if (new_alignment > sizeof (void *)
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) size);
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}