Make flag_thread_jumps a gate of pass_jump_after_combine
[official-gcc.git] / libgomp / oacc-mem.c
blob2f271009fb87689f40a5073a17d31fcbb0297313
1 /* OpenACC Runtime initialization routines
3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
5 Contributed by Mentor Embedded.
7 This file is part of the GNU Offloading and Multi Processing Library
8 (libgomp).
10 Libgomp is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3, or (at your option)
13 any later version.
15 Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 Under Section 7 of GPL version 3, you are granted additional
21 permissions described in the GCC Runtime Library Exception, version
22 3.1, as published by the Free Software Foundation.
24 You should have received a copy of the GNU General Public License and
25 a copy of the GCC Runtime Library Exception along with this program;
26 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
27 <http://www.gnu.org/licenses/>. */
29 #include "openacc.h"
30 #include "libgomp.h"
31 #include "gomp-constants.h"
32 #include "oacc-int.h"
33 #include <string.h>
34 #include <assert.h>
36 /* Return block containing [H->S), or NULL if not contained. The device lock
37 for DEV must be locked on entry, and remains locked on exit. */
39 static splay_tree_key
40 lookup_host (struct gomp_device_descr *dev, void *h, size_t s)
42 struct splay_tree_key_s node;
43 splay_tree_key key;
45 node.host_start = (uintptr_t) h;
46 node.host_end = (uintptr_t) h + s;
48 key = splay_tree_lookup (&dev->mem_map, &node);
50 return key;
53 /* Return block containing [D->S), or NULL if not contained.
54 The list isn't ordered by device address, so we have to iterate
55 over the whole array. This is not expected to be a common
56 operation. The device lock associated with TGT must be locked on entry, and
57 remains locked on exit. */
59 static splay_tree_key
60 lookup_dev (struct target_mem_desc *tgt, void *d, size_t s)
62 int i;
63 struct target_mem_desc *t;
65 if (!tgt)
66 return NULL;
68 for (t = tgt; t != NULL; t = t->prev)
70 if (t->tgt_start <= (uintptr_t) d && t->tgt_end >= (uintptr_t) d + s)
71 break;
74 if (!t)
75 return NULL;
77 for (i = 0; i < t->list_count; i++)
79 void * offset;
81 splay_tree_key k = &t->array[i].key;
82 offset = d - t->tgt_start + k->tgt_offset;
84 if (k->host_start + offset <= (void *) k->host_end)
85 return k;
88 return NULL;
91 /* OpenACC is silent on how memory exhaustion is indicated. We return
92 NULL. */
94 void *
95 acc_malloc (size_t s)
97 if (!s)
98 return NULL;
100 goacc_lazy_initialize ();
102 struct goacc_thread *thr = goacc_thread ();
104 assert (thr->dev);
106 if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
107 return malloc (s);
109 acc_prof_info prof_info;
110 acc_api_info api_info;
111 bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
113 void *res = thr->dev->alloc_func (thr->dev->target_id, s);
115 if (profiling_p)
117 thr->prof_info = NULL;
118 thr->api_info = NULL;
121 return res;
124 /* OpenACC 2.0a (3.2.16) doesn't specify what to do in the event
125 the device address is mapped. We choose to check if it mapped,
126 and if it is, to unmap it. */
127 void
128 acc_free (void *d)
130 splay_tree_key k;
132 if (!d)
133 return;
135 struct goacc_thread *thr = goacc_thread ();
137 assert (thr && thr->dev);
139 struct gomp_device_descr *acc_dev = thr->dev;
141 if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
142 return free (d);
144 acc_prof_info prof_info;
145 acc_api_info api_info;
146 bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
148 gomp_mutex_lock (&acc_dev->lock);
150 /* We don't have to call lazy open here, as the ptr value must have
151 been returned by acc_malloc. It's not permitted to pass NULL in
152 (unless you got that null from acc_malloc). */
153 if ((k = lookup_dev (acc_dev->openacc.data_environ, d, 1)))
155 void *offset;
157 offset = d - k->tgt->tgt_start + k->tgt_offset;
159 gomp_mutex_unlock (&acc_dev->lock);
161 acc_unmap_data ((void *)(k->host_start + offset));
163 else
164 gomp_mutex_unlock (&acc_dev->lock);
166 if (!acc_dev->free_func (acc_dev->target_id, d))
167 gomp_fatal ("error in freeing device memory in %s", __FUNCTION__);
169 if (profiling_p)
171 thr->prof_info = NULL;
172 thr->api_info = NULL;
176 static void
177 memcpy_tofrom_device (bool from, void *d, void *h, size_t s, int async,
178 const char *libfnname)
180 /* No need to call lazy open here, as the device pointer must have
181 been obtained from a routine that did that. */
182 struct goacc_thread *thr = goacc_thread ();
184 assert (thr && thr->dev);
186 if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
188 if (from)
189 memmove (h, d, s);
190 else
191 memmove (d, h, s);
192 return;
195 acc_prof_info prof_info;
196 acc_api_info api_info;
197 bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
198 if (profiling_p)
200 prof_info.async = async;
201 prof_info.async_queue = prof_info.async;
204 goacc_aq aq = get_goacc_asyncqueue (async);
205 if (from)
206 gomp_copy_dev2host (thr->dev, aq, h, d, s);
207 else
208 gomp_copy_host2dev (thr->dev, aq, d, h, s, /* TODO: cbuf? */ NULL);
210 if (profiling_p)
212 thr->prof_info = NULL;
213 thr->api_info = NULL;
217 void
218 acc_memcpy_to_device (void *d, void *h, size_t s)
220 memcpy_tofrom_device (false, d, h, s, acc_async_sync, __FUNCTION__);
223 void
224 acc_memcpy_to_device_async (void *d, void *h, size_t s, int async)
226 memcpy_tofrom_device (false, d, h, s, async, __FUNCTION__);
229 void
230 acc_memcpy_from_device (void *h, void *d, size_t s)
232 memcpy_tofrom_device (true, d, h, s, acc_async_sync, __FUNCTION__);
235 void
236 acc_memcpy_from_device_async (void *h, void *d, size_t s, int async)
238 memcpy_tofrom_device (true, d, h, s, async, __FUNCTION__);
241 /* Return the device pointer that corresponds to host data H. Or NULL
242 if no mapping. */
244 void *
245 acc_deviceptr (void *h)
247 splay_tree_key n;
248 void *d;
249 void *offset;
251 goacc_lazy_initialize ();
253 struct goacc_thread *thr = goacc_thread ();
254 struct gomp_device_descr *dev = thr->dev;
256 if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
257 return h;
259 /* In the following, no OpenACC Profiling Interface events can possibly be
260 generated. */
262 gomp_mutex_lock (&dev->lock);
264 n = lookup_host (dev, h, 1);
266 if (!n)
268 gomp_mutex_unlock (&dev->lock);
269 return NULL;
272 offset = h - n->host_start;
274 d = n->tgt->tgt_start + n->tgt_offset + offset;
276 gomp_mutex_unlock (&dev->lock);
278 return d;
281 /* Return the host pointer that corresponds to device data D. Or NULL
282 if no mapping. */
284 void *
285 acc_hostptr (void *d)
287 splay_tree_key n;
288 void *h;
289 void *offset;
291 goacc_lazy_initialize ();
293 struct goacc_thread *thr = goacc_thread ();
294 struct gomp_device_descr *acc_dev = thr->dev;
296 if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
297 return d;
299 /* In the following, no OpenACC Profiling Interface events can possibly be
300 generated. */
302 gomp_mutex_lock (&acc_dev->lock);
304 n = lookup_dev (acc_dev->openacc.data_environ, d, 1);
306 if (!n)
308 gomp_mutex_unlock (&acc_dev->lock);
309 return NULL;
312 offset = d - n->tgt->tgt_start + n->tgt_offset;
314 h = n->host_start + offset;
316 gomp_mutex_unlock (&acc_dev->lock);
318 return h;
321 /* Return 1 if host data [H,+S] is present on the device. */
324 acc_is_present (void *h, size_t s)
326 splay_tree_key n;
328 if (!s || !h)
329 return 0;
331 goacc_lazy_initialize ();
333 struct goacc_thread *thr = goacc_thread ();
334 struct gomp_device_descr *acc_dev = thr->dev;
336 if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
337 return h != NULL;
339 /* In the following, no OpenACC Profiling Interface events can possibly be
340 generated. */
342 gomp_mutex_lock (&acc_dev->lock);
344 n = lookup_host (acc_dev, h, s);
346 if (n && ((uintptr_t)h < n->host_start
347 || (uintptr_t)h + s > n->host_end
348 || s > n->host_end - n->host_start))
349 n = NULL;
351 gomp_mutex_unlock (&acc_dev->lock);
353 return n != NULL;
356 /* Create a mapping for host [H,+S] -> device [D,+S] */
358 void
359 acc_map_data (void *h, void *d, size_t s)
361 struct target_mem_desc *tgt = NULL;
362 size_t mapnum = 1;
363 void *hostaddrs = h;
364 void *devaddrs = d;
365 size_t sizes = s;
366 unsigned short kinds = GOMP_MAP_ALLOC;
368 goacc_lazy_initialize ();
370 struct goacc_thread *thr = goacc_thread ();
371 struct gomp_device_descr *acc_dev = thr->dev;
373 if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
375 if (d != h)
376 gomp_fatal ("cannot map data on shared-memory system");
378 else
380 struct goacc_thread *thr = goacc_thread ();
382 if (!d || !h || !s)
383 gomp_fatal ("[%p,+%d]->[%p,+%d] is a bad map",
384 (void *)h, (int)s, (void *)d, (int)s);
386 acc_prof_info prof_info;
387 acc_api_info api_info;
388 bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
390 gomp_mutex_lock (&acc_dev->lock);
392 if (lookup_host (acc_dev, h, s))
394 gomp_mutex_unlock (&acc_dev->lock);
395 gomp_fatal ("host address [%p, +%d] is already mapped", (void *)h,
396 (int)s);
399 if (lookup_dev (thr->dev->openacc.data_environ, d, s))
401 gomp_mutex_unlock (&acc_dev->lock);
402 gomp_fatal ("device address [%p, +%d] is already mapped", (void *)d,
403 (int)s);
406 gomp_mutex_unlock (&acc_dev->lock);
408 tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, &devaddrs, &sizes,
409 &kinds, true, GOMP_MAP_VARS_OPENACC);
410 tgt->list[0].key->refcount = REFCOUNT_INFINITY;
412 if (profiling_p)
414 thr->prof_info = NULL;
415 thr->api_info = NULL;
419 gomp_mutex_lock (&acc_dev->lock);
420 tgt->prev = acc_dev->openacc.data_environ;
421 acc_dev->openacc.data_environ = tgt;
422 gomp_mutex_unlock (&acc_dev->lock);
425 void
426 acc_unmap_data (void *h)
428 struct goacc_thread *thr = goacc_thread ();
429 struct gomp_device_descr *acc_dev = thr->dev;
431 /* No need to call lazy open, as the address must have been mapped. */
433 /* This is a no-op on shared-memory targets. */
434 if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
435 return;
437 acc_prof_info prof_info;
438 acc_api_info api_info;
439 bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
441 size_t host_size;
443 gomp_mutex_lock (&acc_dev->lock);
445 splay_tree_key n = lookup_host (acc_dev, h, 1);
446 struct target_mem_desc *t;
448 if (!n)
450 gomp_mutex_unlock (&acc_dev->lock);
451 gomp_fatal ("%p is not a mapped block", (void *)h);
454 host_size = n->host_end - n->host_start;
456 if (n->host_start != (uintptr_t) h)
458 gomp_mutex_unlock (&acc_dev->lock);
459 gomp_fatal ("[%p,%d] surrounds %p",
460 (void *) n->host_start, (int) host_size, (void *) h);
463 /* Mark for removal. */
464 n->refcount = 1;
466 t = n->tgt;
468 if (t->refcount == 2)
470 struct target_mem_desc *tp;
472 /* This is the last reference, so pull the descriptor off the
473 chain. This avoids gomp_unmap_vars via gomp_unmap_tgt from
474 freeing the device memory. */
475 t->tgt_end = 0;
476 t->to_free = 0;
478 for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
479 tp = t, t = t->prev)
480 if (n->tgt == t)
482 if (tp)
483 tp->prev = t->prev;
484 else
485 acc_dev->openacc.data_environ = t->prev;
487 break;
491 gomp_mutex_unlock (&acc_dev->lock);
493 gomp_unmap_vars (t, true);
495 if (profiling_p)
497 thr->prof_info = NULL;
498 thr->api_info = NULL;
502 #define FLAG_PRESENT (1 << 0)
503 #define FLAG_CREATE (1 << 1)
504 #define FLAG_COPY (1 << 2)
506 static void *
507 present_create_copy (unsigned f, void *h, size_t s, int async)
509 void *d;
510 splay_tree_key n;
512 if (!h || !s)
513 gomp_fatal ("[%p,+%d] is a bad range", (void *)h, (int)s);
515 goacc_lazy_initialize ();
517 struct goacc_thread *thr = goacc_thread ();
518 struct gomp_device_descr *acc_dev = thr->dev;
520 if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
521 return h;
523 acc_prof_info prof_info;
524 acc_api_info api_info;
525 bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
526 if (profiling_p)
528 prof_info.async = async;
529 prof_info.async_queue = prof_info.async;
532 gomp_mutex_lock (&acc_dev->lock);
534 n = lookup_host (acc_dev, h, s);
535 if (n)
537 /* Present. */
538 d = (void *) (n->tgt->tgt_start + n->tgt_offset);
540 if (!(f & FLAG_PRESENT))
542 gomp_mutex_unlock (&acc_dev->lock);
543 gomp_fatal ("[%p,+%d] already mapped to [%p,+%d]",
544 (void *)h, (int)s, (void *)d, (int)s);
546 if ((h + s) > (void *)n->host_end)
548 gomp_mutex_unlock (&acc_dev->lock);
549 gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
552 if (n->refcount != REFCOUNT_INFINITY)
554 n->refcount++;
555 n->dynamic_refcount++;
557 gomp_mutex_unlock (&acc_dev->lock);
559 else if (!(f & FLAG_CREATE))
561 gomp_mutex_unlock (&acc_dev->lock);
562 gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
564 else
566 struct target_mem_desc *tgt;
567 size_t mapnum = 1;
568 unsigned short kinds;
569 void *hostaddrs = h;
571 if (f & FLAG_COPY)
572 kinds = GOMP_MAP_TO;
573 else
574 kinds = GOMP_MAP_ALLOC;
576 gomp_mutex_unlock (&acc_dev->lock);
578 goacc_aq aq = get_goacc_asyncqueue (async);
580 tgt = gomp_map_vars_async (acc_dev, aq, mapnum, &hostaddrs, NULL, &s,
581 &kinds, true, GOMP_MAP_VARS_OPENACC);
582 /* Initialize dynamic refcount. */
583 tgt->list[0].key->dynamic_refcount = 1;
585 gomp_mutex_lock (&acc_dev->lock);
587 d = tgt->to_free;
588 tgt->prev = acc_dev->openacc.data_environ;
589 acc_dev->openacc.data_environ = tgt;
591 gomp_mutex_unlock (&acc_dev->lock);
594 if (profiling_p)
596 thr->prof_info = NULL;
597 thr->api_info = NULL;
600 return d;
603 void *
604 acc_create (void *h, size_t s)
606 return present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s, acc_async_sync);
609 void
610 acc_create_async (void *h, size_t s, int async)
612 present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s, async);
/* acc_present_or_create used to be what acc_create is now.  */
/* acc_pcreate is acc_present_or_create by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_create, acc_present_or_create)
strong_alias (acc_create, acc_pcreate)
#else
void *
acc_present_or_create (void *h, size_t s)
{
  return acc_create (h, s);
}

void *
acc_pcreate (void *h, size_t s)
{
  return acc_create (h, s);
}
#endif
634 void *
635 acc_copyin (void *h, size_t s)
637 return present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s,
638 acc_async_sync);
641 void
642 acc_copyin_async (void *h, size_t s, int async)
644 present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s, async);
/* acc_present_or_copyin used to be what acc_copyin is now.  */
/* acc_pcopyin is acc_present_or_copyin by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_copyin, acc_present_or_copyin)
strong_alias (acc_copyin, acc_pcopyin)
#else
void *
acc_present_or_copyin (void *h, size_t s)
{
  return acc_copyin (h, s);
}

void *
acc_pcopyin (void *h, size_t s)
{
  return acc_copyin (h, s);
}
#endif
666 #define FLAG_COPYOUT (1 << 0)
667 #define FLAG_FINALIZE (1 << 1)
669 static void
670 delete_copyout (unsigned f, void *h, size_t s, int async, const char *libfnname)
672 size_t host_size;
673 splay_tree_key n;
674 void *d;
675 struct goacc_thread *thr = goacc_thread ();
676 struct gomp_device_descr *acc_dev = thr->dev;
678 if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
679 return;
681 acc_prof_info prof_info;
682 acc_api_info api_info;
683 bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
684 if (profiling_p)
686 prof_info.async = async;
687 prof_info.async_queue = prof_info.async;
690 gomp_mutex_lock (&acc_dev->lock);
692 n = lookup_host (acc_dev, h, s);
694 /* No need to call lazy open, as the data must already have been
695 mapped. */
697 if (!n)
699 gomp_mutex_unlock (&acc_dev->lock);
700 gomp_fatal ("[%p,%d] is not mapped", (void *)h, (int)s);
703 d = (void *) (n->tgt->tgt_start + n->tgt_offset
704 + (uintptr_t) h - n->host_start);
706 host_size = n->host_end - n->host_start;
708 if (n->host_start != (uintptr_t) h || host_size != s)
710 gomp_mutex_unlock (&acc_dev->lock);
711 gomp_fatal ("[%p,%d] surrounds2 [%p,+%d]",
712 (void *) n->host_start, (int) host_size, (void *) h, (int) s);
715 if (n->refcount == REFCOUNT_INFINITY)
717 n->refcount = 0;
718 n->dynamic_refcount = 0;
720 if (n->refcount < n->dynamic_refcount)
722 gomp_mutex_unlock (&acc_dev->lock);
723 gomp_fatal ("Dynamic reference counting assert fail\n");
726 if (f & FLAG_FINALIZE)
728 n->refcount -= n->dynamic_refcount;
729 n->dynamic_refcount = 0;
731 else if (n->dynamic_refcount)
733 n->dynamic_refcount--;
734 n->refcount--;
737 if (n->refcount == 0)
739 if (n->tgt->refcount == 2)
741 struct target_mem_desc *tp, *t;
742 for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
743 tp = t, t = t->prev)
744 if (n->tgt == t)
746 if (tp)
747 tp->prev = t->prev;
748 else
749 acc_dev->openacc.data_environ = t->prev;
750 break;
754 if (f & FLAG_COPYOUT)
756 goacc_aq aq = get_goacc_asyncqueue (async);
757 gomp_copy_dev2host (acc_dev, aq, h, d, s);
759 gomp_remove_var (acc_dev, n);
762 gomp_mutex_unlock (&acc_dev->lock);
764 if (profiling_p)
766 thr->prof_info = NULL;
767 thr->api_info = NULL;
771 void
772 acc_delete (void *h , size_t s)
774 delete_copyout (0, h, s, acc_async_sync, __FUNCTION__);
/* Asynchronous variant of acc_delete.  */

void
acc_delete_async (void *h , size_t s, int async)
{
  delete_copyout (0, h, s, async, __FUNCTION__);
}
783 void
784 acc_delete_finalize (void *h , size_t s)
786 delete_copyout (FLAG_FINALIZE, h, s, acc_async_sync, __FUNCTION__);
789 void
790 acc_delete_finalize_async (void *h , size_t s, int async)
792 delete_copyout (FLAG_FINALIZE, h, s, async, __FUNCTION__);
795 void
796 acc_copyout (void *h, size_t s)
798 delete_copyout (FLAG_COPYOUT, h, s, acc_async_sync, __FUNCTION__);
801 void
802 acc_copyout_async (void *h, size_t s, int async)
804 delete_copyout (FLAG_COPYOUT, h, s, async, __FUNCTION__);
807 void
808 acc_copyout_finalize (void *h, size_t s)
810 delete_copyout (FLAG_COPYOUT | FLAG_FINALIZE, h, s, acc_async_sync,
811 __FUNCTION__);
814 void
815 acc_copyout_finalize_async (void *h, size_t s, int async)
817 delete_copyout (FLAG_COPYOUT | FLAG_FINALIZE, h, s, async, __FUNCTION__);
820 static void
821 update_dev_host (int is_dev, void *h, size_t s, int async)
823 splay_tree_key n;
824 void *d;
826 goacc_lazy_initialize ();
828 struct goacc_thread *thr = goacc_thread ();
829 struct gomp_device_descr *acc_dev = thr->dev;
831 if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
832 return;
834 acc_prof_info prof_info;
835 acc_api_info api_info;
836 bool profiling_p = GOACC_PROFILING_SETUP_P (thr, &prof_info, &api_info);
837 if (profiling_p)
839 prof_info.async = async;
840 prof_info.async_queue = prof_info.async;
843 gomp_mutex_lock (&acc_dev->lock);
845 n = lookup_host (acc_dev, h, s);
847 if (!n)
849 gomp_mutex_unlock (&acc_dev->lock);
850 gomp_fatal ("[%p,%d] is not mapped", h, (int)s);
853 d = (void *) (n->tgt->tgt_start + n->tgt_offset
854 + (uintptr_t) h - n->host_start);
856 goacc_aq aq = get_goacc_asyncqueue (async);
858 if (is_dev)
859 gomp_copy_host2dev (acc_dev, aq, d, h, s, /* TODO: cbuf? */ NULL);
860 else
861 gomp_copy_dev2host (acc_dev, aq, h, d, s);
863 gomp_mutex_unlock (&acc_dev->lock);
865 if (profiling_p)
867 thr->prof_info = NULL;
868 thr->api_info = NULL;
872 void
873 acc_update_device (void *h, size_t s)
875 update_dev_host (1, h, s, acc_async_sync);
/* Asynchronous variant of acc_update_device.  */

void
acc_update_device_async (void *h, size_t s, int async)
{
  update_dev_host (1, h, s, async);
}
884 void
885 acc_update_self (void *h, size_t s)
887 update_dev_host (0, h, s, acc_async_sync);
/* Asynchronous variant of acc_update_self.  */

void
acc_update_self_async (void *h, size_t s, int async)
{
  update_dev_host (0, h, s, async);
}
896 void
897 gomp_acc_insert_pointer (size_t mapnum, void **hostaddrs, size_t *sizes,
898 void *kinds, int async)
900 struct target_mem_desc *tgt;
901 struct goacc_thread *thr = goacc_thread ();
902 struct gomp_device_descr *acc_dev = thr->dev;
904 if (acc_is_present (*hostaddrs, *sizes))
906 splay_tree_key n;
907 gomp_mutex_lock (&acc_dev->lock);
908 n = lookup_host (acc_dev, *hostaddrs, *sizes);
909 gomp_mutex_unlock (&acc_dev->lock);
911 tgt = n->tgt;
912 for (size_t i = 0; i < tgt->list_count; i++)
913 if (tgt->list[i].key == n)
915 for (size_t j = 0; j < mapnum; j++)
916 if (i + j < tgt->list_count && tgt->list[i + j].key)
918 tgt->list[i + j].key->refcount++;
919 tgt->list[i + j].key->dynamic_refcount++;
921 return;
923 /* Should not reach here. */
924 gomp_fatal ("Dynamic refcount incrementing failed for pointer/pset");
927 gomp_debug (0, " %s: prepare mappings\n", __FUNCTION__);
928 goacc_aq aq = get_goacc_asyncqueue (async);
929 tgt = gomp_map_vars_async (acc_dev, aq, mapnum, hostaddrs,
930 NULL, sizes, kinds, true, GOMP_MAP_VARS_OPENACC);
931 gomp_debug (0, " %s: mappings prepared\n", __FUNCTION__);
933 /* Initialize dynamic refcount. */
934 tgt->list[0].key->dynamic_refcount = 1;
936 gomp_mutex_lock (&acc_dev->lock);
937 tgt->prev = acc_dev->openacc.data_environ;
938 acc_dev->openacc.data_environ = tgt;
939 gomp_mutex_unlock (&acc_dev->lock);
942 void
943 gomp_acc_remove_pointer (void *h, size_t s, bool force_copyfrom, int async,
944 int finalize, int mapnum)
946 struct goacc_thread *thr = goacc_thread ();
947 struct gomp_device_descr *acc_dev = thr->dev;
948 splay_tree_key n;
949 struct target_mem_desc *t;
950 int minrefs = (mapnum == 1) ? 2 : 3;
952 if (!acc_is_present (h, s))
953 return;
955 gomp_mutex_lock (&acc_dev->lock);
957 n = lookup_host (acc_dev, h, 1);
959 if (!n)
961 gomp_mutex_unlock (&acc_dev->lock);
962 gomp_fatal ("%p is not a mapped block", (void *)h);
965 gomp_debug (0, " %s: restore mappings\n", __FUNCTION__);
967 t = n->tgt;
969 if (n->refcount < n->dynamic_refcount)
971 gomp_mutex_unlock (&acc_dev->lock);
972 gomp_fatal ("Dynamic reference counting assert fail\n");
975 if (finalize)
977 n->refcount -= n->dynamic_refcount;
978 n->dynamic_refcount = 0;
980 else if (n->dynamic_refcount)
982 n->dynamic_refcount--;
983 n->refcount--;
986 gomp_mutex_unlock (&acc_dev->lock);
988 if (n->refcount == 0)
990 if (t->refcount == minrefs)
992 /* This is the last reference, so pull the descriptor off the
993 chain. This prevents gomp_unmap_vars via gomp_unmap_tgt from
994 freeing the device memory. */
995 struct target_mem_desc *tp;
996 for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
997 tp = t, t = t->prev)
999 if (n->tgt == t)
1001 if (tp)
1002 tp->prev = t->prev;
1003 else
1004 acc_dev->openacc.data_environ = t->prev;
1005 break;
1010 /* Set refcount to 1 to allow gomp_unmap_vars to unmap it. */
1011 n->refcount = 1;
1012 t->refcount = minrefs;
1013 for (size_t i = 0; i < t->list_count; i++)
1014 if (t->list[i].key == n)
1016 t->list[i].copy_from = force_copyfrom ? 1 : 0;
1017 break;
1020 /* If running synchronously, unmap immediately. */
1021 if (async < acc_async_noval)
1022 gomp_unmap_vars (t, true);
1023 else
1025 goacc_aq aq = get_goacc_asyncqueue (async);
1026 gomp_unmap_vars_async (t, true, aq);
1030 gomp_mutex_unlock (&acc_dev->lock);
1032 gomp_debug (0, " %s: mappings restored\n", __FUNCTION__);