/* OpenACC Runtime initialization routines

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Mentor Embedded.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "openacc.h"
#include "config.h"
#include "libgomp.h"
#include "gomp-constants.h"
#include "oacc-int.h"
#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Return block containing [H->S), or NULL if not contained.  The device lock
   for DEV must be locked on entry, and remains locked on exit.  */

static splay_tree_key
lookup_host (struct gomp_device_descr *dev, void *h, size_t s)
{
  struct splay_tree_key_s node;
  splay_tree_key key;

  node.host_start = (uintptr_t) h;
  node.host_end = (uintptr_t) h + s;

  key = splay_tree_lookup (&dev->mem_map, &node);

  return key;
}

/* Return block containing [D->S), or NULL if not contained.
   The list isn't ordered by device address, so we have to iterate
   over the whole array.  This is not expected to be a common
   operation.  The device lock associated with TGT must be locked on entry, and
   remains locked on exit.  */

static splay_tree_key
lookup_dev (struct target_mem_desc *tgt, void *d, size_t s)
{
  int i;
  struct target_mem_desc *t;

  if (!tgt)
    return NULL;

  /* Find the descriptor whose device range covers [D,+S).  */
  for (t = tgt; t != NULL; t = t->prev)
    {
      if (t->tgt_start <= (uintptr_t) d && t->tgt_end >= (uintptr_t) d + s)
	break;
    }

  if (!t)
    return NULL;

  /* Find the splay tree key within that descriptor.  */
  for (i = 0; i < t->list_count; i++)
    {
      void *offset;

      splay_tree_key k = &t->array[i].key;
      offset = d - t->tgt_start + k->tgt_offset;

      if (k->host_start + offset <= (void *) k->host_end)
	return k;
    }

  return NULL;
}

/* OpenACC is silent on how memory exhaustion is indicated.  We return
   NULL.  */

void *
acc_malloc (size_t s)
{
  if (!s)
    return NULL;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();

  assert (thr->dev);

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return malloc (s);

  return thr->dev->alloc_func (thr->dev->target_id, s);
}

/* OpenACC 2.0a (3.2.16) doesn't specify what to do in the event
   the device address is mapped.  We choose to check if it is mapped,
   and if it is, to unmap it.  */

void
acc_free (void *d)
{
  splay_tree_key k;

  if (!d)
    return;

  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return free (d);

  gomp_mutex_lock (&acc_dev->lock);

  /* We don't have to call lazy open here, as the ptr value must have
     been returned by acc_malloc.  It's not permitted to pass NULL in
     (unless you got that null from acc_malloc).  */
  if ((k = lookup_dev (acc_dev->openacc.data_environ, d, 1)))
    {
      void *offset;

      offset = d - k->tgt->tgt_start + k->tgt_offset;

      gomp_mutex_unlock (&acc_dev->lock);

      acc_unmap_data ((void *)(k->host_start + offset));
    }
  else
    gomp_mutex_unlock (&acc_dev->lock);

  if (!acc_dev->free_func (acc_dev->target_id, d))
    gomp_fatal ("error in freeing device memory in %s", __FUNCTION__);
}

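/* Illustrative usage of the two entry points above (hypothetical caller
   code, not part of libgomp); assumes the current device has been
   selected through the usual OpenACC mechanisms:

     size_t bytes = 1024;
     void *d = acc_malloc (bytes);   // device allocation, NULL on failure
     if (d)
       acc_free (d);                 // releases, unmapping first if mapped
*/
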
static void
memcpy_tofrom_device (bool from, void *d, void *h, size_t s, int async,
		      const char *libfnname)
{
  /* No need to call lazy open here, as the device pointer must have
     been obtained from a routine that did that.  */
  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      if (from)
	memmove (h, d, s);
      else
	memmove (d, h, s);
      return;
    }

  if (async > acc_async_sync)
    thr->dev->openacc.async_set_async_func (async);

  bool ret = (from
	      ? thr->dev->dev2host_func (thr->dev->target_id, h, d, s)
	      : thr->dev->host2dev_func (thr->dev->target_id, d, h, s));

  if (async > acc_async_sync)
    thr->dev->openacc.async_set_async_func (acc_async_sync);

  if (!ret)
    gomp_fatal ("error in %s", libfnname);
}

void
acc_memcpy_to_device (void *d, void *h, size_t s)
{
  memcpy_tofrom_device (false, d, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_memcpy_to_device_async (void *d, void *h, size_t s, int async)
{
  memcpy_tofrom_device (false, d, h, s, async, __FUNCTION__);
}

void
acc_memcpy_from_device (void *h, void *d, size_t s)
{
  memcpy_tofrom_device (true, d, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_memcpy_from_device_async (void *h, void *d, size_t s, int async)
{
  memcpy_tofrom_device (true, d, h, s, async, __FUNCTION__);
}

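/* Illustrative round trip through the memcpy entry points above
   (hypothetical caller code, not part of libgomp):

     float host[256];
     size_t bytes = sizeof host;
     void *dev = acc_malloc (bytes);
     acc_memcpy_to_device (dev, host, bytes);    // host -> device
     // ... device kernels read/write DEV ...
     acc_memcpy_from_device (host, dev, bytes);  // device -> host
     acc_free (dev);
*/
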
/* Return the device pointer that corresponds to host data H.  Or NULL
   if no mapping.  */

void *
acc_deviceptr (void *h)
{
  splay_tree_key n;
  void *d;
  void *offset;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h;

  gomp_mutex_lock (&dev->lock);

  n = lookup_host (dev, h, 1);

  if (!n)
    {
      gomp_mutex_unlock (&dev->lock);
      return NULL;
    }

  offset = h - n->host_start;

  d = n->tgt->tgt_start + n->tgt_offset + offset;

  gomp_mutex_unlock (&dev->lock);

  return d;
}

/* Return the host pointer that corresponds to device data D.  Or NULL
   if no mapping.  */

void *
acc_hostptr (void *d)
{
  splay_tree_key n;
  void *h;
  void *offset;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return d;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_dev (acc_dev->openacc.data_environ, d, 1);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      return NULL;
    }

  offset = d - n->tgt->tgt_start + n->tgt_offset;

  h = n->host_start + offset;

  gomp_mutex_unlock (&acc_dev->lock);

  return h;
}

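/* The two translation routines above are inverses over a live mapping
   (hypothetical caller code, not part of libgomp):

     void *d = acc_deviceptr (h);     // host -> device address
     assert (acc_hostptr (d) == h);   // device -> host address
*/
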
/* Return 1 if host data [H,+S] is present on the device.  */

int
acc_is_present (void *h, size_t s)
{
  splay_tree_key n;

  if (!s || !h)
    return 0;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (thr->dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h != NULL;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  if (n && ((uintptr_t)h < n->host_start
	    || (uintptr_t)h + s > n->host_end
	    || s > n->host_end - n->host_start))
    n = NULL;

  gomp_mutex_unlock (&acc_dev->lock);

  return n != NULL;
}

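/* Illustrative use of acc_is_present (hypothetical caller code, not
   part of libgomp): only map data that is not already present.

     if (!acc_is_present (a, bytes))
       acc_copyin (a, bytes);
*/
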
/* Create a mapping for host [H,+S] -> device [D,+S] */

void
acc_map_data (void *h, void *d, size_t s)
{
  struct target_mem_desc *tgt = NULL;
  size_t mapnum = 1;
  void *hostaddrs = h;
  void *devaddrs = d;
  size_t sizes = s;
  unsigned short kinds = GOMP_MAP_ALLOC;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      if (d != h)
	gomp_fatal ("cannot map data on shared-memory system");
    }
  else
    {
      struct goacc_thread *thr = goacc_thread ();

      if (!d || !h || !s)
	gomp_fatal ("[%p,+%d]->[%p,+%d] is a bad map",
		    (void *)h, (int)s, (void *)d, (int)s);

      gomp_mutex_lock (&acc_dev->lock);

      if (lookup_host (acc_dev, h, s))
	{
	  gomp_mutex_unlock (&acc_dev->lock);
	  gomp_fatal ("host address [%p, +%d] is already mapped", (void *)h,
		      (int)s);
	}

      if (lookup_dev (thr->dev->openacc.data_environ, d, s))
	{
	  gomp_mutex_unlock (&acc_dev->lock);
	  gomp_fatal ("device address [%p, +%d] is already mapped", (void *)d,
		      (int)s);
	}

      gomp_mutex_unlock (&acc_dev->lock);

      tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, &devaddrs, &sizes,
			   &kinds, true, GOMP_MAP_VARS_OPENACC);
      tgt->list[0].key->refcount = REFCOUNT_INFINITY;
    }

  gomp_mutex_lock (&acc_dev->lock);
  tgt->prev = acc_dev->openacc.data_environ;
  acc_dev->openacc.data_environ = tgt;
  gomp_mutex_unlock (&acc_dev->lock);
}

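/* Illustrative pairing of acc_map_data with acc_malloc (hypothetical
   caller code, not part of libgomp).  The mapping created above gets
   REFCOUNT_INFINITY, so it persists until explicitly removed:

     void *d = acc_malloc (bytes);
     acc_map_data (h, d, bytes);  // host [h,+bytes) -> device [d,+bytes)
     // ... data clauses now treat H as present ...
     acc_unmap_data (h);          // remove mapping, keep device memory
     acc_free (d);
*/
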
void
acc_unmap_data (void *h)
{
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  /* No need to call lazy open, as the address must have been mapped.  */

  /* This is a no-op on shared-memory targets.  */
  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  size_t host_size;

  gomp_mutex_lock (&acc_dev->lock);

  splay_tree_key n = lookup_host (acc_dev, h, 1);
  struct target_mem_desc *t;

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("%p is not a mapped block", (void *)h);
    }

  host_size = n->host_end - n->host_start;

  if (n->host_start != (uintptr_t) h)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] surrounds %p",
		  (void *) n->host_start, (int) host_size, (void *) h);
    }

  /* Mark for removal.  */
  n->refcount = 1;

  t = n->tgt;

  if (t->refcount == 2)
    {
      struct target_mem_desc *tp;

      /* This is the last reference, so pull the descriptor off the
	 chain.  This avoids gomp_unmap_vars via gomp_unmap_tgt from
	 freeing the device memory.  */
      t->tgt_end = 0;
      t->to_free = 0;

      for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
	   tp = t, t = t->prev)
	if (n->tgt == t)
	  {
	    if (tp)
	      tp->prev = t->prev;
	    else
	      acc_dev->openacc.data_environ = t->prev;

	    break;
	  }
    }

  gomp_mutex_unlock (&acc_dev->lock);

  gomp_unmap_vars (t, true);
}

#define FLAG_PRESENT (1 << 0)
#define FLAG_CREATE (1 << 1)
#define FLAG_COPY (1 << 2)

static void *
present_create_copy (unsigned f, void *h, size_t s, int async)
{
  void *d;
  splay_tree_key n;

  if (!h || !s)
    gomp_fatal ("[%p,+%d] is a bad range", (void *)h, (int)s);

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return h;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);
  if (n)
    {
      /* Present.  */
      d = (void *) (n->tgt->tgt_start + n->tgt_offset);

      if (!(f & FLAG_PRESENT))
	{
	  gomp_mutex_unlock (&acc_dev->lock);
	  gomp_fatal ("[%p,+%d] already mapped to [%p,+%d]",
		      (void *)h, (int)s, (void *)d, (int)s);
	}
      if ((h + s) > (void *)n->host_end)
	{
	  gomp_mutex_unlock (&acc_dev->lock);
	  gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
	}

      if (n->refcount != REFCOUNT_INFINITY)
	{
	  n->refcount++;
	  n->dynamic_refcount++;
	}
      gomp_mutex_unlock (&acc_dev->lock);
    }
  else if (!(f & FLAG_CREATE))
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
    }
  else
    {
      struct target_mem_desc *tgt;
      size_t mapnum = 1;
      unsigned short kinds;
      void *hostaddrs = h;

      if (f & FLAG_COPY)
	kinds = GOMP_MAP_TO;
      else
	kinds = GOMP_MAP_ALLOC;

      gomp_mutex_unlock (&acc_dev->lock);

      if (async > acc_async_sync)
	acc_dev->openacc.async_set_async_func (async);

      tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, NULL, &s, &kinds, true,
			   GOMP_MAP_VARS_OPENACC);
      /* Initialize dynamic refcount.  */
      tgt->list[0].key->dynamic_refcount = 1;

      if (async > acc_async_sync)
	acc_dev->openacc.async_set_async_func (acc_async_sync);

      gomp_mutex_lock (&acc_dev->lock);

      d = tgt->to_free;
      tgt->prev = acc_dev->openacc.data_environ;
      acc_dev->openacc.data_environ = tgt;

      gomp_mutex_unlock (&acc_dev->lock);
    }

  return d;
}

void *
acc_create (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s, acc_async_sync);
}

void
acc_create_async (void *h, size_t s, int async)
{
  present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s, async);
}

void *
acc_copyin (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s,
			      acc_async_sync);
}

void
acc_copyin_async (void *h, size_t s, int async)
{
  present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s, async);
}

void *
acc_present_or_create (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s, acc_async_sync);
}

/* acc_pcreate is acc_present_or_create by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_present_or_create, acc_pcreate)
#else
void *
acc_pcreate (void *h, size_t s)
{
  return acc_present_or_create (h, s);
}
#endif

void *
acc_present_or_copyin (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s,
			      acc_async_sync);
}

/* acc_pcopyin is acc_present_or_copyin by a different name.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
strong_alias (acc_present_or_copyin, acc_pcopyin)
#else
void *
acc_pcopyin (void *h, size_t s)
{
  return acc_present_or_copyin (h, s);
}
#endif

#define FLAG_COPYOUT  (1 << 0)
#define FLAG_FINALIZE (1 << 1)

static void
delete_copyout (unsigned f, void *h, size_t s, int async, const char *libfnname)
{
  size_t host_size;
  splay_tree_key n;
  void *d;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  /* No need to call lazy open, as the data must already have been
     mapped.  */

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] is not mapped", (void *)h, (int)s);
    }

  d = (void *) (n->tgt->tgt_start + n->tgt_offset
		+ (uintptr_t) h - n->host_start);

  host_size = n->host_end - n->host_start;

  if (n->host_start != (uintptr_t) h || host_size != s)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] surrounds2 [%p,+%d]",
		  (void *) n->host_start, (int) host_size, (void *) h, (int) s);
    }

  if (n->refcount == REFCOUNT_INFINITY)
    {
      n->refcount = 0;
      n->dynamic_refcount = 0;
    }
  if (n->refcount < n->dynamic_refcount)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("Dynamic reference counting assert fail\n");
    }

  if (f & FLAG_FINALIZE)
    {
      n->refcount -= n->dynamic_refcount;
      n->dynamic_refcount = 0;
    }
  else if (n->dynamic_refcount)
    {
      n->dynamic_refcount--;
      n->refcount--;
    }

  if (n->refcount == 0)
    {
      if (n->tgt->refcount == 2)
	{
	  struct target_mem_desc *tp, *t;
	  for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
	       tp = t, t = t->prev)
	    if (n->tgt == t)
	      {
		if (tp)
		  tp->prev = t->prev;
		else
		  acc_dev->openacc.data_environ = t->prev;
		break;
	      }
	}

      if (f & FLAG_COPYOUT)
	{
	  if (async > acc_async_sync)
	    acc_dev->openacc.async_set_async_func (async);
	  acc_dev->dev2host_func (acc_dev->target_id, h, d, s);
	  if (async > acc_async_sync)
	    acc_dev->openacc.async_set_async_func (acc_async_sync);
	}

      gomp_remove_var (acc_dev, n);
    }

  gomp_mutex_unlock (&acc_dev->lock);
}

void
acc_delete (void *h, size_t s)
{
  delete_copyout (0, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_delete_async (void *h, size_t s, int async)
{
  delete_copyout (0, h, s, async, __FUNCTION__);
}

void
acc_delete_finalize (void *h, size_t s)
{
  delete_copyout (FLAG_FINALIZE, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_delete_finalize_async (void *h, size_t s, int async)
{
  delete_copyout (FLAG_FINALIZE, h, s, async, __FUNCTION__);
}

void
acc_copyout (void *h, size_t s)
{
  delete_copyout (FLAG_COPYOUT, h, s, acc_async_sync, __FUNCTION__);
}

void
acc_copyout_async (void *h, size_t s, int async)
{
  delete_copyout (FLAG_COPYOUT, h, s, async, __FUNCTION__);
}

void
acc_copyout_finalize (void *h, size_t s)
{
  delete_copyout (FLAG_COPYOUT | FLAG_FINALIZE, h, s, acc_async_sync,
		  __FUNCTION__);
}

void
acc_copyout_finalize_async (void *h, size_t s, int async)
{
  delete_copyout (FLAG_COPYOUT | FLAG_FINALIZE, h, s, async, __FUNCTION__);
}

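/* Illustrative unstructured data lifetime built from the wrappers above
   (hypothetical caller code, not part of libgomp).  Each acc_copyin
   bumps the dynamic reference count; each acc_copyout or acc_delete
   drops it, and the _finalize variants force it to zero:

     acc_copyin (a, bytes);    // map + host-to-device copy
     // ... kernels using A ...
     acc_copyout (a, bytes);   // device-to-host copy + unmap
*/
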
static void
update_dev_host (int is_dev, void *h, size_t s, int async)
{
  splay_tree_key n;
  void *d;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    return;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] is not mapped", h, (int)s);
    }

  d = (void *) (n->tgt->tgt_start + n->tgt_offset
		+ (uintptr_t) h - n->host_start);

  if (async > acc_async_sync)
    acc_dev->openacc.async_set_async_func (async);

  if (is_dev)
    acc_dev->host2dev_func (acc_dev->target_id, d, h, s);
  else
    acc_dev->dev2host_func (acc_dev->target_id, h, d, s);

  if (async > acc_async_sync)
    acc_dev->openacc.async_set_async_func (acc_async_sync);

  gomp_mutex_unlock (&acc_dev->lock);
}

void
acc_update_device (void *h, size_t s)
{
  update_dev_host (1, h, s, acc_async_sync);
}

void
acc_update_device_async (void *h, size_t s, int async)
{
  update_dev_host (1, h, s, async);
}

void
acc_update_self (void *h, size_t s)
{
  update_dev_host (0, h, s, acc_async_sync);
}

void
acc_update_self_async (void *h, size_t s, int async)
{
  update_dev_host (0, h, s, async);
}

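/* Illustrative use of the update entry points above (hypothetical
   caller code, not part of libgomp): refresh one side of an existing
   mapping without remapping.

     acc_update_device (a, bytes);  // host -> device, A must be mapped
     // ... device modifies A ...
     acc_update_self (a, bytes);    // device -> host
*/
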
void
gomp_acc_insert_pointer (size_t mapnum, void **hostaddrs, size_t *sizes,
			 void *kinds)
{
  struct target_mem_desc *tgt;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_is_present (*hostaddrs, *sizes))
    {
      splay_tree_key n;
      gomp_mutex_lock (&acc_dev->lock);
      n = lookup_host (acc_dev, *hostaddrs, *sizes);
      gomp_mutex_unlock (&acc_dev->lock);

      tgt = n->tgt;
      for (size_t i = 0; i < tgt->list_count; i++)
	if (tgt->list[i].key == n)
	  {
	    for (size_t j = 0; j < mapnum; j++)
	      if (i + j < tgt->list_count && tgt->list[i + j].key)
		{
		  tgt->list[i + j].key->refcount++;
		  tgt->list[i + j].key->dynamic_refcount++;
		}
	    return;
	  }
      /* Should not reach here.  */
      gomp_fatal ("Dynamic refcount incrementing failed for pointer/pset");
    }

  gomp_debug (0, "  %s: prepare mappings\n", __FUNCTION__);
  tgt = gomp_map_vars (acc_dev, mapnum, hostaddrs,
		       NULL, sizes, kinds, true, GOMP_MAP_VARS_OPENACC);
  gomp_debug (0, "  %s: mappings prepared\n", __FUNCTION__);

  /* Initialize dynamic refcount.  */
  tgt->list[0].key->dynamic_refcount = 1;

  gomp_mutex_lock (&acc_dev->lock);
  tgt->prev = acc_dev->openacc.data_environ;
  acc_dev->openacc.data_environ = tgt;
  gomp_mutex_unlock (&acc_dev->lock);
}

void
gomp_acc_remove_pointer (void *h, size_t s, bool force_copyfrom, int async,
			 int finalize, int mapnum)
{
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;
  splay_tree_key n;
  struct target_mem_desc *t;
  int minrefs = (mapnum == 1) ? 2 : 3;

  if (!acc_is_present (h, s))
    return;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, 1);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("%p is not a mapped block", (void *)h);
    }

  gomp_debug (0, "  %s: restore mappings\n", __FUNCTION__);

  t = n->tgt;

  if (n->refcount < n->dynamic_refcount)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("Dynamic reference counting assert fail\n");
    }

  if (finalize)
    {
      n->refcount -= n->dynamic_refcount;
      n->dynamic_refcount = 0;
    }
  else if (n->dynamic_refcount)
    {
      n->dynamic_refcount--;
      n->refcount--;
    }

  gomp_mutex_unlock (&acc_dev->lock);

  if (n->refcount == 0)
    {
      if (t->refcount == minrefs)
	{
	  /* This is the last reference, so pull the descriptor off the
	     chain.  This prevents gomp_unmap_vars via gomp_unmap_tgt from
	     freeing the device memory.  */
	  struct target_mem_desc *tp;
	  for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
	       tp = t, t = t->prev)
	    {
	      if (n->tgt == t)
		{
		  if (tp)
		    tp->prev = t->prev;
		  else
		    acc_dev->openacc.data_environ = t->prev;
		  break;
		}
	    }
	}

      /* Set refcount to 1 to allow gomp_unmap_vars to unmap it.  */
      n->refcount = 1;
      t->refcount = minrefs;
      for (size_t i = 0; i < t->list_count; i++)
	if (t->list[i].key == n)
	  {
	    t->list[i].copy_from = force_copyfrom ? 1 : 0;
	    break;
	  }

      /* If running synchronously, unmap immediately.  */
      if (async < acc_async_noval)
	gomp_unmap_vars (t, true);
      else
	t->device_descr->openacc.register_async_cleanup_func (t, async);
    }

  gomp_mutex_unlock (&acc_dev->lock);

  gomp_debug (0, "  %s: mappings restored\n", __FUNCTION__);
}