/* Copyright (C) 2013-2015 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file contains the support for offloading.  */

#include "libgomp.h"
#include "oacc-plugin.h"
#include "oacc-int.h"
#include "gomp-constants.h"
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#ifdef HAVE_INTTYPES_H
# include <inttypes.h>  /* For PRIu64.  */
#endif
#include <string.h>
#include <assert.h>

#ifdef PLUGIN_SUPPORT
#include <dlfcn.h>
#include "plugin-suffix.h"
#endif
static void gomp_target_init (void);

/* The whole initialization code for offloading plugins is only run once.  */
static pthread_once_t gomp_is_initialized = PTHREAD_ONCE_INIT;

/* Mutex for offload image registration.  */
static gomp_mutex_t register_lock;
/* This structure describes an offload image.
   It contains the type of the target device, a pointer to the host table
   descriptor, and a pointer to the target data.  */
struct offload_image_descr
{
  enum offload_target_type type;
  const void *host_table;
  const void *target_data;
};

/* Array of descriptors of offload images.  */
static struct offload_image_descr *offload_images;

/* Total number of offload images.  */
static int num_offload_images;

/* Array of descriptors for all available devices.  */
static struct gomp_device_descr *devices;

/* Total number of available devices.  */
static int num_devices;

/* Number of GOMP_OFFLOAD_CAP_OPENMP_400 devices.  */
static int num_devices_openmp;
/* Similar to gomp_realloc, but release register_lock before gomp_fatal.  */

static void *
gomp_realloc_unlock (void *old, size_t size)
{
  void *ret = realloc (old, size);
  if (ret == NULL)
    {
      gomp_mutex_unlock (&register_lock);
      gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size);
    }
  return ret;
}
/* The comparison function for the splay tree; keys are host address
   ranges.  */

attribute_hidden int
splay_compare (splay_tree_key x, splay_tree_key y)
{
  if (x->host_start == x->host_end
      && y->host_start == y->host_end)
    return 0;
  if (x->host_end <= y->host_start)
    return -1;
  if (x->host_start >= y->host_end)
    return 1;
  return 0;
}

#include "splay-tree.h"
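
/* Illustrative sketch (not part of the build): because overlapping ranges
   compare as equal, the mapping that covers a single host address can be
   found with a one-byte key, mirroring the lookups done later in this file:

     struct splay_tree_key_s key;
     key.host_start = (uintptr_t) ptr;
     key.host_end = key.host_start + 1;
     splay_tree_key n = splay_tree_lookup (&devicep->mem_map, &key);

   Here "ptr" and "devicep" stand for any host address and any device
   descriptor respectively.  */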
attribute_hidden void
gomp_init_targets_once (void)
{
  (void) pthread_once (&gomp_is_initialized, gomp_target_init);
}

attribute_hidden int
gomp_get_num_devices (void)
{
  gomp_init_targets_once ();
  return num_devices_openmp;
}

static struct gomp_device_descr *
resolve_device (int device_id)
{
  if (device_id == GOMP_DEVICE_ICV)
    {
      struct gomp_task_icv *icv = gomp_icv (false);
      device_id = icv->default_device_var;
    }

  if (device_id < 0 || device_id >= gomp_get_num_devices ())
    return NULL;

  return &devices[device_id];
}
/* Handle the case where splay_tree_lookup found oldn for newn.
   Helper function of gomp_map_vars.  */

static void
gomp_map_vars_existing (struct gomp_device_descr *devicep, splay_tree_key oldn,
                        splay_tree_key newn, unsigned char kind)
{
  if ((kind & GOMP_MAP_FLAG_FORCE)
      || oldn->host_start > newn->host_start
      || oldn->host_end < newn->host_end)
    {
      gomp_mutex_unlock (&devicep->lock);
      gomp_fatal ("Trying to map into device [%p..%p) object when "
                  "[%p..%p) is already mapped",
                  (void *) newn->host_start, (void *) newn->host_end,
                  (void *) oldn->host_start, (void *) oldn->host_end);
    }
  oldn->refcount++;
}
static int
get_kind (bool is_openacc, void *kinds, int idx)
{
  return is_openacc ? ((unsigned short *) kinds)[idx]
                    : ((unsigned char *) kinds)[idx];
}
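
/* Layout note, inferred from the uses of typemask and rshift in gomp_map_vars
   below (illustrative, not part of the build): the low bits of each kind hold
   the GOMP_MAP_* code (3 bits for OpenMP, 8 bits for OpenACC) and the
   remaining high bits hold log2 of the required alignment, e.g.:

     int kind = get_kind (false, kinds, i);
     int map_type = kind & 0x7;                  (the GOMP_MAP_* code)
     size_t align = (size_t) 1 << (kind >> 3);   (alignment in bytes)
*/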
static void
gomp_map_pointer (struct target_mem_desc *tgt, uintptr_t host_ptr,
                  uintptr_t target_offset, uintptr_t bias)
{
  struct gomp_device_descr *devicep = tgt->device_descr;
  struct splay_tree_s *mem_map = &devicep->mem_map;
  struct splay_tree_key_s cur_node;

  cur_node.host_start = host_ptr;
  if (cur_node.host_start == (uintptr_t) NULL)
    {
      cur_node.tgt_offset = (uintptr_t) NULL;
      /* FIXME: see comment about coalescing host/dev transfers below.  */
      devicep->host2dev_func (devicep->target_id,
                              (void *) (tgt->tgt_start + target_offset),
                              (void *) &cur_node.tgt_offset,
                              sizeof (void *));
      return;
    }
  /* Add bias to the pointer value.  */
  cur_node.host_start += bias;
  cur_node.host_end = cur_node.host_start + 1;
  splay_tree_key n = splay_tree_lookup (mem_map, &cur_node);
  if (n == NULL)
    {
      /* Could possibly be a zero-size array section.  */
      cur_node.host_end--;
      n = splay_tree_lookup (mem_map, &cur_node);
      if (n == NULL)
        {
          cur_node.host_start--;
          n = splay_tree_lookup (mem_map, &cur_node);
          cur_node.host_start++;
        }
    }
  if (n == NULL)
    {
      gomp_mutex_unlock (&devicep->lock);
      gomp_fatal ("Pointer target of array section wasn't mapped");
    }
  cur_node.host_start -= n->host_start;
  cur_node.tgt_offset
    = n->tgt->tgt_start + n->tgt_offset + cur_node.host_start;
  /* At this point tgt_offset is the target address of the
     array section.  Now subtract bias to get what we want
     to initialize the pointer with.  */
  cur_node.tgt_offset -= bias;
  /* FIXME: see comment about coalescing host/dev transfers below.  */
  devicep->host2dev_func (devicep->target_id,
                          (void *) (tgt->tgt_start + target_offset),
                          (void *) &cur_node.tgt_offset,
                          sizeof (void *));
}
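
/* Illustrative sketch (not part of the build): for a host pointer whose value
   plus BIAS falls inside a mapped region N, the value written to the device
   above is

     n->tgt->tgt_start + n->tgt_offset
       + (host_ptr + bias - n->host_start) - bias

   i.e. the device address of the array section minus the bias, so that the
   device-side pointer arithmetic again lands on the section start.  */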
attribute_hidden struct target_mem_desc *
gomp_map_vars (struct gomp_device_descr *devicep, size_t mapnum,
               void **hostaddrs, void **devaddrs, size_t *sizes, void *kinds,
               bool is_openacc, bool is_target)
{
  size_t i, tgt_align, tgt_size, not_found_cnt = 0;
  const int rshift = is_openacc ? 8 : 3;
  const int typemask = is_openacc ? 0xff : 0x7;
  struct splay_tree_s *mem_map = &devicep->mem_map;
  struct splay_tree_key_s cur_node;
  struct target_mem_desc *tgt
    = gomp_malloc (sizeof (*tgt) + sizeof (tgt->list[0]) * mapnum);
  tgt->list_count = mapnum;
  tgt->refcount = 1;
  tgt->device_descr = devicep;

  if (mapnum == 0)
    return tgt;

  tgt_align = sizeof (void *);
  tgt_size = 0;
  if (is_target)
    {
      size_t align = 4 * sizeof (void *);
      tgt_align = align;
      tgt_size = mapnum * sizeof (void *);
    }

  gomp_mutex_lock (&devicep->lock);

  for (i = 0; i < mapnum; i++)
    {
      int kind = get_kind (is_openacc, kinds, i);
      if (hostaddrs[i] == NULL)
        {
          tgt->list[i] = NULL;
          continue;
        }
      cur_node.host_start = (uintptr_t) hostaddrs[i];
      if (!GOMP_MAP_POINTER_P (kind & typemask))
        cur_node.host_end = cur_node.host_start + sizes[i];
      else
        cur_node.host_end = cur_node.host_start + sizeof (void *);
      splay_tree_key n = splay_tree_lookup (mem_map, &cur_node);
      if (n)
        {
          tgt->list[i] = n;
          gomp_map_vars_existing (devicep, n, &cur_node, kind & typemask);
        }
      else
        {
          tgt->list[i] = NULL;

          size_t align = (size_t) 1 << (kind >> rshift);
          not_found_cnt++;
          if (tgt_align < align)
            tgt_align = align;
          tgt_size = (tgt_size + align - 1) & ~(align - 1);
          tgt_size += cur_node.host_end - cur_node.host_start;
          if ((kind & typemask) == GOMP_MAP_TO_PSET)
            {
              size_t j;
              for (j = i + 1; j < mapnum; j++)
                if (!GOMP_MAP_POINTER_P (get_kind (is_openacc, kinds, j)
                                         & typemask))
                  break;
                else if ((uintptr_t) hostaddrs[j] < cur_node.host_start
                         || ((uintptr_t) hostaddrs[j] + sizeof (void *)
                             > cur_node.host_end))
                  break;
                else
                  {
                    tgt->list[j] = NULL;
                    i++;
                  }
            }
        }
    }

  if (devaddrs)
    {
      if (mapnum != 1)
        {
          gomp_mutex_unlock (&devicep->lock);
          gomp_fatal ("unexpected aggregation");
        }
      tgt->to_free = devaddrs[0];
      tgt->tgt_start = (uintptr_t) tgt->to_free;
      tgt->tgt_end = tgt->tgt_start + sizes[0];
    }
  else if (not_found_cnt || is_target)
    {
      /* Allocate tgt_align aligned tgt_size block of memory.  */
      /* FIXME: Perhaps change interface to allocate properly aligned
         memory.  */
      tgt->to_free = devicep->alloc_func (devicep->target_id,
                                          tgt_size + tgt_align - 1);
      tgt->tgt_start = (uintptr_t) tgt->to_free;
      tgt->tgt_start = (tgt->tgt_start + tgt_align - 1) & ~(tgt_align - 1);
      tgt->tgt_end = tgt->tgt_start + tgt_size;
    }
  else
    {
      tgt->to_free = NULL;
      tgt->tgt_start = 0;
      tgt->tgt_end = 0;
    }

  tgt_size = 0;
  if (is_target)
    tgt_size = mapnum * sizeof (void *);

  tgt->array = NULL;
  if (not_found_cnt)
    {
      tgt->array = gomp_malloc (not_found_cnt * sizeof (*tgt->array));
      splay_tree_node array = tgt->array;
      size_t j;

      for (i = 0; i < mapnum; i++)
        if (tgt->list[i] == NULL)
          {
            int kind = get_kind (is_openacc, kinds, i);
            if (hostaddrs[i] == NULL)
              continue;
            splay_tree_key k = &array->key;
            k->host_start = (uintptr_t) hostaddrs[i];
            if (!GOMP_MAP_POINTER_P (kind & typemask))
              k->host_end = k->host_start + sizes[i];
            else
              k->host_end = k->host_start + sizeof (void *);
            splay_tree_key n = splay_tree_lookup (mem_map, k);
            if (n)
              {
                tgt->list[i] = n;
                gomp_map_vars_existing (devicep, n, k, kind & typemask);
              }
            else
              {
                size_t align = (size_t) 1 << (kind >> rshift);
                tgt->list[i] = k;
                tgt_size = (tgt_size + align - 1) & ~(align - 1);
                k->tgt = tgt;
                k->tgt_offset = tgt_size;
                tgt_size += k->host_end - k->host_start;
                k->copy_from = GOMP_MAP_COPY_FROM_P (kind & typemask);
                k->refcount = 1;
                k->async_refcount = 0;
                tgt->refcount++;
                array->left = NULL;
                array->right = NULL;
                splay_tree_insert (mem_map, array);
                switch (kind & typemask)
                  {
                  case GOMP_MAP_ALLOC:
                  case GOMP_MAP_FROM:
                  case GOMP_MAP_FORCE_ALLOC:
                  case GOMP_MAP_FORCE_FROM:
                    break;
                  case GOMP_MAP_TO:
                  case GOMP_MAP_TOFROM:
                  case GOMP_MAP_FORCE_TO:
                  case GOMP_MAP_FORCE_TOFROM:
                    /* FIXME: Perhaps add some smarts, like if copying
                       several adjacent fields from host to target, use some
                       host buffer to avoid sending each var individually.  */
                    devicep->host2dev_func (devicep->target_id,
                                            (void *) (tgt->tgt_start
                                                      + k->tgt_offset),
                                            (void *) k->host_start,
                                            k->host_end - k->host_start);
                    break;
                  case GOMP_MAP_POINTER:
                    gomp_map_pointer (tgt,
                                      (uintptr_t) *(void **) k->host_start,
                                      k->tgt_offset, sizes[i]);
                    break;
                  case GOMP_MAP_TO_PSET:
                    /* FIXME: see above FIXME comment.  */
                    devicep->host2dev_func (devicep->target_id,
                                            (void *) (tgt->tgt_start
                                                      + k->tgt_offset),
                                            (void *) k->host_start,
                                            k->host_end - k->host_start);

                    for (j = i + 1; j < mapnum; j++)
                      if (!GOMP_MAP_POINTER_P (get_kind (is_openacc, kinds, j)
                                               & typemask))
                        break;
                      else if ((uintptr_t) hostaddrs[j] < k->host_start
                               || ((uintptr_t) hostaddrs[j] + sizeof (void *)
                                   > k->host_end))
                        break;
                      else
                        {
                          tgt->list[j] = k;
                          k->refcount++;
                          gomp_map_pointer (tgt,
                                            (uintptr_t) *(void **) hostaddrs[j],
                                            k->tgt_offset
                                            + ((uintptr_t) hostaddrs[j]
                                               - k->host_start),
                                            sizes[j]);
                          i++;
                        }
                    break;
                  case GOMP_MAP_FORCE_PRESENT:
                    {
                      /* We already looked up the memory region above and it
                         was missing.  */
                      size_t size = k->host_end - k->host_start;
                      gomp_mutex_unlock (&devicep->lock);
#ifdef HAVE_INTTYPES_H
                      gomp_fatal ("present clause: !acc_is_present (%p, "
                                  "%"PRIu64" (0x%"PRIx64"))",
                                  (void *) k->host_start,
                                  (uint64_t) size, (uint64_t) size);
#else
                      gomp_fatal ("present clause: !acc_is_present (%p, "
                                  "%lu (0x%lx))", (void *) k->host_start,
                                  (unsigned long) size, (unsigned long) size);
#endif
                    }
                    break;
                  case GOMP_MAP_FORCE_DEVICEPTR:
                    assert (k->host_end - k->host_start == sizeof (void *));

                    devicep->host2dev_func (devicep->target_id,
                                            (void *) (tgt->tgt_start
                                                      + k->tgt_offset),
                                            (void *) k->host_start,
                                            sizeof (void *));
                    break;
                  default:
                    gomp_mutex_unlock (&devicep->lock);
                    gomp_fatal ("%s: unhandled kind 0x%.2x", __FUNCTION__,
                                kind);
                  }
                array++;
              }
          }
    }

  if (is_target)
    {
      for (i = 0; i < mapnum; i++)
        {
          if (tgt->list[i] == NULL)
            cur_node.tgt_offset = (uintptr_t) NULL;
          else
            cur_node.tgt_offset = tgt->list[i]->tgt->tgt_start
                                  + tgt->list[i]->tgt_offset;
          /* FIXME: see above FIXME comment.  */
          devicep->host2dev_func (devicep->target_id,
                                  (void *) (tgt->tgt_start
                                            + i * sizeof (void *)),
                                  (void *) &cur_node.tgt_offset,
                                  sizeof (void *));
        }
    }

  gomp_mutex_unlock (&devicep->lock);
  return tgt;
}
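
/* Illustrative sketch (not part of the build): gomp_map_vars packs the newly
   mapped objects back to back in one device block, rounding each offset up to
   the alignment taken from the kind's high bits.  For two objects of 6 and 16
   bytes with 8- and 16-byte alignment:

     tgt_size = 0;
     tgt_size = (tgt_size + 8 - 1) & ~(size_t) (8 - 1);     (0, first object)
     tgt_size += 6;                                         (6)
     tgt_size = (tgt_size + 16 - 1) & ~(size_t) (16 - 1);   (16, second object)
     tgt_size += 16;                                        (32 bytes total)
*/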
static void
gomp_unmap_tgt (struct target_mem_desc *tgt)
{
  /* Deallocate on target the tgt->tgt_start .. tgt->tgt_end region.  */
  if (tgt->tgt_end)
    tgt->device_descr->free_func (tgt->device_descr->target_id, tgt->to_free);

  free (tgt->array);
  free (tgt);
}
/* Decrease the refcount for a set of mapped variables, and queue asynchronous
   copies from the device back to the host after any work that has been issued.
   Because the regions are still "live", increment an asynchronous reference
   count to indicate that they should not be unmapped from host-side data
   structures until the asynchronous copy has completed.  */

attribute_hidden void
gomp_copy_from_async (struct target_mem_desc *tgt)
{
  struct gomp_device_descr *devicep = tgt->device_descr;
  size_t i;

  gomp_mutex_lock (&devicep->lock);

  for (i = 0; i < tgt->list_count; i++)
    if (tgt->list[i] == NULL)
      ;
    else if (tgt->list[i]->refcount > 1)
      {
        tgt->list[i]->refcount--;
        tgt->list[i]->async_refcount++;
      }
    else
      {
        splay_tree_key k = tgt->list[i];
        if (k->copy_from)
          devicep->dev2host_func (devicep->target_id, (void *) k->host_start,
                                  (void *) (k->tgt->tgt_start + k->tgt_offset),
                                  k->host_end - k->host_start);
      }

  gomp_mutex_unlock (&devicep->lock);
}
/* Unmap variables described by TGT.  If DO_COPYFROM is true, copy relevant
   variables back from device to host; if it is false, it is assumed that this
   has been done already, i.e. by gomp_copy_from_async above.  */

attribute_hidden void
gomp_unmap_vars (struct target_mem_desc *tgt, bool do_copyfrom)
{
  struct gomp_device_descr *devicep = tgt->device_descr;

  if (tgt->list_count == 0)
    {
      free (tgt);
      return;
    }

  gomp_mutex_lock (&devicep->lock);

  size_t i;
  for (i = 0; i < tgt->list_count; i++)
    if (tgt->list[i] == NULL)
      ;
    else if (tgt->list[i]->refcount > 1)
      tgt->list[i]->refcount--;
    else if (tgt->list[i]->async_refcount > 0)
      tgt->list[i]->async_refcount--;
    else
      {
        splay_tree_key k = tgt->list[i];
        if (k->copy_from && do_copyfrom)
          devicep->dev2host_func (devicep->target_id, (void *) k->host_start,
                                  (void *) (k->tgt->tgt_start + k->tgt_offset),
                                  k->host_end - k->host_start);
        splay_tree_remove (&devicep->mem_map, k);
        if (k->tgt->refcount > 1)
          k->tgt->refcount--;
        else
          gomp_unmap_tgt (k->tgt);
      }

  if (tgt->refcount > 1)
    tgt->refcount--;
  else
    gomp_unmap_tgt (tgt);

  gomp_mutex_unlock (&devicep->lock);
}
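
/* Illustrative usage sketch (not part of the build): the mapping functions
   above are used in pairs; GOMP_target below does roughly

     struct target_mem_desc *tgt
       = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds,
			false, true);
     devicep->run_func (devicep->target_id, fn_addr, (void *) tgt->tgt_start);
     gomp_unmap_vars (tgt, true);

   so every mapping acquired for a region is dropped again when the region
   ends.  */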
static void
gomp_update (struct gomp_device_descr *devicep, size_t mapnum, void **hostaddrs,
             size_t *sizes, void *kinds, bool is_openacc)
{
  size_t i;
  struct splay_tree_key_s cur_node;
  const int typemask = is_openacc ? 0xff : 0x7;

  if (!devicep)
    return;

  if (mapnum == 0)
    return;

  gomp_mutex_lock (&devicep->lock);
  for (i = 0; i < mapnum; i++)
    if (sizes[i])
      {
        cur_node.host_start = (uintptr_t) hostaddrs[i];
        cur_node.host_end = cur_node.host_start + sizes[i];
        splay_tree_key n = splay_tree_lookup (&devicep->mem_map, &cur_node);
        if (n)
          {
            int kind = get_kind (is_openacc, kinds, i);
            if (n->host_start > cur_node.host_start
                || n->host_end < cur_node.host_end)
              {
                gomp_mutex_unlock (&devicep->lock);
                gomp_fatal ("Trying to update [%p..%p) object when "
                            "only [%p..%p) is mapped",
                            (void *) cur_node.host_start,
                            (void *) cur_node.host_end,
                            (void *) n->host_start,
                            (void *) n->host_end);
              }
            if (GOMP_MAP_COPY_TO_P (kind & typemask))
              devicep->host2dev_func (devicep->target_id,
                                      (void *) (n->tgt->tgt_start
                                                + n->tgt_offset
                                                + cur_node.host_start
                                                - n->host_start),
                                      (void *) cur_node.host_start,
                                      cur_node.host_end - cur_node.host_start);
            if (GOMP_MAP_COPY_FROM_P (kind & typemask))
              devicep->dev2host_func (devicep->target_id,
                                      (void *) cur_node.host_start,
                                      (void *) (n->tgt->tgt_start
                                                + n->tgt_offset
                                                + cur_node.host_start
                                                - n->host_start),
                                      cur_node.host_end - cur_node.host_start);
          }
        else
          {
            gomp_mutex_unlock (&devicep->lock);
            gomp_fatal ("Trying to update [%p..%p) object that is not mapped",
                        (void *) cur_node.host_start,
                        (void *) cur_node.host_end);
          }
      }
  gomp_mutex_unlock (&devicep->lock);
}
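
/* Illustrative sketch (not part of the build): for an update of the host
   range [h, h + len) that falls inside a mapping N, the matching device
   address used by both transfer directions above is

     n->tgt->tgt_start + n->tgt_offset + (h - n->host_start)

   i.e. the offset of the host range within the mapping, rebased onto the
   device block.  */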
/* Load the image pointed to by TARGET_DATA to the device specified by
   DEVICEP, and insert into the splay tree the mapping between addresses from
   HOST_TABLE and from the loaded target image.  We rely on the host and
   device compilers emitting variables and functions in the same order.  */

static void
gomp_load_image_to_device (struct gomp_device_descr *devicep,
                           const void *host_table, const void *target_data,
                           bool is_register_lock)
{
  void **host_func_table = ((void ***) host_table)[0];
  void **host_funcs_end  = ((void ***) host_table)[1];
  void **host_var_table  = ((void ***) host_table)[2];
  void **host_vars_end   = ((void ***) host_table)[3];

  /* The func table contains only addresses, the var table contains addresses
     and corresponding sizes.  */
  int num_funcs = host_funcs_end - host_func_table;
  int num_vars  = (host_vars_end - host_var_table) / 2;

  /* Load image to device and get target addresses for the image.  */
  struct addr_pair *target_table = NULL;
  int i, num_target_entries
    = devicep->load_image_func (devicep->target_id, target_data,
                                &target_table);

  if (num_target_entries != num_funcs + num_vars)
    {
      gomp_mutex_unlock (&devicep->lock);
      if (is_register_lock)
        gomp_mutex_unlock (&register_lock);
      gomp_fatal ("Can't map target functions or variables");
    }

  /* Insert host-target address mapping into splay tree.  */
  struct target_mem_desc *tgt = gomp_malloc (sizeof (*tgt));
  tgt->array = gomp_malloc ((num_funcs + num_vars) * sizeof (*tgt->array));
  tgt->refcount = 1;
  tgt->tgt_start = 0;
  tgt->tgt_end = 0;
  tgt->to_free = NULL;
  tgt->prev = NULL;
  tgt->list_count = 0;
  tgt->device_descr = devicep;
  splay_tree_node array = tgt->array;

  for (i = 0; i < num_funcs; i++)
    {
      splay_tree_key k = &array->key;
      k->host_start = (uintptr_t) host_func_table[i];
      k->host_end = k->host_start + 1;
      k->tgt = tgt;
      k->tgt_offset = target_table[i].start;
      k->refcount = 1;
      k->async_refcount = 0;
      k->copy_from = false;
      array->left = NULL;
      array->right = NULL;
      splay_tree_insert (&devicep->mem_map, array);
      array++;
    }

  for (i = 0; i < num_vars; i++)
    {
      struct addr_pair *target_var = &target_table[num_funcs + i];
      if (target_var->end - target_var->start
          != (uintptr_t) host_var_table[i * 2 + 1])
        {
          gomp_mutex_unlock (&devicep->lock);
          if (is_register_lock)
            gomp_mutex_unlock (&register_lock);
          gomp_fatal ("Can't map target variables (size mismatch)");
        }

      splay_tree_key k = &array->key;
      k->host_start = (uintptr_t) host_var_table[i * 2];
      k->host_end = k->host_start + (uintptr_t) host_var_table[i * 2 + 1];
      k->tgt = tgt;
      k->tgt_offset = target_var->start;
      k->refcount = 1;
      k->async_refcount = 0;
      k->copy_from = false;
      array->left = NULL;
      array->right = NULL;
      splay_tree_insert (&devicep->mem_map, array);
      array++;
    }

  free (target_table);
}
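
/* Illustrative sketch (not part of the build): the HOST_TABLE argument used
   above is an array of four pointers delimiting two tables, with the variable
   table storing (address, size) pairs:

     void *host_table[4] = { func_table_start, func_table_end,
			     var_table_start, var_table_end };
     (var_table_start[0] is an address, var_table_start[1] its size, ...)

   The identifier names here are made up for illustration; only the layout is
   implied by the indexing in gomp_load_image_to_device.  */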
/* Unload the mappings described by TARGET_DATA from device DEVICEP.
   The device must be locked.  */

static void
gomp_unload_image_from_device (struct gomp_device_descr *devicep,
                               const void *host_table, const void *target_data)
{
  void **host_func_table = ((void ***) host_table)[0];
  void **host_funcs_end  = ((void ***) host_table)[1];
  void **host_var_table  = ((void ***) host_table)[2];
  void **host_vars_end   = ((void ***) host_table)[3];

  /* The func table contains only addresses, the var table contains addresses
     and corresponding sizes.  */
  int num_funcs = host_funcs_end - host_func_table;
  int num_vars  = (host_vars_end - host_var_table) / 2;

  int j;
  struct splay_tree_key_s k;
  splay_tree_key node = NULL;

  /* Find the mapping at the start of the node array.  */
  if (num_funcs || num_vars)
    {
      k.host_start = (num_funcs ? (uintptr_t) host_func_table[0]
                      : (uintptr_t) host_var_table[0]);
      k.host_end = k.host_start + 1;
      node = splay_tree_lookup (&devicep->mem_map, &k);
    }

  devicep->unload_image_func (devicep->target_id, target_data);

  /* Remove mappings from splay tree.  */
  for (j = 0; j < num_funcs; j++)
    {
      k.host_start = (uintptr_t) host_func_table[j];
      k.host_end = k.host_start + 1;
      splay_tree_remove (&devicep->mem_map, &k);
    }

  for (j = 0; j < num_vars; j++)
    {
      k.host_start = (uintptr_t) host_var_table[j * 2];
      k.host_end = k.host_start + (uintptr_t) host_var_table[j * 2 + 1];
      splay_tree_remove (&devicep->mem_map, &k);
    }

  if (node)
    {
      free (node->tgt);
      free (node);
    }
}
/* This function should be called from every offload image while loading.
   It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
   the target, and TARGET_DATA needed by target plugin.  */

void
GOMP_offload_register (const void *host_table, int target_type,
                       const void *target_data)
{
  int i;
  gomp_mutex_lock (&register_lock);

  /* Load image to all initialized devices.  */
  for (i = 0; i < num_devices; i++)
    {
      struct gomp_device_descr *devicep = &devices[i];
      gomp_mutex_lock (&devicep->lock);
      if (devicep->type == target_type && devicep->is_initialized)
        gomp_load_image_to_device (devicep, host_table, target_data, true);
      gomp_mutex_unlock (&devicep->lock);
    }

  /* Insert image into the array of pending images.  */
  offload_images
    = gomp_realloc_unlock (offload_images,
                           (num_offload_images + 1)
                           * sizeof (struct offload_image_descr));
  offload_images[num_offload_images].type = target_type;
  offload_images[num_offload_images].host_table = host_table;
  offload_images[num_offload_images].target_data = target_data;

  num_offload_images++;
  gomp_mutex_unlock (&register_lock);
}
/* This function should be called from every offload image while unloading.
   It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
   the target, and TARGET_DATA needed by target plugin.  */

void
GOMP_offload_unregister (const void *host_table, int target_type,
                         const void *target_data)
{
  int i;
  gomp_mutex_lock (&register_lock);

  /* Unload image from all initialized devices.  */
  for (i = 0; i < num_devices; i++)
    {
      struct gomp_device_descr *devicep = &devices[i];
      gomp_mutex_lock (&devicep->lock);
      if (devicep->type == target_type && devicep->is_initialized)
        gomp_unload_image_from_device (devicep, host_table, target_data);
      gomp_mutex_unlock (&devicep->lock);
    }

  /* Remove image from the array of pending images.  */
  for (i = 0; i < num_offload_images; i++)
    if (offload_images[i].target_data == target_data)
      {
        offload_images[i] = offload_images[--num_offload_images];
        break;
      }

  gomp_mutex_unlock (&register_lock);
}
/* This function initializes the target device, specified by DEVICEP.  DEVICEP
   must be locked on entry, and remains locked on return.  */

attribute_hidden void
gomp_init_device (struct gomp_device_descr *devicep)
{
  int i;
  devicep->init_device_func (devicep->target_id);

  /* Load to device all images registered so far.  */
  for (i = 0; i < num_offload_images; i++)
    {
      struct offload_image_descr *image = &offload_images[i];
      if (image->type == devicep->type)
        gomp_load_image_to_device (devicep, image->host_table,
                                   image->target_data, false);
    }

  devicep->is_initialized = true;
}
attribute_hidden void
gomp_unload_device (struct gomp_device_descr *devicep)
{
  if (devicep->is_initialized)
    {
      int i;

      /* Unload from device all images registered so far.  */
      for (i = 0; i < num_offload_images; i++)
        {
          struct offload_image_descr *image = &offload_images[i];
          if (image->type == devicep->type)
            gomp_unload_image_from_device (devicep, image->host_table,
                                           image->target_data);
        }
    }
}
/* Free address mapping tables.  MM must be locked on entry, and remains locked
   on return.  */

attribute_hidden void
gomp_free_memmap (struct splay_tree_s *mem_map)
{
  while (mem_map->root)
    {
      struct target_mem_desc *tgt = mem_map->root->key.tgt;

      splay_tree_remove (mem_map, &mem_map->root->key);
      free (tgt->array);
      free (tgt);
    }
}
/* This function de-initializes the target device, specified by DEVICEP.
   DEVICEP must be locked on entry, and remains locked on return.  */

attribute_hidden void
gomp_fini_device (struct gomp_device_descr *devicep)
{
  if (devicep->is_initialized)
    devicep->fini_device_func (devicep->target_id);

  devicep->is_initialized = false;
}
/* Called when encountering a target directive.  If DEVICE is
   GOMP_DEVICE_ICV, use the device-var ICV.  If it is
   GOMP_DEVICE_HOST_FALLBACK (or any value larger than the last available
   hw device), use the host fallback.  FN is the address of the host code;
   UNUSED is part of the current ABI, but we're not actually using it.
   HOSTADDRS, SIZES and KINDS are arrays with MAPNUM entries, holding the
   addresses of the host objects, the sizes of the host objects (resp., for
   pointer kinds, the pointer bias with an assumed sizeof (void *) size) and
   the kinds.  */

void
GOMP_target (int device, void (*fn) (void *), const void *unused,
             size_t mapnum, void **hostaddrs, size_t *sizes,
             unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
    {
      /* Host fallback.  */
      struct gomp_thread old_thr, *thr = gomp_thread ();
      old_thr = *thr;
      memset (thr, '\0', sizeof (*thr));
      if (gomp_places_list)
        {
          thr->place = old_thr.place;
          thr->ts.place_partition_len = gomp_places_list_len;
        }
      fn (hostaddrs);
      gomp_free_thread (thr);
      *thr = old_thr;
      return;
    }

  gomp_mutex_lock (&devicep->lock);
  if (!devicep->is_initialized)
    gomp_init_device (devicep);
  gomp_mutex_unlock (&devicep->lock);

  void *fn_addr;

  if (devicep->capabilities & GOMP_OFFLOAD_CAP_NATIVE_EXEC)
    fn_addr = (void *) fn;
  else
    {
      gomp_mutex_lock (&devicep->lock);
      struct splay_tree_key_s k;
      k.host_start = (uintptr_t) fn;
      k.host_end = k.host_start + 1;
      splay_tree_key tgt_fn = splay_tree_lookup (&devicep->mem_map, &k);
      if (tgt_fn == NULL)
        {
          gomp_mutex_unlock (&devicep->lock);
          gomp_fatal ("Target function wasn't mapped");
        }
      gomp_mutex_unlock (&devicep->lock);

      fn_addr = (void *) tgt_fn->tgt_offset;
    }

  struct target_mem_desc *tgt_vars
    = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
                     true);
  struct gomp_thread old_thr, *thr = gomp_thread ();
  old_thr = *thr;
  memset (thr, '\0', sizeof (*thr));
  if (gomp_places_list)
    {
      thr->place = old_thr.place;
      thr->ts.place_partition_len = gomp_places_list_len;
    }
  devicep->run_func (devicep->target_id, fn_addr, (void *) tgt_vars->tgt_start);
  gomp_free_thread (thr);
  *thr = old_thr;
  gomp_unmap_vars (tgt_vars, true);
}
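
/* Illustrative sketch (not part of the build): for a construct such as
   "#pragma omp target map(tofrom: x)" the compiler emits a call along the
   lines of

     GOMP_target (GOMP_DEVICE_ICV, outlined_fn, NULL, 1, hostaddrs, sizes,
		  kinds);

   where hostaddrs[0] is &x, sizes[0] is sizeof (x) and kinds[0] carries a
   GOMP_MAP_TOFROM code in its low bits.  The name "outlined_fn" is made up;
   the exact calling sequence is up to the compiler.  */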
void
GOMP_target_data (int device, const void *unused, size_t mapnum,
                  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
    {
      /* Host fallback.  */
      struct gomp_task_icv *icv = gomp_icv (false);
      if (icv->target_data)
        {
          /* Even when doing a host fallback, if there are any active
             #pragma omp target data constructs, need to remember the
             new #pragma omp target data, otherwise GOMP_target_end_data
             would get out of sync.  */
          struct target_mem_desc *tgt
            = gomp_map_vars (NULL, 0, NULL, NULL, NULL, NULL, false, false);
          tgt->prev = icv->target_data;
          icv->target_data = tgt;
        }
      return;
    }

  gomp_mutex_lock (&devicep->lock);
  if (!devicep->is_initialized)
    gomp_init_device (devicep);
  gomp_mutex_unlock (&devicep->lock);

  struct target_mem_desc *tgt
    = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
                     false);
  struct gomp_task_icv *icv = gomp_icv (true);
  tgt->prev = icv->target_data;
  icv->target_data = tgt;
}
void
GOMP_target_end_data (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (icv->target_data)
    {
      struct target_mem_desc *tgt = icv->target_data;
      icv->target_data = tgt->prev;
      gomp_unmap_vars (tgt, true);
    }
}
void
GOMP_target_update (int device, const void *unused, size_t mapnum,
                    void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
    return;

  gomp_mutex_lock (&devicep->lock);
  if (!devicep->is_initialized)
    gomp_init_device (devicep);
  gomp_mutex_unlock (&devicep->lock);

  gomp_update (devicep, mapnum, hostaddrs, sizes, kinds, false);
}
void
GOMP_teams (unsigned int num_teams, unsigned int thread_limit)
{
  if (thread_limit)
    {
      struct gomp_task_icv *icv = gomp_icv (true);
      icv->thread_limit_var
        = thread_limit > INT_MAX ? UINT_MAX : thread_limit;
    }
  (void) num_teams;
}
#ifdef PLUGIN_SUPPORT

/* This function tries to load a plugin for DEVICE.  The name of the plugin
   is passed in PLUGIN_NAME.
   The handles of the found functions are stored in the corresponding fields
   of DEVICE.  The function returns TRUE on success and FALSE otherwise.  */

static bool
gomp_load_plugin_for_device (struct gomp_device_descr *device,
                             const char *plugin_name)
{
  const char *err = NULL, *last_missing = NULL;
  int optional_present, optional_total;

  /* Clear any existing error.  */
  dlerror ();

  void *plugin_handle = dlopen (plugin_name, RTLD_LAZY);
  if (!plugin_handle)
    {
      err = dlerror ();
      goto out;
    }

  /* Check if all required functions are available in the plugin and store
     their handlers.  */
#define DLSYM(f)							\
  do									\
    {									\
      device->f##_func = dlsym (plugin_handle, "GOMP_OFFLOAD_" #f);	\
      err = dlerror ();							\
      if (err != NULL)							\
	goto out;							\
    }									\
  while (0)
  /* Similar, but missing functions are not an error.  */
#define DLSYM_OPT(f, n)							\
  do									\
    {									\
      const char *tmp_err;						\
      device->f##_func = dlsym (plugin_handle, "GOMP_OFFLOAD_" #n);	\
      tmp_err = dlerror ();						\
      if (tmp_err == NULL)						\
	optional_present++;						\
      else								\
	last_missing = #n;						\
      optional_total++;							\
    }									\
  while (0)

  DLSYM (get_name);
  DLSYM (get_caps);
  DLSYM (get_type);
  DLSYM (get_num_devices);
  DLSYM (init_device);
  DLSYM (fini_device);
  DLSYM (load_image);
  DLSYM (unload_image);
  DLSYM (alloc);
  DLSYM (free);
  DLSYM (dev2host);
  DLSYM (host2dev);
  device->capabilities = device->get_caps_func ();
  if (device->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
    DLSYM (run);
  if (device->capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
    {
      optional_present = optional_total = 0;
      DLSYM_OPT (openacc.exec, openacc_parallel);
      DLSYM_OPT (openacc.register_async_cleanup,
                 openacc_register_async_cleanup);
      DLSYM_OPT (openacc.async_test, openacc_async_test);
      DLSYM_OPT (openacc.async_test_all, openacc_async_test_all);
      DLSYM_OPT (openacc.async_wait, openacc_async_wait);
      DLSYM_OPT (openacc.async_wait_async, openacc_async_wait_async);
      DLSYM_OPT (openacc.async_wait_all, openacc_async_wait_all);
      DLSYM_OPT (openacc.async_wait_all_async, openacc_async_wait_all_async);
      DLSYM_OPT (openacc.async_set_async, openacc_async_set_async);
      DLSYM_OPT (openacc.create_thread_data, openacc_create_thread_data);
      DLSYM_OPT (openacc.destroy_thread_data, openacc_destroy_thread_data);
      /* Require all the OpenACC handlers if we have
         GOMP_OFFLOAD_CAP_OPENACC_200.  */
      if (optional_present != optional_total)
        {
          err = "plugin missing OpenACC handler function";
          goto out;
        }
      optional_present = optional_total = 0;
      DLSYM_OPT (openacc.cuda.get_current_device,
                 openacc_get_current_cuda_device);
      DLSYM_OPT (openacc.cuda.get_current_context,
                 openacc_get_current_cuda_context);
      DLSYM_OPT (openacc.cuda.get_stream, openacc_get_cuda_stream);
      DLSYM_OPT (openacc.cuda.set_stream, openacc_set_cuda_stream);
      /* Make sure all the CUDA functions are there if any of them are.  */
      if (optional_present && optional_present != optional_total)
        {
          err = "plugin missing OpenACC CUDA handler function";
          goto out;
        }
    }
#undef DLSYM
#undef DLSYM_OPT

  return true;

 out:
  gomp_error ("while loading %s: %s", plugin_name, err);
  if (last_missing)
    gomp_error ("missing function was %s", last_missing);
  if (plugin_handle)
    dlclose (plugin_handle);

  return false;
}
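
/* Illustrative sketch (not part of the build): DLSYM (alloc) above boils down
   to

     device->alloc_func = dlsym (plugin_handle, "GOMP_OFFLOAD_alloc");

   so a plugin only has to export entry points following the
   GOMP_OFFLOAD_<name> naming convention expected by DLSYM/DLSYM_OPT.  */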
/* This function initializes the runtime needed for offloading.
   It parses the list of offload targets and tries to load the plugins for
   these targets.  On return, the variables NUM_DEVICES and NUM_DEVICES_OPENMP
   will be set, and the array DEVICES initialized, containing descriptors for
   corresponding devices, first the GOMP_OFFLOAD_CAP_OPENMP_400 ones, followed
   by the others.  */

static void
gomp_target_init (void)
{
  const char *prefix = "libgomp-plugin-";
  const char *suffix = SONAME_SUFFIX (1);
  const char *cur, *next;
  char *plugin_name;
  int i, new_num_devices;

  num_devices = 0;
  devices = NULL;

  cur = OFFLOAD_TARGETS;
  if (*cur)
    do
      {
        struct gomp_device_descr current_device;

        next = strchr (cur, ',');

        plugin_name = (char *) malloc (1 + (next ? next - cur : strlen (cur))
                                       + strlen (prefix) + strlen (suffix));
        if (!plugin_name)
          {
            num_devices = 0;
            break;
          }

        strcpy (plugin_name, prefix);
        strncat (plugin_name, cur, next ? next - cur : strlen (cur));
        strcat (plugin_name, suffix);

        if (gomp_load_plugin_for_device (&current_device, plugin_name))
          {
            new_num_devices = current_device.get_num_devices_func ();
            if (new_num_devices >= 1)
              {
                /* Augment DEVICES and NUM_DEVICES.  */

                devices = realloc (devices, (num_devices + new_num_devices)
                                            * sizeof (struct gomp_device_descr));
                if (!devices)
                  {
                    num_devices = 0;
                    free (plugin_name);
                    break;
                  }

                current_device.name = current_device.get_name_func ();
                /* current_device.capabilities has already been set.  */
                current_device.type = current_device.get_type_func ();
                current_device.mem_map.root = NULL;
                current_device.is_initialized = false;
                current_device.openacc.data_environ = NULL;
                for (i = 0; i < new_num_devices; i++)
                  {
                    current_device.target_id = i;
                    devices[num_devices] = current_device;
                    gomp_mutex_init (&devices[num_devices].lock);
                    num_devices++;
                  }
              }
          }

        free (plugin_name);
        cur = next + 1;
      }
    while (next);

  /* In DEVICES, sort the GOMP_OFFLOAD_CAP_OPENMP_400 ones first, and set
     NUM_DEVICES_OPENMP.  */
  struct gomp_device_descr *devices_s
    = malloc (num_devices * sizeof (struct gomp_device_descr));
  if (!devices_s)
    {
      num_devices = 0;
      free (devices);
      devices = NULL;
    }
  num_devices_openmp = 0;
  for (i = 0; i < num_devices; i++)
    if (devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      devices_s[num_devices_openmp++] = devices[i];
  int num_devices_after_openmp = num_devices_openmp;
  for (i = 0; i < num_devices; i++)
    if (!(devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
      devices_s[num_devices_after_openmp++] = devices[i];
  free (devices);
  devices = devices_s;

  for (i = 0; i < num_devices; i++)
    {
      /* The 'devices' array can be moved (by the realloc call) until we have
         found all the plugins, so registering with the OpenACC runtime (which
         takes a copy of the pointer argument) must be delayed until now.  */
      if (devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
        goacc_register (&devices[i]);
    }
}
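
/* Illustrative sketch (not part of the build): for a hypothetical entry "foo"
   in OFFLOAD_TARGETS, the loop above concatenates

     "libgomp-plugin-" "foo" SONAME_SUFFIX (1)

   which on a typical ELF configuration yields a name like
   "libgomp-plugin-foo.so.1"; the exact suffix comes from plugin-suffix.h.  */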
#else /* PLUGIN_SUPPORT */
/* If dlfcn.h is unavailable we always fall back to host execution.
   GOMP_target* routines are just stubs for this case.  */
static void
gomp_target_init (void)
{
}
#endif /* PLUGIN_SUPPORT */