libgomp/oacc-mem.c
/* OpenACC Runtime initialization routines

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Mentor Embedded.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
#include "openacc.h"
#include "config.h"
#include "libgomp.h"
#include "gomp-constants.h"
#include "oacc-int.h"
#include "splay-tree.h"
#include <stdint.h>
#include <assert.h>
/* Return block containing [H->S), or NULL if not contained.  The device lock
   for DEV must be locked on entry, and remains locked on exit.  */

static splay_tree_key
lookup_host (struct gomp_device_descr *dev, void *h, size_t s)
{
  struct splay_tree_key_s node;
  splay_tree_key key;

  node.host_start = (uintptr_t) h;
  node.host_end = (uintptr_t) h + s;

  key = splay_tree_lookup (&dev->mem_map, &node);

  return key;
}
/* Return block containing [D->S), or NULL if not contained.
   The list isn't ordered by device address, so we have to iterate
   over the whole array.  This is not expected to be a common
   operation.  The device lock associated with TGT must be locked on entry, and
   remains locked on exit.  */

static splay_tree_key
lookup_dev (struct target_mem_desc *tgt, void *d, size_t s)
{
  int i;
  struct target_mem_desc *t;

  if (!tgt)
    return NULL;

  for (t = tgt; t != NULL; t = t->prev)
    {
      if (t->tgt_start <= (uintptr_t) d && t->tgt_end >= (uintptr_t) d + s)
        break;
    }

  if (!t)
    return NULL;

  for (i = 0; i < t->list_count; i++)
    {
      void *offset;

      splay_tree_key k = &t->array[i].key;
      offset = d - t->tgt_start + k->tgt_offset;

      if (k->host_start + offset <= (void *) k->host_end)
        return k;
    }

  return NULL;
}
/* OpenACC is silent on how memory exhaustion is indicated.  We return
   NULL.  */

void *
acc_malloc (size_t s)
{
  if (!s)
    return NULL;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();

  assert (thr->dev);

  return thr->dev->alloc_func (thr->dev->target_id, s);
}
/* OpenACC 2.0a (3.2.16) doesn't specify what to do in the event that the
   device address is mapped.  We choose to check whether it is mapped, and
   if it is, to unmap it.  */
void
acc_free (void *d)
{
  splay_tree_key k;

  if (!d)
    return;

  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  struct gomp_device_descr *acc_dev = thr->dev;

  gomp_mutex_lock (&acc_dev->lock);

  /* We don't have to call lazy open here, as the ptr value must have
     been returned by acc_malloc.  It's not permitted to pass NULL in
     (unless you got that null from acc_malloc).  */
  if ((k = lookup_dev (acc_dev->openacc.data_environ, d, 1)))
    {
      void *offset;

      offset = d - k->tgt->tgt_start + k->tgt_offset;

      gomp_mutex_unlock (&acc_dev->lock);

      acc_unmap_data ((void *)(k->host_start + offset));
    }
  else
    gomp_mutex_unlock (&acc_dev->lock);

  acc_dev->free_func (acc_dev->target_id, d);
}
void
acc_memcpy_to_device (void *d, void *h, size_t s)
{
  /* No need to call lazy open here, as the device pointer must have
     been obtained from a routine that did that.  */
  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  thr->dev->host2dev_func (thr->dev->target_id, d, h, s);
}

void
acc_memcpy_from_device (void *h, void *d, size_t s)
{
  /* No need to call lazy open here, as the device pointer must have
     been obtained from a routine that did that.  */
  struct goacc_thread *thr = goacc_thread ();

  assert (thr && thr->dev);

  thr->dev->dev2host_func (thr->dev->target_id, h, d, s);
}
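/* Usage sketch (illustrative only, not part of the original file): the
   acc_malloc/acc_free and acc_memcpy_* entry points above manage raw device
   memory with no host mapping.  A caller might pair them as below; the
   function and variable names are hypothetical.

     #include <openacc.h>
     #include <stdlib.h>

     void
     example_raw_device_buffer (float *host_buf, size_t n)
     {
       size_t bytes = n * sizeof (float);

       void *dev_buf = acc_malloc (bytes);     // device-only allocation
       if (!dev_buf)
         abort ();                             // acc_malloc returns NULL on failure

       acc_memcpy_to_device (dev_buf, host_buf, bytes);    // host -> device
       // ... run device work on dev_buf ...
       acc_memcpy_from_device (host_buf, dev_buf, bytes);  // device -> host

       acc_free (dev_buf);
     }
*/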
/* Return the device pointer that corresponds to host data H.  Or NULL
   if no mapping.  */

void *
acc_deviceptr (void *h)
{
  splay_tree_key n;
  void *d;
  void *offset;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *dev = thr->dev;

  gomp_mutex_lock (&dev->lock);

  n = lookup_host (dev, h, 1);

  if (!n)
    {
      gomp_mutex_unlock (&dev->lock);
      return NULL;
    }

  offset = h - n->host_start;

  d = n->tgt->tgt_start + n->tgt_offset + offset;

  gomp_mutex_unlock (&dev->lock);

  return d;
}
/* Return the host pointer that corresponds to device data D.  Or NULL
   if no mapping.  */

void *
acc_hostptr (void *d)
{
  splay_tree_key n;
  void *h;
  void *offset;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_dev (acc_dev->openacc.data_environ, d, 1);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      return NULL;
    }

  offset = d - n->tgt->tgt_start + n->tgt_offset;

  h = n->host_start + offset;

  gomp_mutex_unlock (&acc_dev->lock);

  return h;
}
/* Return 1 if host data [H,+S] is present on the device.  */

int
acc_is_present (void *h, size_t s)
{
  splay_tree_key n;

  if (!s || !h)
    return 0;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  if (n && ((uintptr_t)h < n->host_start
            || (uintptr_t)h + s > n->host_end
            || s > n->host_end - n->host_start))
    n = NULL;

  gomp_mutex_unlock (&acc_dev->lock);

  return n != NULL;
}
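/* Usage sketch (illustrative only, not part of the original file): once a
   host range is mapped, acc_deviceptr, acc_hostptr and acc_is_present let a
   caller translate between the two address spaces.  The mapping here is
   established with acc_copyin/acc_delete, which are defined further down in
   this file; the function and variable names are hypothetical.

     #include <openacc.h>
     #include <assert.h>

     void
     example_address_translation (double *a, size_t n)
     {
       size_t bytes = n * sizeof (double);

       acc_copyin (a, bytes);                   // map A and copy host -> device

       if (acc_is_present (a, bytes))           // whole range is mapped
         {
           void *d_a = acc_deviceptr (a);       // host -> device translation
           void *h_a = acc_hostptr (d_a);       // device -> host translation
           assert (h_a == (void *) a);          // round trip back to the host pointer
         }

       acc_delete (a, bytes);                   // drop the mapping again
     }
*/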
/* Create a mapping for host [H,+S] -> device [D,+S].  */

void
acc_map_data (void *h, void *d, size_t s)
{
  struct target_mem_desc *tgt;
  size_t mapnum = 1;
  void *hostaddrs = h;
  void *devaddrs = d;
  size_t sizes = s;
  unsigned short kinds = GOMP_MAP_ALLOC;

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  if (acc_dev->capabilities & GOMP_OFFLOAD_CAP_SHARED_MEM)
    {
      if (d != h)
        gomp_fatal ("cannot map data on shared-memory system");

      tgt = gomp_map_vars (NULL, 0, NULL, NULL, NULL, NULL, true, false);
    }
  else
    {
      struct goacc_thread *thr = goacc_thread ();

      if (!d || !h || !s)
        gomp_fatal ("[%p,+%d]->[%p,+%d] is a bad map",
                    (void *)h, (int)s, (void *)d, (int)s);

      gomp_mutex_lock (&acc_dev->lock);

      if (lookup_host (acc_dev, h, s))
        {
          gomp_mutex_unlock (&acc_dev->lock);
          gomp_fatal ("host address [%p, +%d] is already mapped", (void *)h,
                      (int)s);
        }

      if (lookup_dev (thr->dev->openacc.data_environ, d, s))
        {
          gomp_mutex_unlock (&acc_dev->lock);
          gomp_fatal ("device address [%p, +%d] is already mapped", (void *)d,
                      (int)s);
        }

      gomp_mutex_unlock (&acc_dev->lock);

      tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, &devaddrs, &sizes,
                           &kinds, true, false);
    }

  gomp_mutex_lock (&acc_dev->lock);
  tgt->prev = acc_dev->openacc.data_environ;
  acc_dev->openacc.data_environ = tgt;
  gomp_mutex_unlock (&acc_dev->lock);
}
void
acc_unmap_data (void *h)
{
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  /* No need to call lazy open, as the address must have been mapped.  */

  size_t host_size;

  gomp_mutex_lock (&acc_dev->lock);

  splay_tree_key n = lookup_host (acc_dev, h, 1);
  struct target_mem_desc *t;

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("%p is not a mapped block", (void *)h);
    }

  host_size = n->host_end - n->host_start;

  if (n->host_start != (uintptr_t) h)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] surrounds %p",
                  (void *) n->host_start, (int) host_size, (void *) h);
    }

  t = n->tgt;

  if (t->refcount == 2)
    {
      struct target_mem_desc *tp;

      /* This is the last reference, so pull the descriptor off the
         chain.  This avoids gomp_unmap_vars via gomp_unmap_tgt from
         freeing the device memory.  */
      t->tgt_end = 0;
      t->to_free = 0;

      for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
           tp = t, t = t->prev)
        if (n->tgt == t)
          {
            if (tp)
              tp->prev = t->prev;
            else
              acc_dev->openacc.data_environ = t->prev;

            break;
          }
    }

  gomp_mutex_unlock (&acc_dev->lock);

  gomp_unmap_vars (t, true);
}
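/* Usage sketch (illustrative only, not part of the original file):
   acc_map_data attaches an existing device allocation to a host range
   without copying, and acc_unmap_data detaches it again without freeing
   either side.  The function and buffer names below are hypothetical.

     #include <openacc.h>

     void
     example_map_existing_device_memory (char *host_buf, size_t bytes)
     {
       void *dev_buf = acc_malloc (bytes);       // device memory, no mapping yet

       acc_map_data (host_buf, dev_buf, bytes);  // host [H,+S] -> device [D,+S]
       // ... OpenACC regions may now treat host_buf as present ...
       acc_unmap_data (host_buf);                // mapping gone, memory remains

       acc_free (dev_buf);                       // release the device memory
     }
*/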
#define FLAG_PRESENT (1 << 0)
#define FLAG_CREATE (1 << 1)
#define FLAG_COPY (1 << 2)

static void *
present_create_copy (unsigned f, void *h, size_t s)
{
  void *d;
  splay_tree_key n;

  if (!h || !s)
    gomp_fatal ("[%p,+%d] is a bad range", (void *)h, (int)s);

  goacc_lazy_initialize ();

  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);
  if (n)
    {
      /* Present.  */
      d = (void *) (n->tgt->tgt_start + n->tgt_offset);

      if (!(f & FLAG_PRESENT))
        {
          gomp_mutex_unlock (&acc_dev->lock);
          gomp_fatal ("[%p,+%d] already mapped to [%p,+%d]",
                      (void *)h, (int)s, (void *)d, (int)s);
        }
      if ((h + s) > (void *)n->host_end)
        {
          gomp_mutex_unlock (&acc_dev->lock);
          gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
        }

      gomp_mutex_unlock (&acc_dev->lock);
    }
  else if (!(f & FLAG_CREATE))
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,+%d] not mapped", (void *)h, (int)s);
    }
  else
    {
      struct target_mem_desc *tgt;
      size_t mapnum = 1;
      unsigned short kinds;
      void *hostaddrs = h;

      if (f & FLAG_COPY)
        kinds = GOMP_MAP_TO;
      else
        kinds = GOMP_MAP_ALLOC;

      gomp_mutex_unlock (&acc_dev->lock);

      tgt = gomp_map_vars (acc_dev, mapnum, &hostaddrs, NULL, &s, &kinds, true,
                           false);

      gomp_mutex_lock (&acc_dev->lock);

      d = tgt->to_free;
      tgt->prev = acc_dev->openacc.data_environ;
      acc_dev->openacc.data_environ = tgt;

      gomp_mutex_unlock (&acc_dev->lock);
    }

  return d;
}
void *
acc_create (void *h, size_t s)
{
  return present_create_copy (FLAG_CREATE, h, s);
}

void *
acc_copyin (void *h, size_t s)
{
  return present_create_copy (FLAG_CREATE | FLAG_COPY, h, s);
}

void *
acc_present_or_create (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE, h, s);
}

void *
acc_present_or_copyin (void *h, size_t s)
{
  return present_create_copy (FLAG_PRESENT | FLAG_CREATE | FLAG_COPY, h, s);
}
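/* Summary of the flag combinations used by the four wrappers above (added
   here as a reading aid; derived directly from present_create_copy):

     acc_create             FLAG_CREATE                             fatal if the range is already mapped
     acc_copyin             FLAG_CREATE | FLAG_COPY                 as acc_create, plus host -> device copy
     acc_present_or_create  FLAG_PRESENT | FLAG_CREATE              reuse an existing mapping, else create
     acc_present_or_copyin  FLAG_PRESENT | FLAG_CREATE | FLAG_COPY  reuse, else create and copy

   In every case the device address of the (existing or newly created)
   block is returned; FLAG_COPY only takes effect when a new mapping is
   created.  */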
#define FLAG_COPYOUT (1 << 0)

static void
delete_copyout (unsigned f, void *h, size_t s)
{
  size_t host_size;
  splay_tree_key n;
  void *d;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  /* No need to call lazy open, as the data must already have been
     mapped.  */

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] is not mapped", (void *)h, (int)s);
    }

  d = (void *) (n->tgt->tgt_start + n->tgt_offset);

  host_size = n->host_end - n->host_start;

  if (n->host_start != (uintptr_t) h || host_size != s)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] surrounds [%p,+%d]",
                  (void *) n->host_start, (int) host_size, (void *) h, (int) s);
    }

  gomp_mutex_unlock (&acc_dev->lock);

  if (f & FLAG_COPYOUT)
    acc_dev->dev2host_func (acc_dev->target_id, h, d, s);

  acc_unmap_data (h);

  acc_dev->free_func (acc_dev->target_id, d);
}
void
acc_delete (void *h, size_t s)
{
  delete_copyout (0, h, s);
}

void
acc_copyout (void *h, size_t s)
{
  delete_copyout (FLAG_COPYOUT, h, s);
}
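/* Usage sketch (illustrative only, not part of the original file):
   acc_copyin and acc_copyout form the usual unstructured data lifetime.
   Note that delete_copyout insists on the exact range that was mapped: the
   host address and size passed to acc_copyout (or acc_delete) must match
   the original mapping.  Names below are hypothetical.

     #include <openacc.h>

     void
     example_copyin_copyout (int *v, size_t n)
     {
       size_t bytes = n * sizeof (int);

       acc_copyin (v, bytes);    // map V and copy host -> device
       // ... device code updates the mapped copy of V ...
       acc_copyout (v, bytes);   // copy device -> host, then unmap and free
     }
*/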
static void
update_dev_host (int is_dev, void *h, size_t s)
{
  splay_tree_key n;
  void *d;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, s);

  /* No need to call lazy open, as the data must already have been
     mapped.  */

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("[%p,%d] is not mapped", h, (int)s);
    }

  d = (void *) (n->tgt->tgt_start + n->tgt_offset);

  gomp_mutex_unlock (&acc_dev->lock);

  if (is_dev)
    acc_dev->host2dev_func (acc_dev->target_id, d, h, s);
  else
    acc_dev->dev2host_func (acc_dev->target_id, h, d, s);
}

void
acc_update_device (void *h, size_t s)
{
  update_dev_host (1, h, s);
}

void
acc_update_self (void *h, size_t s)
{
  update_dev_host (0, h, s);
}
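/* Usage sketch (illustrative only, not part of the original file):
   acc_update_device and acc_update_self refresh one side of an existing
   mapping without changing its lifetime.  Names below are hypothetical.

     #include <openacc.h>

     void
     example_update (double *x, size_t n)
     {
       size_t bytes = n * sizeof (double);

       acc_copyin (x, bytes);          // establish the mapping

       x[0] = 42.0;                    // host-side change ...
       acc_update_device (x, bytes);   // ... pushed to the device copy

       // ... device code modifies the mapped copy of X ...
       acc_update_self (x, bytes);     // pull the device copy back to the host

       acc_delete (x, bytes);          // end the data lifetime
     }
*/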
/* Record the MAPNUM mappings described by HOSTADDRS, SIZES and KINDS on the
   current device's data environment list.  */

void
gomp_acc_insert_pointer (size_t mapnum, void **hostaddrs, size_t *sizes,
                         void *kinds)
{
  struct target_mem_desc *tgt;
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;

  gomp_debug (0, "  %s: prepare mappings\n", __FUNCTION__);
  tgt = gomp_map_vars (acc_dev, mapnum, hostaddrs,
                       NULL, sizes, kinds, true, false);
  gomp_debug (0, "  %s: mappings prepared\n", __FUNCTION__);

  gomp_mutex_lock (&acc_dev->lock);
  tgt->prev = acc_dev->openacc.data_environ;
  acc_dev->openacc.data_environ = tgt;
  gomp_mutex_unlock (&acc_dev->lock);
}
/* Undo a mapping recorded by gomp_acc_insert_pointer for host address H.
   If FORCE_COPYFROM is set, copy the data back to the host.  When ASYNC
   requests synchronous operation the target memory is unmapped immediately;
   otherwise the copy-back is queued and the descriptor is handed to the
   device's asynchronous cleanup hook.  */

void
gomp_acc_remove_pointer (void *h, bool force_copyfrom, int async, int mapnum)
{
  struct goacc_thread *thr = goacc_thread ();
  struct gomp_device_descr *acc_dev = thr->dev;
  splay_tree_key n;
  struct target_mem_desc *t;
  int minrefs = (mapnum == 1) ? 2 : 3;

  gomp_mutex_lock (&acc_dev->lock);

  n = lookup_host (acc_dev, h, 1);

  if (!n)
    {
      gomp_mutex_unlock (&acc_dev->lock);
      gomp_fatal ("%p is not a mapped block", (void *)h);
    }

  gomp_debug (0, "  %s: restore mappings\n", __FUNCTION__);

  t = n->tgt;

  struct target_mem_desc *tp;

  if (t->refcount == minrefs)
    {
      /* This is the last reference, so pull the descriptor off the
         chain.  This avoids gomp_unmap_vars via gomp_unmap_tgt from
         freeing the device memory.  */
      t->tgt_end = 0;
      t->to_free = 0;

      for (tp = NULL, t = acc_dev->openacc.data_environ; t != NULL;
           tp = t, t = t->prev)
        if (n->tgt == t)
          {
            if (tp)
              tp->prev = t->prev;
            else
              acc_dev->openacc.data_environ = t->prev;
            break;
          }
    }

  if (force_copyfrom)
    t->list[0]->copy_from = 1;

  gomp_mutex_unlock (&acc_dev->lock);

  /* If running synchronously, unmap immediately.  */
  if (async < acc_async_noval)
    gomp_unmap_vars (t, true);
  else
    {
      gomp_copy_from_async (t);
      acc_dev->openacc.register_async_cleanup_func (t);
    }

  gomp_debug (0, "  %s: mappings restored\n", __FUNCTION__);
}