/* MPX Wrappers Library
   Copyright (C) 2014 Free Software Foundation, Inc.
   Contributed by Ilya Enkovich (ilya.enkovich@intel.com)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "stdlib.h"
#include "string.h"
#include <sys/mman.h>
#include <stdint.h>
#include <assert.h>
#include "mpxrt/mpxrt.h"

/* Since internal MPX wrapper calls must avoid the PLT, which would clear
   the bound registers, we make them static with an external alias.  */
#define EXTERN_ALIAS(f) \
  __typeof (f) __##f __attribute__((alias(#f)));
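
/* For example, EXTERN_ALIAS (mpx_wrapper_malloc) expands to

     __typeof (mpx_wrapper_malloc) __mpx_wrapper_malloc
       __attribute__((alias("mpx_wrapper_malloc")));

   so code instrumented by the Pointer Bounds Checker links against the
   exported __mpx_wrapper_* name (cf. -fchkp-use-wrappers), while calls
   made inside this library use the static name directly and never go
   through the PLT.  */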

static void *
mpx_wrapper_malloc (size_t size)
{
  void *p = (void *)malloc (size);
  if (!p) return __bnd_null_ptr_bounds (p);
  return __bnd_set_ptr_bounds (p, size);
}

EXTERN_ALIAS (mpx_wrapper_malloc)
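
/* The allocation wrappers (malloc above; mmap, realloc and calloc below)
   all follow the same pattern: on success the pointer is returned with
   bounds [p, p + size - 1] attached via __bnd_set_ptr_bounds; on failure
   it is returned through __bnd_null_ptr_bounds, which associates it with
   the bounds of the NULL pointer (see the __bnd_* built-ins in the GCC
   documentation).  */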

void *
__mpx_wrapper_mmap (void *addr, size_t length, int prot, int flags,
                    int fd, off_t offset)
{
  void *p = mmap (addr, length, prot, flags, fd, offset);
  /* mmap reports failure with MAP_FAILED rather than NULL.  */
  if (!p || p == MAP_FAILED)
    return __bnd_null_ptr_bounds (p);
  return __bnd_set_ptr_bounds (p, length);
}

void *
__mpx_wrapper_realloc (void *ptr, size_t n)
{
  if (!ptr)
    return mpx_wrapper_malloc (n);

  /* We don't know how much data is copied by realloc
     and therefore may check only lower bounds.  */
  __bnd_chk_ptr_lbounds (ptr);
  ptr = realloc (ptr, n);

  if (!ptr)
    return __bnd_null_ptr_bounds (ptr);

  return __bnd_set_ptr_bounds (ptr, n);
}

void *
__mpx_wrapper_calloc (size_t n_elements, size_t element_size)
{
  void *p = calloc (n_elements, element_size);
  if (!p)
    return __bnd_null_ptr_bounds (p);
  return __bnd_set_ptr_bounds (p, n_elements * element_size);
}

static void *
mpx_wrapper_memset (void *dstpp, int c, size_t len)
{
  if (len > 0)
    {
      __bnd_chk_ptr_bounds (dstpp, len);
      memset (dstpp, c, len);
    }
  return dstpp;
}

EXTERN_ALIAS (mpx_wrapper_memset)

void
__mpx_wrapper_bzero (void *dst, size_t len)
{
  mpx_wrapper_memset (dst, 0, len);
}

/* The mpx_pointer type is used for getting bits
   for bt_index (index in bounds table) and
   bd_index (index in bounds directory).  */
typedef union
{
  struct
  {
    unsigned long ignored:NUM_IGN_BITS;
    unsigned long l2entry:NUM_L2_BITS;
    unsigned long l1index:NUM_L1_BITS;
  };
  void *pointer;
} mpx_pointer;
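
/* Overlaying a pointer value this way (lowest bits first: IGNORED,
   then L2ENTRY, then L1INDEX) means that for a pointer P

     P.l1index == ((uintptr_t) P.pointer >> (NUM_IGN_BITS + NUM_L2_BITS))
                  & ((1UL << NUM_L1_BITS) - 1)

   selects the slot in the bounds directory, and P.l2entry selects the
   entry within the bounds table that slot points to.  This relies on
   the little-endian bit-field layout used on x86.  */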

/* The mpx_bt_entry struct represents a cell in the bounds table.
   lb is the lower bound, ub is the upper bound,
   p is the stored pointer.  */
struct mpx_bt_entry
{
  void *lb;
  void *ub;
  void *p;
  void *reserved;
};

/* A special type for bd is needed because bt addresses can be modified.  */
typedef struct mpx_bt_entry * volatile * bd_type;
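
/* The bounds directory is an array of such slots; each slot holds the
   address of a bounds table with status bits packed into its low bits.
   That is why the code below always masks a slot with MPX_L2_ADDR_MASK
   before using it as a pointer and tests MPX_L2_VALID_MASK before
   trusting it.  */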

/* Function alloc_bt is used for allocating a bounds table
   for the destination pointers if we don't have one.
   We generate a bounds store for some pointer belonging
   to that table and the kernel allocates the table for us.  */
static inline void __attribute__ ((bnd_legacy))
alloc_bt (void *ptr)
{
  __asm__ __volatile__ ("bndstx %%bnd0, (%0,%0)"::"r" (ptr):"%bnd0");
}
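
/* With the (%0,%0) addressing form the base register, which is what
   BNDSTX translates through the directory to pick a bounds-table entry,
   and the index register, whose value is recorded as the stored pointer,
   are both PTR.  The bounds written are whatever happens to be in %bnd0
   and are irrelevant here: the store is issued only so that a missing
   table causes a #BR fault and the kernel maps one in.  */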

/* get_bt returns the address of the bounds table that should exist
   at BD[BD_INDEX].  If there is no address or the address is not valid,
   we try to allocate a valid table.
   If we succeed in getting a bt, its address is returned.
   If we can't get a valid bt, NULL is returned.  */
__attribute__ ((bnd_legacy)) static inline struct mpx_bt_entry *
get_bt (unsigned bd_index, bd_type bd)
{
  struct mpx_bt_entry *bt = (struct mpx_bt_entry *) ((uintptr_t) bd[bd_index]
                                                     & MPX_L2_ADDR_MASK);
  if (!(bt) || !((uintptr_t) bd[bd_index] & MPX_L2_VALID_MASK))
    {
      mpx_pointer ptr;
      ptr.l1index = bd_index;
      /* If we don't have a BT, allocate it.  */
      alloc_bt (ptr.pointer);
      bt = (struct mpx_bt_entry *) ((uintptr_t) bd[bd_index]
                                    & MPX_L2_ADDR_MASK);
      if (!(bt) || !((uintptr_t) bd[bd_index] & MPX_L2_VALID_MASK))
        return NULL;
    }
  return bt;
}
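
/* Callers in move_bounds treat a NULL result from get_bt as "stop
   copying bounds".  The data bytes themselves have already been moved
   by memmove at that point, so giving up here can only lose bounds
   information, never corrupt the copied data.  */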

/* Function copy_if_possible moves elements from *FROM to *TO.
   ELEMS is the number of elements available in the current portion;
   ELEMS_TO_COPY is the number of elements that still need to be copied.
   If ELEMS is less than ELEMS_TO_COPY, it copies ELEMS elements and
   returns 0.  Otherwise, it copies ELEMS_TO_COPY elements and returns 1.  */
__attribute__ ((bnd_legacy)) static inline int
copy_if_possible (int elems, int elems_to_copy, struct mpx_bt_entry *from,
                  struct mpx_bt_entry *to)
{
  if (elems < elems_to_copy)
    memmove (to, from, elems * sizeof (struct mpx_bt_entry));
  else
    {
      memmove (to, from, elems_to_copy * sizeof (struct mpx_bt_entry));
      return 1;
    }
  return 0;
}
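
/* A nonzero result therefore means the whole remaining request fit into
   the current portion, which is why move_bounds returns as soon as
   copy_if_possible (or copy_if_possible_from_end below) reports 1.  */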

/* Function copy_if_possible_from_end moves elements ending at *SRC_END
   to the place where they will end at *DST_END.
   If ELEMS is less than ELEMS_TO_COPY, it copies ELEMS elements and
   returns 0.  Otherwise, it copies ELEMS_TO_COPY elements and returns 1.  */
__attribute__ ((bnd_legacy)) static inline int
copy_if_possible_from_end (int elems, int elems_to_copy, struct mpx_bt_entry
                           *src_end, struct mpx_bt_entry *dst_end)
{
  if (elems < elems_to_copy)
    memmove (dst_end - elems, src_end - elems,
             elems * sizeof (struct mpx_bt_entry));
  else
    {
      memmove (dst_end - elems_to_copy,
               src_end - elems_to_copy,
               elems_to_copy * sizeof (struct mpx_bt_entry));
      return 1;
    }
  return 0;
}

/* move_bounds copies the bounds for N bytes from the bounds tables of SRC
   to the bounds tables of DST, i.e. the bounds of all pointers stored
   inside the copied region.
   The algorithm has three parts:
   1) Copy everything up to the end of the first bounds table of SRC.
   2) In a loop, copy whole bounds tables up to the second-to-last one.
   3) Copy the data in the last bounds table separately, after the loop.
   If one of the bounds tables of SRC doesn't exist, we skip it, since it
   cannot contain pointer bounds.
   Depending on how SRC and DST overlap, we copy either from the beginning
   or from the end.  */
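
/* Illustration (the numbers are made up for the example; a real bounds
   table has 1 << NUM_L2_BITS entries): with 8 entries per table, copying
   bounds that start at entry 5 of an SRC table into entry 2 of a DST
   table means every full SRC table is split into two portions:
   size1_elem entries that land at offset size2_elem of one DST table,
   and size2_elem entries that spill over into the start of the next DST
   table.  That split is computed once from the first pair of tables and
   reused for every following table.  */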
__attribute__ ((bnd_legacy)) static void
move_bounds (void *dst, const void *src, size_t n)
{
  bd_type bd = (bd_type)get_bd ();
  if (!(bd))
    return;

  /* We get the indexes for all tables and the number of elements
     in a BT.  */
  unsigned long bt_num_of_elems = (1UL << NUM_L2_BITS);
  mpx_pointer addr_src, addr_dst, addr_src_end, addr_dst_end;
  addr_src.pointer = (char *) src;
  addr_dst.pointer = (char *) dst;
  addr_src_end.pointer = (char *) src + n - 1;
  addr_dst_end.pointer = (char *) dst + n - 1;
  unsigned dst_bd_index = addr_dst.l1index;
  unsigned src_bd_index = addr_src.l1index;
  unsigned dst_bt_index = addr_dst.l2entry;
  unsigned src_bt_index = addr_src.l2entry;

  unsigned dst_bd_index_end = addr_dst_end.l1index;
  unsigned src_bd_index_end = addr_src_end.l1index;
  unsigned dst_bt_index_end = addr_dst_end.l2entry;
  unsigned src_bt_index_end = addr_src_end.l2entry;

  int elems_to_copy = src_bt_index_end - src_bt_index + 1
                      + (src_bd_index_end - src_bd_index) * bt_num_of_elems;
  struct mpx_bt_entry *bt_src, *bt_dst;
  uintptr_t bt_valid;
  /* size1 and size2 describe the two portions every full table is split
     into when the source and destination tables are not aligned with
     each other.  */
  int size1_elem, size2_elem, size1_bytes, size2_bytes;

  /* Copy from the beginning.  */
  if (((char *) src - (char *) dst) > 0)
    {
      /* Copy everything till the end of the first bounds table (src).  */
      bt_src = (struct mpx_bt_entry *) ((uintptr_t) bd[src_bd_index]
                                        & MPX_L2_ADDR_MASK);
      bt_valid = (uintptr_t) bd[src_bd_index] & MPX_L2_VALID_MASK;

      /* We can copy the whole preliminary piece of data.  */
      if (src_bt_index > dst_bt_index)
        {
          size1_elem = src_bt_index - dst_bt_index;
          size2_elem = bt_num_of_elems - size1_elem;
          size1_bytes = size1_elem * sizeof (struct mpx_bt_entry);
          size2_bytes = size2_elem * sizeof (struct mpx_bt_entry);

          /* Check we have bounds to copy.  */
          if (bt_src && bt_valid)
            {
              bt_dst = get_bt (dst_bd_index, bd);
              if (!bt_dst)
                return;
              if (copy_if_possible (bt_num_of_elems - src_bt_index,
                                    elems_to_copy, &(bt_src[src_bt_index]),
                                    &(bt_dst[dst_bt_index])))
                return;
            }
          elems_to_copy -= bt_num_of_elems - src_bt_index;
        }
      /* We have to copy the preliminary data in two parts.  */
      else
        {
          size2_elem = dst_bt_index - src_bt_index;
          size1_elem = bt_num_of_elems - size2_elem;
          size1_bytes = size1_elem * sizeof (struct mpx_bt_entry);
          size2_bytes = size2_elem * sizeof (struct mpx_bt_entry);

          /* Check we have bounds to copy.  */
          if (bt_src && bt_valid)
            {
              bt_dst = get_bt (dst_bd_index, bd);
              if (!bt_dst)
                return;

              if (copy_if_possible (bt_num_of_elems - dst_bt_index,
                                    elems_to_copy, &(bt_src[src_bt_index]),
                                    &(bt_dst[dst_bt_index])))
                return;
              elems_to_copy -= bt_num_of_elems - dst_bt_index;

              dst_bd_index++;

              bt_dst = get_bt (dst_bd_index, bd);
              if (!bt_dst)
                return;
              if (copy_if_possible (size2_elem, elems_to_copy,
                                    &(bt_src[size1_elem]), &(bt_dst[0])))
                return;
              elems_to_copy -= size2_elem;
            }
          else
            elems_to_copy -= bt_num_of_elems - src_bt_index;
        }

      src_bd_index++;

      /* For each bounds table check if it's valid and move it.  */
      for (; src_bd_index < src_bd_index_end; src_bd_index++)
        {
          bt_src = (struct mpx_bt_entry *) ((uintptr_t) bd[src_bd_index]
                                            & MPX_L2_ADDR_MASK);
          bt_valid = (uintptr_t) bd[src_bd_index] & MPX_L2_VALID_MASK;

          /* Check we have bounds to copy.  */
          if (!bt_src || !bt_valid)
            dst_bd_index++;
          else
            {
              bt_dst = get_bt (dst_bd_index, bd);
              if (!bt_dst)
                return;
              memmove (&(bt_dst[size2_elem]), &(bt_src[0]), size1_bytes);
              dst_bd_index++;
              bt_dst = get_bt (dst_bd_index, bd);
              if (!bt_dst)
                return;
              memmove (&(bt_dst[0]), &(bt_src[size1_elem]), size2_bytes);
            }
          elems_to_copy -= bt_num_of_elems;
        }

      /* Now we have the last table, which may not be full;
         we copy it separately.  */
      if (elems_to_copy > 0)
        {
          bt_src = (struct mpx_bt_entry *) ((uintptr_t) bd[src_bd_index]
                                            & MPX_L2_ADDR_MASK);
          bt_valid = (uintptr_t) bd[src_bd_index] & MPX_L2_VALID_MASK;

          /* Check we have bounds to copy.  */
          if (bt_src && bt_valid)
            {
              bt_dst = get_bt (dst_bd_index, bd);
              if (!bt_dst)
                return;

              if (copy_if_possible (size1_elem, elems_to_copy, &(bt_src[0]),
                                    &(bt_dst[size2_elem])))
                return;

              elems_to_copy -= size1_elem;
              dst_bd_index++;
              bt_dst = get_bt (dst_bd_index, bd);
              if (!bt_dst)
                return;
              memmove (&(bt_dst[0]), &(bt_src[size1_elem]),
                       elems_to_copy * sizeof (struct mpx_bt_entry));
            }
        }
    }
  /* Copy from the end.  */
  else
    {
      /* Copy everything till the end of the first bounds table (src).  */
      bt_src = (struct mpx_bt_entry *) ((uintptr_t) bd[src_bd_index_end]
                                        & MPX_L2_ADDR_MASK);
      bt_valid = (uintptr_t) bd[src_bd_index_end] & MPX_L2_VALID_MASK;

      if (src_bt_index_end <= dst_bt_index_end)
        /* We can copy the whole preliminary piece of data.  */
        {
          size2_elem = dst_bt_index_end - src_bt_index_end;
          size1_elem = bt_num_of_elems - size2_elem;
          size1_bytes = size1_elem * sizeof (struct mpx_bt_entry);
          size2_bytes = size2_elem * sizeof (struct mpx_bt_entry);

          /* Check we have bounds to copy.  */
          if (bt_src && bt_valid)
            {
              bt_dst = get_bt (dst_bd_index_end, bd);
              if (!bt_dst)
                return;

              if (copy_if_possible_from_end (src_bt_index_end + 1,
                                             elems_to_copy,
                                             &(bt_src[src_bt_index_end + 1]),
                                             &(bt_dst[dst_bt_index_end + 1])))
                return;
            }
          elems_to_copy -= src_bt_index_end + 1;
        }
      /* We have to copy the preliminary data in two parts.  */
      else
        {
          size1_elem = src_bt_index_end - dst_bt_index_end;
          size2_elem = bt_num_of_elems - size1_elem;
          size1_bytes = size1_elem * sizeof (struct mpx_bt_entry);
          size2_bytes = size2_elem * sizeof (struct mpx_bt_entry);

          /* Check we have bounds to copy.  */
          if (bt_src && bt_valid)
            {
              bt_dst = get_bt (dst_bd_index_end, bd);
              if (!bt_dst)
                return;
              if (copy_if_possible_from_end (dst_bt_index_end + 1,
                                             elems_to_copy,
                                             &(bt_src[src_bt_index_end + 1]),
                                             &(bt_dst[dst_bt_index_end + 1])))
                return;
              elems_to_copy -= dst_bt_index_end + 1;

              dst_bd_index_end--;

              bt_dst = get_bt (dst_bd_index_end, bd);
              if (!bt_dst)
                return;
              if (copy_if_possible_from_end (size1_elem, elems_to_copy,
                                             &(bt_src[size1_elem]),
                                             &(bt_dst[bt_num_of_elems])))
                return;

              elems_to_copy -= size1_elem;
            }
          else
            elems_to_copy -= src_bt_index_end + 1;
        }

      /* Go to the previous table, but beware of wrapping below zero.
         We should already have copied all required elements
         in case src_bd_index_end is 0.  */
      if (src_bd_index_end)
        src_bd_index_end--;
      else
        {
          assert (!elems_to_copy);
          return;
        }

      /* For each bounds table we check if there are valid pointers inside.
         If there are some, we copy the table in pre-counted portions.  */
      for (; src_bd_index_end > src_bd_index; src_bd_index_end--)
        {
          bt_src = (struct mpx_bt_entry *) ((uintptr_t) bd[src_bd_index_end]
                                            & MPX_L2_ADDR_MASK);
          bt_valid = (uintptr_t) bd[src_bd_index_end] & MPX_L2_VALID_MASK;
          /* Check we have bounds to copy.  */
          if (!bt_src || !bt_valid)
            dst_bd_index_end--;
          else
            {
              bt_dst = get_bt (dst_bd_index_end, bd);
              if (!bt_dst)
                return;
              memmove (&(bt_dst[0]), &(bt_src[size1_elem]), size2_bytes);
              dst_bd_index_end--;
              bt_dst = get_bt (dst_bd_index_end, bd);
              if (!bt_dst)
                return;
              memmove (&(bt_dst[size2_elem]), &(bt_src[0]), size1_bytes);
            }
          elems_to_copy -= bt_num_of_elems;
        }

      /* Now we have the last table, which may not be full;
         we copy it separately.  */
      if (elems_to_copy > 0)
        {
          bt_src = (struct mpx_bt_entry *) ((uintptr_t) bd[src_bd_index_end]
                                            & MPX_L2_ADDR_MASK);
          bt_valid = (uintptr_t) bd[src_bd_index_end] & MPX_L2_VALID_MASK;
          /* Check we have bounds to copy.  */
          if (bt_src && bt_valid)
            {
              bt_dst = get_bt (dst_bd_index_end, bd);
              if (!bt_dst)
                return;
              if (copy_if_possible_from_end (size2_elem, elems_to_copy,
                                             &(bt_src[bt_num_of_elems]),
                                             &(bt_dst[size2_elem])))
                return;

              elems_to_copy -= size2_elem;
              dst_bd_index_end--;
              bt_dst = get_bt (dst_bd_index_end, bd);
              if (!bt_dst)
                return;
              memmove (&(bt_dst[dst_bt_index]), &(bt_src[src_bt_index]),
                       elems_to_copy * sizeof (struct mpx_bt_entry));
            }
        }
    }
  return;
}

static void *
mpx_wrapper_memmove (void *dst, const void *src, size_t n)
{
  if (n == 0)
    return dst;

  __bnd_chk_ptr_bounds (dst, n);
  __bnd_chk_ptr_bounds (src, n);

  /* When we copy exactly one pointer it is faster to just use
     bndldx + bndstx: the plain pointer assignment below is itself
     instrumented and therefore carries the bounds along.  */
  if (n == sizeof (void *))
    {
      void *const *s = (void *const *) src;
      void **d = (void **) dst;
      *d = *s;
      return dst;
    }

  memmove (dst, src, n);

  /* It is not necessary to copy bounds if the size is less than the
     size of a pointer or if SRC == DST.  */
  if ((n >= sizeof (void *)) && (src != dst))
    move_bounds (dst, src, n);

  return dst;
}

EXTERN_ALIAS (mpx_wrapper_memmove)

static void *
mpx_wrapper_memcpy (void *dst, const void *src, size_t n)
{
  return mpx_wrapper_memmove (dst, src, n);
}

EXTERN_ALIAS (mpx_wrapper_memcpy)

void *
__mpx_wrapper_mempcpy (void *dst, const void *src, size_t n)
{
  return (char *)mpx_wrapper_memcpy (dst, src, n) + n;
}

char *
__mpx_wrapper_strncat (char *dst, const char *src, size_t n)
{
  size_t dst_size = strlen (dst);
  size_t src_size = strnlen (src, n);

  __bnd_chk_ptr_bounds (dst, dst_size + src_size + 1);
  if (src_size < n)
    __bnd_chk_ptr_bounds (src, src_size + 1);
  else
    __bnd_chk_ptr_bounds (src, src_size);

  strncat (dst, src, n);

  return dst;
}

char *
__mpx_wrapper_strcat (char *dst, const char *src)
{
  size_t dst_size = strlen (dst);
  size_t src_size = strlen (src);

  __bnd_chk_ptr_bounds (dst, dst_size + src_size + 1);
  __bnd_chk_ptr_bounds (src, src_size + 1);

  strcat (dst, src);

  return dst;
}

char *
__mpx_wrapper_stpcpy (char *dst, const char *src)
{
  size_t src_size = strlen (src);

  __bnd_chk_ptr_bounds (dst, src_size + 1);
  __bnd_chk_ptr_bounds (src, src_size + 1);

  memcpy (dst, src, src_size + 1);

  return dst + src_size;
}

char *
__mpx_wrapper_stpncpy (char *dst, const char *src, size_t n)
{
  size_t src_size = strnlen (src, n);
  char *res;

  __bnd_chk_ptr_bounds (dst, n);
  if (src_size < n)
    {
      __bnd_chk_ptr_bounds (src, src_size + 1);
      res = dst + src_size;
    }
  else
    {
      __bnd_chk_ptr_bounds (src, src_size);
      res = dst + n;
    }

  memcpy (dst, src, src_size);
  if (n > src_size)
    memset (dst + src_size, 0, n - src_size);

  return res;
}

char *
__mpx_wrapper_strcpy (char *dst, const char *src)
{
  size_t src_size = strlen (src);

  __bnd_chk_ptr_bounds (dst, src_size + 1);
  __bnd_chk_ptr_bounds (src, src_size + 1);

  memcpy (dst, src, src_size + 1);

  return dst;
}

char *
__mpx_wrapper_strncpy (char *dst, const char *src, size_t n)
{
  size_t src_size = strnlen (src, n);

  __bnd_chk_ptr_bounds (dst, n);
  if (src_size < n)
    __bnd_chk_ptr_bounds (src, src_size + 1);
  else
    __bnd_chk_ptr_bounds (src, src_size);

  memcpy (dst, src, src_size);
  if (n > src_size)
    memset (dst + src_size, 0, n - src_size);

  return dst;
}

size_t
__mpx_wrapper_strlen (const char *s)
{
  size_t length = strlen (s);
  __bnd_chk_ptr_bounds (s, length + 1);
  return length;
}