/* Repeating a memory blob, with alias mapping optimization.
   Copyright (C) 2018-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <support/blob_repeat.h>

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wchar.h>

#include <support/check.h>
#include <support/test-driver.h>
#include <support/support.h>
#include <support/xunistd.h>

/* Small allocations should use malloc directly instead of the mmap
   optimization because mappings carry a lot of overhead.  */
static const size_t maximum_small_size = 4 * 1024 * 1024;

/* Internal helper for fill.  */
static void
fill0 (char *target, const char *element, size_t element_size,
       size_t count)
{
  while (count > 0)
    {
      memcpy (target, element, element_size);
      target += element_size;
      --count;
    }
}

/* Fill the buffer at TARGET with COUNT copies of the ELEMENT_SIZE
   bytes starting at ELEMENT.  */
static void
fill (char *target, const char *element, size_t element_size,
      size_t count)
{
  if (element_size == 0 || count == 0)
    /* Nothing to do.  */
    ;
  else if (element_size == 1)
    memset (target, element[0], count);
  else if (element_size == sizeof (wchar_t))
    {
      /* A single wide character can be written with wmemset.  */
      wchar_t wc;
      memcpy (&wc, element, sizeof (wc));
      wmemset ((wchar_t *) target, wc, count);
    }
  else if (element_size < 1024 && count > 4096)
    {
      /* Use larger copies for really small element sizes.  Fill a
         staging buffer with repeated copies once, then copy it out
         in large chunks.  */
      char buffer[8192];
      size_t buffer_count = sizeof (buffer) / element_size;
      fill0 (buffer, element, element_size, buffer_count);
      while (count > 0)
        {
          size_t copy_count = buffer_count;
          if (copy_count > count)
            copy_count = count;
          size_t copy_bytes = copy_count * element_size;
          memcpy (target, buffer, copy_bytes);
          target += copy_bytes;
          count -= copy_count;
        }
    }
  else
    fill0 (target, element, element_size, count);
}

/* Use malloc instead of mmap for small allocations and unusual size
   combinations.  */
static struct support_blob_repeat
allocate_malloc (size_t total_size, const void *element, size_t element_size,
                 size_t count)
{
  void *buffer = malloc (total_size);
  if (buffer == NULL)
    return (struct support_blob_repeat) { 0 };
  fill (buffer, element, element_size, count);
  return (struct support_blob_repeat)
    {
      .start = buffer,
      .size = total_size,
      .use_malloc = true
    };
}

/* Return the least common multiple of PAGE_SIZE and ELEMENT_SIZE,
   avoiding overflow.  This assumes that PAGE_SIZE is a power of
   two.  Return 0 on overflow.  */
static size_t
minimum_stride_size (size_t page_size, size_t element_size)
{
  TEST_VERIFY_EXIT (page_size > 0);
  TEST_VERIFY_EXIT (element_size > 0);

  /* Compute the number of trailing zeros common to both sizes.  */
  unsigned int common_zeros = __builtin_ctzll (page_size | element_size);

  /* In the product, this power of two appears twice, but in the least
     common multiple, it appears only once.  Therefore, shift one
     factor right by that many bits.  */
  size_t multiple;
  if (__builtin_mul_overflow (page_size >> common_zeros, element_size,
                              &multiple))
    return 0;
  return multiple;
}

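/* Example for the computation above: with a 4096-byte page and
   24-byte elements, the common power of two is 2^3 = 8, so the
   result is (4096 >> 3) * 24 = 512 * 24 = 12288, which is indeed
   lcm (4096, 24).  */
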
/* Allocations larger than maximum_small_size potentially use mmap
   with alias mappings.  If SHARED, the alias mappings are created
   using MAP_SHARED instead of MAP_PRIVATE.  */
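/* In outline: the blob is backed by one small temporary file holding
   a single stride worth of repeated data, and that same file region
   is mapped again and again at consecutive addresses.  Page tables
   then cover the full TOTAL_SIZE bytes, but backing storage is only
   needed for STRIDE_SIZE bytes.  */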
static struct support_blob_repeat
allocate_big (size_t total_size, const void *element, size_t element_size,
              size_t count, bool shared)
{
  unsigned long page_size = xsysconf (_SC_PAGESIZE);
  size_t stride_size = minimum_stride_size (page_size, element_size);
  if (stride_size == 0)
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }

  /* Ensure that the stride size is at least maximum_small_size.  This
     is necessary to reduce the number of distinct mappings.  */
  if (stride_size < maximum_small_size)
    stride_size
      = ((maximum_small_size + stride_size - 1) / stride_size) * stride_size;

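  /* For example, a 12288-byte stride (4096-byte pages, 24-byte
     elements) is rounded up to the smallest multiple of 12288 that is
     at least 4 MiB, i.e. 342 * 12288 = 4202496 bytes.  */
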
  if (stride_size > total_size)
    /* The mmap optimization would not save anything.  */
    return allocate_malloc (total_size, element, element_size, count);

  /* Reserve the memory region.  If we cannot create the mapping,
     there is no reason to set up the backing file.  */
  void *target = mmap (NULL, total_size, PROT_NONE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (target == MAP_FAILED)
    return (struct support_blob_repeat) { 0 };

  /* Create the backing file for the repeated mapping.  Call mkstemp
     directly to remove the resources backing the temporary file
     immediately, once support_blob_repeat_free is called.  Using
     create_temp_file would result in a warning during post-test
     cleanup.  */
  int fd;
  {
    char *temppath = xasprintf ("%s/support_blob_repeat-XXXXXX", test_dir);
    fd = mkstemp (temppath);
    if (fd < 0)
      FAIL_EXIT1 ("mkstemp (\"%s\"): %m", temppath);
    xunlink (temppath);
    free (temppath);
  }

  /* Make sure that there is backing storage, so that the fill
     operation will not fault.  */
  if (posix_fallocate (fd, 0, stride_size) != 0)
    FAIL_EXIT1 ("posix_fallocate (%zu): %m", stride_size);

  /* The stride size must still be a multiple of the page size and
     the element size.  */
  TEST_VERIFY_EXIT ((stride_size % page_size) == 0);
  TEST_VERIFY_EXIT ((stride_size % element_size) == 0);

  /* Fill the backing store.  */
  {
    void *ptr = mmap (target, stride_size, PROT_READ | PROT_WRITE,
                      MAP_FIXED | MAP_FILE | MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED)
      {
        int saved_errno = errno;
        xmunmap (target, total_size);
        xclose (fd);
        errno = saved_errno;
        return (struct support_blob_repeat) { 0 };
      }
    if (ptr != target)
      FAIL_EXIT1 ("mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);

    /* Write the repeating data.  */
    fill (target, element, element_size, stride_size / element_size);

    /* Return to a PROT_NONE mapping, just to be on the safe side.  */
    ptr = mmap (target, stride_size, PROT_NONE,
                MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED)
      FAIL_EXIT1 ("Failed to reinstate PROT_NONE mapping: %m");
    if (ptr != target)
      FAIL_EXIT1 ("PROT_NONE mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);
  }

  /* Create the alias mappings.  */
  {
    size_t remaining_size = total_size;
    char *current = target;
    int flags = MAP_FIXED | MAP_FILE;
    if (shared)
      flags |= MAP_SHARED;
    else
      flags |= MAP_PRIVATE;
#ifdef MAP_NORESERVE
    flags |= MAP_NORESERVE;
#endif
    while (remaining_size > 0)
      {
        size_t to_map = stride_size;
        if (to_map > remaining_size)
          to_map = remaining_size;
        void *ptr = mmap (current, to_map, PROT_READ | PROT_WRITE,
                          flags, fd, 0);
        if (ptr == MAP_FAILED)
          {
            int saved_errno = errno;
            xmunmap (target, total_size);
            xclose (fd);
            errno = saved_errno;
            return (struct support_blob_repeat) { 0 };
          }
        if (ptr != current)
          FAIL_EXIT1 ("MAP_PRIVATE mapping of %zu bytes moved from %p to %p",
                      to_map, target, ptr);
        remaining_size -= to_map;
        current += to_map;
      }
  }

  xclose (fd);

  return (struct support_blob_repeat)
    {
      .start = target,
      .size = total_size,
      .use_malloc = false
    };
}

/* Allocate a blob of COUNT copies of the ELEMENT_SIZE bytes starting
   at ELEMENT, using malloc or mmap as appropriate.  */
static struct support_blob_repeat
repeat_allocate (const void *element, size_t element_size,
                 size_t count, bool shared)
{
  size_t total_size;
  if (__builtin_mul_overflow (element_size, count, &total_size))
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }
  if (total_size <= maximum_small_size)
    return allocate_malloc (total_size, element, element_size, count);
  else
    return allocate_big (total_size, element, element_size, count, shared);
}

struct support_blob_repeat
support_blob_repeat_allocate (const void *element, size_t element_size,
                              size_t count)
{
  return repeat_allocate (element, element_size, count, false);
}

struct support_blob_repeat
support_blob_repeat_allocate_shared (const void *element, size_t element_size,
                                     size_t count)
{
  return repeat_allocate (element, element_size, count, true);
}

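/* Illustrative usage sketch (the names below are hypothetical, not
   part of this file):

     static const char pattern[16] = "ABCDEFGHIJKLMNO";
     struct support_blob_repeat blob
       = support_blob_repeat_allocate (pattern, sizeof (pattern),
                                       1000 * 1000);
     if (blob.start == NULL)
       FAIL_EXIT1 ("blob allocation failed: %m");
     ... use blob.start and blob.size ...
     support_blob_repeat_free (&blob);  */
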
void
support_blob_repeat_free (struct support_blob_repeat *blob)
{
  if (blob->size > 0)
    {
      int saved_errno = errno;
      if (blob->use_malloc)
        free (blob->start);
      else
        xmunmap (blob->start, blob->size);
      errno = saved_errno;
    }
  *blob = (struct support_blob_repeat) { 0 };
}