nbdkit: common/allocators/malloc.c
/* nbdkit
 * Copyright Red Hat
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of Red Hat nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL RED HAT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <config.h>

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>             /* for assert() in m_alloc_blit */

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include <pthread.h>

#include <nbdkit-plugin.h>

#include "cleanup.h"
#include "vector.h"

#include "allocator.h"
#include "allocator-internal.h"
/* This allocator implements a direct-mapped non-sparse RAM disk using
 * malloc, with optional mlock.
 */
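/* Usage note (added, illustrative): this allocator is selected through
 * the allocator parameter of plugins that support it, for example
 * nbdkit-memory-plugin:
 *
 *   nbdkit memory 1G allocator=malloc
 *   nbdkit memory 1G allocator=malloc,mlock=true
 *
 * The mlock=... suffix is parsed in m_alloc_create below.
 */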
DEFINE_VECTOR_TYPE (bytearray, uint8_t);

struct m_alloc {
  struct allocator a;           /* Must come first. */
  bool use_mlock;

  /* Byte array (vector) implementing the direct-mapped disk.  Note we
   * don't use the .size field.  Accesses must be protected by the
   * lock since writes may try to extend the array.
   */
  pthread_rwlock_t lock;
  bytearray ba;
};
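/* Added note: the ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE and
 * ACQUIRE_WRLOCK_FOR_CURRENT_SCOPE macros used below (see "cleanup.h")
 * take the rwlock and release it automatically when the enclosing
 * scope exits, so none of the functions below need explicit unlock
 * calls on their error paths.
 */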
static void
m_alloc_free (struct allocator *a)
{
  struct m_alloc *ma = (struct m_alloc *) a;

  if (ma) {
    free (ma->ba.ptr);
    pthread_rwlock_destroy (&ma->lock);
    free (ma);
  }
}
/* Extend the underlying bytearray if needed. */
static int
extend_without_mlock (struct m_alloc *ma, uint64_t new_size)
{
  size_t old_size, n;

  if (ma->ba.cap < new_size) {
    old_size = ma->ba.cap;
    n = new_size - ma->ba.cap;

    if (bytearray_reserve (&ma->ba, n) == -1) {
      nbdkit_error ("realloc: %m");
      return -1;
    }

    /* Initialize the newly allocated memory to 0. */
    memset (ma->ba.ptr + old_size, 0, n);
  }

  return 0;
}
#ifdef HAVE_MLOCK
static int
extend_with_mlock (struct m_alloc *ma, uint64_t new_size)
{
  size_t old_size, n;

  if (ma->ba.cap < new_size) {
    old_size = ma->ba.cap;
    n = new_size - ma->ba.cap;

#ifdef HAVE_MUNLOCK
    /* Since the memory might be moved by realloc, we must unlock the
     * original array.
     */
    if (ma->use_mlock && ma->ba.ptr != NULL)
      munlock (ma->ba.ptr, ma->ba.cap);
#endif

    if (bytearray_reserve_page_aligned (&ma->ba, n) == -1) {
      nbdkit_error ("realloc: %m");
      return -1;
    }

    /* Initialize the newly allocated memory to 0. */
    memset (ma->ba.ptr + old_size, 0, n);

    /* Note: for unprivileged processes mlock can fail with ENOMEM if
     * the locked size would exceed the RLIMIT_MEMLOCK limit.
     */
    if (mlock (ma->ba.ptr, ma->ba.cap) == -1) {
      nbdkit_error ("allocator=malloc: mlock: %m");
      return -1;
    }
  }

  return 0;
}
#endif /* HAVE_MLOCK */
static int
extend (struct m_alloc *ma, uint64_t new_size)
{
  ACQUIRE_WRLOCK_FOR_CURRENT_SCOPE (&ma->lock);

#ifdef HAVE_MLOCK
  if (ma->use_mlock)
    return extend_with_mlock (ma, new_size);
#endif

  return extend_without_mlock (ma, new_size);
}
static int
m_alloc_set_size_hint (struct allocator *a, uint64_t size_hint)
{
  struct m_alloc *ma = (struct m_alloc *) a;
  return extend (ma, size_hint);
}
static int
m_alloc_read (struct allocator *a, void *buf,
              uint64_t count, uint64_t offset)
{
  struct m_alloc *ma = (struct m_alloc *) a;
  ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma->lock);

  /* Avoid reading beyond the end of the allocated array.  Return
   * zeroes for that part.
   */
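  /* Worked example (added, not in the original source): if ba.cap is
   * 4096 and the caller asks for count = 512 at offset = 3840, the
   * middle branch below copies the 256 bytes still inside the array
   * and zero-fills the remaining 256 bytes of buf.
   */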
  if (offset >= ma->ba.cap)
    memset (buf, 0, count);
  else if (offset + count > ma->ba.cap) {
    memcpy (buf, ma->ba.ptr + offset, ma->ba.cap - offset);
    memset (buf + ma->ba.cap - offset, 0, offset + count - ma->ba.cap);
  }
  else
    memcpy (buf, ma->ba.ptr + offset, count);

  return 0;
}
static int
m_alloc_write (struct allocator *a, const void *buf,
               uint64_t count, uint64_t offset)
{
  struct m_alloc *ma = (struct m_alloc *) a;

  if (extend (ma, offset+count) == -1)
    return -1;

  /* This is correct: Even though we are writing, we only need to
   * acquire the read lock here.  The write lock applies to changing
   * the metadata and it was acquired if we called extend().
   */
  ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma->lock);
  memcpy (ma->ba.ptr + offset, buf, count);
  return 0;
}
static int
m_alloc_fill (struct allocator *a, char c, uint64_t count, uint64_t offset)
{
  struct m_alloc *ma = (struct m_alloc *) a;

  if (extend (ma, offset+count) == -1)
    return -1;

  /* See comment in m_alloc_write. */
  ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma->lock);
  memset (ma->ba.ptr + offset, c, count);
  return 0;
}
static int
m_alloc_zero (struct allocator *a, uint64_t count, uint64_t offset)
{
  struct m_alloc *ma = (struct m_alloc *) a;
  ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma->lock);

  /* Try to avoid extending the array, since the unallocated part
   * always reads as zero.
   */
  if (offset < ma->ba.cap) {
    if (offset + count > ma->ba.cap)
      memset (ma->ba.ptr + offset, 0, ma->ba.cap - offset);
    else
      memset (ma->ba.ptr + offset, 0, count);
  }

  return 0;
}
static int
m_alloc_blit (struct allocator *a1, struct allocator *a2,
              uint64_t count, uint64_t offset1, uint64_t offset2)
{
  struct m_alloc *ma2 = (struct m_alloc *) a2;

  assert (a1 != a2);
  assert (strcmp (a2->f->type, "malloc") == 0);

  if (extend (ma2, offset2+count) == -1)
    return -1;

  /* See comment in m_alloc_write. */
  ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma2->lock);
  return a1->f->read (a1, ma2->ba.ptr + offset2, count, offset1);
}
static int
m_alloc_extents (struct allocator *a,
                 uint64_t count, uint64_t offset,
                 struct nbdkit_extents *extents)
{
  /* Always fully allocated.  XXX In theory we could detect zeroes
   * quite quickly and return that information, allowing the client to
   * avoid reads.  However we'd probably want to store a bitmap of
   * which sectors we are known to have written to, and that
   * complicates the implementation quite a lot.
   */
  return nbdkit_add_extent (extents, offset, count, 0);
}
struct allocator *
m_alloc_create (const void *paramsv)
{
  const allocator_parameters *params = paramsv;
  struct m_alloc *ma;
  bool use_mlock = false;
  size_t i;

  /* Parse the optional mlock=true|false parameter. */
  for (i = 0; i < params->len; ++i) {
    if (strcmp (params->ptr[i].key, "mlock") == 0) {
      int r = nbdkit_parse_bool (params->ptr[i].value);
      if (r == -1) return NULL;
      use_mlock = r;
#ifndef HAVE_MLOCK
      if (use_mlock) {
        nbdkit_error ("mlock is not supported on this platform");
        return NULL;
      }
#endif
    }
    else {
      nbdkit_error ("allocator=malloc: unknown parameter %s",
                    params->ptr[i].key);
      return NULL;
    }
  }

  ma = calloc (1, sizeof *ma);
  if (ma == NULL) {
    nbdkit_error ("calloc: %m");
    return NULL;
  }

  ma->use_mlock = use_mlock;
  pthread_rwlock_init (&ma->lock, NULL);
  ma->ba = (bytearray) empty_vector;
  return (struct allocator *) ma;
}
static struct allocator_functions functions = {
  .type = "malloc",
  .create = m_alloc_create,
  .free = m_alloc_free,
  .set_size_hint = m_alloc_set_size_hint,
  .read = m_alloc_read,
  .write = m_alloc_write,
  .fill = m_alloc_fill,
  .zero = m_alloc_zero,
  .blit = m_alloc_blit,
  .extents = m_alloc_extents,
};
static void register_malloc (void) __attribute__ ((constructor));

static void
register_malloc (void)
{
  register_allocator (&functions);
}
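/* Added note: because register_malloc is marked
 * __attribute__ ((constructor)), it runs when this code is loaded,
 * before main.  The generic allocator code behind "allocator.h" /
 * "allocator-internal.h" can then look this allocator up by its .type
 * string ("malloc") when a plugin is configured with allocator=malloc.
 */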