4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * * Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of Red Hat nor the names of its contributors may be
16 * used to endorse or promote products derived from this software without
17 * specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
22 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
26 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 #ifdef HAVE_SYS_MMAN_H
46 #include <nbdkit-plugin.h>
51 #include "allocator.h"
52 #include "allocator-internal.h"
54 /* This allocator implements a direct-mapped non-sparse RAM disk using
55 * malloc, with optional mlock.
58 DEFINE_VECTOR_TYPE (bytearray
, uint8_t);
61 struct allocator a
; /* Must come first. */
64 /* Byte array (vector) implementing the direct-mapped disk. Note we
65 * don't use the .size field. Accesses must be protected by the
66 * lock since writes may try to extend the array.
68 pthread_rwlock_t lock
;
73 m_alloc_free (struct allocator
*a
)
75 struct m_alloc
*ma
= (struct m_alloc
*) a
;
79 pthread_rwlock_destroy (&ma
->lock
);
84 /* Extend the underlying bytearray if needed. */
86 extend_without_mlock (struct m_alloc
*ma
, uint64_t new_size
)
90 if (ma
->ba
.cap
< new_size
) {
91 old_size
= ma
->ba
.cap
;
92 n
= new_size
- ma
->ba
.cap
;
94 if (bytearray_reserve (&ma
->ba
, n
) == -1) {
95 nbdkit_error ("realloc: %m");
99 /* Initialize the newly allocated memory to 0. */
100 memset (ma
->ba
.ptr
+ old_size
, 0, n
);
#ifdef HAVE_MLOCK
/* Like extend_without_mlock, but keeps the (page-aligned) array
 * locked into RAM.  Newly allocated bytes are zeroed.  Caller must
 * hold the write lock.  Returns 0 on success, -1 on error.
 */
static int
extend_with_mlock (struct m_alloc *ma, uint64_t new_size)
{
  uint64_t old_size, n;

  if (ma->ba.cap < new_size) {
    old_size = ma->ba.cap;
    n = new_size - ma->ba.cap;

    /* Since the memory might be moved by realloc, we must unlock the
     * old array before reallocating it.
     */
    if (ma->use_mlock && ma->ba.ptr != NULL)
      munlock (ma->ba.ptr, ma->ba.cap);

    if (bytearray_reserve_page_aligned (&ma->ba, n) == -1) {
      nbdkit_error ("realloc: %m");
      return -1;
    }

    /* Initialize the newly allocated memory to 0. */
    memset (ma->ba.ptr + old_size, 0, n);

    /* Lock the whole (possibly moved) array back into RAM. */
    if (mlock (ma->ba.ptr, ma->ba.cap) == -1) {
      nbdkit_error ("allocator=malloc: mlock: %m");
      return -1;
    }
  }

  return 0;
}
#endif /* HAVE_MLOCK */
143 extend (struct m_alloc
*ma
, uint64_t new_size
)
145 ACQUIRE_WRLOCK_FOR_CURRENT_SCOPE (&ma
->lock
);
149 return extend_with_mlock (ma
, new_size
);
152 return extend_without_mlock (ma
, new_size
);
/* A size hint is an early estimate of the final disk size: extend
 * the array up front so subsequent writes avoid repeated reallocs.
 */
static int
m_alloc_set_size_hint (struct allocator *a, uint64_t size_hint)
{
  struct m_alloc *ma = (struct m_alloc *) a;

  return extend (ma, size_hint);
}
163 m_alloc_read (struct allocator
*a
, void *buf
,
164 uint64_t count
, uint64_t offset
)
166 struct m_alloc
*ma
= (struct m_alloc
*) a
;
167 ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma
->lock
);
169 /* Avoid reading beyond the end of the allocated array. Return
170 * zeroes for that part.
172 if (offset
>= ma
->ba
.cap
)
173 memset (buf
, 0, count
);
174 else if (offset
+ count
> ma
->ba
.cap
) {
175 memcpy (buf
, ma
->ba
.ptr
+ offset
, ma
->ba
.cap
- offset
);
176 memset (buf
+ ma
->ba
.cap
- offset
, 0, offset
+ count
- ma
->ba
.cap
);
179 memcpy (buf
, ma
->ba
.ptr
+ offset
, count
);
185 m_alloc_write (struct allocator
*a
, const void *buf
,
186 uint64_t count
, uint64_t offset
)
188 struct m_alloc
*ma
= (struct m_alloc
*) a
;
190 if (extend (ma
, offset
+count
) == -1)
193 /* This is correct: Even though we are writing, we only need to
194 * acquire the read lock here. The write lock applies to changing
195 * the metadata and it was acquired if we called extend().
197 ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma
->lock
);
198 memcpy (ma
->ba
.ptr
+ offset
, buf
, count
);
203 m_alloc_fill (struct allocator
*a
, char c
, uint64_t count
, uint64_t offset
)
205 struct m_alloc
*ma
= (struct m_alloc
*) a
;
207 if (extend (ma
, offset
+count
) == -1)
210 /* See comment in m_alloc_write. */
211 ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma
->lock
);
212 memset (ma
->ba
.ptr
+ offset
, c
, count
);
217 m_alloc_zero (struct allocator
*a
, uint64_t count
, uint64_t offset
)
219 struct m_alloc
*ma
= (struct m_alloc
*) a
;
220 ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma
->lock
);
222 /* Try to avoid extending the array, since the unallocated part
223 * always reads as zero.
225 if (offset
< ma
->ba
.cap
) {
226 if (offset
+ count
> ma
->ba
.cap
)
227 memset (ma
->ba
.ptr
+ offset
, 0, ma
->ba
.cap
- offset
);
229 memset (ma
->ba
.ptr
+ offset
, 0, count
);
236 m_alloc_blit (struct allocator
*a1
, struct allocator
*a2
,
237 uint64_t count
, uint64_t offset1
, uint64_t offset2
)
239 struct m_alloc
*ma2
= (struct m_alloc
*) a2
;
242 assert (strcmp (a2
->f
->type
, "malloc") == 0);
244 if (extend (ma2
, offset2
+count
) == -1)
247 /* See comment in m_alloc_write. */
248 ACQUIRE_RDLOCK_FOR_CURRENT_SCOPE (&ma2
->lock
);
249 return a1
->f
->read (a1
, ma2
->ba
.ptr
+ offset2
, count
, offset1
);
/* Report allocation extents: a single fully-allocated extent
 * covering the whole request.
 */
static int
m_alloc_extents (struct allocator *a,
                 uint64_t count, uint64_t offset,
                 struct nbdkit_extents *extents)
{
  /* Always fully allocated.  XXX In theory we could detect zeroes
   * quite quickly and return that information, allowing the client to
   * avoid reads.  However we'd probably want to store a bitmap of
   * which sectors we are known to have written to, and that
   * complicates the implementation quite a lot.
   */
  return nbdkit_add_extent (extents, offset, count, 0);
}
267 m_alloc_create (const void *paramsv
)
269 const allocator_parameters
*params
= paramsv
;
271 bool use_mlock
= false;
274 /* Parse the optional mlock=true|false parameter. */
275 for (i
= 0; i
< params
->len
; ++i
) {
276 if (strcmp (params
->ptr
[i
].key
, "mlock") == 0) {
277 int r
= nbdkit_parse_bool (params
->ptr
[i
].value
);
278 if (r
== -1) return NULL
;
282 nbdkit_error ("mlock is not supported on this platform");
288 nbdkit_error ("allocator=malloc: unknown parameter %s",
294 ma
= calloc (1, sizeof *ma
);
296 nbdkit_error ("calloc: %m");
299 ma
->use_mlock
= use_mlock
;
300 pthread_rwlock_init (&ma
->lock
, NULL
);
301 ma
->ba
= (bytearray
) empty_vector
;
302 return (struct allocator
*) ma
;
305 static struct allocator_functions functions
= {
307 .create
= m_alloc_create
,
308 .free
= m_alloc_free
,
309 .set_size_hint
= m_alloc_set_size_hint
,
310 .read
= m_alloc_read
,
311 .write
= m_alloc_write
,
312 .fill
= m_alloc_fill
,
313 .zero
= m_alloc_zero
,
314 .blit
= m_alloc_blit
,
315 .extents
= m_alloc_extents
,
318 static void register_malloc (void) __attribute__ ((constructor
));
321 register_malloc (void)
323 register_allocator (&functions
);