/* -*- Mode: C ; c-basic-offset: 2 -*- */
/*****************************************************************************
 *
 * Non-sleeping memory allocation
 *
 * Copyright (C) 2006,2007 Nedko Arnaudov <nedko@arnaudov.name>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *****************************************************************************/

#include <stdlib.h>
#include <assert.h>
#include <stdbool.h>
#include <pthread.h>

#include "memory_atomic.h"
#include "list.h"
#include "log.h"

struct rtsafe_memory_pool
{
  size_t data_size;
  size_t min_preallocated;
  size_t max_preallocated;

  unsigned int used_count;
  struct list_head unused;
  unsigned int unused_count;

  bool enforce_thread_safety;
  /* next members are initialized/used only if enforce_thread_safety is true */
  pthread_mutex_t mutex;
  unsigned int unused_count2;
  struct list_head pending;
};

#define RTSAFE_GROUPS_PREALLOCATE 1024

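/*
 * The pool hands out fixed-size chunks without blocking. When
 * enforce_thread_safety is set, the realtime thread owns the "unused" list,
 * while the mutex-guarded "pending" list passes chunks to and from the
 * non-realtime ("sleepy") code; the realtime side only ever takes the mutex
 * with pthread_mutex_trylock(), so it never waits on the lock. unused_count2
 * holds the realtime thread's last published unused_count, which the sleepy
 * code reads to decide how many chunks to preallocate.
 */
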
bool
rtsafe_memory_pool_create(
  size_t data_size,
  size_t min_preallocated,
  size_t max_preallocated,
  bool enforce_thread_safety,
  rtsafe_memory_pool_handle * pool_handle_ptr)
{
  int ret;
  struct rtsafe_memory_pool * pool_ptr;

  assert(min_preallocated <= max_preallocated);

  pool_ptr = malloc(sizeof(struct rtsafe_memory_pool));
  if (pool_ptr == NULL)
  {
    return false;
  }

  pool_ptr->data_size = data_size;
  pool_ptr->min_preallocated = min_preallocated;
  pool_ptr->max_preallocated = max_preallocated;

  pool_ptr->used_count = 0;

  INIT_LIST_HEAD(&pool_ptr->unused);
  pool_ptr->unused_count = 0;

  pool_ptr->enforce_thread_safety = enforce_thread_safety;
  if (enforce_thread_safety)
  {
    ret = pthread_mutex_init(&pool_ptr->mutex, NULL);
    if (ret != 0)
    {
      free(pool_ptr);
      return false;
    }

    INIT_LIST_HEAD(&pool_ptr->pending);
    pool_ptr->unused_count2 = 0;
  }

  rtsafe_memory_pool_sleepy((rtsafe_memory_pool_handle)pool_ptr);
  *pool_handle_ptr = pool_ptr;

  return true;
}

#define pool_ptr ((struct rtsafe_memory_pool *)pool_handle)

void
rtsafe_memory_pool_destroy(
  rtsafe_memory_pool_handle pool_handle)
{
  int ret;
  struct list_head * node_ptr;

  /* caller should deallocate all chunks prior to releasing the pool itself */
  assert(pool_ptr->used_count == 0);

  while (pool_ptr->unused_count != 0)
  {
    assert(!list_empty(&pool_ptr->unused));

    node_ptr = pool_ptr->unused.next;

    list_del(node_ptr);
    pool_ptr->unused_count--;

    free(node_ptr);
  }

  assert(list_empty(&pool_ptr->unused));

  if (pool_ptr->enforce_thread_safety)
  {
    while (!list_empty(&pool_ptr->pending))
    {
      node_ptr = pool_ptr->pending.next;

      list_del(node_ptr);

      free(node_ptr);
    }

    ret = pthread_mutex_destroy(&pool_ptr->mutex);
    assert(ret == 0);
  }

  free(pool_ptr);
}

/* adjust unused list size; called from a non-realtime thread, may call malloc()/free() */
void
rtsafe_memory_pool_sleepy(
  rtsafe_memory_pool_handle pool_handle)
{
  struct list_head * node_ptr;
  unsigned int count;

  if (pool_ptr->enforce_thread_safety)
  {
    pthread_mutex_lock(&pool_ptr->mutex);

    count = pool_ptr->unused_count2;

    /* match the contract checked in rtsafe_memory_pool_create() */
    assert(pool_ptr->min_preallocated <= pool_ptr->max_preallocated);

    while (count < pool_ptr->min_preallocated)
    {
      node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
      if (node_ptr == NULL)
      {
        break;
      }

      list_add_tail(node_ptr, &pool_ptr->pending);

      count++;
    }

    while (count > pool_ptr->max_preallocated && !list_empty(&pool_ptr->pending))
    {
      node_ptr = pool_ptr->pending.next;

      list_del(node_ptr);

      free(node_ptr);

      count--;
    }

    pthread_mutex_unlock(&pool_ptr->mutex);
  }
  else
  {
    while (pool_ptr->unused_count < pool_ptr->min_preallocated)
    {
      node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
      if (node_ptr == NULL)
      {
        return;
      }

      list_add_tail(node_ptr, &pool_ptr->unused);
      pool_ptr->unused_count++;
    }

    while (pool_ptr->unused_count > pool_ptr->max_preallocated)
    {
      assert(!list_empty(&pool_ptr->unused));

      node_ptr = pool_ptr->unused.next;

      list_del(node_ptr);
      pool_ptr->unused_count--;

      free(node_ptr);
    }
  }
}

/* find entry in unused list, fail if it is empty */
void *
rtsafe_memory_pool_allocate(
  rtsafe_memory_pool_handle pool_handle)
{
  struct list_head * node_ptr;
  struct list_head * pending_node_ptr;

  if (list_empty(&pool_ptr->unused))
  {
    return NULL;
  }

  node_ptr = pool_ptr->unused.next;
  list_del(node_ptr);
  pool_ptr->unused_count--;
  pool_ptr->used_count++;

  if (pool_ptr->enforce_thread_safety &&
      pthread_mutex_trylock(&pool_ptr->mutex) == 0)
  {
    while (pool_ptr->unused_count < pool_ptr->min_preallocated && !list_empty(&pool_ptr->pending))
    {
      /* use a separate pointer here so the chunk being returned is not clobbered */
      pending_node_ptr = pool_ptr->pending.next;

      list_del(pending_node_ptr);
      list_add_tail(pending_node_ptr, &pool_ptr->unused);
      pool_ptr->unused_count++;
    }

    pool_ptr->unused_count2 = pool_ptr->unused_count;

    pthread_mutex_unlock(&pool_ptr->mutex);
  }

  return (node_ptr + 1);
}

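/*
 * Each chunk is laid out as [struct list_head][data_size bytes]; the
 * (node_ptr + 1) above skips the list header, and deallocation recovers the
 * header with ((struct list_head *)data - 1).
 */
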
/* move from used to unused list */
void
rtsafe_memory_pool_deallocate(
  rtsafe_memory_pool_handle pool_handle,
  void * data)
{
  struct list_head * node_ptr;

  list_add_tail((struct list_head *)data - 1, &pool_ptr->unused);
  pool_ptr->used_count--;
  pool_ptr->unused_count++;

  if (pool_ptr->enforce_thread_safety &&
      pthread_mutex_trylock(&pool_ptr->mutex) == 0)
  {
    while (pool_ptr->unused_count > pool_ptr->max_preallocated)
    {
      assert(!list_empty(&pool_ptr->unused));

      node_ptr = pool_ptr->unused.next;

      list_del(node_ptr);
      list_add_tail(node_ptr, &pool_ptr->pending);
      pool_ptr->unused_count--;
    }

    pool_ptr->unused_count2 = pool_ptr->unused_count;

    pthread_mutex_unlock(&pool_ptr->mutex);
  }
}

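/*
 * The drain above mirrors the refill in rtsafe_memory_pool_allocate(): it
 * runs only when the mutex can be taken without blocking, parking surplus
 * chunks on "pending" for the sleepy thread to free.
 */
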
void *
rtsafe_memory_pool_allocate_sleepy(
  rtsafe_memory_pool_handle pool_handle)
{
  void * data;

  do
  {
    rtsafe_memory_pool_sleepy(pool_handle);
    data = rtsafe_memory_pool_allocate(pool_handle);
  }
  while (data == NULL);

  return data;
}

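/*
 * Illustrative usage sketch for the fixed-size pool API (not part of the
 * original file; the sizes and counts below are arbitrary):
 *
 *   rtsafe_memory_pool_handle pool;
 *
 *   // non-realtime thread: 64-byte chunks, keep between 10 and 100 spare
 *   if (!rtsafe_memory_pool_create(64, 10, 100, true, &pool))
 *     abort();
 *
 *   // realtime thread: never blocks, may return NULL when the pool is dry
 *   void * chunk = rtsafe_memory_pool_allocate(pool);
 *   if (chunk != NULL)
 *     rtsafe_memory_pool_deallocate(pool, chunk);
 *
 *   // non-realtime thread, periodically: refill/trim the preallocated chunks
 *   rtsafe_memory_pool_sleepy(pool);
 *
 *   // once every chunk has been deallocated
 *   rtsafe_memory_pool_destroy(pool);
 */
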
/* max alloc is DATA_MIN * (2 ^ POOLS_COUNT) - DATA_SUB */
#define DATA_MIN 1024
#define DATA_SUB 100 /* alloc slightly smaller chunks in the hope of not allocating an additional page for the control data */

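/*
 * For example, with DATA_MIN 1024 and DATA_SUB 100 the successive pools serve
 * chunks of 924, 1948, 3996, 8092, ... bytes (1024 * 2^i - 100); each request
 * is rounded up to the smallest pool that fits it.
 */
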
struct rtsafe_memory_pool_generic
{
  size_t size;
  rtsafe_memory_pool_handle pool;
};

struct rtsafe_memory
{
  struct rtsafe_memory_pool_generic * pools;
  size_t pools_count;
};

bool
rtsafe_memory_init(
  size_t max_size,
  size_t prealloc_min,
  size_t prealloc_max,
  bool enforce_thread_safety,
  rtsafe_memory_handle * handle_ptr)
{
  size_t i;
  size_t size;
  struct rtsafe_memory * memory_ptr;

  LOG_DEBUG("rtsafe_memory_init() called.");

  memory_ptr = malloc(sizeof(struct rtsafe_memory));
  if (memory_ptr == NULL)
  {
    goto fail;
  }

  size = DATA_MIN;
  memory_ptr->pools_count = 1;

  while ((size << memory_ptr->pools_count) < max_size + DATA_SUB)
  {
    memory_ptr->pools_count++;

    if (memory_ptr->pools_count > sizeof(size_t) * 8)
    {
      assert(0);    /* chances that the caller really needs such a huge size are close to zero */
      goto fail_free;
    }
  }

  memory_ptr->pools = malloc(memory_ptr->pools_count * sizeof(struct rtsafe_memory_pool_generic));
  if (memory_ptr->pools == NULL)
  {
    goto fail_free;
  }

  size = DATA_MIN;

  for (i = 0 ; i < memory_ptr->pools_count ; i++)
  {
    memory_ptr->pools[i].size = size - DATA_SUB;

    if (!rtsafe_memory_pool_create(
          memory_ptr->pools[i].size,
          prealloc_min,
          prealloc_max,
          enforce_thread_safety,
          &memory_ptr->pools[i].pool))
    {
      while (i > 0)
      {
        i--;
        rtsafe_memory_pool_destroy(memory_ptr->pools[i].pool);
      }

      goto fail_free_pools;
    }

    size = size << 1;
  }

  *handle_ptr = (rtsafe_memory_handle)memory_ptr;

  return true;

fail_free_pools:
  free(memory_ptr->pools);

fail_free:
  free(memory_ptr);

fail:
  return false;
}

#define memory_ptr ((struct rtsafe_memory *)handle_ptr)

void
rtsafe_memory_uninit(
  rtsafe_memory_handle handle_ptr)
{
  unsigned int i;

  LOG_DEBUG("rtsafe_memory_uninit() called.");

  for (i = 0 ; i < memory_ptr->pools_count ; i++)
  {
    LOG_DEBUG("Destroying pool for size %u", (unsigned int)memory_ptr->pools[i].size);
    rtsafe_memory_pool_destroy(memory_ptr->pools[i].pool);
  }

  free(memory_ptr->pools);

  free(memory_ptr);
}

void *
rtsafe_memory_allocate(
  rtsafe_memory_handle handle_ptr,
  size_t size)
{
  rtsafe_memory_pool_handle * data_ptr;
  size_t i;

  LOG_DEBUG("rtsafe_memory_allocate() called.");

  /* pool handle is stored just before user data to ease deallocation */
  size += sizeof(rtsafe_memory_pool_handle);

  for (i = 0 ; i < memory_ptr->pools_count ; i++)
  {
    if (size <= memory_ptr->pools[i].size)
    {
      LOG_DEBUG("Using chunk with size %u.", (unsigned int)memory_ptr->pools[i].size);
      data_ptr = rtsafe_memory_pool_allocate(memory_ptr->pools[i].pool);
      if (data_ptr == NULL)
      {
        LOG_DEBUG("rtsafe_memory_pool_allocate() failed.");
        return NULL;
      }

      *data_ptr = memory_ptr->pools[i].pool;

      LOG_DEBUG("rtsafe_memory_allocate() returning %p", (data_ptr + 1));
      return (data_ptr + 1);
    }
  }

  /* data size too big; no pool is large enough (increase the max_size given to rtsafe_memory_init()) */
  LOG_WARNING("Data size is too big");
  return NULL;
}

void
rtsafe_memory_sleepy(
  rtsafe_memory_handle handle_ptr)
{
  unsigned int i;

  for (i = 0 ; i < memory_ptr->pools_count ; i++)
  {
    rtsafe_memory_pool_sleepy(memory_ptr->pools[i].pool);
  }
}

void
rtsafe_memory_deallocate(
  void * data)
{
  LOG_DEBUG("rtsafe_memory_deallocate(%p) called.", data);
  rtsafe_memory_pool_deallocate(
    *((rtsafe_memory_pool_handle *)data - 1),
    (rtsafe_memory_pool_handle *)data - 1);
}

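/*
 * Illustrative usage sketch for the variable-size layer (not part of the
 * original file; the sizes and counts below are arbitrary):
 *
 *   rtsafe_memory_handle memory;
 *
 *   // non-realtime thread: requests up to ~4000 bytes, 10..100 spare chunks
 *   // preallocated per pool
 *   if (!rtsafe_memory_init(4000, 10, 100, true, &memory))
 *     abort();
 *
 *   // realtime thread: picks the smallest pool that fits 500 + handle bytes
 *   void * buffer = rtsafe_memory_allocate(memory, 500);
 *   if (buffer != NULL)
 *     rtsafe_memory_deallocate(buffer);
 *
 *   // non-realtime thread, periodically:
 *   rtsafe_memory_sleepy(memory);
 *
 *   rtsafe_memory_uninit(memory);
 */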