Fix up according to Coding Style
[pulseaudio-mirror.git] / src / pulsecore / memblock.c
/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"

/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)
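/* With the defaults above the largest possible pool is
 * PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB = 64 MiB,
 * which is the 64*1024*1024 figure mentioned in the comment. */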
#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16

struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
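/* unused_memblocks, declared above, is a static lock-free free list that
 * recycles pa_memblock headers whose payload lives elsewhere (FIXED, USER,
 * IMPORTED and POOL_EXTERNAL blocks), so that freeing and reallocating
 * such headers does not have to go through malloc() every time. */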
/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
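/* Rough layout of an APPENDED block as allocated above (sketch):
 *
 *   +---------------------+--------------------------------------+
 *   | pa_memblock header  | payload (length bytes)               |
 *   +---------------------+--------------------------------------+
 *   ^ b                   ^ b + PA_ALIGN(sizeof(pa_memblock)) == data
 */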
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
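/* A note on mempool_allocate_slot() above: slots are handed out without a
 * lock. A fresh slot index is claimed by atomically bumping p->n_init; if
 * the pool is already fully initialized the increment is rolled back and
 * only the free list can satisfy the request. */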
/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
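/* pa_memblock_new_pool() below picks one of two layouts: if header and
 * payload together fit into a slot, the pa_memblock header is placed at the
 * start of the slot (PA_MEMBLOCK_POOL); if only the payload fits, the header
 * is allocated separately and just the data goes into the slot
 * (PA_MEMBLOCK_POOL_EXTERNAL). Anything larger is rejected here, and the
 * caller falls back to memblock_new_appended() via pa_memblock_new(). */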
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
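/* Illustrative use of the acquire/release pair above (sketch, not code from
 * this file, do_something() standing in for the caller's processing):
 * every payload access is bracketed so that memblock_wait() can tell when
 * no thread is touching the data anymore.
 *
 *     void *d = pa_memblock_acquire(b);
 *     do_something(d, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 */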
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
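/* memblock_make_local() below is used when memory we do not own (a FIXED
 * block or an IMPORTED segment) is about to go away while the block is
 * still referenced: the payload is copied into a pool slot if possible,
 * otherwise onto the heap, and the block type is rewritten accordingly. */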
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);
#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

/*         PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew0(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
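/* pa_memimport_get() below is the receiving end of the SHM transport: given
 * the (block_id, shm_id, offset, size) tuple published by the sender's
 * pa_memexport_put(), it attaches the segment read-only if necessary and
 * wraps the region in a PA_MEMBLOCK_IMPORTED block. */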
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
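/* Illustrative export flow (sketch, not code from this file): the tuple
 * filled in by pa_memexport_put() below is what travels over the wire, and
 * the peer turns it back into a block with pa_memimport_get().
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     if (pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size) >= 0) {
 *         ... transmit block_id, shm_id, offset and size to the peer ...
 *     }
 */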
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
/*     pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}