/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"
/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16
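
/* With the defaults above a pool therefore spans
 * PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB = 64 MiB
 * of address space, matching the 64*1024*1024 bytes mentioned in the
 * comment; only the slots actually touched get committed by the OS. */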
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct  {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};
/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};
struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
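
/* Illustrative sketch of the allocation path as seen by a caller, assuming a
 * pool created with pa_mempool_new() further below:
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE, 0);     // default-sized shared pool
 *     pa_memblock *b = pa_memblock_new(pool, 4096);   // pool slot if it fits, else appended
 *     ...
 *     pa_memblock_unref(b);
 */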
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);
    pa_assert(length);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
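
/* Illustrative sketch of wrapping caller-owned memory, assuming the caller
 * allocated it with pa_xmalloc() and wants it released with pa_xfree():
 *
 *     void *buf = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, FALSE);
 *     ...
 *     pa_memblock_unref(b);   // free_cb (pa_xfree) runs once the last reference is gone
 */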
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks by its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
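
/* Illustrative sketch: acquire/release must bracket every access to the data,
 * so that memblock_wait() below can tell when no thread still uses the block
 * ('dest' is a hypothetical destination buffer):
 *
 *     void *src = pa_memblock_acquire(b);
 *     memcpy(dest, src, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 */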
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Humm, not enough space in the pool, so lets allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
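
/* Illustrative sketch of the two usual ways a pool is sized: a size of 0
 * falls back to PA_MEMPOOL_SLOTS_MAX slots, any other value is divided by
 * the (page-aligned) block size:
 *
 *     pa_mempool *shared_pool  = pa_mempool_new(TRUE, 0);             // 1024 slots
 *     pa_mempool *private_pool = pa_mempool_new(FALSE, 2*1024*1024);  // 32 slots of 64 KiB
 */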
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    b = NULL;
            }

            if (b)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}
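
/* For the default 64 KiB slot size this comes out to
 * 64*1024 - PA_ALIGN(sizeof(pa_memblock)), i.e. slightly less than 64 KiB,
 * since the pa_memblock header of a PA_MEMBLOCK_POOL block lives at the
 * start of its slot (see pa_memblock_new_pool() above). */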
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew0(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);

    pa_xfree(e);
}
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;

    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
/*     pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}
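
/* Illustrative sketch of the SHM transfer flow between two processes,
 * assuming the IDs travel over some existing transport (names below are
 * placeholders, not part of this API):
 *
 *     // sender
 *     uint32_t block_id, shm_id; size_t offset, size;
 *     pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size);
 *     send_to_peer(block_id, shm_id, offset, size);            // hypothetical transport
 *
 *     // receiver
 *     pa_memblock *imported = pa_memimport_get(i, block_id, shm_id, offset, size);
 *
 *     // sender, once the receiver reports it is done with the block
 *     pa_memexport_process_release(e, block_id);
 */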