src/pulsecore/flist.c
/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <pulse/xmalloc.h>

#include <pulsecore/atomic.h>
#include <pulsecore/log.h>
#include <pulsecore/thread.h>
#include <pulsecore/macro.h>
#include <pulsecore/core-util.h>

#include "flist.h"

/* The algorithm is not perfect: in a few corner cases it will fail
 * to pop from the flist although it isn't empty, and fail to push
 * into the flist although it isn't full.
 *
 * We keep a fixed size array of entries, each item being an atomic
 * pointer.
 *
 * To accelerate finding used/unused cells we maintain a read and a
 * write index which are used like a ring buffer. After each push we
 * increase the write index and after each pop we increase the read
 * index.
 *
 * The indexes are incremented atomically and are never truncated to
 * the buffer size. Instead we assume that the buffer size is a power
 * of two so that the truncation can be done by applying a simple AND
 * on read (see reduce() below for a worked example).
 *
 * To make sure that we do not look for empty cells indefinitely we
 * maintain a length value which stores the number of used cells.
 * From this value the number of unused cells is easily calculated.
 * Please note that the length value is not updated atomically with
 * the read and write index and might thus be a few cells off the
 * real value. To deal with this we always scan N_EXTRA_SCAN extra
 * cells when pushing/popping entries.
 *
 * It might make sense to replace this implementation with a linked
 * list stack or queue, which however would require DCAS to be
 * simple. Patches welcome.
 *
 * Please note that this algorithm is home grown. A typical usage
 * pattern is sketched below. */
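
/* A minimal usage sketch (hypothetical caller, not part of this
 * file; the names "pool" and "struct entry" are made up): an flist
 * is typically used as a lock-free pool for recycling fixed-size
 * objects between threads:
 *
 *     static pa_flist *pool;
 *
 *     struct entry *e;
 *     if (!(e = pa_flist_pop(pool)))       // reuse a recycled entry
 *         e = pa_xnew(struct entry, 1);    // pool empty: allocate
 *     ...
 *     if (pa_flist_push(pool, e) < 0)      // hand it back when done
 *         pa_xfree(e);                     // pool full: really free
 */
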
#define FLIST_SIZE 128
#define N_EXTRA_SCAN 3

/* For debugging purposes we can define _Y to put an extra thread
 * yield between each operation. */

#ifdef PROFILE
#define _Y pa_thread_yield()
#else
#define _Y do { } while(0)
#endif

struct pa_flist {
    unsigned size;
    pa_atomic_t length;
    pa_atomic_t read_idx;
    pa_atomic_t write_idx;
};

#define PA_FLIST_CELLS(x) ((pa_atomic_ptr_t*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_flist))))
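
/* The cells are not a separate allocation: pa_flist_new() below
 * allocates the header and the cell array as one block, giving this
 * layout (sketch):
 *
 *   | struct pa_flist, padded to PA_ALIGN | cell[0] | cell[1] | ... | cell[size-1] |
 *
 * PA_FLIST_CELLS() recovers the start of the cell array by skipping
 * over the aligned header. */
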
pa_flist *pa_flist_new(unsigned size) {
    pa_flist *l;

    if (!size)
        size = FLIST_SIZE;

    pa_assert(pa_is_power_of_two(size));

    l = pa_xmalloc0(PA_ALIGN(sizeof(pa_flist)) + (sizeof(pa_atomic_ptr_t) * size));

    l->size = size;

    pa_atomic_store(&l->read_idx, 0);
    pa_atomic_store(&l->write_idx, 0);
    pa_atomic_store(&l->length, 0);

    return l;
}
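
/* Truncate a monotonically growing index to a cell number. Since the
 * size is a power of two, size - 1 is an all-ones bit mask: with the
 * default size of 128 the mask is 0x7f, so e.g. an index of 130 maps
 * to cell 130 & 0x7f == 2, just as if it had wrapped around a
 * 128-entry ring. */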
static unsigned reduce(pa_flist *l, unsigned value) {
    return value & (l->size - 1);
}

void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
    pa_assert(l);

    if (free_cb) {
        pa_atomic_ptr_t *cells;
        unsigned idx;

        cells = PA_FLIST_CELLS(l);

        for (idx = 0; idx < l->size; idx++) {
            void *p;

            if ((p = pa_atomic_ptr_load(&cells[idx])))
                free_cb(p);
        }
    }

    pa_xfree(l);
}

int pa_flist_push(pa_flist *l, void *p) {
    unsigned idx, n, len;
    pa_atomic_ptr_t *cells;

    pa_assert(l);
    pa_assert(p);

    cells = PA_FLIST_CELLS(l);

    /* Number of free cells, plus slack because the length counter may
     * lag behind the indexes (see the comment at the top) */
    n = len = l->size + N_EXTRA_SCAN - (unsigned) pa_atomic_load(&l->length);

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->write_idx));

    for (; n > 0; n--) {
        _Y;
        /* Claim the cell; this succeeds only if it is still empty */
        if (pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {
            _Y;
            pa_atomic_inc(&l->write_idx);

            _Y;
            pa_atomic_inc(&l->length);

            return 0;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find free cell after %u iterations.", len);
#endif

    return -1;
}

void* pa_flist_pop(pa_flist *l) {
    unsigned idx, len, n;
    pa_atomic_ptr_t *cells;

    pa_assert(l);

    cells = PA_FLIST_CELLS(l);

    /* Number of used cells, plus slack because the length counter may
     * lag behind the indexes */
    n = len = (unsigned) pa_atomic_load(&l->length) + N_EXTRA_SCAN;

    _Y;
    idx = reduce(l, (unsigned) pa_atomic_load(&l->read_idx));

    for (; n > 0; n--) {
        void *p;

        _Y;
        p = pa_atomic_ptr_load(&cells[idx]);

        if (p) {
            _Y;
            /* The cell changed under us; reload and retry this cell */
            if (!pa_atomic_ptr_cmpxchg(&cells[idx], p, NULL))
                continue;

            _Y;
            pa_atomic_inc(&l->read_idx);

            _Y;
            pa_atomic_dec(&l->length);

            return p;
        }

        _Y;
        idx = reduce(l, idx + 1);
    }

#ifdef PROFILE
    if (len > N_EXTRA_SCAN)
        pa_log_warn("Didn't find used cell after %u iterations.", len);
#endif

    return NULL;
}