Alc/ringbuffer.c

/**
 * OpenAL cross platform audio library
 * Copyright (C) 1999-2007 by authors.
 *
 * This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Library General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 *  License along with this library; if not, write to the
 *  Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * Or go to http://www.gnu.org/copyleft/lgpl.html
 */

#include "config.h"

#include <string.h>
#include <stdlib.h>
#include <limits.h>

#include "ringbuffer.h"
#include "align.h"
#include "atomic.h"
#include "threads.h"
#include "almalloc.h"
#include "compat.h"

/* NOTE: This lockless ringbuffer implementation is copied from JACK, extended
 * to include an element size. Consequently, parameters and return values for
 * a size or count are in 'elements', not bytes. Additionally, it only
 * supports single-consumer/single-provider operation. */
struct ll_ringbuffer {
    ATOMIC(size_t) write_ptr;
    ATOMIC(size_t) read_ptr;
    size_t size;
    size_t size_mask;
    size_t elem_size;

    alignas(16) char buf[];
};

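/* Added commentary (not in the upstream source): write_ptr and read_ptr are
 * element counters that are only meaningful modulo the power-of-two buffer
 * size; they are masked with size_mask whenever they index buf or when
 * distances between them are computed, and buf holds size_mask+1 elements of
 * elem_size bytes each. Only the writer updates write_ptr and only the reader
 * updates read_ptr, which is what makes the single-consumer/single-provider
 * scheme safe without locks. */
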
ll_ringbuffer_t *ll_ringbuffer_create(size_t sz, size_t elem_sz, int limit_writes)
{
    ll_ringbuffer_t *rb;
    size_t power_of_two = 0;

    if(sz > 0)
    {
        power_of_two = sz;
        power_of_two |= power_of_two>>1;
        power_of_two |= power_of_two>>2;
        power_of_two |= power_of_two>>4;
        power_of_two |= power_of_two>>8;
        power_of_two |= power_of_two>>16;
#if SIZE_MAX > UINT_MAX
        power_of_two |= power_of_two>>32;
#endif
    }
    power_of_two++;
    if(power_of_two < sz) return NULL;

    rb = al_malloc(16, sizeof(*rb) + power_of_two*elem_sz);
    if(!rb) return NULL;

    ATOMIC_INIT(&rb->write_ptr, 0);
    ATOMIC_INIT(&rb->read_ptr, 0);
    rb->size = limit_writes ? sz : power_of_two;
    rb->size_mask = power_of_two - 1;
    rb->elem_size = elem_sz;
    return rb;
}

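/* Added commentary (not in the upstream source): the shift/OR chain above
 * smears the highest set bit of sz into every lower bit, so the final
 * increment yields a power of two greater than sz; e.g. a request for 1000
 * elements allocates 1024. If the smear produces SIZE_MAX, the increment
 * wraps power_of_two to 0 and the "power_of_two < sz" check rejects the
 * request instead of allocating a too-small buffer. */
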
void ll_ringbuffer_free(ll_ringbuffer_t *rb)
{
    al_free(rb);
}

void ll_ringbuffer_reset(ll_ringbuffer_t *rb)
{
    ATOMIC_STORE(&rb->write_ptr, 0, almemory_order_release);
    ATOMIC_STORE(&rb->read_ptr, 0, almemory_order_release);
    memset(rb->buf, 0, (rb->size_mask+1)*rb->elem_size);
}

size_t ll_ringbuffer_read_space(const ll_ringbuffer_t *rb)
{
    size_t w = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->write_ptr, almemory_order_acquire);
    size_t r = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->read_ptr, almemory_order_acquire);
    return (w-r) & rb->size_mask;
}

size_t ll_ringbuffer_write_space(const ll_ringbuffer_t *rb)
{
    size_t w = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->write_ptr, almemory_order_acquire);
    size_t r = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->read_ptr, almemory_order_acquire);
    w = (r-w-1) & rb->size_mask;
    return (w > rb->size) ? rb->size : w;
}

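/* Added commentary (not in the upstream source): read space is the distance
 * from the read pointer to the write pointer, masked to the buffer size.
 * Write space reserves one extra element ((r-w-1) & size_mask) so that a
 * completely full buffer never looks identical to an empty one, and the
 * result is then clamped to rb->size, which ll_ringbuffer_create set to the
 * requested size when limit_writes was nonzero. */
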
size_t ll_ringbuffer_read(ll_ringbuffer_t *rb, char *dest, size_t cnt)
{
    size_t read_ptr;
    size_t free_cnt;
    size_t cnt2;
    size_t to_read;
    size_t n1, n2;

    free_cnt = ll_ringbuffer_read_space(rb);
    if(free_cnt == 0) return 0;

    to_read = (cnt > free_cnt) ? free_cnt : cnt;
    read_ptr = ATOMIC_LOAD(&rb->read_ptr, almemory_order_relaxed) & rb->size_mask;

    cnt2 = read_ptr + to_read;
    if(cnt2 > rb->size_mask+1)
    {
        n1 = rb->size_mask+1 - read_ptr;
        n2 = cnt2 & rb->size_mask;
    }
    else
    {
        n1 = to_read;
        n2 = 0;
    }

    memcpy(dest, &rb->buf[read_ptr*rb->elem_size], n1*rb->elem_size);
    read_ptr += n1;
    if(n2)
    {
        memcpy(dest + n1*rb->elem_size, &rb->buf[(read_ptr&rb->size_mask)*rb->elem_size],
               n2*rb->elem_size);
        read_ptr += n2;
    }
    ATOMIC_STORE(&rb->read_ptr, read_ptr, almemory_order_release);
    return to_read;
}

size_t ll_ringbuffer_peek(ll_ringbuffer_t *rb, char *dest, size_t cnt)
{
    size_t free_cnt;
    size_t cnt2;
    size_t to_read;
    size_t n1, n2;
    size_t read_ptr;

    free_cnt = ll_ringbuffer_read_space(rb);
    if(free_cnt == 0) return 0;

    to_read = (cnt > free_cnt) ? free_cnt : cnt;
    read_ptr = ATOMIC_LOAD(&rb->read_ptr, almemory_order_relaxed) & rb->size_mask;

    cnt2 = read_ptr + to_read;
    if(cnt2 > rb->size_mask+1)
    {
        n1 = rb->size_mask+1 - read_ptr;
        n2 = cnt2 & rb->size_mask;
    }
    else
    {
        n1 = to_read;
        n2 = 0;
    }

    memcpy(dest, &rb->buf[read_ptr*rb->elem_size], n1*rb->elem_size);
    if(n2)
    {
        read_ptr += n1;
        memcpy(dest + n1*rb->elem_size, &rb->buf[(read_ptr&rb->size_mask)*rb->elem_size],
               n2*rb->elem_size);
    }
    return to_read;
}

size_t ll_ringbuffer_write(ll_ringbuffer_t *rb, const char *src, size_t cnt)
{
    size_t write_ptr;
    size_t free_cnt;
    size_t cnt2;
    size_t to_write;
    size_t n1, n2;

    free_cnt = ll_ringbuffer_write_space(rb);
    if(free_cnt == 0) return 0;

    to_write = (cnt > free_cnt) ? free_cnt : cnt;
    write_ptr = ATOMIC_LOAD(&rb->write_ptr, almemory_order_relaxed) & rb->size_mask;

    cnt2 = write_ptr + to_write;
    if(cnt2 > rb->size_mask+1)
    {
        n1 = rb->size_mask+1 - write_ptr;
        n2 = cnt2 & rb->size_mask;
    }
    else
    {
        n1 = to_write;
        n2 = 0;
    }

    memcpy(&rb->buf[write_ptr*rb->elem_size], src, n1*rb->elem_size);
    write_ptr += n1;
    if(n2)
    {
        memcpy(&rb->buf[(write_ptr&rb->size_mask)*rb->elem_size], src + n1*rb->elem_size,
               n2*rb->elem_size);
        write_ptr += n2;
    }
    ATOMIC_STORE(&rb->write_ptr, write_ptr, almemory_order_release);
    return to_write;
}

void ll_ringbuffer_read_advance(ll_ringbuffer_t *rb, size_t cnt)
{
    ATOMIC_ADD(&rb->read_ptr, cnt, almemory_order_acq_rel);
}

void ll_ringbuffer_write_advance(ll_ringbuffer_t *rb, size_t cnt)
{
    ATOMIC_ADD(&rb->write_ptr, cnt, almemory_order_acq_rel);
}

void ll_ringbuffer_get_read_vector(const ll_ringbuffer_t *rb, ll_ringbuffer_data_t vec[2])
{
    size_t free_cnt;
    size_t cnt2;
    size_t w, r;

    w = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->write_ptr, almemory_order_acquire);
    r = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->read_ptr, almemory_order_acquire);
    w &= rb->size_mask;
    r &= rb->size_mask;
    free_cnt = (w-r) & rb->size_mask;

    cnt2 = r + free_cnt;
    if(cnt2 > rb->size_mask+1)
    {
        /* Two part vector: the rest of the buffer after the current read ptr,
         * plus some from the start of the buffer. */
        vec[0].buf = (char*)&rb->buf[r*rb->elem_size];
        vec[0].len = rb->size_mask+1 - r;
        vec[1].buf = (char*)rb->buf;
        vec[1].len = cnt2 & rb->size_mask;
    }
    else
    {
        /* Single part vector: just the rest of the buffer */
        vec[0].buf = (char*)&rb->buf[r*rb->elem_size];
        vec[0].len = free_cnt;
        vec[1].buf = NULL;
        vec[1].len = 0;
    }
}

void ll_ringbuffer_get_write_vector(const ll_ringbuffer_t *rb, ll_ringbuffer_data_t vec[2])
{
    size_t free_cnt;
    size_t cnt2;
    size_t w, r;

    w = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->write_ptr, almemory_order_acquire);
    r = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->read_ptr, almemory_order_acquire);
    w &= rb->size_mask;
    r &= rb->size_mask;
    free_cnt = (r-w-1) & rb->size_mask;
    if(free_cnt > rb->size) free_cnt = rb->size;

    cnt2 = w + free_cnt;
    if(cnt2 > rb->size_mask+1)
    {
        /* Two part vector: the rest of the buffer after the current write ptr,
         * plus some from the start of the buffer. */
        vec[0].buf = (char*)&rb->buf[w*rb->elem_size];
        vec[0].len = rb->size_mask+1 - w;
        vec[1].buf = (char*)rb->buf;
        vec[1].len = cnt2 & rb->size_mask;
    }
    else
    {
        vec[0].buf = (char*)&rb->buf[w*rb->elem_size];
        vec[0].len = free_cnt;
        vec[1].buf = NULL;
        vec[1].len = 0;
    }
}
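
/* Illustrative usage sketch (added for this write-up; not part of the
 * upstream file). It shows the intended single-consumer/single-provider call
 * pattern using only the functions defined above, for a hypothetical buffer
 * of float samples. The producer and consumer would normally run on separate
 * threads; they are shown inline here purely to demonstrate the calls, and
 * the block is disabled with #if 0 so it has no effect on the build. */
#if 0
static void example_ringbuffer_usage(void)
{
    float input[256] = {0.0f};
    float output[256];
    ll_ringbuffer_data_t vec[2];
    size_t wrote, got;

    /* Room for at least 1024 float elements; limit_writes==0 permits using
     * the full rounded-up power-of-two capacity. */
    ll_ringbuffer_t *rb = ll_ringbuffer_create(1024, sizeof(float), 0);
    if(!rb) return;

    /* Producer side: copy whole elements in. The return value is the number
     * of elements actually written, which may be less than requested if the
     * buffer is nearly full. */
    wrote = ll_ringbuffer_write(rb, (const char*)input, 256);

    /* Consumer side: copy up to 256 elements back out. */
    got = ll_ringbuffer_read(rb, (char*)output, 256);

    /* Zero-copy alternative for the producer: obtain writable regions, fill
     * them in place, then publish the elements with write_advance. */
    ll_ringbuffer_get_write_vector(rb, vec);
    if(vec[0].len > 0)
    {
        memset(vec[0].buf, 0, vec[0].len*sizeof(float));
        ll_ringbuffer_write_advance(rb, vec[0].len);
    }

    (void)wrote; (void)got;
    ll_ringbuffer_free(rb);
}
#endif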