Revert the recent change to bufread()
[Rockbox.git] / apps / buffering.c
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2007 Nicolas Pennequin
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
20 #include "config.h"
21 #include <stdio.h>
22 #include <string.h>
23 #include <stdlib.h>
24 #include <ctype.h>
25 #include "buffering.h"
27 #include "ata.h"
28 #include "system.h"
29 #include "thread.h"
30 #include "file.h"
31 #include "panic.h"
32 #include "memory.h"
33 #include "lcd.h"
34 #include "font.h"
35 #include "button.h"
36 #include "kernel.h"
37 #include "tree.h"
38 #include "debug.h"
39 #include "sprintf.h"
40 #include "settings.h"
41 #include "codecs.h"
42 #include "audio.h"
43 #include "mp3_playback.h"
44 #include "usb.h"
45 #include "status.h"
46 #include "screens.h"
47 #include "playlist.h"
48 #include "playback.h"
49 #include "pcmbuf.h"
50 #include "buffer.h"
52 #ifdef SIMULATOR
53 #define ata_disk_is_active() 1
54 #endif
56 #if MEM > 1
57 #define GUARD_BUFSIZE (32*1024)
58 #else
59 #define GUARD_BUFSIZE (8*1024)
60 #endif
62 /* Define LOGF_ENABLE to enable logf output in this file */
63 /*#define LOGF_ENABLE*/
64 #include "logf.h"
66 /* macros to enable logf for queues
67 logging on SYS_TIMEOUT can be disabled */
68 #ifdef SIMULATOR
69 /* Define this for logf output of all queuing except SYS_TIMEOUT */
70 #define BUFFERING_LOGQUEUES
71 /* Define this to logf SYS_TIMEOUT messages */
72 /* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
73 #endif
75 #ifdef BUFFERING_LOGQUEUES
76 #define LOGFQUEUE logf
77 #else
78 #define LOGFQUEUE(...)
79 #endif
81 #ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
82 #define LOGFQUEUE_SYS_TIMEOUT logf
83 #else
84 #define LOGFQUEUE_SYS_TIMEOUT(...)
85 #endif
87 /* default point to start buffer refill */
88 #define BUFFERING_DEFAULT_WATERMARK (1024*512)
89 /* amount of data to read in one read() call */
90 #define BUFFERING_DEFAULT_FILECHUNK (1024*32)
91 /* point at which the file buffer will fight for CPU time */
92 #define BUFFERING_CRITICAL_LEVEL (1024*128)
95 /* Ring buffer helper macros */
96 /* Buffer pointer (p) plus value (v), wrapped if necessary */
97 #define RINGBUF_ADD(p,v) ((p+v)<buffer_len ? p+v : p+v-buffer_len)
98 /* Buffer pointer (p) minus value (v), wrapped if necessary */
99 #define RINGBUF_SUB(p,v) ((p>=v) ? p-v : p+buffer_len-v)
100 /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
101 #define RINGBUF_ADD_CROSS(p1,v,p2) \
102 ((p1<p2) ? (int)(p1+v)-(int)p2 : (int)(p1+v-p2)-(int)buffer_len)
103 /* Bytes available in the buffer */
104 #define BUF_USED RINGBUF_SUB(buf_widx, buf_ridx)
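/* Worked example of the wrapping arithmetic above (illustrative only,
   assuming buffer_len == 1000):
   RINGBUF_ADD(990, 30)            -> 20    (wraps past the end)
   RINGBUF_SUB(20, 30)             -> 990   (wraps backwards)
   RINGBUF_ADD_CROSS(990, 30, 10)  -> 10    (adding 30 to 990 crosses index 10 by 10 bytes)
   RINGBUF_ADD_CROSS(100, 30, 200) -> -70   (stays 70 bytes short of index 200) */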
106 struct memory_handle {
107 int id; /* A unique ID for the handle */
108 enum data_type type;
109 char path[MAX_PATH];
110 int fd;
111 size_t data; /* Start index of the handle's data buffer */
112 volatile size_t ridx; /* Current read pointer, relative to the main buffer */
113 size_t widx; /* Current write pointer */
114 size_t filesize; /* File total length */
115 size_t filerem; /* Remaining bytes of file NOT in buffer */
116 volatile size_t available; /* Available bytes to read from buffer */
117 size_t offset; /* Offset at which we started reading the file */
118 struct memory_handle *next;
120 /* at all times, we have: filesize == offset + available + filerem */
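/* Illustrative instance of the invariant above (numbers are made up):
   a 1048576-byte file opened at offset 0 with 307200 bytes buffered so far
   has filesize == 1048576, offset == 0, available == 307200 and
   filerem == 741376, and indeed 0 + 307200 + 741376 == 1048576. */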
123 static char *buffer;
124 static char *guard_buffer;
126 static size_t buffer_len;
128 static volatile size_t buf_widx; /* current writing position */
129 static volatile size_t buf_ridx; /* current reading position */
130 /* buf_*idx are values relative to the buffer, not real pointers. */
132 /* Configuration */
133 static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
134 static size_t conf_filechunk = 0; /* Largest chunk the codec accepts */
135 static size_t conf_preseek = 0; /* Codec pre-seek margin */
136 #if MEM > 8
137 static size_t high_watermark = 0; /* High watermark for rebuffer */
138 #endif
140 /* current memory handle in the linked list. NULL when the list is empty. */
141 static struct memory_handle *cur_handle;
142 /* first memory handle in the linked list. NULL when the list is empty. */
143 static struct memory_handle *first_handle;
145 static int num_handles; /* number of handles in the list */
147 static int base_handle_id;
149 static struct mutex llist_mutex;
151 /* Handle cache (makes find_handle faster).
152 This needs to be global so that move_handle can invalidate it. */
153 static struct memory_handle *cached_handle = NULL;
155 static buffer_low_callback buffer_low_callback_funcs[MAX_BUF_CALLBACKS];
156 static int buffer_callback_count = 0;
158 static struct {
159 size_t remaining; /* Amount of data needing to be buffered */
160 size_t wasted; /* Amount of space available for freeing */
161 size_t buffered; /* Amount of data currently in the buffer */
162 size_t useful; /* Amount of data still useful to the user */
163 } data_counters;
166 /* Messages available to communicate with the buffering thread */
167 enum {
168 Q_BUFFER_HANDLE = 1, /* Request buffering of a handle */
169 Q_RESET_HANDLE, /* (internal) Request resetting of a handle to its
170 offset (the offset has to be set beforehand) */
171 Q_CLOSE_HANDLE, /* Request closing a handle */
172 Q_BASE_HANDLE, /* Set the reference handle for buf_useful_data */
174 /* Configuration: */
175 Q_SET_WATERMARK,
176 Q_SET_CHUNKSIZE,
177 Q_SET_PRESEEK,
180 /* Buffering thread */
181 void buffering_thread(void);
182 static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
183 static const char buffering_thread_name[] = "buffering";
184 static struct thread_entry *buffering_thread_p;
185 static struct event_queue buffering_queue;
186 static struct queue_sender_list buffering_queue_sender_list;
190 LINKED LIST MANAGEMENT
191 ======================
193 add_handle : Add a handle to the list
194 rm_handle : Remove a handle from the list
195 find_handle : Get a handle pointer from an ID
196 move_handle : Move a handle in the buffer (with or without its data)
198 These functions only handle the linked list structure. They don't touch the
199 contents of the struct memory_handle headers. They also change the buf_*idx
200 pointers when necessary and manage the handle IDs.
202 We keep track of the first and the current (== last) handle.
203 A new handle is added at buf_widx and becomes the current one.
204 buf_widx always points to the current writing position for the current handle
205 buf_ridx always points to the location of the first handle.
206 buf_ridx == buf_widx means the buffer is empty.
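/* Illustrative buffer layout with two handles (a sketch, not to scale):

   buffer: [ hdl A | A's data ........... | hdl B | B's data ..... | free ]
             ^                                                      ^
             buf_ridx (== first_handle)                             buf_widx
                                                                    (cur_handle
                                                                     writes here) */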
210 /* Add a new handle to the linked list and return it. It becomes the new
211 current handle. "data_size" must contain the size of what will be stored in
212 the handle. On return, it holds the size actually available for the handle. */
213 static struct memory_handle *add_handle(size_t *data_size)
215 mutex_lock(&llist_mutex);
217 /* this will give each handle a unique id */
218 static int cur_handle_id = 1;
220 /* make sure buf_widx is 32-bit aligned so that the handle struct is,
221 but first check that we can actually align. */
222 if (RINGBUF_ADD_CROSS(buf_widx, 3, buf_ridx) >= 0) {
223 mutex_unlock(&llist_mutex);
224 return NULL;
226 buf_widx = (RINGBUF_ADD(buf_widx, 3)) & ~3;
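    /* Illustrative arithmetic for the alignment above: a buf_widx of 13
       becomes (13+3) & ~3 == 16, while an already-aligned 12 stays at
       (12+3) & ~3 == 12 (assuming no wrap occurs in RINGBUF_ADD). */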
228 size_t len = (data_size ? *data_size : 0)
229 + sizeof(struct memory_handle);
231 /* check that we actually can add the handle and its data */
232 int overlap = RINGBUF_ADD_CROSS(buf_widx, len, buf_ridx);
233 if (overlap >= 0) {
234 *data_size -= overlap;
235 len -= overlap;
237 if (len < sizeof(struct memory_handle)) {
238 /* There isn't even enough space to write the struct */
239 mutex_unlock(&llist_mutex);
240 return NULL;
243 struct memory_handle *new_handle =
244 (struct memory_handle *)(&buffer[buf_widx]);
246 /* only advance the buffer write index of the size of the struct */
247 buf_widx = RINGBUF_ADD(buf_widx, sizeof(struct memory_handle));
249 if (!first_handle) {
250 /* the new handle is the first one */
251 first_handle = new_handle;
254 if (cur_handle) {
255 cur_handle->next = new_handle;
258 cur_handle = new_handle;
259 cur_handle->id = cur_handle_id++;
260 cur_handle->next = NULL;
261 num_handles++;
263 mutex_unlock(&llist_mutex);
264 return cur_handle;
267 /* Delete a given memory handle from the linked list
268 and return true for success. Nothing is actually erased from memory. */
269 static bool rm_handle(struct memory_handle *h)
271 mutex_lock(&llist_mutex);
273 if (h == first_handle) {
274 first_handle = h->next;
275 if (h == cur_handle) {
276 /* h was the first and last handle: the buffer is now empty */
277 cur_handle = NULL;
278 buf_ridx = buf_widx;
279 } else {
280 /* update buf_ridx to point to the new first handle */
281 buf_ridx = (void *)first_handle - (void *)buffer;
283 } else {
284 struct memory_handle *m = first_handle;
285 while (m && m->next != h) {
286 m = m->next;
288 if (h && m && m->next == h) {
289 m->next = h->next;
290 if (h == cur_handle) {
291 cur_handle = m;
292 buf_widx = cur_handle->widx;
294 } else {
295 mutex_unlock(&llist_mutex);
296 return false;
300 /* Invalidate the cache to prevent it from keeping the old location of h */
301 if (h == cached_handle)
302 cached_handle = NULL;
304 num_handles--;
306 mutex_unlock(&llist_mutex);
307 return true;
310 /* Return a pointer to the memory handle of given ID.
311 NULL if the handle wasn't found */
312 static struct memory_handle *find_handle(int handle_id)
314 if (handle_id <= 0)
315 return NULL;
317 mutex_lock(&llist_mutex);
319 /* simple caching because most of the time the requested handle
320 will either be the same as the last, or the one after the last */
321 if (cached_handle)
323 if (cached_handle->id == handle_id) {
324 mutex_unlock(&llist_mutex);
325 return cached_handle;
326 } else if (cached_handle->next &&
327 (cached_handle->next->id == handle_id)) {
328 cached_handle = cached_handle->next;
329 mutex_unlock(&llist_mutex);
330 return cached_handle;
334 struct memory_handle *m = first_handle;
335 while (m && m->id != handle_id) {
336 m = m->next;
338 /* This condition can only be reached with !m or m->id == handle_id */
339 if (m) {
340 cached_handle = m;
343 mutex_unlock(&llist_mutex);
344 return m;
347 /* Move a memory handle and data_size bytes of its data by delta bytes.
348 Return a pointer to the new location of the handle.
349 delta is the number of bytes by which to move the struct and its data.
350 data_size is the amount of data to move along with the struct. */
351 static struct memory_handle *move_handle(struct memory_handle *h,
352 size_t *delta, size_t data_size)
354 mutex_lock(&llist_mutex);
356 if (*delta < 4) {
357 /* aligning backwards would yield a negative result,
358 and moving the handle by such a small amount is a waste
359 of time anyway. */
360 mutex_unlock(&llist_mutex);
361 return NULL;
363 /* make sure delta is 32-bit aligned so that the handle struct is. */
364 *delta = (*delta - 3) & ~3;
366 size_t newpos = RINGBUF_ADD((void *)h - (void *)buffer, *delta);
368 struct memory_handle *dest = (struct memory_handle *)(&buffer[newpos]);
370 /* Invalidate the cache to prevent it from keeping the old location of h */
371 if (h == cached_handle)
372 cached_handle = NULL;
374 /* the cur_handle pointer might need updating */
375 if (h == cur_handle) {
376 cur_handle = dest;
379 if (h == first_handle) {
380 first_handle = dest;
381 buf_ridx = newpos;
382 } else {
383 struct memory_handle *m = first_handle;
384 while (m && m->next != h) {
385 m = m->next;
387 if (h && m && m->next == h) {
388 m->next = dest;
389 } else {
390 mutex_unlock(&llist_mutex);
391 return NULL;
395 memmove(dest, h, sizeof(struct memory_handle) + data_size);
397 mutex_unlock(&llist_mutex);
398 return dest;
403 BUFFER SPACE MANAGEMENT
404 =======================
406 yield_codec : Used by buffer_handle to know if it should interrupt buffering
407 buffer_handle : Buffer data for a handle
408 reset_handle : Reset writing position and data buffer of a handle to its
409 current offset
410 rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
411 shrink_handle : Free buffer space by moving a handle
412 fill_buffer : Call buffer_handle for all handles that have data to buffer
413 can_add_handle : Indicate whether it's safe to add a handle
415 These functions are used by the buffering thread to manage buffer space.
419 static inline bool filebuf_is_lowdata(void)
421 return BUF_USED < BUFFERING_CRITICAL_LEVEL;
424 /* Yield to the codec thread for as long as possible if it is in need of data.
425 Return true if the caller should break to let the buffering thread process
426 new queue events */
427 static bool yield_codec(void)
429 yield();
431 if (!queue_empty(&buffering_queue))
432 return true;
434 while (pcmbuf_is_lowdata() && !filebuf_is_lowdata())
436 sleep(2);
438 if (!queue_empty(&buffering_queue))
439 return true;
442 return false;
445 /* Buffer data for the given handle. Return the amount of data buffered
446 or -1 if the handle wasn't found */
447 static ssize_t buffer_handle(int handle_id)
449 logf("buffer_handle(%d)", handle_id);
450 struct memory_handle *h = find_handle(handle_id);
451 if (!h)
452 return -1;
454 if (h->filerem == 0) {
455 /* nothing left to buffer */
456 return 0;
459 if (h->fd < 0) /* file closed, reopen */
461 if (*h->path)
462 h->fd = open(h->path, O_RDONLY);
463 else
464 return -1;
466 if (h->fd < 0)
467 return -1;
469 if (h->offset)
470 lseek(h->fd, h->offset, SEEK_SET);
473 trigger_cpu_boost();
475 ssize_t ret = 0;
476 while (h->filerem > 0)
478 /* max amount to copy */
479 size_t copy_n = MIN( MIN(h->filerem, conf_filechunk),
480 buffer_len - h->widx);
482 /* stop copying if it would overwrite the reading position
483 or the next handle */
484 if (RINGBUF_ADD_CROSS(h->widx, copy_n, buf_ridx) >= 0 || (h->next &&
485 RINGBUF_ADD_CROSS(h->widx, copy_n, (unsigned)
486 ((void *)h->next - (void *)buffer)) > 0))
487 break;
489 /* rc is the actual amount read */
490 int rc = read(h->fd, &buffer[h->widx], copy_n);
492 if (rc < 0)
494 if (h->type == TYPE_CODEC) {
495 logf("Partial codec");
496 break;
499 DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
500 h->filesize -= h->filerem;
501 h->filerem = 0;
502 break;
505 /* Advance buffer */
506 h->widx = RINGBUF_ADD(h->widx, rc);
507 if (h == cur_handle)
508 buf_widx = h->widx;
509 h->available += rc;
510 ret += rc;
511 h->filerem -= rc;
513 /* Stop buffering if new queue events have arrived */
514 if (yield_codec())
515 break;
518 if (h->filerem == 0) {
519 /* finished buffering the file */
520 close(h->fd);
521 h->fd = -1;
524 return ret;
527 /* Reset writing position and data buffer of a handle to its current offset.
528 Use this after having set the new offset to use. */
529 static void reset_handle(int handle_id)
531 logf("reset_handle(%d)", handle_id);
533 struct memory_handle *h = find_handle(handle_id);
534 if (!h)
535 return;
537 h->widx = h->data;
538 if (h == cur_handle)
539 buf_widx = h->widx;
540 h->available = 0;
541 h->filerem = h->filesize - h->offset;
543 if (h->fd >= 0) {
544 lseek(h->fd, h->offset, SEEK_SET);
548 /* Seek to a nonbuffered part of a handle by rebuffering the data. */
549 static void rebuffer_handle(int handle_id, size_t newpos)
551 struct memory_handle *h = find_handle(handle_id);
552 if (!h)
553 return;
555 h->offset = newpos;
557 LOGFQUEUE("? >| buffering Q_RESET_HANDLE");
558 queue_send(&buffering_queue, Q_RESET_HANDLE, handle_id);
560 LOGFQUEUE("? >| buffering Q_BUFFER_HANDLE");
561 queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
563 h->ridx = h->data;
566 static bool close_handle(int handle_id)
568 struct memory_handle *h = find_handle(handle_id);
569 if (!h)
570 return false;
572 if (h->fd >= 0) {
573 close(h->fd);
574 h->fd = -1;
577 rm_handle(h);
578 return true;
581 /* Free buffer space by moving the handle struct to just before the useful
582 part of its data buffer, or by moving all of the data. */
583 static void shrink_handle(int handle_id)
585 struct memory_handle *h = find_handle(handle_id);
586 if (!h)
587 return;
589 size_t delta;
590 /* The value of delta might change for alignment reasons */
592 if (h->next && (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
593 h->type == TYPE_IMAGE) && h->filerem == 0 )
595 /* metadata handle: we can move all of it */
596 delta = RINGBUF_SUB( (unsigned)((void *)h->next - (void *)buffer),
597 h->data) - h->available;
598 h = move_handle(h, &delta, h->available);
599 if (!h) return;
601 size_t olddata = h->data;
602 h->data = RINGBUF_ADD(h->data, delta);
603 h->ridx = RINGBUF_ADD(h->ridx, delta);
604 h->widx = RINGBUF_ADD(h->widx, delta);
606 /* when moving a struct mp3entry we need to readjust its pointers. */
607 if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
608 adjust_mp3entry((struct mp3entry *)&buffer[h->data],
609 (void *)&buffer[h->data],
610 (void *)&buffer[olddata]);
613 else
615 /* only move the handle struct */
616 delta = RINGBUF_SUB(h->ridx, h->data);
617 h = move_handle(h, &delta, 0);
618 if (!h) return;
619 h->data = RINGBUF_ADD(h->data, delta);
620 h->available -= delta;
621 h->offset += delta;
625 /* Fill the buffer by buffering as much data as possible for handles that still
626 have data left to buffer */
627 static void fill_buffer(void)
629 logf("fill_buffer()");
630 struct memory_handle *m = first_handle;
631 while (queue_empty(&buffering_queue) && m) {
632 if (m->filerem > 0) {
633 buffer_handle(m->id);
635 m = m->next;
638 #ifndef SIMULATOR
639 if (queue_empty(&buffering_queue)) {
640 /* only spin the disk down if the filling wasn't interrupted by an
641 event arriving in the queue. */
642 ata_sleep();
644 #endif
647 /* Check whether it's safe to add a new handle and reserve space to let the
648 current one finish buffering its data. Used by bufopen and bufalloc as
649 a preliminary check before even trying to physically add the handle.
650 Returns true if it's ok to add a new handle, false if not.
652 static bool can_add_handle(void)
654 if (cur_handle && cur_handle->filerem > 0) {
655 /* the current handle hasn't finished buffering. We can only add
656 a new one if there is already enough free space to finish
657 the buffering. */
658 if (cur_handle->filerem < (buffer_len - BUF_USED)) {
659 /* Before adding the new handle we reserve some space for the
660 current one to finish buffering its data. */
661 buf_widx = RINGBUF_ADD(buf_widx, cur_handle->filerem);
662 } else {
663 return false;
667 return true;
670 void update_data_counters(void)
672 struct memory_handle *m = find_handle(base_handle_id);
673 if (!m)
674 base_handle_id = 0;
676 memset(&data_counters, 0, sizeof(data_counters));
678 m = first_handle;
679 while (m) {
680 data_counters.buffered += m->available;
681 data_counters.wasted += RINGBUF_SUB(m->ridx, m->data);
682 data_counters.remaining += m->filerem;
684 if (m->id >= base_handle_id)
685 data_counters.useful += RINGBUF_SUB(m->widx, m->ridx);
687 m = m->next;
693 MAIN BUFFERING API CALLS
694 ========================
696 bufopen : Request the opening of a new handle for a file
697 bufalloc : Open a new handle for data other than a file.
698 bufclose : Close an open handle
699 bufseek : Set the read pointer in a handle
700 bufadvance : Move the read pointer in a handle
701 bufread : Copy data from a handle into a given buffer
702 bufgetdata : Give a pointer to the handle's data
704 These functions are exported, to allow interaction with the buffer.
705 They take care of the content of the structs, and rely on the linked list
706 management functions for all the actual handle management work.
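/* Illustrative caller-side sketch (not part of this file) of the typical
   open -> read -> close flow through the API described above. It assumes
   buffering_init() has been called and the buffering thread is running;
   the file name and the retry policy are made up for the example. */
static bool example_stream_file(void)
{
    char chunk[512];
    ssize_t got;
    int id = bufopen("/music/track.mp3", 0, TYPE_AUDIO);
    if (id < 0)
        return false;

    buf_request_buffer_handle(id);      /* ask the thread to start filling */

    while ((got = bufread(id, sizeof(chunk), chunk)) != 0)
    {
        if (got == -2) {                /* data not buffered yet: retry */
            sleep(1);
            continue;
        }
        if (got < 0)                    /* handle lost or other error */
            break;
        /* ... consume 'chunk' ... */
        bufadvance(id, got);            /* mark the copied bytes as read */
    }
    return bufclose(id);
}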
710 /* Reserve space in the buffer for a file.
711 file: name of the file to open
712 offset: offset at which to start buffering the file, useful when the first
713 offset bytes of the file aren't needed.
714 return value: <0 if the file cannot be opened or if another file is already
715 queued to be opened, otherwise the handle ID for the file in the buffer
717 int bufopen(const char *file, size_t offset, enum data_type type)
719 if (!can_add_handle())
720 return -2;
722 int fd = open(file, O_RDONLY);
723 if (fd < 0)
724 return -1;
726 size_t size = filesize(fd) - offset;
728 if (type != TYPE_AUDIO &&
729 size + sizeof(struct memory_handle) > buffer_len - buf_widx)
731 /* for types other than audio the data can't wrap, so if it wouldn't fit linearly, start it at the beginning of the buffer */
732 buf_widx = 0;
735 struct memory_handle *h = add_handle(&size);
736 if (!h)
738 DEBUGF("bufopen: failed to add handle\n");
739 close(fd);
740 return -2;
743 strncpy(h->path, file, MAX_PATH);
744 h->fd = -1;
745 h->filesize = filesize(fd);
746 h->filerem = h->filesize - offset;
747 h->offset = offset;
748 h->ridx = buf_widx;
749 h->widx = buf_widx;
750 h->data = buf_widx;
751 h->available = 0;
752 h->type = type;
754 close(fd);
756 if (type == TYPE_CODEC || type == TYPE_CUESHEET || type == TYPE_IMAGE) {
757 /* Immediately buffer those */
758 LOGFQUEUE("? >| buffering Q_BUFFER_HANDLE");
759 queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id);
762 logf("bufopen: new hdl %d", h->id);
763 return h->id;
766 /* Open a new handle from data that needs to be copied from memory.
767 src is the source buffer from which to copy data. It can be NULL to simply
768 reserve buffer space.
769 size is the requested size. The call will only be successful if the
770 requested amount of data can entirely fit in the buffer without wrapping.
771 Return value is the handle id for success or <0 for failure.
773 int bufalloc(const void *src, size_t size, enum data_type type)
775 if (!can_add_handle())
776 return -2;
778 if (buf_widx + size + sizeof(struct memory_handle) > buffer_len) {
779 /* The data would need to wrap. */
780 DEBUGF("bufalloc: data wrap\n");
781 return -2;
784 size_t allocsize = size;
785 struct memory_handle *h = add_handle(&allocsize);
787 if (!h || allocsize != size)
788 return -2;
790 if (src) {
791 if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
792 /* specially take care of struct mp3entry */
793 copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
794 (struct mp3entry *)src);
795 } else {
796 memcpy(&buffer[buf_widx], src, size);
800 h->fd = -1;
801 *h->path = 0;
802 h->filesize = size;
803 h->filerem = 0;
804 h->offset = 0;
805 h->ridx = buf_widx;
806 h->widx = buf_widx + size; /* this is safe because the data doesn't wrap */
807 h->data = buf_widx;
808 h->available = size;
809 h->type = type;
811 buf_widx += size; /* safe too */
813 logf("bufalloc: new hdl %d", h->id);
814 return h->id;
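/* Illustrative sketch (not part of this file): using bufalloc to place a
   copy of an mp3entry in the buffer. 'id3' is a hypothetical caller-owned
   struct; the TYPE_ID3 special case above uses copy_mp3entry for it. */
static int example_buffer_id3(const struct mp3entry *id3)
{
    return bufalloc(id3, sizeof(struct mp3entry), TYPE_ID3);
}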
817 /* Close the handle. Return true for success and false for failure */
818 bool bufclose(int handle_id)
820 logf("bufclose(%d)", handle_id);
822 LOGFQUEUE("buffering >| Q_CLOSE_HANDLE");
823 return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
826 /* Set the reading index in a handle (relative to the start of the file).
827 Access outside the currently buffered data will trigger a rebuffer.
828 Return 0 for success and < 0 for failure:
829 -1 if the handle wasn't found
830 -3 if the new requested position was beyond the end of the file
832 int bufseek(int handle_id, size_t newpos)
834 struct memory_handle *h = find_handle(handle_id);
835 if (!h)
836 return -1;
838 if (newpos > h->filesize) {
839 /* access beyond the end of the file */
840 return -3;
842 else if (newpos < h->offset || h->offset + h->available < newpos) {
843 /* access before or after buffered data. A rebuffer is needed. */
844 rebuffer_handle(handle_id, newpos);
846 else {
847 h->ridx = RINGBUF_ADD(h->data, newpos - h->offset);
849 return 0;
852 /* Advance the reading index in a handle (relative to its current position).
853 Return 0 for success and < 0 for failure */
854 int bufadvance(int handle_id, off_t offset)
856 struct memory_handle *h = find_handle(handle_id);
857 if (!h)
858 return -1;
860 size_t newpos = h->offset + RINGBUF_SUB(h->ridx, h->data) + offset;
861 return bufseek(handle_id, newpos);
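/* Worked example for the position computation above (illustrative numbers):
   with h->offset == 1000 and RINGBUF_SUB(h->ridx, h->data) == 200, a call to
   bufadvance(id, 50) requests the absolute file position 1000 + 200 + 50
   == 1250 and hands it to bufseek(). */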
864 /* Copy data from the given handle to the dest buffer.
865 Return the number of bytes copied or < 0 for failure. */
866 ssize_t bufread(int handle_id, size_t size, void *dest)
868 struct memory_handle *h = find_handle(handle_id);
869 if (!h)
870 return -1;
872 size_t ret;
873 size_t copy_n = RINGBUF_SUB(h->widx, h->ridx);
875 if (size == 0 && h->filerem > 0 && copy_n == 0)
876 /* Data isn't ready */
877 return -2;
879 if (copy_n < size && h->filerem > 0)
880 /* Data isn't ready */
881 return -2;
883 if (copy_n == 0 && h->filerem == 0)
884 /* File is finished reading */
885 return 0;
887 ret = MIN(size, copy_n);
889 if (h->ridx + ret > buffer_len)
891 /* the data wraps around the end of the buffer */
892 size_t read = buffer_len - h->ridx;
893 memcpy(dest, &buffer[h->ridx], read);
894 memcpy(dest+read, buffer, ret - read);
896 else
898 memcpy(dest, &buffer[h->ridx], ret);
901 return ret;
904 /* Update the "data" pointer to make the handle's data available to the caller.
905 Return the length of the available linear data or < 0 for failure.
906 size is the amount of linear data requested; it can be 0 to get as
907 much as possible.
908 The guard buffer may be used to provide the requested size */
909 ssize_t bufgetdata(int handle_id, size_t size, void **data)
911 struct memory_handle *h = find_handle(handle_id);
912 if (!h)
913 return -1;
915 ssize_t ret;
916 size_t copy_n = RINGBUF_SUB(h->widx, h->ridx);
918 if (size == 0 && h->filerem > 0 && copy_n == 0)
919 /* Data isn't ready */
920 return -2;
922 if (copy_n < size && h->filerem > 0)
923 /* Data isn't ready */
924 return -2;
926 if (copy_n == 0 && h->filerem == 0)
927 /* File is finished reading */
928 return 0;
930 if (h->ridx + size > buffer_len && copy_n >= size)
932 /* the data wraps around the end of the buffer :
933 use the guard buffer to provide the requested amount of data. */
934 size_t copy_n = MIN(h->ridx + size - buffer_len, GUARD_BUFSIZE);
935 memcpy(guard_buffer, (unsigned char *)buffer, copy_n);
936 ret = buffer_len - h->ridx + copy_n;
938 else
940 ret = MIN(copy_n, buffer_len - h->ridx);
943 *data = &buffer[h->ridx];
944 return ret;
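/* Illustrative sketch (not part of this file): the peek-then-consume pattern
   a caller of bufgetdata might use; the helper name is made up. */
static ssize_t example_peek_and_consume(int id, size_t want)
{
    void *p;
    ssize_t len = bufgetdata(id, want, &p);
    if (len <= 0)
        return len;             /* 0: file finished, <0: error or not ready */
    /* ... parse up to 'len' bytes starting at 'p' ... */
    bufadvance(id, len);        /* consume what was just used */
    return len;
}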
948 SECONDARY EXPORTED FUNCTIONS
949 ============================
951 buf_get_offset
952 buf_handle_offset
953 buf_request_buffer_handle
954 buf_set_base_handle
955 buf_used
956 register_buffer_low_callback
957 unregister_buffer_low_callback
959 These functions are exported, to allow interaction with the buffer.
960 They take care of the content of the structs, and rely on the linked list
961 management functions for all the actual handle management work.
964 /* Get a handle offset from a pointer */
965 ssize_t buf_get_offset(int handle_id, void *ptr)
967 struct memory_handle *h = find_handle(handle_id);
968 if (!h)
969 return -1;
971 return (size_t)ptr - (size_t)&buffer[h->ridx];
974 ssize_t buf_handle_offset(int handle_id)
976 struct memory_handle *h = find_handle(handle_id);
977 if (!h)
978 return -1;
979 return h->offset;
982 void buf_request_buffer_handle(int handle_id)
984 LOGFQUEUE("buffering >| buffering Q_BUFFER_HANDLE");
985 queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
988 void buf_set_base_handle(int handle_id)
990     LOGFQUEUE("buffering >| buffering Q_BASE_HANDLE");
991 queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
994 /* Return the amount of buffer space used */
995 size_t buf_used(void)
997 return BUF_USED;
1000 void buf_set_conf(int setting, size_t value)
1002 int msg;
1003 switch (setting)
1005 case BUFFERING_SET_WATERMARK:
1006 msg = Q_SET_WATERMARK;
1007 break;
1009 case BUFFERING_SET_CHUNKSIZE:
1010 msg = Q_SET_CHUNKSIZE;
1011 break;
1013 case BUFFERING_SET_PRESEEK:
1014 msg = Q_SET_PRESEEK;
1015 break;
1017 default:
1018 return;
1020 queue_post(&buffering_queue, msg, value);
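/* Illustrative sketch (not part of this file): a client lowering the refill
   watermark; the value is arbitrary and only meant to show the call shape. */
static void example_tune_watermark(void)
{
    buf_set_conf(BUFFERING_SET_WATERMARK, 256*1024);
}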
1023 bool register_buffer_low_callback(buffer_low_callback func)
1025 int i;
1026 if (buffer_callback_count >= MAX_BUF_CALLBACKS)
1027 return false;
1028 for (i = 0; i < MAX_BUF_CALLBACKS; i++)
1030 if (buffer_low_callback_funcs[i] == NULL)
1032 buffer_low_callback_funcs[i] = func;
1033 buffer_callback_count++;
1034 return true;
1036 else if (buffer_low_callback_funcs[i] == func)
1037 return true;
1039 return false;
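/* Illustrative sketch (not part of this file): registering a buffer-low
   callback. The function names are made up. Note that callbacks are cleared
   once they have fired (see call_buffer_low_callbacks below), so a client
   that wants to keep being notified must re-register. */
static void example_on_buffer_low(void)
{
    /* queue more data here, e.g. with bufopen() or bufalloc() */
}

static void example_register_callback(void)
{
    register_buffer_low_callback(example_on_buffer_low);
}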
1042 void unregister_buffer_low_callback(buffer_low_callback func)
1044 int i;
1045 for (i = 0; i < MAX_BUF_CALLBACKS; i++)
1047 if (buffer_low_callback_funcs[i] == func)
1049 buffer_low_callback_funcs[i] = NULL;
1050 buffer_callback_count--;
1053 return;
1056 static void call_buffer_low_callbacks(void)
1058 int i;
1059 for (i = 0; i < MAX_BUF_CALLBACKS; i++)
1061 if (buffer_low_callback_funcs[i])
1063 buffer_low_callback_funcs[i]();
1064 buffer_low_callback_funcs[i] = NULL;
1065 buffer_callback_count--;
1070 void buffering_thread(void)
1072 struct queue_event ev;
1074 while (true)
1076 queue_wait_w_tmo(&buffering_queue, &ev, HZ/2);
1078 switch (ev.id)
1080 case Q_BUFFER_HANDLE:
1081 LOGFQUEUE("buffering < Q_BUFFER_HANDLE");
1082 queue_reply(&buffering_queue, 1);
1083 buffer_handle((int)ev.data);
1084 break;
1086 case Q_RESET_HANDLE:
1087 LOGFQUEUE("buffering < Q_RESET_HANDLE");
1088 queue_reply(&buffering_queue, 1);
1089 reset_handle((int)ev.data);
1090 break;
1092 case Q_CLOSE_HANDLE:
1093 LOGFQUEUE("buffering < Q_CLOSE_HANDLE");
1094 queue_reply(&buffering_queue, close_handle((int)ev.data));
1095 break;
1097 case Q_BASE_HANDLE:
1098 LOGFQUEUE("buffering < Q_BASE_HANDLE");
1099 base_handle_id = (int)ev.data;
1100 break;
1102 case Q_SET_WATERMARK:
1103 LOGFQUEUE("buffering < Q_SET_WATERMARK");
1104 conf_watermark = (size_t)ev.data;
1105 break;
1107 case Q_SET_CHUNKSIZE:
1108 LOGFQUEUE("buffering < Q_SET_CHUNKSIZE");
1109 conf_filechunk = (size_t)ev.data;
1110 break;
1112 case Q_SET_PRESEEK:
1113 LOGFQUEUE("buffering < Q_SET_PRESEEK");
1114 conf_preseek = (size_t)ev.data;
1115 break;
1117 #ifndef SIMULATOR
1118 case SYS_USB_CONNECTED:
1119 LOGFQUEUE("buffering < SYS_USB_CONNECTED");
1120 usb_acknowledge(SYS_USB_CONNECTED_ACK);
1121 usb_wait_for_disconnect(&buffering_queue);
1122 break;
1123 #endif
1125 case SYS_TIMEOUT:
1126 LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
1127 break;
1130 update_data_counters();
1132 /* If the buffer is low, call the callbacks to get new data */
1133 if (num_handles > 0 && data_counters.useful < conf_watermark)
1135 call_buffer_low_callbacks();
1138 #if MEM > 8
1139 /* If the disk is spinning, take advantage by filling the buffer */
1140 if (ata_disk_is_active() && queue_empty(&buffering_queue) &&
1141 data_counters.remaining > 0 &&
1142 data_counters.buffered < high_watermark)
1144 fill_buffer();
1147 if (ata_disk_is_active() && queue_empty(&buffering_queue) &&
1148 num_handles > 0 && data_counters.useful < high_watermark)
1150 call_buffer_low_callbacks();
1152 #endif
1154 if (ev.id == SYS_TIMEOUT && queue_empty(&buffering_queue))
1156 if (data_counters.remaining > 0 &&
1157 data_counters.wasted > data_counters.buffered/2)
1159 /* free buffer from outdated audio data */
1160 struct memory_handle *m = first_handle;
1161 while (m) {
1162 if (m->type == TYPE_AUDIO)
1163 shrink_handle(m->id);
1164 m = m->next;
1167 /* free buffer by moving metadata */
1168 m = first_handle;
1169 while (m) {
1170 if (m->type != TYPE_AUDIO)
1171 shrink_handle(m->id);
1172 m = m->next;
1175 update_data_counters();
1178 if (data_counters.remaining > 0 &&
1179 data_counters.buffered < conf_watermark)
1181 fill_buffer();
1187 /* Initialise the buffering subsystem */
1188 bool buffering_init(char *buf, size_t buflen)
1190 if (!buf || !buflen)
1191 return false;
1193 buffer = buf;
1194 buffer_len = buflen;
1195 guard_buffer = buf + buflen;
1197 buf_widx = 0;
1198 buf_ridx = 0;
1200 first_handle = NULL;
1201 num_handles = 0;
1203 buffer_callback_count = 0;
1204 memset(buffer_low_callback_funcs, 0, sizeof(buffer_low_callback_funcs));
1206 mutex_init(&llist_mutex);
1208 conf_filechunk = BUFFERING_DEFAULT_FILECHUNK;
1209 conf_watermark = BUFFERING_DEFAULT_WATERMARK;
1211 /* Set the high watermark as 75% full...or 25% empty :) */
1212 #if MEM > 8
1213 high_watermark = 3*buflen / 4;
1214 #endif
1216 if (buffering_thread_p == NULL)
1218 buffering_thread_p = create_thread( buffering_thread, buffering_stack,
1219 sizeof(buffering_stack), 0,
1220 buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
1221 IF_COP(, CPU));
1223 queue_init(&buffering_queue, true);
1224 queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list);
1227 return true;
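/* Illustrative sketch (not part of this file): handing a memory area to the
   buffering subsystem at startup. 'audiobuf' and 'audiobuf_len' are
   hypothetical; in practice the caller that owns the audio buffer provides
   them. */
static void example_init(char *audiobuf, size_t audiobuf_len)
{
    if (!buffering_init(audiobuf, audiobuf_len))
        panicf("buffering_init failed");
}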
1230 void buffering_get_debugdata(struct buffering_debug *dbgdata)
1232 update_data_counters();
1233 dbgdata->num_handles = num_handles;
1234 dbgdata->data_rem = data_counters.remaining;
1235 dbgdata->wasted_space = data_counters.wasted;
1236 dbgdata->buffered_data = data_counters.buffered;
1237 dbgdata->useful_data = data_counters.useful;