apps/buffering.c
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                      \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 Nicolas Pennequin
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include "buffering.h"

#include "storage.h"
#include "system.h"
#include "thread.h"
#include "file.h"
#include "panic.h"
#include "memory.h"
#include "lcd.h"
#include "font.h"
#include "button.h"
#include "kernel.h"
#include "tree.h"
#include "debug.h"
#include "sprintf.h"
#include "settings.h"
#include "codecs.h"
#include "audio.h"
#include "mp3_playback.h"
#include "usb.h"
#include "status.h"
#include "screens.h"
#include "playlist.h"
#include "pcmbuf.h"
#include "buffer.h"
#include "bmp.h"
#include "appevents.h"
#include "metadata.h"
#ifdef HAVE_ALBUMART
#include "albumart.h"
#endif

#if MEM > 1
#define GUARD_BUFSIZE   (32*1024)
#else
#define GUARD_BUFSIZE   (8*1024)
#endif
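
/* Note: the guard buffer is a GUARD_BUFSIZE area placed immediately after the
 * end of the main buffer (see buffering_reset). bufgetdata() copies data that
 * wraps around the end of the ring into it, so callers always get a linear
 * view of up to GUARD_BUFSIZE bytes past the buffer end. */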

/* Define LOGF_ENABLE to enable logf output in this file */
/*#define LOGF_ENABLE*/
#include "logf.h"

/* macros to enable logf for queues
   logging on SYS_TIMEOUT can be disabled */
#ifdef SIMULATOR
/* Define this for logf output of all queuing except SYS_TIMEOUT */
#define BUFFERING_LOGQUEUES
/* Define this to logf SYS_TIMEOUT messages */
/* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
#endif

#ifdef BUFFERING_LOGQUEUES
#define LOGFQUEUE logf
#else
#define LOGFQUEUE(...)
#endif

#ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
#define LOGFQUEUE_SYS_TIMEOUT logf
#else
#define LOGFQUEUE_SYS_TIMEOUT(...)
#endif

/* default point to start buffer refill */
#define BUFFERING_DEFAULT_WATERMARK      (1024*512)
/* amount of data to read in one read() call */
#define BUFFERING_DEFAULT_FILECHUNK      (1024*32)
/* point at which the file buffer will fight for CPU time */
#define BUFFERING_CRITICAL_LEVEL         (1024*128)

#define BUF_HANDLE_MASK                  0x7FFFFFFF

/* Ring buffer helper macros */
/* Buffer pointer (p) plus value (v), wrapped if necessary */
#define RINGBUF_ADD(p,v) (((p)+(v))<buffer_len ? (p)+(v) : (p)+(v)-buffer_len)
/* Buffer pointer (p) minus value (v), wrapped if necessary */
#define RINGBUF_SUB(p,v) ((p>=v) ? (p)-(v) : (p)+buffer_len-(v))
/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
#define RINGBUF_ADD_CROSS(p1,v,p2) \
((p1<p2) ? (int)((p1)+(v))-(int)(p2) : (int)((p1)+(v)-(p2))-(int)buffer_len)
/* Bytes available in the buffer */
#define BUF_USED RINGBUF_SUB(buf_widx, buf_ridx)
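
/* Worked example of the ring macros, assuming buffer_len == 8 for brevity:
 *   RINGBUF_ADD(6, 4)          == 2   (6 + 4 wraps past the end)
 *   RINGBUF_SUB(2, 5)          == 5   (2 - 5 wraps back around)
 *   RINGBUF_ADD_CROSS(6, 4, 3) == -1  (6 + 4 lands one byte short of 3)
 * A negative ADD_CROSS result means the advance stays short of p2; zero or a
 * positive result means it reaches or crosses p2. */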

/* assert(sizeof(struct memory_handle)%4==0) */
struct memory_handle {
    int id;                    /* A unique ID for the handle */
    enum data_type type;       /* Type of data buffered with this handle */
    char path[MAX_PATH];       /* Path if data originated in a file */
    int fd;                    /* File descriptor to path (-1 if closed) */
    size_t data;               /* Start index of the handle's data buffer */
    volatile size_t ridx;      /* Read pointer, relative to the main buffer */
    size_t widx;               /* Write pointer */
    size_t filesize;           /* File total length */
    size_t filerem;            /* Remaining bytes of file NOT in buffer */
    volatile size_t available; /* Available bytes to read from buffer */
    size_t offset;             /* Offset at which we started reading the file */
    struct memory_handle *next;
};
/* invariant: filesize == offset + available + filerem */
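/* Layout sketch (see add_handle/bufopen): each struct memory_handle is written
 * into the ring at buf_widx and is immediately followed by that handle's data,
 * so a freshly added handle has data == ridx == widx just past the struct.
 * Illustrative numbers for the invariant: a 1 MiB file buffered from offset 0
 * with 300 KiB read so far has offset 0, available 300 KiB, filerem 724 KiB. */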

static char *buffer;
static char *guard_buffer;

static size_t buffer_len;

static volatile size_t buf_widx; /* current writing position */
static volatile size_t buf_ridx; /* current reading position */
/* buf_*idx are values relative to the buffer, not real pointers. */

/* Configuration */
static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
#if MEM > 8
static size_t high_watermark = 0; /* High watermark for rebuffer */
#endif

/* current memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *cur_handle;
/* first memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *first_handle;

static int num_handles;  /* number of handles in the list */

static int base_handle_id;

static struct mutex llist_mutex;

/* Handle cache (makes find_handle faster).
   This is global so that move_handle and rm_handle can invalidate it. */
static struct memory_handle *cached_handle = NULL;

static struct {
    size_t remaining;  /* Amount of data needing to be buffered */
    size_t wasted;     /* Amount of space available for freeing */
    size_t buffered;   /* Amount of data currently in the buffer */
    size_t useful;     /* Amount of data still useful to the user */
} data_counters;

/* Messages available to communicate with the buffering thread */
enum {
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                            used in a low buffer situation. */
    Q_RESET_HANDLE,      /* (internal) Request resetting of a handle to its
                            offset (the offset has to be set beforehand) */
    Q_CLOSE_HANDLE,      /* Request closing a handle */
    Q_BASE_HANDLE,       /* Set the reference handle for buf_useful_data */

    /* Configuration: */
    Q_SET_WATERMARK,
    Q_START_FILL,        /* Request that the buffering thread initiate a buffer
                            fill at its earliest convenience */
    Q_HANDLE_ADDED,      /* Inform the buffering thread that a handle was added,
                            (which means the disk is spinning) */
};

/* Buffering thread */
static void buffering_thread(void);
static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
static const char buffering_thread_name[] = "buffering";
static unsigned int buffering_thread_id = 0;
static struct event_queue buffering_queue;
static struct queue_sender_list buffering_queue_sender_list;

/*
LINKED LIST MANAGEMENT
======================

add_handle  : Add a handle to the list
rm_handle   : Remove a handle from the list
find_handle : Get a handle pointer from an ID
move_handle : Move a handle in the buffer (with or without its data)

These functions only handle the linked list structure. They don't touch the
contents of the struct memory_handle headers. They also change the buf_*idx
pointers when necessary and manage the handle IDs.

The first and current (== last) handle are kept track of.
A new handle is added at buf_widx and becomes the current one.
buf_widx always points to the current writing position for the current handle
buf_ridx always points to the location of the first handle.
buf_ridx == buf_widx means the buffer is empty.
*/

/* Add a new handle to the linked list and return it. It will have become the
   new current handle.
   data_size must contain the size of what will be in the handle.
   can_wrap tells us whether this type of data may wrap on buffer
   alloc_all tells us if we must immediately be able to allocate data_size
   returns a valid memory handle if all conditions for allocation are met.
           NULL if the memory_handle itself cannot be allocated or if the
           data_size cannot be allocated and alloc_all is set. This function's
           only potential side effect is to allocate space for the cur_handle
           if it returns NULL.
   */
static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
                                        bool alloc_all)
{
    /* gives each handle a unique id */
    static int cur_handle_id = 0;
    size_t shift;
    size_t new_widx;
    size_t len;
    int overlap;

    if (num_handles >= BUF_MAX_HANDLES)
        return NULL;

    mutex_lock(&llist_mutex);

    if (cur_handle && cur_handle->filerem > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        size_t req = cur_handle->filerem + sizeof(struct memory_handle);
        if (RINGBUF_ADD_CROSS(cur_handle->widx, req, buf_ridx) >= 0) {
            /* Not enough space */
            mutex_unlock(&llist_mutex);
            return NULL;
        } else {
            /* Allocate the remainder of the space for the current handle */
            buf_widx = RINGBUF_ADD(cur_handle->widx, cur_handle->filerem);
        }
    }

    /* align to 4 bytes up */
    new_widx = RINGBUF_ADD(buf_widx, 3) & ~3;

    len = data_size + sizeof(struct memory_handle);

    /* First, will the handle wrap? */
    overlap = RINGBUF_ADD_CROSS(new_widx, sizeof(struct memory_handle),
                                buffer_len - 1);
    /* If the handle would wrap, move to the beginning of the buffer,
     * otherwise check if the data can/would wrap and move it to the
     * beginning if needed */
    if (overlap > 0) {
        new_widx = 0;
    } else if (!can_wrap) {
        overlap = RINGBUF_ADD_CROSS(new_widx, len, buffer_len - 1);
        if (overlap > 0)
            new_widx += data_size - overlap;
    }

    /* How far we shifted buf_widx to align things, must be < buffer_len */
    shift = RINGBUF_SUB(new_widx, buf_widx);

    /* How much space are we short in the actual ring buffer? */
    overlap = RINGBUF_ADD_CROSS(buf_widx, shift + len, buf_ridx);
    if (overlap >= 0 && (alloc_all || (unsigned)overlap > data_size)) {
        /* Not enough space for required allocations */
        mutex_unlock(&llist_mutex);
        return NULL;
    }

    /* There is enough space for the required data, advance the buf_widx and
     * initialize the struct */
    buf_widx = new_widx;

    struct memory_handle *new_handle =
        (struct memory_handle *)(&buffer[buf_widx]);

    /* only advance the buffer write index by the size of the struct */
    buf_widx = RINGBUF_ADD(buf_widx, sizeof(struct memory_handle));

    new_handle->id = cur_handle_id;
    /* Wrap signed int is safe and 0 doesn't happen */
    cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK;
    new_handle->next = NULL;
    num_handles++;

    if (!first_handle)
        /* the new handle is the first one */
        first_handle = new_handle;

    if (cur_handle)
        cur_handle->next = new_handle;

    cur_handle = new_handle;

    mutex_unlock(&llist_mutex);
    return new_handle;
}

/* Delete a given memory handle from the linked list
   and return true for success. Nothing is actually erased from memory. */
static bool rm_handle(const struct memory_handle *h)
{
    if (h == NULL)
        return true;

    mutex_lock(&llist_mutex);

    if (h == first_handle) {
        first_handle = h->next;
        if (h == cur_handle) {
            /* h was the first and last handle: the buffer is now empty */
            cur_handle = NULL;
            buf_ridx = buf_widx = 0;
        } else {
            /* update buf_ridx to point to the new first handle */
            buf_ridx = (void *)first_handle - (void *)buffer;
        }
    } else {
        struct memory_handle *m = first_handle;
        /* Find the previous handle */
        while (m && m->next != h) {
            m = m->next;
        }
        if (m && m->next == h) {
            m->next = h->next;
            if (h == cur_handle) {
                cur_handle = m;
                buf_widx = cur_handle->widx;
            }
        } else {
            mutex_unlock(&llist_mutex);
            return false;
        }
    }

    /* Invalidate the cache to prevent it from keeping the old location of h */
    if (h == cached_handle)
        cached_handle = NULL;

    num_handles--;

    mutex_unlock(&llist_mutex);
    return true;
}

/* Return a pointer to the memory handle of given ID.
   NULL if the handle wasn't found */
static struct memory_handle *find_handle(int handle_id)
{
    if (handle_id < 0)
        return NULL;

    mutex_lock(&llist_mutex);

    /* simple caching because most of the time the requested handle
       will either be the same as the last, or the one after the last */
    if (cached_handle)
    {
        if (cached_handle->id == handle_id) {
            mutex_unlock(&llist_mutex);
            return cached_handle;
        } else if (cached_handle->next &&
                   (cached_handle->next->id == handle_id)) {
            cached_handle = cached_handle->next;
            mutex_unlock(&llist_mutex);
            return cached_handle;
        }
    }

    struct memory_handle *m = first_handle;
    while (m && m->id != handle_id) {
        m = m->next;
    }
    /* This condition can only be reached with !m or m->id == handle_id */
    if (m)
        cached_handle = m;

    mutex_unlock(&llist_mutex);
    return m;
}

/* Move a memory handle and data_size of its data delta bytes along the buffer.
   delta is the maximum number of bytes available to move the handle. If the
   move is performed it is set to the actual distance moved.
   data_size is the amount of data to move along with the struct.
   returns true if the move is successful
           false if the handle is NULL, the move would be less than the size of
           a memory_handle after correcting for wraps, or if the handle is not
           found in the linked list for adjustment. This function has no side
           effects if false is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size, bool can_wrap)
{
    struct memory_handle *dest;
    const struct memory_handle *src;
    size_t newpos;
    size_t size_to_move;
    size_t final_delta = *delta;
    int overlap;

    if (h == NULL || (src = *h) == NULL)
        return false;

    size_to_move = sizeof(struct memory_handle) + data_size;

    /* Align to four bytes, down */
    final_delta &= ~3;
    if (final_delta < sizeof(struct memory_handle)) {
        /* It's not legal to move less than the size of the struct */
        return false;
    }

    mutex_lock(&llist_mutex);

    newpos = RINGBUF_ADD((void *)src - (void *)buffer, final_delta);
    overlap = RINGBUF_ADD_CROSS(newpos, size_to_move, buffer_len - 1);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        size_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if ((unsigned)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta, I think */
            correction = overlap - data_size;
        } else if (!can_wrap) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out. This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
            /* Align correction to four bytes, up */
            correction = (correction+3) & ~3;
        }
        if (correction) {
            if (final_delta < correction + sizeof(struct memory_handle)) {
                /* Delta cannot end up less than the size of the struct */
                mutex_unlock(&llist_mutex);
                return false;
            }

            newpos -= correction;
            overlap -= correction; /* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    dest = (struct memory_handle *)(&buffer[newpos]);

    if (src == first_handle) {
        first_handle = dest;
        buf_ridx = newpos;
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != src) {
            m = m->next;
        }
        if (m && m->next == src) {
            m->next = dest;
        } else {
            mutex_unlock(&llist_mutex);
            return false;
        }
    }

    /* Update the cache to prevent it from keeping the old location of h */
    if (src == cached_handle)
        cached_handle = dest;

    /* the cur_handle pointer might need updating */
    if (src == cur_handle)
        cur_handle = dest;

    if (overlap > 0) {
        size_t first_part = size_to_move - overlap;
        memmove(dest, src, first_part);
        memmove(buffer, (const char *)src + first_part, overlap);
    } else {
        memmove(dest, src, size_to_move);
    }

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    mutex_unlock(&llist_mutex);
    return dest;
}

/*
BUFFER SPACE MANAGEMENT
=======================

update_data_counters: Updates the values in data_counters
buffer_is_low   : Returns true if the amount of useful data in the buffer is low
buffer_handle   : Buffer data for a handle
reset_handle    : Reset write position and data buffer of a handle to its offset
rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
shrink_handle   : Free buffer space by moving a handle
fill_buffer     : Call buffer_handle for all handles that have data to buffer

These functions are used by the buffering thread to manage buffer space.
*/

static void update_data_counters(void)
{
    struct memory_handle *m = find_handle(base_handle_id);
    bool is_useful = m==NULL;

    size_t buffered = 0;
    size_t wasted = 0;
    size_t remaining = 0;
    size_t useful = 0;

    mutex_lock(&llist_mutex);

    m = first_handle;
    while (m) {
        buffered += m->available;
        wasted += RINGBUF_SUB(m->ridx, m->data);
        remaining += m->filerem;

        if (m->id == base_handle_id)
            is_useful = true;

        if (is_useful)
            useful += RINGBUF_SUB(m->widx, m->ridx);

        m = m->next;
    }

    mutex_unlock(&llist_mutex);

    data_counters.buffered = buffered;
    data_counters.wasted = wasted;
    data_counters.remaining = remaining;
    data_counters.useful = useful;
}

static inline bool buffer_is_low(void)
{
    update_data_counters();
    return data_counters.useful < BUFFERING_CRITICAL_LEVEL;
}

/* Buffer data for the given handle.
   Return whether or not the buffering should continue explicitly. */
static bool buffer_handle(int handle_id)
{
    logf("buffer_handle(%d)", handle_id);
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return true;

    if (h->filerem == 0) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0)  /* file closed, reopen */
    {
        if (*h->path)
            h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0)
        {
            /* could not open the file, truncate it where it is */
            h->filesize -= h->filerem;
            h->filerem = 0;
            return true;
        }

        if (h->offset)
            lseek(h->fd, h->offset, SEEK_SET);
    }

    trigger_cpu_boost();

    if (h->type == TYPE_ID3)
    {
        if (!get_metadata((struct mp3entry *)(buffer + h->data), h->fd, h->path))
        {
            /* metadata parsing failed: clear the buffer. */
            memset(buffer + h->data, 0, sizeof(struct mp3entry));
        }
        close(h->fd);
        h->fd = -1;
        h->filerem = 0;
        h->available = sizeof(struct mp3entry);
        h->widx += sizeof(struct mp3entry);
        send_event(BUFFER_EVENT_FINISHED, &h->id);
        return true;
    }

    while (h->filerem > 0)
    {
        /* max amount to copy */
        size_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
                             buffer_len - h->widx);

        /* stop copying if it would overwrite the reading position */
        if (RINGBUF_ADD_CROSS(h->widx, copy_n, buf_ridx) >= 0)
            return false;

        /* This would read into the next handle, this is broken */
        if (h->next && RINGBUF_ADD_CROSS(h->widx, copy_n,
                    (unsigned)((void *)h->next - (void *)buffer)) > 0) {
            /* Try to recover by truncating this file */
            copy_n = RINGBUF_ADD_CROSS(h->widx, copy_n,
                    (unsigned)((void *)h->next - (void *)buffer));
            h->filerem -= copy_n;
            h->filesize -= copy_n;
            logf("buf alloc short %ld", (long)copy_n);
            if (h->filerem)
                continue;
            else
                break;
        }

        /* rc is the actual amount read */
        int rc = read(h->fd, &buffer[h->widx], copy_n);

        if (rc < 0)
        {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
            h->filesize -= h->filerem;
            h->filerem = 0;
            break;
        }

        /* Advance buffer */
        h->widx = RINGBUF_ADD(h->widx, rc);
        if (h == cur_handle)
            buf_widx = h->widx;
        h->available += rc;
        h->filerem -= rc;

        /* If this is a large file, see if we need to break or give the codec
         * more time */
        if (h->type == TYPE_PACKET_AUDIO &&
            pcmbuf_is_lowdata() && !buffer_is_low())
        {
            sleep(1);
        }
        else
        {
            yield();
        }

        if (!queue_empty(&buffering_queue))
            break;
    }

    if (h->filerem == 0) {
        /* finished buffering the file */
        close(h->fd);
        h->fd = -1;
        send_event(BUFFER_EVENT_FINISHED, &h->id);
    }

    return true;
}

/* Reset writing position and data buffer of a handle to its current offset.
   Use this after having set the new offset to use. */
static void reset_handle(int handle_id)
{
    logf("reset_handle(%d)", handle_id);

    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return;

    h->ridx = h->widx = h->data;
    if (h == cur_handle)
        buf_widx = h->widx;
    h->available = 0;
    h->filerem = h->filesize - h->offset;

    if (h->fd >= 0) {
        lseek(h->fd, h->offset, SEEK_SET);
    }
}

/* Seek to a nonbuffered part of a handle by rebuffering the data. */
static void rebuffer_handle(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return;

    /* When seeking forward off of the buffer, if it is a short seek don't
       rebuffer the whole track, just read enough to satisfy */
    if (newpos > h->offset && newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK)
    {
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
        h->ridx = h->data + newpos;
        return;
    }

    h->offset = newpos;

    /* Reset the handle to its new offset */
    LOGFQUEUE("buffering >| Q_RESET_HANDLE %d", handle_id);
    queue_send(&buffering_queue, Q_RESET_HANDLE, handle_id);

    size_t next = (unsigned)((void *)h->next - (void *)buffer);
    if (RINGBUF_SUB(next, h->data) < h->filesize - newpos)
    {
        /* There isn't enough space to rebuffer all of the track from its new
           offset, so we ask the user to free some */
        DEBUGF("rebuffer_handle: space is needed\n");
        send_event(BUFFER_EVENT_REBUFFER, &handle_id);
    }

    /* Now we ask for a rebuffer */
    LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
    queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
}

static bool close_handle(int handle_id)
{
    struct memory_handle *h = find_handle(handle_id);

    /* If the handle is not found, it is closed */
    if (!h)
        return true;

    if (h->fd >= 0) {
        close(h->fd);
        h->fd = -1;
    }

    /* rm_handle returns true unless the handle somehow persists after exit */
    return rm_handle(h);
}

/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data. */
static void shrink_handle(struct memory_handle *h)
{
    size_t delta;

    if (!h)
        return;

    if (h->next && h->filerem == 0 &&
            (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
             h->type == TYPE_BITMAP || h->type == TYPE_CODEC ||
             h->type == TYPE_ATOMIC_AUDIO))
    {
        /* metadata handle: we can move all of it */
        size_t handle_distance =
            RINGBUF_SUB((unsigned)((void *)h->next - (void*)buffer), h->data);
        delta = handle_distance - h->available;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, h->available, h->type==TYPE_CODEC))
            return;

        size_t olddata = h->data;
        h->data = RINGBUF_ADD(h->data, delta);
        h->ridx = RINGBUF_ADD(h->ridx, delta);
        h->widx = RINGBUF_ADD(h->widx, delta);

        if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
            /* when moving an mp3entry we need to readjust its pointers. */
            adjust_mp3entry((struct mp3entry *)&buffer[h->data],
                            (void *)&buffer[h->data],
                            (const void *)&buffer[olddata]);
        } else if (h->type == TYPE_BITMAP) {
            /* adjust the bitmap's pointer */
            struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
            bmp->data = &buffer[h->data + sizeof(struct bitmap)];
        }
    }
    else
    {
        /* only move the handle struct */
        delta = RINGBUF_SUB(h->ridx, h->data);
        if (!move_handle(&h, &delta, 0, true))
            return;

        h->data = RINGBUF_ADD(h->data, delta);
        h->available -= delta;
        h->offset += delta;
    }
}

/* Fill the buffer by buffering as much data as possible for handles that still
   have data left to buffer
   Return whether or not to continue filling after this */
static bool fill_buffer(void)
{
    logf("fill_buffer()");
    struct memory_handle *m;
    shrink_handle(first_handle);
    m = first_handle;
    while (queue_empty(&buffering_queue) && m) {
        if (m->filerem > 0) {
            if (!buffer_handle(m->id)) {
                m = NULL;
                break;
            }
        }
        m = m->next;
    }

    if (m) {
        return true;
    }
    else
    {
        /* only spin the disk down if the filling wasn't interrupted by an
           event arriving in the queue. */
        storage_sleep();
        return false;
    }
}

#ifdef HAVE_ALBUMART
/* Given a file descriptor to a bitmap file, write the bitmap data to the
   buffer, with a struct bitmap and the actual data immediately following.
   Return value is the total size (struct + data). */
static int load_bitmap(int fd)
{
    int rc;
    struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx];
    /* FIXME: alignment may be needed for the data buffer. */
    bmp->data = &buffer[buf_widx + sizeof(struct bitmap)];

#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
    bmp->maskdata = NULL;
#endif

    int free = (int)MIN(buffer_len - BUF_USED, buffer_len - buf_widx)
               - sizeof(struct bitmap);

    get_albumart_size(bmp);

    rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                     FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
}
#endif

/*
MAIN BUFFERING API CALLS
========================

bufopen     : Request the opening of a new handle for a file
bufalloc    : Open a new handle for data other than a file.
bufclose    : Close an open handle
bufseek     : Set the read pointer in a handle
bufadvance  : Move the read pointer in a handle
bufread     : Copy data from a handle into a given buffer
bufgetdata  : Give a pointer to the handle's data

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/
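
/* Typical caller flow (illustrative sketch, not taken from a specific caller
 * in this tree; the path is hypothetical):
 *
 *     int id = bufopen("/music/track.mp3", 0, TYPE_PACKET_AUDIO);
 *     if (id >= 0) {
 *         void *p;
 *         ssize_t n = bufgetdata(id, 1024, &p); // blocks until data is ready
 *         if (n > 0)
 *             bufadvance(id, n);                // consume what was used
 *         bufclose(id);
 *     }
 */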

/* Reserve space in the buffer for a file.
   filename: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           (offset-1) bytes of the file aren't needed.
   return value: <0 if the file cannot be opened or if a file is already
   queued to be opened, otherwise the handle for the file in the buffer
*/
int bufopen(const char *file, size_t offset, enum data_type type)
{
    if (type == TYPE_ID3)
    {
        /* ID3 case: allocate space, init the handle and return. */

        struct memory_handle *h = add_handle(sizeof(struct mp3entry), false, true);
        if (!h)
            return ERR_BUFFER_FULL;

        h->fd = -1;
        h->filesize = sizeof(struct mp3entry);
        h->filerem = sizeof(struct mp3entry);
        h->offset = 0;
        h->data = buf_widx;
        h->ridx = buf_widx;
        h->widx = buf_widx;
        h->available = 0;
        h->type = type;
        strncpy(h->path, file, MAX_PATH);

        buf_widx += sizeof(struct mp3entry);  /* safe because the handle
                                                 can't wrap */

        /* Inform the buffering thread that we added a handle */
        LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
        queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);

        return h->id;
    }

    /* Other cases: there is a little more work. */

    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = filesize(fd);
    bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC;

    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    struct memory_handle *h = add_handle(size-adjusted_offset, can_wrap, false);
    if (!h)
    {
        DEBUGF("bufopen: failed to add handle\n");
        close(fd);
        return ERR_BUFFER_FULL;
    }

    strncpy(h->path, file, MAX_PATH);
    h->offset = adjusted_offset;
    h->ridx = buf_widx;
    h->data = buf_widx;
    h->type = type;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP)
    {
        /* Bitmap file: we load the data instead of the file */
        int rc;
        mutex_lock(&llist_mutex); /* Lock because load_bitmap yields */
        rc = load_bitmap(fd);
        mutex_unlock(&llist_mutex);
        if (rc <= 0)
        {
            rm_handle(h);
            close(fd);
            return ERR_FILE_ERROR;
        }
        h->filerem = 0;
        h->filesize = rc;
        h->available = rc;
        h->widx = buf_widx + rc; /* safe because the data doesn't wrap */
        buf_widx += rc;  /* safe too */
    }
    else
#endif
    {
        h->filerem = size - adjusted_offset;
        h->filesize = size;
        h->available = 0;
        h->widx = buf_widx;
    }

    if (type == TYPE_CUESHEET) {
        h->fd = fd;
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", h->id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        h->fd = -1;
        close(fd);

        /* Inform the buffering thread that we added a handle */
        LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
        queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);
    }

    logf("bufopen: new hdl %d", h->id);
    return h->id;
}

/* Open a new handle from data that needs to be copied from memory.
   src is the source buffer from which to copy data. It can be NULL to simply
   reserve buffer space.
   size is the requested size. The call will only be successful if the
   requested amount of data can entirely fit in the buffer without wrapping.
   Return value is the handle id for success or <0 for failure.
*/
int bufalloc(const void *src, size_t size, enum data_type type)
{
    struct memory_handle *h = add_handle(size, false, true);

    if (!h)
        return ERR_BUFFER_FULL;

    if (src) {
        if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
            /* specially take care of struct mp3entry */
            copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
                          (const struct mp3entry *)src);
        } else {
            memcpy(&buffer[buf_widx], src, size);
        }
    }

    h->fd = -1;
    *h->path = 0;
    h->filesize = size;
    h->filerem = 0;
    h->offset = 0;
    h->ridx = buf_widx;
    h->widx = buf_widx + size; /* this is safe because the data doesn't wrap */
    h->data = buf_widx;
    h->available = size;
    h->type = type;

    buf_widx += size;  /* safe too */

    logf("bufalloc: new hdl %d", h->id);
    return h->id;
}

/* Close the handle. Return true for success and false for failure */
bool bufclose(int handle_id)
{
    logf("bufclose(%d)", handle_id);

    LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
    return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
}

/* Set reading index in handle (relative to the start of the file).
   Access before the available data will trigger a rebuffer.
   Return 0 for success and < 0 for failure:
     -1 if the handle wasn't found
     -2 if the new requested position was beyond the end of the file
*/
int bufseek(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (newpos > h->filesize) {
        /* access beyond the end of the file */
        return ERR_INVALID_VALUE;
    }
    else if (newpos < h->offset || h->offset + h->available < newpos) {
        /* access before or after buffered data. A rebuffer is needed. */
        rebuffer_handle(handle_id, newpos);
    }
    else {
        h->ridx = RINGBUF_ADD(h->data, newpos - h->offset);
    }
    return 0;
}

/* Advance the reading index in a handle (relative to its current position).
   Return 0 for success and < 0 for failure */
int bufadvance(int handle_id, off_t offset)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    size_t newpos = h->offset + RINGBUF_SUB(h->ridx, h->data) + offset;
    return bufseek(handle_id, newpos);
}

/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
 * actual amount of data available for reading. This function explicitly
 * does not check the validity of the input handle. It does do range checks
 * on size and returns a valid (and explicit) amount of data for reading */
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
                                          bool guardbuf_limit)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return NULL;

    size_t avail = RINGBUF_SUB(h->widx, h->ridx);

    if (avail == 0 && h->filerem == 0)
    {
        /* File is finished reading */
        *size = 0;
        return h;
    }

    if (*size == 0 || *size > avail + h->filerem)
        *size = avail + h->filerem;

    if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO && *size > GUARD_BUFSIZE)
    {
        logf("data request > guardbuf");
        /* If more than the size of the guardbuf is requested and this is a
         * bufgetdata, limit to guard_bufsize over the end of the buffer */
        *size = MIN(*size, buffer_len - h->ridx + GUARD_BUFSIZE);
        /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
    }

    if (h->filerem > 0 && avail < *size)
    {
        /* Data isn't ready. Request buffering */
        buf_request_buffer_handle(handle_id);
        /* Wait for the data to be ready */
        do
        {
            sleep(1);
            /* it is not safe for a non-buffering thread to sleep while
             * holding a handle */
            h = find_handle(handle_id);
            if (!h)
                return NULL;
            avail = RINGBUF_SUB(h->widx, h->ridx);
        }
        while (h->filerem > 0 && avail < *size);
    }

    *size = MIN(*size,avail);
    return h;
}

/* Copy data from the given handle to the dest buffer.
   Return the number of bytes copied or < 0 for failure (handle not found).
   The caller is blocked until the requested amount of data is available.
*/
ssize_t bufread(int handle_id, size_t size, void *dest)
{
    const struct memory_handle *h;
    size_t adjusted_size = size;

    h = prep_bufdata(handle_id, &adjusted_size, false);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + adjusted_size > buffer_len)
    {
        /* the data wraps around the end of the buffer */
        size_t read = buffer_len - h->ridx;
        memcpy(dest, &buffer[h->ridx], read);
        memcpy(dest+read, buffer, adjusted_size - read);
    }
    else
    {
        memcpy(dest, &buffer[h->ridx], adjusted_size);
    }

    return adjusted_size;
}

/* Update the "data" pointer to make the handle's data available to the caller.
   Return the length of the available linear data or < 0 for failure (handle
   not found).
   The caller is blocked until the requested amount of data is available.
   size is the amount of linear data requested. it can be 0 to get as
   much as possible.
   The guard buffer may be used to provide the requested size. This means it's
   unsafe to request more than the size of the guard buffer.
*/
ssize_t bufgetdata(int handle_id, size_t size, void **data)
{
    const struct memory_handle *h;
    size_t adjusted_size = size;

    h = prep_bufdata(handle_id, &adjusted_size, true);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + adjusted_size > buffer_len)
    {
        /* the data wraps around the end of the buffer :
           use the guard buffer to provide the requested amount of data. */
        size_t copy_n = h->ridx + adjusted_size - buffer_len;
        /* prep_bufdata ensures
           adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
           so copy_n <= GUARD_BUFSIZE */
        memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
    }

    if (data)
        *data = &buffer[h->ridx];

    return adjusted_size;
}

ssize_t bufgettail(int handle_id, size_t size, void **data)
{
    size_t tidx;

    const struct memory_handle *h;

    h = find_handle(handle_id);

    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->filerem)
        return ERR_HANDLE_NOT_DONE;

    /* We don't support tail requests of > guardbuf_size, for simplicity */
    if (size > GUARD_BUFSIZE)
        return ERR_INVALID_VALUE;

    tidx = RINGBUF_SUB(h->widx, size);

    if (tidx + size > buffer_len)
    {
        size_t copy_n = tidx + size - buffer_len;
        memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
    }

    *data = &buffer[tidx];
    return size;
}

ssize_t bufcuttail(int handle_id, size_t size)
{
    struct memory_handle *h;
    size_t adjusted_size = size;

    h = find_handle(handle_id);

    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->filerem)
        return ERR_HANDLE_NOT_DONE;

    if (h->available < adjusted_size)
        adjusted_size = h->available;

    h->available -= adjusted_size;
    h->filesize -= adjusted_size;
    h->widx = RINGBUF_SUB(h->widx, adjusted_size);
    if (h == cur_handle)
        buf_widx = h->widx;

    return adjusted_size;
}

/*
SECONDARY EXPORTED FUNCTIONS
============================

buf_get_offset
buf_handle_offset
buf_request_buffer_handle
buf_set_base_handle
buf_used
register_buffering_callback
unregister_buffering_callback

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/

/* Get a handle offset from a pointer */
ssize_t buf_get_offset(int handle_id, void *ptr)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    return (size_t)ptr - (size_t)&buffer[h->ridx];
}

ssize_t buf_handle_offset(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;
    return h->offset;
}

void buf_request_buffer_handle(int handle_id)
{
    LOGFQUEUE("buffering >| Q_START_FILL %d", handle_id);
    queue_send(&buffering_queue, Q_START_FILL, handle_id);
}

void buf_set_base_handle(int handle_id)
{
    LOGFQUEUE("buffering > Q_BASE_HANDLE %d", handle_id);
    queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
}

/* Return the amount of buffer space used */
size_t buf_used(void)
{
    return BUF_USED;
}

void buf_set_watermark(size_t bytes)
{
    LOGFQUEUE("buffering > Q_SET_WATERMARK %ld", (long)bytes);
    queue_post(&buffering_queue, Q_SET_WATERMARK, bytes);
}

static void shrink_buffer_inner(struct memory_handle *h)
{
    if (h == NULL)
        return;

    shrink_buffer_inner(h->next);

    shrink_handle(h);
}

static void shrink_buffer(void)
{
    logf("shrink_buffer()");
    shrink_buffer_inner(first_handle);
}

void buffering_thread(void)
{
    bool filling = false;
    struct queue_event ev;

    while (true)
    {
        if (!filling) {
            cancel_cpu_boost();
        }

        queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);

        switch (ev.id)
        {
            case Q_START_FILL:
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                /* Call buffer callbacks here because this is one of two ways
                 * to begin a full buffer fill */
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                filling |= buffer_handle((int)ev.data);
                break;

            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data);
                break;

            case Q_RESET_HANDLE:
                LOGFQUEUE("buffering < Q_RESET_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                reset_handle((int)ev.data);
                break;

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case Q_BASE_HANDLE:
                LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
                base_handle_id = (int)ev.data;
                break;

            case Q_SET_WATERMARK:
                LOGFQUEUE("buffering < Q_SET_WATERMARK");
                conf_watermark = (size_t)ev.data;
                if (conf_watermark < BUFFERING_DEFAULT_FILECHUNK)
                {
                    logf("wmark<chunk %ld<%d",
                         (long)conf_watermark, BUFFERING_DEFAULT_FILECHUNK);
                    conf_watermark = BUFFERING_DEFAULT_FILECHUNK;
                }
                break;

#ifndef SIMULATOR
            case SYS_USB_CONNECTED:
                LOGFQUEUE("buffering < SYS_USB_CONNECTED");
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                usb_wait_for_disconnect(&buffering_queue);
                break;
#endif

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        update_data_counters();

        /* If the buffer is low, call the callbacks to get new data */
        if (num_handles > 0 && data_counters.useful <= conf_watermark)
            send_event(BUFFER_EVENT_BUFFER_LOW, 0);

#if 0
        /* TODO: This needs to be fixed to use the idle callback, disable it
         * for simplicity until its done right */
#if MEM > 8
        /* If the disk is spinning, take advantage by filling the buffer */
        else if (storage_disk_is_active() && queue_empty(&buffering_queue))
        {
            if (num_handles > 0 && data_counters.useful <= high_watermark)
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);

            if (data_counters.remaining > 0 && BUF_USED <= high_watermark)
            {
                /* This is a new fill, shrink the buffer up first */
                if (!filling)
                    shrink_buffer();
                filling = fill_buffer();
                update_data_counters();
            }
        }
#endif
#endif

        if (queue_empty(&buffering_queue)) {
            if (filling) {
                if (data_counters.remaining > 0 && BUF_USED < buffer_len)
                    filling = fill_buffer();
                else if (data_counters.remaining == 0)
                    filling = false;
            }
            else if (ev.id == SYS_TIMEOUT)
            {
                if (data_counters.remaining > 0 &&
                    data_counters.useful <= conf_watermark) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}

void buffering_init(void)
{
    mutex_init(&llist_mutex);
#ifdef HAVE_PRIORITY_SCHEDULING
    /* This behavior not safe atm */
    mutex_set_preempt(&llist_mutex, false);
#endif

    conf_watermark = BUFFERING_DEFAULT_WATERMARK;

    queue_init(&buffering_queue, true);
    buffering_thread_id = create_thread(buffering_thread, buffering_stack,
            sizeof(buffering_stack), CREATE_THREAD_FROZEN,
            buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
            IF_COP(, CPU));

    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
                            buffering_thread_id);
}

/* Initialise the buffering subsystem */
bool buffering_reset(char *buf, size_t buflen)
{
    if (!buf || !buflen)
        return false;

    buffer = buf;
    buffer_len = buflen;
    guard_buffer = buf + buflen;

    buf_widx = 0;
    buf_ridx = 0;

    first_handle = NULL;
    cur_handle = NULL;
    cached_handle = NULL;
    num_handles = 0;
    base_handle_id = -1;

    /* Set the high watermark as 75% full...or 25% empty :) */
#if MEM > 8
    high_watermark = 3*buflen / 4;
#endif

    thread_thaw(buffering_thread_id);

    return true;
}

void buffering_get_debugdata(struct buffering_debug *dbgdata)
{
    update_data_counters();
    dbgdata->num_handles = num_handles;
    dbgdata->data_rem = data_counters.remaining;
    dbgdata->wasted_space = data_counters.wasted;
    dbgdata->buffered_data = data_counters.buffered;
    dbgdata->useful_data = data_counters.useful;
}