Fix red introduced with r20610.
[kugel-rb/myfork.git] / apps / buffering.c
bloba4d425fb1dd4dd74f4d7a3d0f9590e87bea58812
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2007 Nicolas Pennequin
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
22 #include "config.h"
23 #include <stdio.h>
24 #include <string.h>
25 #include <stdlib.h>
26 #include <ctype.h>
27 #include "buffering.h"
29 #include "storage.h"
30 #include "system.h"
31 #include "thread.h"
32 #include "file.h"
33 #include "panic.h"
34 #include "memory.h"
35 #include "lcd.h"
36 #include "font.h"
37 #include "button.h"
38 #include "kernel.h"
39 #include "tree.h"
40 #include "debug.h"
41 #include "sprintf.h"
42 #include "settings.h"
43 #include "codecs.h"
44 #include "audio.h"
45 #include "mp3_playback.h"
46 #include "usb.h"
47 #include "screens.h"
48 #include "playlist.h"
49 #include "pcmbuf.h"
50 #include "buffer.h"
51 #include "bmp.h"
52 #include "appevents.h"
53 #include "metadata.h"
54 #ifdef HAVE_ALBUMART
55 #include "albumart.h"
56 #endif
58 #define GUARD_BUFSIZE (32*1024)
60 /* Define LOGF_ENABLE to enable logf output in this file */
61 /*#define LOGF_ENABLE*/
62 #include "logf.h"
64 /* macros to enable logf for queues
65 logging on SYS_TIMEOUT can be disabled */
66 #ifdef SIMULATOR
67 /* Define this for logf output of all queuing except SYS_TIMEOUT */
68 #define BUFFERING_LOGQUEUES
69 /* Define this to logf SYS_TIMEOUT messages */
70 /* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
71 #endif
73 #ifdef BUFFERING_LOGQUEUES
74 #define LOGFQUEUE logf
75 #else
76 #define LOGFQUEUE(...)
77 #endif
79 #ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
80 #define LOGFQUEUE_SYS_TIMEOUT logf
81 #else
82 #define LOGFQUEUE_SYS_TIMEOUT(...)
83 #endif
85 /* default point to start buffer refill */
86 #define BUFFERING_DEFAULT_WATERMARK (1024*128)
87 /* amount of data to read in one read() call */
88 #define BUFFERING_DEFAULT_FILECHUNK (1024*32)
90 #define BUF_HANDLE_MASK 0x7FFFFFFF
93 /* Ring buffer helper macros */
94 /* Buffer pointer (p) plus value (v), wrapped if necessary */
95 #define RINGBUF_ADD(p,v) (((p)+(v))<buffer_len ? (p)+(v) : (p)+(v)-buffer_len)
96 /* Buffer pointer (p) minus value (v), wrapped if necessary */
97 #define RINGBUF_SUB(p,v) ((p>=v) ? (p)-(v) : (p)+buffer_len-(v))
98 /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
99 #define RINGBUF_ADD_CROSS(p1,v,p2) \
100 ((p1<p2) ? (int)((p1)+(v))-(int)(p2) : (int)((p1)+(v)-(p2))-(int)buffer_len)
101 /* Bytes available in the buffer */
102 #define BUF_USED RINGBUF_SUB(buf_widx, buf_ridx)
104 /* assert(sizeof(struct memory_handle)%4==0) */
105 struct memory_handle {
106 int id; /* A unique ID for the handle */
107 enum data_type type; /* Type of data buffered with this handle */
108 char path[MAX_PATH]; /* Path if data originated in a file */
109 int fd; /* File descriptor to path (-1 if closed) */
110 size_t data; /* Start index of the handle's data buffer */
111 volatile size_t ridx; /* Read pointer, relative to the main buffer */
112 size_t widx; /* Write pointer */
113 size_t filesize; /* File total length */
114 size_t filerem; /* Remaining bytes of file NOT in buffer */
115 volatile size_t available; /* Available bytes to read from buffer */
116 size_t offset; /* Offset at which we started reading the file */
117 struct memory_handle *next;
119 /* invariant: filesize == offset + available + filerem */
121 static char *buffer;
122 static char *guard_buffer;
124 static size_t buffer_len;
126 static volatile size_t buf_widx; /* current writing position */
127 static volatile size_t buf_ridx; /* current reading position */
128 /* buf_*idx are values relative to the buffer, not real pointers. */
130 /* Configuration */
131 static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
132 #if MEM > 8
133 static size_t high_watermark = 0; /* High watermark for rebuffer */
134 #endif
136 /* current memory handle in the linked list. NULL when the list is empty. */
137 static struct memory_handle *cur_handle;
138 /* first memory handle in the linked list. NULL when the list is empty. */
139 static struct memory_handle *first_handle;
141 static int num_handles; /* number of handles in the list */
143 static int base_handle_id;
145 static struct mutex llist_mutex;
147 /* Handle cache (makes find_handle faster).
148 This is global so that move_handle and rm_handle can invalidate it. */
149 static struct memory_handle *cached_handle = NULL;
151 static struct {
152 size_t remaining; /* Amount of data needing to be buffered */
153 size_t wasted; /* Amount of space available for freeing */
154 size_t buffered; /* Amount of data currently in the buffer */
155 size_t useful; /* Amount of data still useful to the user */
156 } data_counters;
159 /* Messages available to communicate with the buffering thread */
160 enum {
161 Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
162 used in a low buffer situation. */
163 Q_RESET_HANDLE, /* (internal) Request resetting of a handle to its
164 offset (the offset has to be set beforehand) */
165 Q_CLOSE_HANDLE, /* Request closing a handle */
166 Q_BASE_HANDLE, /* Set the reference handle for buf_useful_data */
168 /* Configuration: */
169 Q_START_FILL, /* Request that the buffering thread initiate a buffer
170 fill at its earliest convenience */
171 Q_HANDLE_ADDED, /* Inform the buffering thread that a handle was added,
172 (which means the disk is spinning) */
175 /* Buffering thread */
176 static void buffering_thread(void);
177 static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
178 static const char buffering_thread_name[] = "buffering";
179 static unsigned int buffering_thread_id = 0;
180 static struct event_queue buffering_queue;
181 static struct queue_sender_list buffering_queue_sender_list;
186 LINKED LIST MANAGEMENT
187 ======================
189 add_handle : Add a handle to the list
190 rm_handle : Remove a handle from the list
191 find_handle : Get a handle pointer from an ID
192 move_handle : Move a handle in the buffer (with or without its data)
194 These functions only handle the linked list structure. They don't touch the
195 contents of the struct memory_handle headers. They also change the buf_*idx
196 pointers when necessary and manage the handle IDs.
198 The first and current (== last) handle are kept track of.
199 A new handle is added at buf_widx and becomes the current one.
200 buf_widx always points to the current writing position for the current handle
201 buf_ridx always points to the location of the first handle.
202 buf_ridx == buf_widx means the buffer is empty.
/* Add a new handle to the linked list and return it. It will have become the
   new current handle.
   data_size must contain the size of what will be in the handle.
   can_wrap tells us whether this type of data may wrap on buffer
   alloc_all tells us if we must immediately be able to allocate data_size
   returns a valid memory handle if all conditions for allocation are met.
           NULL if the memory_handle itself cannot be allocated or if the
           data_size cannot be allocated and alloc_all is set. This function's
           only potential side effect is to allocate space for the cur_handle
           if it returns NULL.
   */
static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
                                        bool alloc_all)
{
    /* gives each handle a unique id */
    static int cur_handle_id = 0;
    size_t shift;
    size_t new_widx;
    size_t len;
    int overlap;

    if (num_handles >= BUF_MAX_HANDLES)
        return NULL;

    mutex_lock(&llist_mutex);

    if (cur_handle && cur_handle->filerem > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        size_t req = cur_handle->filerem + sizeof(struct memory_handle);
        if (RINGBUF_ADD_CROSS(cur_handle->widx, req, buf_ridx) >= 0) {
            /* Not enough space */
            mutex_unlock(&llist_mutex);
            return NULL;
        } else {
            /* Allocate the remainder of the space for the current handle */
            buf_widx = RINGBUF_ADD(cur_handle->widx, cur_handle->filerem);
        }
    }

    /* align to 4 bytes up */
    new_widx = RINGBUF_ADD(buf_widx, 3) & ~3;

    len = data_size + sizeof(struct memory_handle);

    /* First, will the handle wrap? */
    /* If the handle would wrap, move to the beginning of the buffer,
     * or if the data must not but would wrap, move it to the beginning */
    if( (new_widx + sizeof(struct memory_handle) > buffer_len) ||
                   (!can_wrap && (new_widx + len > buffer_len)) ) {
        new_widx = 0;
    }

    /* How far we shifted buf_widx to align things, must be < buffer_len */
    shift = RINGBUF_SUB(new_widx, buf_widx);

    /* How much space are we short in the actual ring buffer? */
    overlap = RINGBUF_ADD_CROSS(buf_widx, shift + len, buf_ridx);
    if (overlap >= 0 && (alloc_all || (unsigned)overlap > data_size)) {
        /* Not enough space for required allocations */
        mutex_unlock(&llist_mutex);
        return NULL;
    }

    /* There is enough space for the required data, advance the buf_widx and
     * initialize the struct */
    buf_widx = new_widx;

    struct memory_handle *new_handle =
        (struct memory_handle *)(&buffer[buf_widx]);

    /* only advance the buffer write index of the size of the struct */
    buf_widx = RINGBUF_ADD(buf_widx, sizeof(struct memory_handle));

    new_handle->id = cur_handle_id;
    /* Wrap signed int is safe and 0 doesn't happen */
    cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK;
    new_handle->next = NULL;
    num_handles++;

    if (!first_handle)
        /* the new handle is the first one */
        first_handle = new_handle;

    if (cur_handle)
        cur_handle->next = new_handle;

    cur_handle = new_handle;

    mutex_unlock(&llist_mutex);
    return new_handle;
}
/* Delete a given memory handle from the linked list
   and return true for success. Nothing is actually erased from memory.
   Also repairs buf_ridx/buf_widx so the freed space becomes reusable. */
static bool rm_handle(const struct memory_handle *h)
{
    if (h == NULL)
        return true;

    mutex_lock(&llist_mutex);

    if (h == first_handle) {
        first_handle = h->next;
        if (h == cur_handle) {
            /* h was the first and last handle: the buffer is now empty */
            cur_handle = NULL;
            buf_ridx = buf_widx = 0;
        } else {
            /* update buf_ridx to point to the new first handle */
            buf_ridx = (void *)first_handle - (void *)buffer;
        }
    } else {
        struct memory_handle *m = first_handle;
        /* Find the previous handle */
        while (m && m->next != h) {
            m = m->next;
        }
        if (m && m->next == h) {
            m->next = h->next;
            if (h == cur_handle) {
                /* removing the last handle: the predecessor becomes current
                   and writing resumes at its write index */
                cur_handle = m;
                buf_widx = cur_handle->widx;
            }
        } else {
            /* h was not found in the list: nothing was changed */
            mutex_unlock(&llist_mutex);
            return false;
        }
    }

    /* Invalidate the cache to prevent it from keeping the old location of h */
    if (h == cached_handle)
        cached_handle = NULL;

    num_handles--;

    mutex_unlock(&llist_mutex);
    return true;
}
/* Return a pointer to the memory handle of given ID.
   NULL if the handle wasn't found */
static struct memory_handle *find_handle(int handle_id)
{
    if (handle_id < 0)
        return NULL;

    mutex_lock(&llist_mutex);

    /* simple caching because most of the time the requested handle
       will either be the same as the last, or the one after the last */
    if (cached_handle)
    {
        if (cached_handle->id == handle_id) {
            mutex_unlock(&llist_mutex);
            return cached_handle;
        } else if (cached_handle->next &&
                   (cached_handle->next->id == handle_id)) {
            cached_handle = cached_handle->next;
            mutex_unlock(&llist_mutex);
            return cached_handle;
        }
    }

    /* Cache miss: fall back to a linear scan of the list */
    struct memory_handle *m = first_handle;
    while (m && m->id != handle_id) {
        m = m->next;
    }
    /* This condition can only be reached with !m or m->id == handle_id */
    if (m)
        cached_handle = m;

    mutex_unlock(&llist_mutex);
    return m;
}
/* Move a memory handle and data_size of its data delta bytes along the buffer.
   delta maximum bytes available to move the handle. If the move is performed
         it is set to the actual distance moved.
   data_size is the amount of data to move along with the struct.
   returns true if the move is successful
           false if the handle is NULL, the move would be less than the size of
           a memory_handle after correcting for wraps or if the handle is not
           found in the linked list for adjustment. This function has no side
           effects if false is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size, bool can_wrap)
{
    struct memory_handle *dest;
    const struct memory_handle *src;
    size_t newpos;
    size_t size_to_move;
    size_t final_delta = *delta;
    int overlap;

    if (h == NULL || (src = *h) == NULL)
        return false;

    size_to_move = sizeof(struct memory_handle) + data_size;

    /* Align to four bytes, down */
    final_delta &= ~3;
    if (final_delta < sizeof(struct memory_handle)) {
        /* It's not legal to move less than the size of the struct */
        return false;
    }

    mutex_lock(&llist_mutex);

    newpos = RINGBUF_ADD((void *)src - (void *)buffer, final_delta);
    overlap = RINGBUF_ADD_CROSS(newpos, size_to_move, buffer_len - 1);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        size_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if ((unsigned)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta, I think */
            correction = overlap - data_size;
        } else if (!can_wrap) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out. This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
            /* Align correction to four bytes, up */
            correction = (correction+3) & ~3;
        }
        if (correction) {
            if (final_delta < correction + sizeof(struct memory_handle)) {
                /* Delta cannot end up less than the size of the struct */
                mutex_unlock(&llist_mutex);
                return false;
            }

            newpos -= correction;
            overlap -= correction;/* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    dest = (struct memory_handle *)(&buffer[newpos]);

    /* Relink the list so dest takes src's place */
    if (src == first_handle) {
        first_handle = dest;
        buf_ridx = newpos;
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != src) {
            m = m->next;
        }
        if (m && m->next == src) {
            m->next = dest;
        } else {
            /* src was not in the list: bail out before touching memory */
            mutex_unlock(&llist_mutex);
            return false;
        }
    }

    /* Update the cache to prevent it from keeping the old location of h */
    if (src == cached_handle)
        cached_handle = dest;

    /* the cur_handle pointer might need updating */
    if (src == cur_handle)
        cur_handle = dest;

    /* Copy the bytes last, after all bookkeeping is consistent. memmove is
       required because source and destination ranges may overlap. */
    if (overlap > 0) {
        /* data wraps: copy in two parts, the tail going to buffer start */
        size_t first_part = size_to_move - overlap;
        memmove(dest, src, first_part);
        memmove(buffer, (const char *)src + first_part, overlap);
    } else {
        memmove(dest, src, size_to_move);
    }

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    mutex_unlock(&llist_mutex);
    return dest;
}
492 BUFFER SPACE MANAGEMENT
493 =======================
495 update_data_counters: Updates the values in data_counters
496 buffer_is_low : Returns true if the amount of useful data in the buffer is low
497 buffer_handle : Buffer data for a handle
498 reset_handle : Reset write position and data buffer of a handle to its offset
499 rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
500 shrink_handle : Free buffer space by moving a handle
501 fill_buffer : Call buffer_handle for all handles that have data to buffer
503 These functions are used by the buffering thread to manage buffer space.
/* Walk the handle list and recompute the global data_counters totals.
   "useful" only starts accumulating at the base handle (or everywhere if the
   base handle is unset/not found). */
static void update_data_counters(void)
{
    struct memory_handle *m = find_handle(base_handle_id);
    /* if the base handle doesn't exist, all buffered data counts as useful */
    bool is_useful = m==NULL;

    size_t buffered = 0;
    size_t wasted = 0;
    size_t remaining = 0;
    size_t useful = 0;

    mutex_lock(&llist_mutex);

    m = first_handle;
    while (m) {
        buffered += m->available;
        wasted += RINGBUF_SUB(m->ridx, m->data);   /* already-read bytes */
        remaining += m->filerem;

        if (m->id == base_handle_id)
            is_useful = true;

        if (is_useful)
            useful += RINGBUF_SUB(m->widx, m->ridx); /* not yet read */

        m = m->next;
    }

    mutex_unlock(&llist_mutex);

    data_counters.buffered = buffered;
    data_counters.wasted = wasted;
    data_counters.remaining = remaining;
    data_counters.useful = useful;
}
541 static inline bool buffer_is_low(void)
543 update_data_counters();
544 return data_counters.useful < (conf_watermark / 2);
/* Buffer data for the given handle.
   Return whether or not the buffering should continue explicitly (false means
   the ring buffer is full and the caller must stop filling). */
static bool buffer_handle(int handle_id)
{
    logf("buffer_handle(%d)", handle_id);
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return true;

    if (h->filerem == 0) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0)  /* file closed, reopen */
    {
        if (*h->path)
            h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0)
        {
            /* could not open the file, truncate it where it is */
            h->filesize -= h->filerem;
            h->filerem = 0;
            return true;
        }

        if (h->offset)
            lseek(h->fd, h->offset, SEEK_SET);
    }

    trigger_cpu_boost();

    if (h->type == TYPE_ID3)
    {
        /* ID3 handles are filled by the metadata parser, not by read() */
        if (!get_metadata((struct mp3entry *)(buffer + h->data), h->fd, h->path))
        {
            /* metadata parsing failed: clear the buffer. */
            memset(buffer + h->data, 0, sizeof(struct mp3entry));
        }
        close(h->fd);
        h->fd = -1;
        h->filerem = 0;
        h->available = sizeof(struct mp3entry);
        h->widx += sizeof(struct mp3entry);
        send_event(BUFFER_EVENT_FINISHED, &h->id);
        return true;
    }

    while (h->filerem > 0)
    {
        /* max amount to copy (chunk-limited and clipped at buffer end) */
        size_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
                             buffer_len - h->widx);

        /* stop copying if it would overwrite the reading position */
        if (RINGBUF_ADD_CROSS(h->widx, copy_n, buf_ridx) >= 0)
            return false;

        /* This would read into the next handle, this is broken */
        if (h->next && RINGBUF_ADD_CROSS(h->widx, copy_n,
                    (unsigned)((void *)h->next - (void *)buffer)) > 0) {
            /* Try to recover by truncating this file */
            copy_n = RINGBUF_ADD_CROSS(h->widx, copy_n,
                        (unsigned)((void *)h->next - (void *)buffer));
            h->filerem -= copy_n;
            h->filesize -= copy_n;
            logf("buf alloc short %ld", (long)copy_n);
            if (h->filerem)
                continue;
            else
                break;
        }

        /* rc is the actual amount read */
        int rc = read(h->fd, &buffer[h->widx], copy_n);

        if (rc < 0)
        {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            /* for other types, truncate the file at the current position */
            DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
            h->filesize -= h->filerem;
            h->filerem = 0;
            break;
        }

        /* Advance buffer */
        h->widx = RINGBUF_ADD(h->widx, rc);
        if (h == cur_handle)
            buf_widx = h->widx;
        h->available += rc;
        h->filerem -= rc;

        /* If this is a large file, see if we need to break or give the codec
         * more time */
        if (h->type == TYPE_PACKET_AUDIO &&
            pcmbuf_is_lowdata() && !buffer_is_low())
        {
            sleep(1);
        }
        else
        {
            yield();
        }

        /* abandon the fill if another request arrived in the queue */
        if (!queue_empty(&buffering_queue))
            break;
    }

    if (h->filerem == 0) {
        /* finished buffering the file */
        close(h->fd);
        h->fd = -1;
        send_event(BUFFER_EVENT_FINISHED, &h->id);
    }

    return true;
}
671 /* Reset writing position and data buffer of a handle to its current offset.
672 Use this after having set the new offset to use. */
673 static void reset_handle(int handle_id)
675 logf("reset_handle(%d)", handle_id);
677 struct memory_handle *h = find_handle(handle_id);
678 if (!h)
679 return;
681 h->ridx = h->widx = h->data;
682 if (h == cur_handle)
683 buf_widx = h->widx;
684 h->available = 0;
685 h->filerem = h->filesize - h->offset;
687 if (h->fd >= 0) {
688 lseek(h->fd, h->offset, SEEK_SET);
/* Seek to a nonbuffered part of a handle by rebuffering the data. */
static void rebuffer_handle(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return;

    /* When seeking forward off of the buffer, if it is a short seek don't
       rebuffer the whole track, just read enough to satisfy */
    if (newpos > h->offset && newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK)
    {
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
        /* NOTE(review): this addition is not passed through RINGBUF_ADD, so
           it looks like it assumes data + newpos never wraps here — confirm */
        h->ridx = h->data + newpos;
        return;
    }

    h->offset = newpos;

    /* Reset the handle to its new offset */
    LOGFQUEUE("buffering >| Q_RESET_HANDLE %d", handle_id);
    queue_send(&buffering_queue, Q_RESET_HANDLE, handle_id);

    size_t next = (unsigned)((void *)h->next - (void *)buffer);
    if (RINGBUF_SUB(next, h->data) < h->filesize - newpos)
    {
        /* There isn't enough space to rebuffer all of the track from its new
           offset, so we ask the user to free some */
        DEBUGF("rebuffer_handle: space is needed\n");
        send_event(BUFFER_EVENT_REBUFFER, &handle_id);
    }

    /* Now we ask for a rebuffer */
    LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
    queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
}
729 static bool close_handle(int handle_id)
731 struct memory_handle *h = find_handle(handle_id);
733 /* If the handle is not found, it is closed */
734 if (!h)
735 return true;
737 if (h->fd >= 0) {
738 close(h->fd);
739 h->fd = -1;
742 /* rm_handle returns true unless the handle somehow persists after exit */
743 return rm_handle(h);
/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data. */
static void shrink_handle(struct memory_handle *h)
{
    size_t delta;

    if (!h)
        return;

    if (h->next && h->filerem == 0 &&
        (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
         h->type == TYPE_BITMAP || h->type == TYPE_CODEC ||
         h->type == TYPE_ATOMIC_AUDIO))
    {
        /* metadata handle: we can move all of it */
        size_t handle_distance =
            RINGBUF_SUB((unsigned)((void *)h->next - (void*)buffer), h->data);
        delta = handle_distance - h->available;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, h->available, h->type==TYPE_CODEC))
            return;

        size_t olddata = h->data;
        h->data = RINGBUF_ADD(h->data, delta);
        h->ridx = RINGBUF_ADD(h->ridx, delta);
        h->widx = RINGBUF_ADD(h->widx, delta);

        if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
            /* when moving an mp3entry we need to readjust its pointers. */
            adjust_mp3entry((struct mp3entry *)&buffer[h->data],
                            (void *)&buffer[h->data],
                            (const void *)&buffer[olddata]);
        } else if (h->type == TYPE_BITMAP) {
            /* adjust the bitmap's pointer */
            struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
            bmp->data = &buffer[h->data + sizeof(struct bitmap)];
        }
    }
    else
    {
        /* only move the handle struct up to the read position, reclaiming
           the already-read bytes in front of it */
        delta = RINGBUF_SUB(h->ridx, h->data);
        if (!move_handle(&h, &delta, 0, true))
            return;

        h->data = RINGBUF_ADD(h->data, delta);
        h->available -= delta;
        h->offset += delta;
    }
}
798 /* Fill the buffer by buffering as much data as possible for handles that still
799 have data left to buffer
800 Return whether or not to continue filling after this */
801 static bool fill_buffer(void)
803 logf("fill_buffer()");
804 struct memory_handle *m;
805 shrink_handle(first_handle);
806 m = first_handle;
807 while (queue_empty(&buffering_queue) && m) {
808 if (m->filerem > 0) {
809 if (!buffer_handle(m->id)) {
810 m = NULL;
811 break;
814 m = m->next;
817 if (m) {
818 return true;
820 else
822 /* only spin the disk down if the filling wasn't interrupted by an
823 event arriving in the queue. */
824 storage_sleep();
825 return false;
#ifdef HAVE_ALBUMART
/* Given a file descriptor to a bitmap file, write the bitmap data to the
   buffer, with a struct bitmap and the actual data immediately following.
   Return value is the total size (struct + data). */
static int load_bitmap(int fd)
{
    int rc;
    struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx];
    /* FIXME: alignment may be needed for the data buffer. */
    bmp->data = &buffer[buf_widx + sizeof(struct bitmap)];

#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
    bmp->maskdata = NULL;
#endif

    /* space left for pixel data: the lesser of total free space and the
       contiguous run up to the end of the buffer, minus the struct header */
    int free = (int)MIN(buffer_len - BUF_USED, buffer_len - buf_widx)
               - sizeof(struct bitmap);

    get_albumart_size(bmp);

    rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                     FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    /* on success, account for the header we placed before the pixels */
    return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
}
#endif
857 MAIN BUFFERING API CALLS
858 ========================
860 bufopen : Request the opening of a new handle for a file
861 bufalloc : Open a new handle for data other than a file.
862 bufclose : Close an open handle
863 bufseek : Set the read pointer in a handle
864 bufadvance : Move the read pointer in a handle
865 bufread : Copy data from a handle into a given buffer
866 bufgetdata : Give a pointer to the handle's data
868 These functions are exported, to allow interaction with the buffer.
869 They take care of the content of the structs, and rely on the linked list
870 management functions for all the actual handle management work.
/* Reserve space in the buffer for a file.
   filename: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           (offset-1) bytes of the file aren't needed.
   return value: <0 if the file cannot be opened, or one file already
   queued to be opened, otherwise the handle for the file in the buffer
*/
int bufopen(const char *file, size_t offset, enum data_type type)
{
    if (type == TYPE_ID3)
    {
        /* ID3 case: allocate space, init the handle and return. */

        struct memory_handle *h = add_handle(sizeof(struct mp3entry), false, true);
        if (!h)
            return ERR_BUFFER_FULL;

        h->fd = -1;
        h->filesize = sizeof(struct mp3entry);
        h->filerem = sizeof(struct mp3entry);
        h->offset = 0;
        h->data = buf_widx;
        h->ridx = buf_widx;
        h->widx = buf_widx;
        h->available = 0;
        h->type = type;
        /* NOTE(review): strncpy does not NUL-terminate if strlen(file) >=
           MAX_PATH — confirm callers never pass such a path */
        strncpy(h->path, file, MAX_PATH);

        buf_widx += sizeof(struct mp3entry);  /* safe because the handle
                                                 can't wrap */

        /* Inform the buffering thread that we added a handle */
        LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
        queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);

        return h->id;
    }

    /* Other cases: there is a little more work. */

    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = filesize(fd);
    bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC;

    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    struct memory_handle *h = add_handle(size-adjusted_offset, can_wrap, false);
    if (!h)
    {
        DEBUGF("bufopen: failed to add handle\n");
        close(fd);
        return ERR_BUFFER_FULL;
    }

    strncpy(h->path, file, MAX_PATH);
    h->offset = adjusted_offset;
    h->ridx = buf_widx;
    h->data = buf_widx;
    h->type = type;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP)
    {
        /* Bitmap file: we load the data instead of the file */
        int rc;
        mutex_lock(&llist_mutex); /* Lock because load_bitmap yields */
        rc = load_bitmap(fd);
        mutex_unlock(&llist_mutex);
        if (rc <= 0)
        {
            rm_handle(h);
            close(fd);
            return ERR_FILE_ERROR;
        }
        h->filerem = 0;
        h->filesize = rc;
        h->available = rc;
        h->widx = buf_widx + rc; /* safe because the data doesn't wrap */
        buf_widx += rc;  /* safe too */
    }
    else
#endif
    {
        /* regular file: the buffering thread will fill it in later */
        h->filerem = size - adjusted_offset;
        h->filesize = size;
        h->available = 0;
        h->widx = buf_widx;
    }

    if (type == TYPE_CUESHEET) {
        h->fd = fd;
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", h->id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        h->fd = -1;
        close(fd);

        /* Inform the buffering thread that we added a handle */
        LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
        queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);
    }

    logf("bufopen: new hdl %d", h->id);
    return h->id;
}
/* Open a new handle from data that needs to be copied from memory.
   src is the source buffer from which to copy data. It can be NULL to simply
   reserve buffer space.
   size is the requested size. The call will only be successful if the
   requested amount of data can entirely fit in the buffer without wrapping.
   Return value is the handle id for success or <0 for failure.
*/
int bufalloc(const void *src, size_t size, enum data_type type)
{
    /* alloc_all=true: all of size must fit contiguously right now */
    struct memory_handle *h = add_handle(size, false, true);

    if (!h)
        return ERR_BUFFER_FULL;

    if (src) {
        if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
            /* specially take care of struct mp3entry */
            copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
                          (const struct mp3entry *)src);
        } else {
            memcpy(&buffer[buf_widx], src, size);
        }
    }

    /* memory-backed handle: no file, everything is already "buffered" */
    h->fd = -1;
    *h->path = 0;
    h->filesize = size;
    h->filerem = 0;
    h->offset = 0;
    h->ridx = buf_widx;
    h->widx = buf_widx + size; /* this is safe because the data doesn't wrap */
    h->data = buf_widx;
    h->available = size;
    h->type = type;

    buf_widx += size;  /* safe too */

    logf("bufalloc: new hdl %d", h->id);
    return h->id;
}
1028 /* Close the handle. Return true for success and false for failure */
1029 bool bufclose(int handle_id)
1031 logf("bufclose(%d)", handle_id);
1033 LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
1034 return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
1037 /* Set reading index in handle (relatively to the start of the file).
1038 Access before the available data will trigger a rebuffer.
1039 Return 0 for success and < 0 for failure:
1040 -1 if the handle wasn't found
1041 -2 if the new requested position was beyond the end of the file
1043 int bufseek(int handle_id, size_t newpos)
1045 struct memory_handle *h = find_handle(handle_id);
1046 if (!h)
1047 return ERR_HANDLE_NOT_FOUND;
1049 if (newpos > h->filesize) {
1050 /* access beyond the end of the file */
1051 return ERR_INVALID_VALUE;
1053 else if (newpos < h->offset || h->offset + h->available < newpos) {
1054 /* access before or after buffered data. A rebuffer is needed. */
1055 rebuffer_handle(handle_id, newpos);
1057 else {
1058 h->ridx = RINGBUF_ADD(h->data, newpos - h->offset);
1060 return 0;
1063 /* Advance the reading index in a handle (relatively to its current position).
1064 Return 0 for success and < 0 for failure */
1065 int bufadvance(int handle_id, off_t offset)
1067 const struct memory_handle *h = find_handle(handle_id);
1068 if (!h)
1069 return ERR_HANDLE_NOT_FOUND;
1071 size_t newpos = h->offset + RINGBUF_SUB(h->ridx, h->data) + offset;
1072 return bufseek(handle_id, newpos);
/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
 * actual amount of data available for reading. This function explicitly
 * does not check the validity of the input handle. It does do range checks
 * on size and returns a valid (and explicit) amount of data for reading.
 * May block (sleep) until enough data has been buffered. */
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
                                          bool guardbuf_limit)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return NULL;

    size_t avail = RINGBUF_SUB(h->widx, h->ridx);

    if (avail == 0 && h->filerem == 0)
    {
        /* File is finished reading */
        *size = 0;
        return h;
    }

    /* 0 means "as much as possible"; also clamp to what can ever arrive */
    if (*size == 0 || *size > avail + h->filerem)
        *size = avail + h->filerem;

    if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO && *size > GUARD_BUFSIZE)
    {
        logf("data request > guardbuf");
        /* If more than the size of the guardbuf is requested and this is a
         * bufgetdata, limit to guard_bufsize over the end of the buffer */
        *size = MIN(*size, buffer_len - h->ridx + GUARD_BUFSIZE);
        /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
    }

    if (h->filerem > 0 && avail < *size)
    {
        /* Data isn't ready. Request buffering */
        buf_request_buffer_handle(handle_id);
        /* Wait for the data to be ready */
        do
        {
            sleep(1);
            /* it is not safe for a non-buffering thread to sleep while
             * holding a handle: re-look it up after each sleep */
            h = find_handle(handle_id);
            if (!h)
                return NULL;
            avail = RINGBUF_SUB(h->widx, h->ridx);
        }
        while (h->filerem > 0 && avail < *size);
    }

    /* never promise more than is actually buffered */
    *size = MIN(*size,avail);
    return h;
}
1129 /* Copy data from the given handle to the dest buffer.
1130 Return the number of bytes copied or < 0 for failure (handle not found).
1131 The caller is blocked until the requested amount of data is available.
1133 ssize_t bufread(int handle_id, size_t size, void *dest)
1135 const struct memory_handle *h;
1136 size_t adjusted_size = size;
1138 h = prep_bufdata(handle_id, &adjusted_size, false);
1139 if (!h)
1140 return ERR_HANDLE_NOT_FOUND;
1142 if (h->ridx + adjusted_size > buffer_len)
1144 /* the data wraps around the end of the buffer */
1145 size_t read = buffer_len - h->ridx;
1146 memcpy(dest, &buffer[h->ridx], read);
1147 memcpy(dest+read, buffer, adjusted_size - read);
1149 else
1151 memcpy(dest, &buffer[h->ridx], adjusted_size);
1154 return adjusted_size;
1157 /* Update the "data" pointer to make the handle's data available to the caller.
1158 Return the length of the available linear data or < 0 for failure (handle
1159 not found).
1160 The caller is blocked until the requested amount of data is available.
1161 size is the amount of linear data requested. it can be 0 to get as
1162 much as possible.
1163 The guard buffer may be used to provide the requested size. This means it's
1164 unsafe to request more than the size of the guard buffer.
1166 ssize_t bufgetdata(int handle_id, size_t size, void **data)
1168 const struct memory_handle *h;
1169 size_t adjusted_size = size;
1171 h = prep_bufdata(handle_id, &adjusted_size, true);
1172 if (!h)
1173 return ERR_HANDLE_NOT_FOUND;
1175 if (h->ridx + adjusted_size > buffer_len)
1177 /* the data wraps around the end of the buffer :
1178 use the guard buffer to provide the requested amount of data. */
1179 size_t copy_n = h->ridx + adjusted_size - buffer_len;
1180 /* prep_bufdata ensures adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
1181 so copy_n <= GUARD_BUFSIZE */
1182 memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
1185 if (data)
1186 *data = &buffer[h->ridx];
1188 return adjusted_size;
1191 ssize_t bufgettail(int handle_id, size_t size, void **data)
1193 size_t tidx;
1195 const struct memory_handle *h;
1197 h = find_handle(handle_id);
1199 if (!h)
1200 return ERR_HANDLE_NOT_FOUND;
1202 if (h->filerem)
1203 return ERR_HANDLE_NOT_DONE;
1205 /* We don't support tail requests of > guardbuf_size, for simplicity */
1206 if (size > GUARD_BUFSIZE)
1207 return ERR_INVALID_VALUE;
1209 tidx = RINGBUF_SUB(h->widx, size);
1211 if (tidx + size > buffer_len)
1213 size_t copy_n = tidx + size - buffer_len;
1214 memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
1217 *data = &buffer[tidx];
1218 return size;
1221 ssize_t bufcuttail(int handle_id, size_t size)
1223 struct memory_handle *h;
1224 size_t adjusted_size = size;
1226 h = find_handle(handle_id);
1228 if (!h)
1229 return ERR_HANDLE_NOT_FOUND;
1231 if (h->filerem)
1232 return ERR_HANDLE_NOT_DONE;
1234 if (h->available < adjusted_size)
1235 adjusted_size = h->available;
1237 h->available -= adjusted_size;
1238 h->filesize -= adjusted_size;
1239 h->widx = RINGBUF_SUB(h->widx, adjusted_size);
1240 if (h == cur_handle)
1241 buf_widx = h->widx;
1243 return adjusted_size;
1248 SECONDARY EXPORTED FUNCTIONS
1249 ============================
1251 buf_get_offset
1252 buf_handle_offset
1253 buf_request_buffer_handle
1254 buf_set_base_handle
1255 buf_used
1256 register_buffering_callback
1257 unregister_buffering_callback
1259 These functions are exported, to allow interaction with the buffer.
1260 They take care of the content of the structs, and rely on the linked list
1261 management functions for all the actual handle management work.
1264 /* Get a handle offset from a pointer */
1265 ssize_t buf_get_offset(int handle_id, void *ptr)
1267 const struct memory_handle *h = find_handle(handle_id);
1268 if (!h)
1269 return ERR_HANDLE_NOT_FOUND;
1271 return (size_t)ptr - (size_t)&buffer[h->ridx];
1274 ssize_t buf_handle_offset(int handle_id)
1276 const struct memory_handle *h = find_handle(handle_id);
1277 if (!h)
1278 return ERR_HANDLE_NOT_FOUND;
1279 return h->offset;
1282 void buf_request_buffer_handle(int handle_id)
1284 LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id);
1285 queue_send(&buffering_queue, Q_START_FILL, handle_id);
1288 void buf_set_base_handle(int handle_id)
1290 LOGFQUEUE("buffering > Q_BASE_HANDLE %d", handle_id);
1291 queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
1294 /* Return the amount of buffer space used */
1295 size_t buf_used(void)
1297 return BUF_USED;
1300 void buf_set_watermark(size_t bytes)
1302 conf_watermark = bytes;
1305 static void shrink_buffer_inner(struct memory_handle *h)
1307 if (h == NULL)
1308 return;
1310 shrink_buffer_inner(h->next);
1312 shrink_handle(h);
1315 static void shrink_buffer(void)
1317 logf("shrink_buffer()");
1318 shrink_buffer_inner(first_handle);
1321 void buffering_thread(void)
1323 bool filling = false;
1324 struct queue_event ev;
1326 while (true)
1328 if (!filling) {
1329 cancel_cpu_boost();
1332 queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);
1334 switch (ev.id)
1336 case Q_START_FILL:
1337 LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
1338 /* Call buffer callbacks here because this is one of two ways
1339 * to begin a full buffer fill */
1340 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1341 shrink_buffer();
1342 queue_reply(&buffering_queue, 1);
1343 filling |= buffer_handle((int)ev.data);
1344 break;
1346 case Q_BUFFER_HANDLE:
1347 LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
1348 queue_reply(&buffering_queue, 1);
1349 buffer_handle((int)ev.data);
1350 break;
1352 case Q_RESET_HANDLE:
1353 LOGFQUEUE("buffering < Q_RESET_HANDLE %d", (int)ev.data);
1354 queue_reply(&buffering_queue, 1);
1355 reset_handle((int)ev.data);
1356 break;
1358 case Q_CLOSE_HANDLE:
1359 LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
1360 queue_reply(&buffering_queue, close_handle((int)ev.data));
1361 break;
1363 case Q_HANDLE_ADDED:
1364 LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
1365 /* A handle was added: the disk is spinning, so we can fill */
1366 filling = true;
1367 break;
1369 case Q_BASE_HANDLE:
1370 LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
1371 base_handle_id = (int)ev.data;
1372 break;
1374 #ifndef SIMULATOR
1375 case SYS_USB_CONNECTED:
1376 LOGFQUEUE("buffering < SYS_USB_CONNECTED");
1377 usb_acknowledge(SYS_USB_CONNECTED_ACK);
1378 usb_wait_for_disconnect(&buffering_queue);
1379 break;
1380 #endif
1382 case SYS_TIMEOUT:
1383 LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
1384 break;
1387 update_data_counters();
1389 /* If the buffer is low, call the callbacks to get new data */
1390 if (num_handles > 0 && data_counters.useful <= conf_watermark)
1391 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1393 #if 0
1394 /* TODO: This needs to be fixed to use the idle callback, disable it
1395 * for simplicity until its done right */
1396 #if MEM > 8
1397 /* If the disk is spinning, take advantage by filling the buffer */
1398 else if (storage_disk_is_active() && queue_empty(&buffering_queue))
1400 if (num_handles > 0 && data_counters.useful <= high_watermark)
1401 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1403 if (data_counters.remaining > 0 && BUF_USED <= high_watermark)
1405 /* This is a new fill, shrink the buffer up first */
1406 if (!filling)
1407 shrink_buffer();
1408 filling = fill_buffer();
1409 update_data_counters();
1412 #endif
1413 #endif
1415 if (queue_empty(&buffering_queue)) {
1416 if (filling) {
1417 if (data_counters.remaining > 0 && BUF_USED < buffer_len)
1418 filling = fill_buffer();
1419 else if (data_counters.remaining == 0)
1420 filling = false;
1422 else if (ev.id == SYS_TIMEOUT)
1424 if (data_counters.remaining > 0 &&
1425 data_counters.useful <= conf_watermark) {
1426 shrink_buffer();
1427 filling = fill_buffer();
1434 void buffering_init(void)
1436 mutex_init(&llist_mutex);
1437 #ifdef HAVE_PRIORITY_SCHEDULING
1438 /* This behavior not safe atm */
1439 mutex_set_preempt(&llist_mutex, false);
1440 #endif
1442 conf_watermark = BUFFERING_DEFAULT_WATERMARK;
1444 queue_init(&buffering_queue, true);
1445 buffering_thread_id = create_thread( buffering_thread, buffering_stack,
1446 sizeof(buffering_stack), CREATE_THREAD_FROZEN,
1447 buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
1448 IF_COP(, CPU));
1450 queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
1451 buffering_thread_id);
1454 /* Initialise the buffering subsystem */
1455 bool buffering_reset(char *buf, size_t buflen)
1457 if (!buf || !buflen)
1458 return false;
1460 buffer = buf;
1461 buffer_len = buflen;
1462 guard_buffer = buf + buflen;
1464 buf_widx = 0;
1465 buf_ridx = 0;
1467 first_handle = NULL;
1468 cur_handle = NULL;
1469 cached_handle = NULL;
1470 num_handles = 0;
1471 base_handle_id = -1;
1473 /* Set the high watermark as 75% full...or 25% empty :) */
1474 #if MEM > 8
1475 high_watermark = 3*buflen / 4;
1476 #endif
1478 thread_thaw(buffering_thread_id);
1480 return true;
1483 void buffering_get_debugdata(struct buffering_debug *dbgdata)
1485 update_data_counters();
1486 dbgdata->num_handles = num_handles;
1487 dbgdata->data_rem = data_counters.remaining;
1488 dbgdata->wasted_space = data_counters.wasted;
1489 dbgdata->buffered_data = data_counters.buffered;
1490 dbgdata->useful_data = data_counters.useful;
1491 dbgdata->watermark = conf_watermark;