/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 Nicolas Pennequin
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "config.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <inttypes.h>
#include "buffering.h"

#include "storage.h"
#include "system.h"
#include "thread.h"
#include "file.h"
#include "panic.h"
#include "lcd.h"
#include "font.h"
#include "button.h"
#include "kernel.h"
#include "tree.h"
#include "debug.h"
#include "settings.h"
#include "codecs.h"
#include "audio.h"
#include "mp3_playback.h"
#include "usb.h"
#include "screens.h"
#include "playlist.h"
#include "pcmbuf.h"
#include "appevents.h"
#include "metadata.h"
#ifdef HAVE_ALBUMART
#include "albumart.h"
#include "jpeg_load.h"
#include "bmp.h"
#include "playback.h"
#endif

#define GUARD_BUFSIZE (32*1024)

/* Define LOGF_ENABLE to enable logf output in this file */
/* #define LOGF_ENABLE */
#include "logf.h"

/* macros to enable logf for queues
   logging on SYS_TIMEOUT can be disabled */
#ifdef SIMULATOR
/* Define this for logf output of all queuing except SYS_TIMEOUT */
#define BUFFERING_LOGQUEUES
/* Define this to logf SYS_TIMEOUT messages */
/* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
#endif

#ifdef BUFFERING_LOGQUEUES
#define LOGFQUEUE logf
#else
#define LOGFQUEUE(...)
#endif

#ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
#define LOGFQUEUE_SYS_TIMEOUT logf
#else
#define LOGFQUEUE_SYS_TIMEOUT(...)
#endif

/* amount of data to read in one read() call */
#define BUFFERING_DEFAULT_FILECHUNK (1024*32)

#define BUF_HANDLE_MASK 0x7FFFFFFF

/* assert(sizeof(struct memory_handle)%4==0) */
struct memory_handle {
    int id;                    /* A unique ID for the handle */
    enum data_type type;       /* Type of data buffered with this handle */
    int8_t pinned;             /* Count of references */
    int8_t signaled;           /* Stop any attempt at waiting to get the data */
    char path[MAX_PATH];       /* Path if data originated in a file */
    int fd;                    /* File descriptor to path (-1 if closed) */
    size_t data;               /* Start index of the handle's data buffer */
    volatile size_t ridx;      /* Read pointer, relative to the main buffer */
    size_t widx;               /* Write pointer, relative to the main buffer */
    size_t filesize;           /* File total length */
    size_t filerem;            /* Remaining bytes of file NOT in buffer */
    volatile size_t available; /* Available bytes to read from buffer */
    size_t offset;             /* Offset at which we started reading the file */
    struct memory_handle *next;
};
/* invariant: filesize == offset + available + filerem */

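/* Illustrative example of the invariant (numbers are hypothetical): for a
   1000-byte file opened at offset 100, of which 300 bytes are currently in
   the buffer, we would have
       offset = 100, available = 300, filerem = 600
   and indeed 1000 == 100 + 300 + 600. */
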
struct buf_message_data
{
    int handle_id;
    intptr_t data;
};

static char *buffer;
static char *guard_buffer;

static size_t buffer_len;

static volatile size_t buf_widx; /* current writing position */
static volatile size_t buf_ridx; /* current reading position */
/* buf_*idx are values relative to the buffer, not real pointers. */

/* Configuration */
static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
static size_t high_watermark = 0; /* High watermark for rebuffer */

/* current memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *cur_handle;
/* first memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *first_handle;

static int num_handles;  /* number of handles in the list */

static int base_handle_id;

/* Main lock for adding / removing handles */
static struct mutex llist_mutex SHAREDBSS_ATTR;

/* Handle cache (makes find_handle faster).
   This is global so that move_handle and rm_handle can invalidate it. */
static struct memory_handle *cached_handle = NULL;

static struct data_counters
{
    size_t remaining; /* Amount of data needing to be buffered */
    size_t wasted;    /* Amount of space available for freeing */
    size_t buffered;  /* Amount of data currently in the buffer */
    size_t useful;    /* Amount of data still useful to the user */
} data_counters;

/* Messages available to communicate with the buffering thread */
enum
{
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                            used in a low buffer situation. */
    Q_REBUFFER_HANDLE,   /* Request reset and rebuffering of a handle at a new
                            file starting position. */
    Q_CLOSE_HANDLE,      /* Request closing a handle */

    /* Configuration: */
    Q_START_FILL,        /* Request that the buffering thread initiate a buffer
                            fill at its earliest convenience */
    Q_HANDLE_ADDED,      /* Inform the buffering thread that a handle was added,
                            (which means the disk is spinning) */
};

/* Buffering thread */
static void buffering_thread(void);
static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
static const char buffering_thread_name[] = "buffering";
static unsigned int buffering_thread_id = 0;
static struct event_queue buffering_queue SHAREDBSS_ATTR;
static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR;

/* Ring buffer helper functions */

static inline uintptr_t ringbuf_offset(const void *ptr)
{
    return (uintptr_t)(ptr - (void*)buffer);
}

/* Buffer pointer (p) plus value (v), wrapped if necessary */
static inline uintptr_t ringbuf_add(uintptr_t p, size_t v)
{
    uintptr_t res = p + v;
    if (res >= buffer_len)
        res -= buffer_len; /* wrap if necessary */
    return res;
}

/* Buffer pointer (p) minus value (v), wrapped if necessary */
static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v)
{
    uintptr_t res = p;
    if (p < v)
        res += buffer_len; /* wrap */

    return res - v;
}

/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2)
{
    ssize_t res = p1 + v - p2;
    if (p1 >= p2) /* wrap if necessary */
        res -= buffer_len;

    return res;
}

/* Bytes available in the buffer */
#define BUF_USED ringbuf_sub(buf_widx, buf_ridx)

/* Real buffer watermark */
#define BUF_WATERMARK MIN(conf_watermark, high_watermark)

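/* Worked example of the ring arithmetic above (values are hypothetical):
   with buffer_len == 100,
       ringbuf_add(90, 15)          -> 5   (wraps past the end)
       ringbuf_sub(5, 15)           -> 90  (wraps past the start)
       ringbuf_add_cross(90, 15, 3) -> 2   (adding 15 at 90 crosses 3 by 2)
   A negative ringbuf_add_cross() result means the write would stop short of
   the second pointer, i.e. the space in between is still free. */
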
/*
LINKED LIST MANAGEMENT
======================

add_handle  : Add a handle to the list
rm_handle   : Remove a handle from the list
find_handle : Get a handle pointer from an ID
move_handle : Move a handle in the buffer (with or without its data)

These functions only handle the linked list structure. They don't touch the
contents of the struct memory_handle headers. They also change the buf_*idx
pointers when necessary and manage the handle IDs.

The first and current (== last) handle are kept track of.
A new handle is added at buf_widx and becomes the current one.
buf_widx always points to the current writing position for the current handle
buf_ridx always points to the location of the first handle.
buf_ridx == buf_widx means the buffer is empty.
*/

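/* A sketch of the resulting layout with three handles (h1 == first_handle,
   h3 == cur_handle); positions are illustrative only:

   | h1 | data1 ... h2 | data2 ... h3 | data3 ...        |
     ^buf_ridx                                  ^buf_widx

   Reads consume from buf_ridx (the first handle); new handles and new file
   data are appended at buf_widx, wrapping around the end as needed. */
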
/* Add a new handle to the linked list and return it. It will have become the
   new current handle.
   data_size must contain the size of what will be in the handle.
   can_wrap tells us whether this type of data may wrap on buffer
   alloc_all tells us if we must immediately be able to allocate data_size
   returns a valid memory handle if all conditions for allocation are met.
           NULL if the memory_handle itself cannot be allocated or if the
           data_size cannot be allocated and alloc_all is set. */
static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
                                        bool alloc_all)
{
    /* gives each handle a unique id */
    static int cur_handle_id = 0;
    size_t shift;
    size_t widx, new_widx;
    size_t len;
    ssize_t overlap;

    if (num_handles >= BUF_MAX_HANDLES)
        return NULL;

    widx = buf_widx;

    if (cur_handle && cur_handle->filerem > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        size_t req = cur_handle->filerem;
        if (ringbuf_add_cross(cur_handle->widx, req, buf_ridx) >= 0) {
            /* Not enough space to finish allocation */
            return NULL;
        } else {
            /* Allocate the remainder of the space for the current handle */
            widx = ringbuf_add(cur_handle->widx, cur_handle->filerem);
        }
    }

    /* align to 4 bytes up always leaving a gap */
    new_widx = ringbuf_add(widx, 4) & ~3;

    len = data_size + sizeof(struct memory_handle);

    /* First, will the handle wrap? */
    /* If the handle would wrap, move to the beginning of the buffer,
     * or if the data must not but would wrap, move it to the beginning */
    if (new_widx + sizeof(struct memory_handle) > buffer_len ||
        (!can_wrap && new_widx + len > buffer_len)) {
        new_widx = 0;
    }

    /* How far we shifted the new_widx to align things, must be < buffer_len */
    shift = ringbuf_sub(new_widx, widx);

    /* How much space are we short in the actual ring buffer? */
    overlap = ringbuf_add_cross(widx, shift + len, buf_ridx);
    if (overlap >= 0 && (alloc_all || (size_t)overlap >= data_size)) {
        /* Not enough space for required allocations */
        return NULL;
    }

    /* There is enough space for the required data, advance the buf_widx and
     * initialize the struct */
    buf_widx = new_widx;

    struct memory_handle *new_handle =
        (struct memory_handle *)(&buffer[buf_widx]);

    /* Prevent buffering thread from looking at it */
    new_handle->filerem = 0;

    /* Handle can be moved by default */
    new_handle->pinned = 0;

    /* Handle data can be waited for by default */
    new_handle->signaled = 0;

    /* only advance the buffer write index by the size of the struct */
    buf_widx = ringbuf_add(buf_widx, sizeof(struct memory_handle));

    new_handle->id = cur_handle_id;
    /* Wrapping the signed int is safe and 0 doesn't happen */
    cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK;
    new_handle->next = NULL;
    num_handles++;

    if (!first_handle)
        /* the new handle is the first one */
        first_handle = new_handle;

    if (cur_handle)
        cur_handle->next = new_handle;

    cur_handle = new_handle;

    return new_handle;
}

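/* Illustrative example of the alignment step above (hypothetical numbers):
   if widx == 17, then new_widx == (ringbuf_add(17, 4) & ~3) == (21 & ~3)
   == 20, so the new struct starts 4-byte aligned and at least one byte past
   the previous write position, which keeps buf_ridx == buf_widx unambiguous
   as "buffer empty" rather than "buffer full". */
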
/* Delete a given memory handle from the linked list
   and return true for success. Nothing is actually erased from memory. */
static bool rm_handle(const struct memory_handle *h)
{
    if (h == NULL)
        return true;

    if (h == first_handle) {
        first_handle = h->next;
        if (h == cur_handle) {
            /* h was the first and last handle: the buffer is now empty */
            cur_handle = NULL;
            buf_ridx = buf_widx = 0;
        } else {
            /* update buf_ridx to point to the new first handle */
            buf_ridx = (size_t)ringbuf_offset(first_handle);
        }
    } else {
        struct memory_handle *m = first_handle;
        /* Find the previous handle */
        while (m && m->next != h) {
            m = m->next;
        }
        if (m && m->next == h) {
            m->next = h->next;
            if (h == cur_handle) {
                cur_handle = m;
                buf_widx = cur_handle->widx;
            }
        } else {
            /* If we don't find ourselves, this is a seriously incoherent
               state with a corrupted list and severe action is needed! */
            panicf("rm_handle fail: %d", h->id);
            return false;
        }
    }

    /* Invalidate the cache to prevent it from keeping the old location of h */
    if (h == cached_handle)
        cached_handle = NULL;

    num_handles--;
    return true;
}

/* Return a pointer to the memory handle of given ID.
   NULL if the handle wasn't found */
static struct memory_handle *find_handle(int handle_id)
{
    if (handle_id < 0)
        return NULL;

    /* simple caching because most of the time the requested handle
       will either be the same as the last, or the one after the last */
    if (cached_handle) {
        if (cached_handle->id == handle_id) {
            return cached_handle;
        } else if (cached_handle->next &&
                   (cached_handle->next->id == handle_id)) {
            cached_handle = cached_handle->next;
            return cached_handle;
        }
    }

    struct memory_handle *m = first_handle;
    while (m && m->id != handle_id) {
        m = m->next;
    }
    /* This condition can only be reached with !m or m->id == handle_id */
    if (m)
        cached_handle = m;

    return m;
}

/* Move a memory handle and data_size of its data delta bytes along the buffer.
   delta maximum bytes available to move the handle. If the move is performed
         it is set to the actual distance moved.
   data_size is the amount of data to move along with the struct.
   returns true if the move is successful and false if the handle is NULL,
           the move would be less than the size of a memory_handle after
           correcting for wraps or if the handle is not found in the linked
           list for adjustment. This function has no side effects if false
           is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size, bool can_wrap)
{
    struct memory_handle *dest;
    const struct memory_handle *src;
    size_t final_delta = *delta, size_to_move;
    uintptr_t oldpos, newpos;
    intptr_t overlap, overlap_old;

    if (h == NULL || (src = *h) == NULL)
        return false;

    size_to_move = sizeof(struct memory_handle) + data_size;

    /* Align to four bytes, down */
    final_delta &= ~3;
    if (final_delta < sizeof(struct memory_handle)) {
        /* It's not legal to move less than the size of the struct */
        return false;
    }

    oldpos = ringbuf_offset(src);
    newpos = ringbuf_add(oldpos, final_delta);
    overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len);
    overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        ssize_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if (!can_wrap) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out. This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
        } else if ((uintptr_t)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta if the struct size is
             * aligned and the buffer is aligned */
            correction = overlap - data_size;
        }
        if (correction) {
            /* Align correction to four bytes up */
            correction = (correction + 3) & ~3;
            if (final_delta < correction + sizeof(struct memory_handle)) {
                /* Delta cannot end up less than the size of the struct */
                return false;
            }
            newpos -= correction;
            overlap -= correction;/* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    dest = (struct memory_handle *)(&buffer[newpos]);

    if (src == first_handle) {
        first_handle = dest;
        buf_ridx = newpos;
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != src) {
            m = m->next;
        }
        if (m && m->next == src) {
            m->next = dest;
        } else {
            return false;
        }
    }

    /* Update the cache to prevent it from keeping the old location of h */
    if (src == cached_handle)
        cached_handle = dest;

    /* the cur_handle pointer might need updating */
    if (src == cur_handle)
        cur_handle = dest;

    /* x = handle(s) following this one...
     * ...if last handle, unmoveable if metadata, only shrinkable if audio.
     * In other words, no legal move can be made that would have the src head
     * and dest tail of the data overlap itself. These facts reduce the
     * problem to four essential permutations.
     *
     * movement: always "clockwise" >>>>
     *
     * (src nowrap, dest nowrap)
     * |0123  x |
     * |  0123x | etc...
     * move: "0123"
     *
     * (src nowrap, dest wrap)
     * |     x0123|
     * |23x     01|
     * move: "23", "01"
     *
     * (src wrap, dest nowrap)
     * |23   x01|
     * | 0123x  |
     * move: "23", "01"
     *
     * (src wrap, dest wrap)
     * |23 x  01|
     * |123x   0|
     * move: "23", "1", "0"
     */
    if (overlap_old > 0) {
        /* Move over already wrapped data by the final delta */
        memmove(&buffer[final_delta], buffer, overlap_old);
        if (overlap <= 0)
            size_to_move -= overlap_old;
    }

    if (overlap > 0) {
        /* Move data that now wraps to the beginning */
        size_to_move -= overlap;
        memmove(buffer, SKIPBYTES(src, size_to_move),
                overlap_old > 0 ? final_delta : (size_t)overlap);
    }

    /* Move leading fragment containing handle struct */
    memmove(dest, src, size_to_move);

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    return true;
}

/*
BUFFER SPACE MANAGEMENT
=======================

update_data_counters: Updates the values in data_counters
buffer_is_low   : Returns true if the amount of useful data in the buffer is low
buffer_handle   : Buffer data for a handle
rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
shrink_handle   : Free buffer space by moving a handle
fill_buffer     : Call buffer_handle for all handles that have data to buffer

These functions are used by the buffering thread to manage buffer space.
*/

static size_t handle_size_available(const struct memory_handle *h)
{
    /* Obtain proper distances from data start */
    size_t rd = ringbuf_sub(h->ridx, h->data);
    size_t wr = ringbuf_sub(h->widx, h->data);

    if (LIKELY(wr > rd))
        return wr - rd;

    return 0; /* ridx is ahead of or equal to widx at this time */
}

static void update_data_counters(struct data_counters *dc)
{
    size_t buffered = 0;
    size_t wasted = 0;
    size_t remaining = 0;
    size_t useful = 0;

    struct memory_handle *m;
    bool is_useful;

    if (dc == NULL)
        dc = &data_counters;

    mutex_lock(&llist_mutex);

    m = find_handle(base_handle_id);
    is_useful = m == NULL;

    m = first_handle;
    while (m) {
        buffered += m->available;
        /* wasted could come out larger than the buffer size if ridx's are
           overlapping data ahead of their handles' buffered data */
        wasted += ringbuf_sub(m->ridx, m->data);
        remaining += m->filerem;

        if (m->id == base_handle_id)
            is_useful = true;

        if (is_useful)
            useful += handle_size_available(m);

        m = m->next;
    }

    mutex_unlock(&llist_mutex);

    dc->buffered = buffered;
    dc->wasted = wasted;
    dc->remaining = remaining;
    dc->useful = useful;
}

static inline bool buffer_is_low(void)
{
    update_data_counters(NULL);
    return data_counters.useful < BUF_WATERMARK / 2;
}

/* Q_BUFFER_HANDLE event and buffer data for the given handle.
   Return whether or not the buffering should continue explicitly. */
static bool buffer_handle(int handle_id, size_t to_buffer)
{
    logf("buffer_handle(%d, %lu)", handle_id, (unsigned long)to_buffer);
    struct memory_handle *h = find_handle(handle_id);
    bool stop = false;

    if (!h)
        return true;

    logf("  type: %d", (int)h->type);

    if (h->filerem == 0) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0) { /* file closed, reopen */
        if (*h->path)
            h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0)
        {
            /* could not open the file, truncate it where it is */
            h->filesize -= h->filerem;
            h->filerem = 0;
            return true;
        }

        if (h->offset)
            lseek(h->fd, h->offset, SEEK_SET);
    }

    trigger_cpu_boost();

    if (h->type == TYPE_ID3) {
        if (!get_metadata((struct mp3entry *)(buffer + h->data),
                          h->fd, h->path)) {
            /* metadata parsing failed: clear the buffer. */
            wipe_mp3entry((struct mp3entry *)(buffer + h->data));
        }
        close(h->fd);
        h->fd = -1;
        h->filerem = 0;
        h->available = sizeof(struct mp3entry);
        h->widx = ringbuf_add(h->widx, sizeof(struct mp3entry));
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
        return true;
    }

    while (h->filerem > 0 && !stop)
    {
        /* max amount to copy */
        ssize_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
                              buffer_len - h->widx);
        uintptr_t offset = h->next ? ringbuf_offset(h->next) : buf_ridx;
        ssize_t overlap = ringbuf_add_cross(h->widx, copy_n, offset) + 1;

        if (overlap > 0) {
            /* read only up to available space and stop if it would overwrite
               or be on top of the reading position or the next handle */
            stop = true;
            copy_n -= overlap;
        }

        if (copy_n <= 0)
            return false; /* no space for read */

        /* rc is the actual amount read */
        int rc = read(h->fd, &buffer[h->widx], copy_n);

        if (rc <= 0) {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            logf("File ended %ld bytes early\n", (long)h->filerem);
            h->filesize -= h->filerem;
            h->filerem = 0;
            break;
        }

        /* Advance buffer */
        h->widx = ringbuf_add(h->widx, rc);
        if (h == cur_handle)
            buf_widx = h->widx;
        h->available += rc;
        h->filerem -= rc;

        /* If this is a large file, see if we need to break or give the codec
         * more time */
        if (h->type == TYPE_PACKET_AUDIO &&
            pcmbuf_is_lowdata() && !buffer_is_low()) {
            sleep(1);
        } else {
            yield();
        }

        if (to_buffer == 0) {
            /* Normal buffering - check queue */
            if(!queue_empty(&buffering_queue))
                break;
        } else {
            if (to_buffer <= (size_t)rc)
                break; /* Done */
            to_buffer -= rc;
        }
    }

    if (h->filerem == 0) {
        /* finished buffering the file */
        close(h->fd);
        h->fd = -1;
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
    }

    return !stop;
}

/* Close the specified handle id and free its allocation. */
static bool close_handle(int handle_id)
{
    bool retval = true;
    struct memory_handle *h;

    mutex_lock(&llist_mutex);
    h = find_handle(handle_id);

    /* If the handle is not found, it is closed */
    if (h) {
        if (h->fd >= 0) {
            close(h->fd);
            h->fd = -1;
        }

        /* rm_handle returns true unless the handle somehow persists after
           exit */
        retval = rm_handle(h);
    }

    mutex_unlock(&llist_mutex);
    return retval;
}

/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data. */
static void shrink_handle(struct memory_handle *h)
{
    if (!h)
        return;

    if (h->type == TYPE_PACKET_AUDIO) {
        /* only move the handle struct */
        /* data is pinned by default - if we start moving packet audio,
           the semantics will determine whether or not data is movable
           but the handle will remain movable in either case */
        size_t delta = ringbuf_sub(h->ridx, h->data);

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, 0, true))
            return;

        h->data = ringbuf_add(h->data, delta);
        h->available -= delta;
        h->offset += delta;
    } else {
        /* metadata handle: we can move all of it */
        if (h->pinned || !h->next || h->filerem != 0)
            return; /* Pinned, last handle or not finished loading */

        uintptr_t handle_distance =
            ringbuf_sub(ringbuf_offset(h->next), h->data);
        size_t delta = handle_distance - h->available;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, h->available, h->type==TYPE_CODEC))
            return;

        size_t olddata = h->data;
        h->data = ringbuf_add(h->data, delta);
        h->ridx = ringbuf_add(h->ridx, delta);
        h->widx = ringbuf_add(h->widx, delta);

        if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
            /* when moving an mp3entry we need to readjust its pointers. */
            adjust_mp3entry((struct mp3entry *)&buffer[h->data],
                            (void *)&buffer[h->data],
                            (const void *)&buffer[olddata]);
        } else if (h->type == TYPE_BITMAP) {
            /* adjust the bitmap's pointer */
            struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
            bmp->data = &buffer[h->data + sizeof(struct bitmap)];
        }
    }
}

/* Fill the buffer by buffering as much data as possible for handles that still
   have data left to buffer
   Return whether or not to continue filling after this */
static bool fill_buffer(void)
{
    logf("fill_buffer()");
    struct memory_handle *m = first_handle;

    shrink_handle(m);

    while (queue_empty(&buffering_queue) && m) {
        if (m->filerem > 0) {
            if (!buffer_handle(m->id, 0)) {
                m = NULL;
                break;
            }
        }
        m = m->next;
    }

    if (m) {
        return true;
    } else {
        /* only spin the disk down if the filling wasn't interrupted by an
           event arriving in the queue. */
        storage_sleep();
        return false;
    }
}

#ifdef HAVE_ALBUMART
/* Given a file descriptor to a bitmap file, write the bitmap data to the
   buffer, with a struct bitmap and the actual data immediately following.
   Return value is the total size (struct + data). */
static int load_image(int fd, const char *path,
                      struct bufopen_bitmap_data *data)
{
    int rc;
    struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx];
    struct dim *dim = data->dim;
    struct mp3_albumart *aa = data->embedded_albumart;

    /* get the desired image size */
    bmp->width = dim->width, bmp->height = dim->height;
    /* FIXME: alignment may be needed for the data buffer. */
    bmp->data = &buffer[buf_widx + sizeof(struct bitmap)];
#ifndef HAVE_JPEG
    (void) path;
#endif
#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
    bmp->maskdata = NULL;
#endif

    int free = (int)MIN(buffer_len - BUF_USED, buffer_len - buf_widx)
                    - sizeof(struct bitmap);

#ifdef HAVE_JPEG
    if (aa != NULL) {
        lseek(fd, aa->pos, SEEK_SET);
        rc = clip_jpeg_fd(fd, aa->size, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    }
    else if (strcmp(path + strlen(path) - 4, ".bmp"))
        rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    else
#endif
        rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                         FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
}
#endif

/*
MAIN BUFFERING API CALLS
========================

bufopen     : Request the opening of a new handle for a file
bufalloc    : Open a new handle for data other than a file.
bufclose    : Close an open handle
bufseek     : Set the read pointer in a handle
bufadvance  : Move the read pointer in a handle
bufread     : Copy data from a handle into a given buffer
bufgetdata  : Give a pointer to the handle's data

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/

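/* A minimal usage sketch of this API (illustrative only; the path and the
   loop are hypothetical, not taken from real callers):

       int id = bufopen("/music/track.mp3", 0, TYPE_PACKET_AUDIO, NULL);
       if (id >= 0) {
           char chunk[1024];
           ssize_t n;
           while ((n = bufread(id, sizeof(chunk), chunk)) > 0) {
               ;  // consume n bytes of file data
           }
           bufclose(id);
       }

   Real callers typically prefer bufgetdata()/bufadvance() over bufread()
   to avoid the extra copy. */
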
/* Reserve space in the buffer for a file.
   filename: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           offset bytes of the file aren't needed.
   type: one of the data types supported (audio, image, cuesheet, etc.)
   user_data: user data possibly passed in subcalls specific to a
              data_type (only used for image (albumart) buffering so far)
   return value: <0 if the file cannot be opened, or if a file is already
   queued to be opened; otherwise the handle for the file in the buffer
*/
int bufopen(const char *file, size_t offset, enum data_type type,
            void *user_data)
{
#ifndef HAVE_ALBUMART
    /* currently only used for aa loading */
    (void)user_data;
#endif
    int handle_id = ERR_BUFFER_FULL;

    /* No buffer refs until after the mutex_lock call! */

    if (type == TYPE_ID3) {
        /* ID3 case: allocate space, init the handle and return. */
        mutex_lock(&llist_mutex);

        struct memory_handle *h =
            add_handle(sizeof(struct mp3entry), false, true);

        if (h) {
            handle_id = h->id;
            h->fd = -1;
            h->filesize = sizeof(struct mp3entry);
            h->offset = 0;
            h->data = buf_widx;
            h->ridx = buf_widx;
            h->widx = buf_widx;
            h->available = 0;
            h->type = type;
            strlcpy(h->path, file, MAX_PATH);

            buf_widx = ringbuf_add(buf_widx, sizeof(struct mp3entry));

            h->filerem = sizeof(struct mp3entry);

            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }

        mutex_unlock(&llist_mutex);
        return handle_id;
    }
    else if (type == TYPE_UNKNOWN)
        return ERR_UNSUPPORTED_TYPE;
#ifdef APPLICATION
    /* loading code from memory is not supported in application builds */
    else if (type == TYPE_CODEC)
        return ERR_UNSUPPORTED_TYPE;
#endif
    /* Other cases: there is a little more work. */
    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = 0;
#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* if albumart is embedded, the complete file is not buffered,
         * but only the jpeg part; filesize() would be wrong */
        struct bufopen_bitmap_data *aa = (struct bufopen_bitmap_data*)user_data;
        if (aa->embedded_albumart)
            size = aa->embedded_albumart->size;
    }
#endif
    if (size == 0)
        size = filesize(fd);
    bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC;

    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    /* Reserve extra space because alignment can move data forward */
    size_t padded_size = STORAGE_PAD(size-adjusted_offset);

    mutex_lock(&llist_mutex);

    struct memory_handle *h = add_handle(padded_size, can_wrap, false);
    if (!h) {
        DEBUGF("%s(): failed to add handle\n", __func__);
        mutex_unlock(&llist_mutex);
        close(fd);
        return ERR_BUFFER_FULL;
    }

    handle_id = h->id;
    strlcpy(h->path, file, MAX_PATH);
    h->offset = adjusted_offset;

#ifdef STORAGE_WANTS_ALIGN
    /* Don't bother to storage align bitmaps because they are not
     * loaded directly into the buffer.
     */
    if (type != TYPE_BITMAP) {
        /* Align to desired storage alignment */
        size_t alignment_pad = STORAGE_OVERLAP(adjusted_offset -
                                               (size_t)(&buffer[buf_widx]));
        buf_widx = ringbuf_add(buf_widx, alignment_pad);
    }
#endif /* STORAGE_WANTS_ALIGN */

    h->fd = -1;
    h->data = buf_widx;
    h->ridx = buf_widx;
    h->widx = buf_widx;
    h->available = 0;
    h->type = type;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* Bitmap file: we load the data instead of the file */
        int rc;
        rc = load_image(fd, file, (struct bufopen_bitmap_data*)user_data);
        if (rc <= 0) {
            rm_handle(h);
            handle_id = ERR_FILE_ERROR;
        } else {
            h->filesize = rc;
            h->available = rc;
            buf_widx = ringbuf_add(buf_widx, rc);
            h->widx = buf_widx;
        }
    }
    else
#endif
    {
        if (type == TYPE_CUESHEET)
            h->fd = fd;

        h->filesize = size;
        h->available = 0;
        h->widx = buf_widx;
        h->filerem = size - adjusted_offset;
    }

    mutex_unlock(&llist_mutex);

    if (type == TYPE_CUESHEET) {
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        close(fd);

        if (handle_id >= 0) {
            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }
    }

    logf("bufopen: new hdl %d", handle_id);
    return handle_id;
}

/* Open a new handle from data that needs to be copied from memory.
   src is the source buffer from which to copy data. It can be NULL to simply
   reserve buffer space.
   size is the requested size. The call will only be successful if the
   requested amount of data can entirely fit in the buffer without wrapping.
   Return value is the handle id for success or <0 for failure.
*/
int bufalloc(const void *src, size_t size, enum data_type type)
{
    int handle_id;

    if (type == TYPE_UNKNOWN)
        return ERR_UNSUPPORTED_TYPE;

    handle_id = ERR_BUFFER_FULL;

    mutex_lock(&llist_mutex);

    struct memory_handle *h = add_handle(size, false, true);

    if (h) {
        handle_id = h->id;

        if (src) {
            if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
                /* specially take care of struct mp3entry */
                copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
                              (const struct mp3entry *)src);
            } else {
                memcpy(&buffer[buf_widx], src, size);
            }
        }

        h->fd = -1;
        *h->path = 0;
        h->filesize = size;
        h->offset = 0;
        h->ridx = buf_widx;
        h->data = buf_widx;
        buf_widx = ringbuf_add(buf_widx, size);
        h->widx = buf_widx;
        h->available = size;
        h->type = type;
    }

    mutex_unlock(&llist_mutex);

    logf("bufalloc: new hdl %d", handle_id);
    return handle_id;
}

/* Close the handle. Return true for success and false for failure */
bool bufclose(int handle_id)
{
    logf("bufclose(%d)", handle_id);
#if 0
    /* Don't interrupt the buffering thread if the handle is already
       stale */
    if (!find_handle(handle_id)) {
        logf("  handle already closed");
        return true;
    }
#endif
    LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
    return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
}

/* Backend to bufseek and bufadvance. Call only in response to
   Q_REBUFFER_HANDLE! */
static void rebuffer_handle(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);

    if (!h) {
        queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND);
        return;
    }

    /* When seeking forward off of the buffer, if it is a short seek attempt to
       avoid rebuffering the whole track, just read enough to satisfy */
    if (newpos > h->offset &&
        newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK) {

        size_t amount = newpos - h->offset;
        h->ridx = ringbuf_add(h->data, amount);

        if (buffer_handle(handle_id, amount + 1)) {
            size_t rd = ringbuf_sub(h->ridx, h->data);
            size_t wr = ringbuf_sub(h->widx, h->data);
            if (wr >= rd) {
                /* It really did succeed */
                queue_reply(&buffering_queue, 0);
                buffer_handle(handle_id, 0); /* Ok, try the rest */
                return;
            }
        }
        /* Data collision or other file error - must reset */

        if (newpos > h->filesize)
            newpos = h->filesize; /* file truncation happened above */
    }

    /* Reset the handle to its new position */
    h->offset = newpos;

    size_t next = h->next ? ringbuf_offset(h->next) : buf_ridx;

#ifdef STORAGE_WANTS_ALIGN
    /* Strip alignment padding then redo */
    size_t new_index = ringbuf_add(ringbuf_offset(h), sizeof (*h));

    /* Align to desired storage alignment if space permits - handle could
       have been shrunken too close to the following one after a previous
       rebuffer. */
    size_t alignment_pad =
        STORAGE_OVERLAP(h->offset - (size_t)(&buffer[new_index]));

    if (ringbuf_add_cross(new_index, alignment_pad, next) >= 0)
        alignment_pad = 0; /* Forego storage alignment this time */

    new_index = ringbuf_add(new_index, alignment_pad);
#else
    /* Just clear the data buffer */
    size_t new_index = h->data;
#endif /* STORAGE_WANTS_ALIGN */

    h->ridx = h->widx = h->data = new_index;

    if (h == cur_handle)
        buf_widx = new_index;

    h->available = 0;
    h->filerem = h->filesize - h->offset;

    if (h->fd >= 0)
        lseek(h->fd, h->offset, SEEK_SET);

    if (h->next && ringbuf_sub(next, h->data) <= h->filesize - newpos) {
        /* There isn't enough space to rebuffer all of the track from its new
           offset, so we ask the user to free some */
        DEBUGF("%s(): space is needed\n", __func__);
        int hid = handle_id;
        send_event(BUFFER_EVENT_REBUFFER, &hid);
    }

    /* Now we do the rebuffer */
    queue_reply(&buffering_queue, 0);
    buffer_handle(handle_id, 0);
}

/* Backend to bufseek and bufadvance */
static int seek_handle(struct memory_handle *h, size_t newpos)
{
    if (newpos > h->filesize) {
        /* access beyond the end of the file */
        return ERR_INVALID_VALUE;
    }
    else if ((newpos < h->offset || h->offset + h->available <= newpos) &&
             (newpos < h->filesize || h->filerem > 0)) {
        /* access before or after buffered data and not to end of file or file
           is not buffered to the end-- a rebuffer is needed. */
        struct buf_message_data parm = { h->id, newpos };
        return queue_send(&buffering_queue, Q_REBUFFER_HANDLE,
                          (intptr_t)&parm);
    }
    else {
        h->ridx = ringbuf_add(h->data, newpos - h->offset);
    }

    return 0;
}

/* Set reading index in handle (relatively to the start of the file).
   Access before the available data will trigger a rebuffer.
   Return 0 for success and for failure:
     ERR_HANDLE_NOT_FOUND if the handle wasn't found
     ERR_INVALID_VALUE if the new requested position was beyond the end of
     the file
*/
int bufseek(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    return seek_handle(h, newpos);
}

/* Advance the reading index in a handle (relatively to its current position).
   Return 0 for success and for failure:
     ERR_HANDLE_NOT_FOUND if the handle wasn't found
     ERR_INVALID_VALUE if the new requested position was beyond the end of
     the file
*/
int bufadvance(int handle_id, off_t offset)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    size_t newpos = h->offset + ringbuf_sub(h->ridx, h->data) + offset;
    return seek_handle(h, newpos);
}

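/* Illustrative seek example (hypothetical values): skipping 4 KiB forward in
   a handle and then returning to the start of the file:

       bufadvance(id, 4096);   // relative to the current read position
       bufseek(id, 0);         // absolute, relative to the start of the file

   Either call may block in the buffering thread if the target position is
   not buffered yet (a Q_REBUFFER_HANDLE round trip). */
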
/* Get the read position from the start of the file
   Returns the offset from byte 0 of the file and for failure:
     ERR_HANDLE_NOT_FOUND if the handle wasn't found
*/
off_t bufftell(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;
    return h->offset + ringbuf_sub(h->ridx, h->data);
}

/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
 * actual amount of data available for reading. This function explicitly
 * does not check the validity of the input handle. It does do range checks
 * on size and returns a valid (and explicit) amount of data for reading */
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
                                          bool guardbuf_limit)
{
    struct memory_handle *h = find_handle(handle_id);
    size_t realsize;

    if (!h)
        return NULL;

    size_t avail = handle_size_available(h);

    if (avail == 0 && h->filerem == 0) {
        /* File is finished reading */
        *size = 0;
        return h;
    }

    realsize = *size;

    if (realsize == 0 || realsize > avail + h->filerem)
        realsize = avail + h->filerem;

    if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO
        && realsize > GUARD_BUFSIZE) {
        logf("data request > guardbuf");
        /* If more than the size of the guardbuf is requested and this is a
         * bufgetdata, limit to guard_bufsize over the end of the buffer */
        realsize = MIN(realsize, buffer_len - h->ridx + GUARD_BUFSIZE);
        /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
    }

    if (h->filerem > 0 && avail < realsize) {
        /* Data isn't ready. Request buffering */
        LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id);
        queue_send(&buffering_queue, Q_START_FILL, handle_id);
        /* Wait for the data to be ready */
        do
        {
            sleep(0);
            /* it is not safe for a non-buffering thread to sleep while
             * holding a handle */
            h = find_handle(handle_id);
            if (!h || h->signaled != 0)
                return NULL;
            avail = handle_size_available(h);
        }
        while (h->filerem > 0 && avail < realsize);
    }

    *size = MIN(realsize, avail);
    return h;
}


/* Note: It is safe for the thread responsible for handling the rebuffer
 * cleanup request to call bufread or bufgetdata only when the data will
 * be available-- not if it could be blocked waiting for it in prep_bufdata.
 * It should be apparent that if said thread is being forced to wait for
 * buffering but has not yet responded to the cleanup request, the space
 * can never be cleared to allow further reading of the file because it is
 * not listening to callbacks any longer. */

/* Copy data from the given handle to the dest buffer.
   Return the number of bytes copied or < 0 for failure (handle not found).
   The caller is blocked until the requested amount of data is available.
*/
ssize_t bufread(int handle_id, size_t size, void *dest)
{
    const struct memory_handle *h;
    size_t adjusted_size = size;

    h = prep_bufdata(handle_id, &adjusted_size, false);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + adjusted_size > buffer_len) {
        /* the data wraps around the end of the buffer */
        size_t read = buffer_len - h->ridx;
        memcpy(dest, &buffer[h->ridx], read);
        memcpy(dest+read, buffer, adjusted_size - read);
    } else {
        memcpy(dest, &buffer[h->ridx], adjusted_size);
    }

    return adjusted_size;
}

/* Update the "data" pointer to make the handle's data available to the caller.
   Return the length of the available linear data or < 0 for failure (handle
   not found).
   The caller is blocked until the requested amount of data is available.
   size is the amount of linear data requested. It can be 0 to get as
   much as possible.
   The guard buffer may be used to provide the requested size. This means it's
   unsafe to request more than the size of the guard buffer.
*/
ssize_t bufgetdata(int handle_id, size_t size, void **data)
{
    const struct memory_handle *h;
    size_t adjusted_size = size;

    h = prep_bufdata(handle_id, &adjusted_size, true);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + adjusted_size > buffer_len) {
        /* the data wraps around the end of the buffer :
           use the guard buffer to provide the requested amount of data. */
        size_t copy_n = h->ridx + adjusted_size - buffer_len;
        /* prep_bufdata ensures
           adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
           so copy_n <= GUARD_BUFSIZE */
        memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
    }

    if (data)
        *data = &buffer[h->ridx];

    return adjusted_size;
}

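/* Illustrative zero-copy read (hypothetical sizes): request 512 linear bytes;
   if they straddle the buffer end, the wrapped part is mirrored into the
   guard buffer so the returned pointer is always contiguous:

       void *p;
       ssize_t got = bufgetdata(id, 512, &p);
       if (got >= 512) {
           // parse 512 bytes at p, then release them:
           bufadvance(id, 512);
       }

   Never request more than GUARD_BUFSIZE this way. */
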
ssize_t bufgettail(int handle_id, size_t size, void **data)
{
    size_t tidx;

    const struct memory_handle *h;

    h = find_handle(handle_id);

    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->filerem)
        return ERR_HANDLE_NOT_DONE;

    /* We don't support tail requests of > guardbuf_size, for simplicity */
    if (size > GUARD_BUFSIZE)
        return ERR_INVALID_VALUE;

    tidx = ringbuf_sub(h->widx, size);

    if (tidx + size > buffer_len) {
        size_t copy_n = tidx + size - buffer_len;
        memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
    }

    *data = &buffer[tidx];
    return size;
}

ssize_t bufcuttail(int handle_id, size_t size)
{
    struct memory_handle *h;
    size_t adjusted_size = size;

    h = find_handle(handle_id);

    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->filerem)
        return ERR_HANDLE_NOT_DONE;

    if (h->available < adjusted_size)
        adjusted_size = h->available;

    h->available -= adjusted_size;
    h->filesize -= adjusted_size;
    h->widx = ringbuf_sub(h->widx, adjusted_size);
    if (h == cur_handle)
        buf_widx = h->widx;

    return adjusted_size;
}

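/* Illustrative use of the tail calls (hypothetical: stripping a 128-byte
   trailer once a handle has finished buffering):

       void *tail;
       if (bufgettail(id, 128, &tail) == 128) {
           // inspect the last 128 bytes at 'tail'...
           bufcuttail(id, 128);  // ...and drop them from the handle
       }

   Both calls fail with ERR_HANDLE_NOT_DONE while filerem is nonzero. */
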
/*
SECONDARY EXPORTED FUNCTIONS
============================

buf_handle_offset
buf_set_base_handle
buf_handle_data_type
buf_is_handle
buf_pin_handle
buf_signal_handle
buf_length
buf_used
buf_set_watermark
buf_get_watermark

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/

ssize_t buf_handle_offset(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;
    return h->offset;
}

void buf_set_base_handle(int handle_id)
{
    mutex_lock(&llist_mutex);
    base_handle_id = handle_id;
    mutex_unlock(&llist_mutex);
}

enum data_type buf_handle_data_type(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return TYPE_UNKNOWN;
    return h->type;
}

ssize_t buf_handle_remaining(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;
    return h->filerem;
}

bool buf_is_handle(int handle_id)
{
    return find_handle(handle_id) != NULL;
}

bool buf_pin_handle(int handle_id, bool pin)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return false;

    if (pin) {
        h->pinned++;
    } else if (h->pinned > 0) {
        h->pinned--;
    }

    return true;
}

bool buf_signal_handle(int handle_id, bool signal)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return false;

    h->signaled = signal ? 1 : 0;
    return true;
}

/* Return the size of the ringbuffer */
size_t buf_length(void)
{
    return buffer_len;
}

/* Return the amount of buffer space used */
size_t buf_used(void)
{
    return BUF_USED;
}

void buf_set_watermark(size_t bytes)
{
    conf_watermark = bytes;
}

size_t buf_get_watermark(void)
{
    return BUF_WATERMARK;
}

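/* Illustrative pinning (hypothetical): keep a handle from being moved around
   by shrink_handle() while holding a raw pointer into the buffer:

       buf_pin_handle(id, true);    // pinned while we use the pointer
       // ...use a pointer obtained from bufgetdata()...
       buf_pin_handle(id, false);   // unpin; pins nest like a refcount */
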
#ifdef HAVE_IO_PRIORITY
void buf_back_off_storage(bool back_off)
{
    int priority = back_off ?
        IO_PRIORITY_BACKGROUND : IO_PRIORITY_IMMEDIATE;
    thread_set_io_priority(buffering_thread_id, priority);
}
#endif

/** -- buffer thread helpers -- **/
static void shrink_buffer_inner(struct memory_handle *h)
{
    if (h == NULL)
        return;

    shrink_buffer_inner(h->next);

    shrink_handle(h);
}

static void shrink_buffer(void)
{
    logf("shrink_buffer()");
    shrink_buffer_inner(first_handle);
}

static void NORETURN_ATTR buffering_thread(void)
{
    bool filling = false;
    struct queue_event ev;
    struct buf_message_data *parm;

    while (true)
    {
        if (num_handles > 0) {
            if (!filling) {
                cancel_cpu_boost();
            }
            queue_wait_w_tmo(&buffering_queue, &ev, filling ? 1 : HZ/2);
        } else {
            filling = false;
            cancel_cpu_boost();
            queue_wait(&buffering_queue, &ev);
        }

        switch (ev.id)
        {
        case Q_START_FILL:
            LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
            shrink_buffer();
            queue_reply(&buffering_queue, 1);
            if (buffer_handle((int)ev.data, 0)) {
                filling = true;
            }
            else if (num_handles > 0 && conf_watermark > 0) {
                update_data_counters(NULL);
                if (data_counters.useful >= BUF_WATERMARK) {
                    send_event(BUFFER_EVENT_BUFFER_LOW, NULL);
                }
            }
            break;

        case Q_BUFFER_HANDLE:
            LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
            queue_reply(&buffering_queue, 1);
            buffer_handle((int)ev.data, 0);
            break;

        case Q_REBUFFER_HANDLE:
            parm = (struct buf_message_data *)ev.data;
            LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld",
                      parm->handle_id, parm->data);
            rebuffer_handle(parm->handle_id, parm->data);
            break;

        case Q_CLOSE_HANDLE:
            LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
            queue_reply(&buffering_queue, close_handle((int)ev.data));
            break;

        case Q_HANDLE_ADDED:
            LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
            /* A handle was added: the disk is spinning, so we can fill */
            filling = true;
            break;

        case SYS_TIMEOUT:
            LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
            break;
        }

        if (num_handles == 0 || !queue_empty(&buffering_queue))
            continue;

        update_data_counters(NULL);
#if 0
        /* TODO: This needs to be fixed to use the idle callback, disable it
         * for simplicity until it's done right */
#if MEMORYSIZE > 8
        /* If the disk is spinning, take advantage by filling the buffer */
        else if (storage_disk_is_active()) {
            if (num_handles > 0 && data_counters.useful <= high_watermark)
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);

            if (data_counters.remaining > 0 && BUF_USED <= high_watermark) {
                /* This is a new fill, shrink the buffer up first */
                if (!filling)
                    shrink_buffer();
                filling = fill_buffer();
                update_data_counters(NULL);
            }
        }
#endif
#endif

        if (filling) {
            filling = data_counters.remaining > 0 ? fill_buffer() : false;
        } else if (ev.id == SYS_TIMEOUT) {
            if (data_counters.useful < BUF_WATERMARK) {
                /* The buffer is low and we're idle, just watching the levels
                   - call the callbacks to get new data */
                send_event(BUFFER_EVENT_BUFFER_LOW, NULL);

                /* Continue anything else we haven't finished - it might
                   get booted off or stop early because the receiver hasn't
                   had a chance to clear anything yet */
                if (data_counters.remaining > 0) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}

void buffering_init(void)
{
    mutex_init(&llist_mutex);

    /* Thread should absolutely not respond to USB because if it waits first,
       then it cannot properly service the handles and leaks will happen -
       this is a worker thread and shouldn't need to care about any system
       notifications.

       Whoever is using buffering should be responsible enough to clear all
       the handles at the right time. */
    queue_init(&buffering_queue, false);
    buffering_thread_id = create_thread( buffering_thread, buffering_stack,
            sizeof(buffering_stack), CREATE_THREAD_FROZEN,
            buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
            IF_COP(, CPU));

    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
                            buffering_thread_id);
}

/* Initialise the buffering subsystem */
bool buffering_reset(char *buf, size_t buflen)
{
    /* Wraps of storage-aligned data must also be storage aligned,
       thus buf and buflen must be aligned to an integer multiple of
       the storage alignment */

    buflen -= GUARD_BUFSIZE;

    STORAGE_ALIGN_BUFFER(buf, buflen);

    if (!buf || !buflen)
        return false;

    buffer = buf;
    buffer_len = buflen;
    guard_buffer = buf + buflen;

    buf_widx = 0;
    buf_ridx = 0;

    first_handle = NULL;
    cur_handle = NULL;
    cached_handle = NULL;
    num_handles = 0;
    base_handle_id = -1;

    /* Set the high watermark as 75% full...or 25% empty :)
       This is the greatest fullness that will trigger low-buffer events
       no matter what the setting because high-bitrate files can have
       ludicrous margins that even exceed the buffer size - most common
       with a huge anti-skip buffer but even without that setting,
       staying constantly active in buffering is pointless */
    high_watermark = 3*buflen / 4;

    thread_thaw(buffering_thread_id);

    return true;
}

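/* Illustrative bring-up sequence (hypothetical buffer; in practice the audio
   buffer is handed over by the playback engine):

       static char audiobuf[512*1024];
       buffering_init();                              // once, at boot
       buffering_reset(audiobuf, sizeof(audiobuf));   // (re)claim the buffer

   buffering_reset() carves GUARD_BUFSIZE off the end for the guard buffer
   and storage-aligns what remains before use. */
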
void buffering_get_debugdata(struct buffering_debug *dbgdata)
{
    struct data_counters dc;
    update_data_counters(&dc);
    dbgdata->num_handles = num_handles;
    dbgdata->data_rem = dc.remaining;
    dbgdata->wasted_space = dc.wasted;
    dbgdata->buffered_data = dc.buffered;
    dbgdata->useful_data = dc.useful;
    dbgdata->watermark = BUF_WATERMARK;
}