Fix typo in comment
[maemo-rb.git] / apps / buffering.c
blob54c6c05baaa557b71fd2025d49b91d4f4621cf79
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2007 Nicolas Pennequin
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
22 #include "config.h"
23 #include <stdio.h>
24 #include <string.h>
25 #include <stdlib.h>
26 #include <ctype.h>
27 #include <inttypes.h>
28 #include "buffering.h"
30 #include "storage.h"
31 #include "system.h"
32 #include "thread.h"
33 #include "file.h"
34 #include "panic.h"
35 #include "lcd.h"
36 #include "font.h"
37 #include "button.h"
38 #include "kernel.h"
39 #include "tree.h"
40 #include "debug.h"
41 #include "settings.h"
42 #include "codecs.h"
43 #include "audio.h"
44 #include "mp3_playback.h"
45 #include "usb.h"
46 #include "screens.h"
47 #include "playlist.h"
48 #include "pcmbuf.h"
49 #include "appevents.h"
50 #include "metadata.h"
51 #ifdef HAVE_ALBUMART
52 #include "albumart.h"
53 #include "jpeg_load.h"
54 #include "bmp.h"
55 #include "playback.h"
56 #endif
58 #define GUARD_BUFSIZE (32*1024)
60 /* Define LOGF_ENABLE to enable logf output in this file */
61 /*#define LOGF_ENABLE*/
62 #include "logf.h"
64 /* macros to enable logf for queues
65 logging on SYS_TIMEOUT can be disabled */
66 #ifdef SIMULATOR
67 /* Define this for logf output of all queuing except SYS_TIMEOUT */
68 #define BUFFERING_LOGQUEUES
69 /* Define this to logf SYS_TIMEOUT messages */
70 /* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
71 #endif
73 #ifdef BUFFERING_LOGQUEUES
74 #define LOGFQUEUE logf
75 #else
76 #define LOGFQUEUE(...)
77 #endif
79 #ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
80 #define LOGFQUEUE_SYS_TIMEOUT logf
81 #else
82 #define LOGFQUEUE_SYS_TIMEOUT(...)
83 #endif
85 /* default point to start buffer refill */
86 #define BUFFERING_DEFAULT_WATERMARK (1024*128)
87 /* amount of data to read in one read() call */
88 #define BUFFERING_DEFAULT_FILECHUNK (1024*32)
90 #define BUF_HANDLE_MASK 0x7FFFFFFF
93 /* assert(sizeof(struct memory_handle)%4==0) */
/* One buffered entity (file data, metadata, codec, bitmap, ...) living in
   the ring buffer. Handles form a singly linked list ordered by their
   position in the buffer; see the linked-list management section below. */
94 struct memory_handle {
95 int id; /* A unique ID for the handle */
96 enum data_type type; /* Type of data buffered with this handle */
97 char path[MAX_PATH]; /* Path if data originated in a file */
98 int fd; /* File descriptor to path (-1 if closed) */
99 size_t data; /* Start index of the handle's data buffer */
100 volatile size_t ridx; /* Read pointer, relative to the main buffer */
101 size_t widx; /* Write pointer, relative to the main buffer */
102 size_t filesize; /* File total length */
103 size_t filerem; /* Remaining bytes of file NOT in buffer */
104 volatile size_t available; /* Available bytes to read from buffer */
105 size_t offset; /* Offset at which we started reading the file */
106 struct memory_handle *next; /* Next handle in the list (NULL if last) */
108 /* invariant: filesize == offset + available + filerem */
111 struct buf_message_data
113 int handle_id;
114 intptr_t data;
117 static char *buffer;
118 static char *guard_buffer;
120 static size_t buffer_len;
122 static volatile size_t buf_widx; /* current writing position */
123 static volatile size_t buf_ridx; /* current reading position */
124 /* buf_*idx are values relative to the buffer, not real pointers. */
126 /* Configuration */
127 static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
128 #if MEMORYSIZE > 8
129 static size_t high_watermark = 0; /* High watermark for rebuffer */
130 #endif
132 /* current memory handle in the linked list. NULL when the list is empty. */
133 static struct memory_handle *cur_handle;
134 /* first memory handle in the linked list. NULL when the list is empty. */
135 static struct memory_handle *first_handle;
137 static int num_handles; /* number of handles in the list */
139 static int base_handle_id;
141 /* Main lock for adding / removing handles */
142 static struct mutex llist_mutex SHAREDBSS_ATTR;
144 /* Handle cache (makes find_handle faster).
145 This is global so that move_handle and rm_handle can invalidate it. */
146 static struct memory_handle *cached_handle = NULL;
148 static struct data_counters
150 size_t remaining; /* Amount of data needing to be buffered */
151 size_t wasted; /* Amount of space available for freeing */
152 size_t buffered; /* Amount of data currently in the buffer */
153 size_t useful; /* Amount of data still useful to the user */
154 } data_counters;
157 /* Messages available to communicate with the buffering thread */
158 enum
160 Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
161 used in a low buffer situation. */
162 Q_REBUFFER_HANDLE, /* Request reset and rebuffering of a handle at a new
163 file starting position. */
164 Q_CLOSE_HANDLE, /* Request closing a handle */
165 Q_BASE_HANDLE, /* Set the reference handle for buf_useful_data */
167 /* Configuration: */
168 Q_START_FILL, /* Request that the buffering thread initiate a buffer
169 fill at its earliest convenience */
170 Q_HANDLE_ADDED, /* Inform the buffering thread that a handle was added,
171 (which means the disk is spinning) */
174 /* Buffering thread */
175 static void buffering_thread(void);
176 static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
177 static const char buffering_thread_name[] = "buffering";
178 static unsigned int buffering_thread_id = 0;
179 static struct event_queue buffering_queue SHAREDBSS_ATTR;
180 static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR;
184 /* Ring buffer helper functions */
186 static inline uintptr_t ringbuf_offset(const void *ptr)
188 return (uintptr_t)(ptr - (void*)buffer);
191 /* Buffer pointer (p) plus value (v), wrapped if necessary */
192 static inline uintptr_t ringbuf_add(uintptr_t p, size_t v)
194 uintptr_t res = p + v;
195 if (res >= buffer_len)
196 res -= buffer_len; /* wrap if necssary */
197 return res;
201 /* Buffer pointer (p) minus value (v), wrapped if necessary */
202 static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v)
204 uintptr_t res = p;
205 if (p < v)
206 res += buffer_len; /* wrap */
208 return res - v;
212 /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
213 static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2)
215 ssize_t res = p1 + v - p2;
216 if (p1 >= p2) /* wrap if necessary */
217 res -= buffer_len;
219 return res;
222 /* Bytes available in the buffer */
223 #define BUF_USED ringbuf_sub(buf_widx, buf_ridx)
226 LINKED LIST MANAGEMENT
227 ======================
229 add_handle : Add a handle to the list
230 rm_handle : Remove a handle from the list
231 find_handle : Get a handle pointer from an ID
232 move_handle : Move a handle in the buffer (with or without its data)
234 These functions only handle the linked list structure. They don't touch the
235 contents of the struct memory_handle headers. They also change the buf_*idx
236 pointers when necessary and manage the handle IDs.
238 The first and current (== last) handle are kept track of.
239 A new handle is added at buf_widx and becomes the current one.
240 buf_widx always points to the current writing position for the current handle
241 buf_ridx always points to the location of the first handle.
242 buf_ridx == buf_widx means the buffer is empty.
246 /* Add a new handle to the linked list and return it. It will have become the
247 new current handle.
248 data_size must contain the size of what will be in the handle.
249 can_wrap tells us whether this type of data may wrap on buffer
250 alloc_all tells us if we must immediately be able to allocate data_size
251 returns a valid memory handle if all conditions for allocation are met.
252 NULL if there memory_handle itself cannot be allocated or if the
253 data_size cannot be allocated and alloc_all is set. */
254 static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
255 bool alloc_all)
/* NOTE(review): callers (bufopen, bufalloc) take llist_mutex before calling
   this, so the handle list, buf_widx and buf_ridx are only touched under
   that lock. */
257 /* gives each handle a unique id */
258 static int cur_handle_id = 0;
259 size_t shift;
260 size_t widx, new_widx;
261 size_t len;
262 ssize_t overlap;
264 if (num_handles >= BUF_MAX_HANDLES)
265 return NULL;
267 widx = buf_widx;
269 if (cur_handle && cur_handle->filerem > 0) {
270 /* the current handle hasn't finished buffering. We can only add
271 a new one if there is already enough free space to finish
272 the buffering. */
273 size_t req = cur_handle->filerem;
274 if (ringbuf_add_cross(cur_handle->widx, req, buf_ridx) >= 0) {
275 /* Not enough space to finish allocation */
276 return NULL;
277 } else {
278 /* Allocate the remainder of the space for the current handle */
279 widx = ringbuf_add(cur_handle->widx, cur_handle->filerem);
283 /* align to 4 bytes up always leaving a gap */
284 new_widx = ringbuf_add(widx, 4) & ~3;
286 len = data_size + sizeof(struct memory_handle);
288 /* First, will the handle wrap? */
289 /* If the handle would wrap, move to the beginning of the buffer,
290 * or if the data must not but would wrap, move it to the beginning */
291 if (new_widx + sizeof(struct memory_handle) > buffer_len ||
292 (!can_wrap && new_widx + len > buffer_len)) {
293 new_widx = 0;
296 /* How far we shifted the new_widx to align things, must be < buffer_len */
297 shift = ringbuf_sub(new_widx, widx);
299 /* How much space are we short in the actual ring buffer? */
300 overlap = ringbuf_add_cross(widx, shift + len, buf_ridx);
301 if (overlap >= 0 && (alloc_all || (size_t)overlap >= data_size)) {
302 /* Not enough space for required allocations */
303 return NULL;
306 /* There is enough space for the required data, advance the buf_widx and
307 * initialize the struct */
308 buf_widx = new_widx;
310 struct memory_handle *new_handle =
311 (struct memory_handle *)(&buffer[buf_widx]);
313 /* Prevent buffering thread from looking at it */
314 new_handle->filerem = 0;
316 /* only advance the buffer write index of the size of the struct */
317 buf_widx = ringbuf_add(buf_widx, sizeof(struct memory_handle));
319 new_handle->id = cur_handle_id;
320 /* Wrap signed int is safe and 0 doesn't happen */
321 cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK;
322 new_handle->next = NULL;
323 num_handles++;
325 if (!first_handle)
326 /* the new handle is the first one */
327 first_handle = new_handle;
329 if (cur_handle)
330 cur_handle->next = new_handle;
332 cur_handle = new_handle;
334 return new_handle;
337 /* Delete a given memory handle from the linked list
338 and return true for success. Nothing is actually erased from memory. */
/* Unlinks h from the handle list and updates buf_ridx/buf_widx as needed;
   the handle's bytes are left in place in the buffer. Returns false only
   if h is non-NULL but not present in the list. Caller must hold
   llist_mutex (see close_handle). */
339 static bool rm_handle(const struct memory_handle *h)
341 if (h == NULL)
342 return true;
344 if (h == first_handle) {
345 first_handle = h->next;
346 if (h == cur_handle) {
347 /* h was the first and last handle: the buffer is now empty */
348 cur_handle = NULL;
349 buf_ridx = buf_widx = 0;
350 } else {
351 /* update buf_ridx to point to the new first handle */
352 buf_ridx = (size_t)ringbuf_offset(first_handle);
354 } else {
355 struct memory_handle *m = first_handle;
356 /* Find the previous handle */
357 while (m && m->next != h) {
358 m = m->next;
360 if (m && m->next == h) {
361 m->next = h->next;
362 if (h == cur_handle) {
363 /* h was the last handle: writing resumes at its predecessor */
364 cur_handle = m;
365 buf_widx = cur_handle->widx;
366 } else {
367 return false;
371 /* Invalidate the cache to prevent it from keeping the old location of h */
372 if (h == cached_handle)
373 cached_handle = NULL;
375 num_handles--;
376 return true;
379 /* Return a pointer to the memory handle of given ID.
380 NULL if the handle wasn't found */
381 static struct memory_handle *find_handle(int handle_id)
383 if (handle_id < 0)
384 return NULL;
386 /* simple caching because most of the time the requested handle
387 will either be the same as the last, or the one after the last */
388 if (cached_handle)
390 if (cached_handle->id == handle_id) {
391 return cached_handle;
392 } else if (cached_handle->next &&
393 (cached_handle->next->id == handle_id)) {
394 cached_handle = cached_handle->next;
395 return cached_handle;
399 struct memory_handle *m = first_handle;
400 while (m && m->id != handle_id) {
401 m = m->next;
403 /* This condition can only be reached with !m or m->id == handle_id */
404 if (m)
405 cached_handle = m;
407 return m;
410 /* Move a memory handle and data_size of its data delta bytes along the buffer.
411 delta maximum bytes available to move the handle. If the move is performed
412 it is set to the actual distance moved.
413 data_size is the amount of data to move along with the struct.
414 returns true if the move is successful and false if the handle is NULL,
415 the move would be less than the size of a memory_handle after
416 correcting for wraps or if the handle is not found in the linked
417 list for adjustment. This function has no side effects if false
418 is returned. */
/* NOTE(review): moves are always "clockwise" (toward higher buffer offsets,
   with wrap); the permutation diagrams below enumerate the four legal
   src/dest wrap combinations. No side effects when false is returned. */
419 static bool move_handle(struct memory_handle **h, size_t *delta,
420 size_t data_size, bool can_wrap)
422 struct memory_handle *dest;
423 const struct memory_handle *src;
424 size_t final_delta = *delta, size_to_move;
425 uintptr_t oldpos, newpos;
426 intptr_t overlap, overlap_old;
428 if (h == NULL || (src = *h) == NULL)
429 return false;
431 size_to_move = sizeof(struct memory_handle) + data_size;
433 /* Align to four bytes, down */
434 final_delta &= ~3;
435 if (final_delta < sizeof(struct memory_handle)) {
436 /* It's not legal to move less than the size of the struct */
437 return false;
440 oldpos = ringbuf_offset(src);
441 newpos = ringbuf_add(oldpos, final_delta);
442 overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len);
443 overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len);
445 if (overlap > 0) {
446 /* Some part of the struct + data would wrap, maybe ok */
447 ssize_t correction = 0;
448 /* If the overlap lands inside the memory_handle */
449 if (!can_wrap) {
450 /* Otherwise the overlap falls in the data area and must all be
451 * backed out. This may become conditional if ever we move
452 * data that is allowed to wrap (ie audio) */
453 correction = overlap;
454 } else if ((uintptr_t)overlap > data_size) {
455 /* Correct the position and real delta to prevent the struct from
456 * wrapping, this guarantees an aligned delta if the struct size is
457 * aligned and the buffer is aligned */
458 correction = overlap - data_size;
460 if (correction) {
461 /* Align correction to four bytes up */
462 correction = (correction + 3) & ~3;
463 if (final_delta < correction + sizeof(struct memory_handle)) {
464 /* Delta cannot end up less than the size of the struct */
465 return false;
467 newpos -= correction;
468 overlap -= correction;/* Used below to know how to split the data */
469 final_delta -= correction;
473 dest = (struct memory_handle *)(&buffer[newpos]);
475 if (src == first_handle) {
476 first_handle = dest;
477 buf_ridx = newpos;
478 } else {
479 struct memory_handle *m = first_handle;
480 while (m && m->next != src) {
481 m = m->next;
483 if (m && m->next == src) {
484 m->next = dest;
485 } else {
486 return false;
490 /* Update the cache to prevent it from keeping the old location of h */
491 if (src == cached_handle)
492 cached_handle = dest;
494 /* the cur_handle pointer might need updating */
495 if (src == cur_handle)
496 cur_handle = dest;
498 /* x = handle(s) following this one...
499 * ...if last handle, unmoveable if metadata, only shrinkable if audio.
500 * In other words, no legal move can be made that would have the src head
501 * and dest tail of the data overlap itself. These facts reduce the
502 * problem to four essential permutations.
504 * movement: always "clockwise" >>>>
506 * (src nowrap, dest nowrap)
507 * |0123 x |
508 * | 0123x | etc...
509 * move: "0123"
511 * (src nowrap, dest wrap)
512 * | x0123 |
513 * |23x 01|
514 * move: "23", "01"
516 * (src wrap, dest nowrap)
517 * |23 x01|
518 * | 0123x |
519 * move: "23", "01"
521 * (src wrap, dest wrap)
522 * |23 x 01|
523 * |123x 0|
524 * move: "23", "1", "0"
526 if (overlap_old > 0) {
527 /* Move over already wrapped data by the final delta */
528 memmove(&buffer[final_delta], buffer, overlap_old);
529 if (overlap <= 0)
530 size_to_move -= overlap_old;
533 if (overlap > 0) {
534 /* Move data that now wraps to the beginning */
535 size_to_move -= overlap;
536 memmove(buffer, SKIPBYTES(src, size_to_move),
537 overlap_old > 0 ? final_delta : (size_t)overlap);
540 /* Move leading fragment containing handle struct */
541 memmove(dest, src, size_to_move);
543 /* Update the caller with the new location of h and the distance moved */
544 *h = dest;
545 *delta = final_delta;
546 return true;
551 BUFFER SPACE MANAGEMENT
552 =======================
554 update_data_counters: Updates the values in data_counters
555 buffer_is_low : Returns true if the amount of useful data in the buffer is low
556 buffer_handle : Buffer data for a handle
557 rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
558 shrink_handle : Free buffer space by moving a handle
559 fill_buffer : Call buffer_handle for all handles that have data to buffer
561 These functions are used by the buffering thread to manage buffer space.
564 static void update_data_counters(struct data_counters *dc)
566 size_t buffered = 0;
567 size_t wasted = 0;
568 size_t remaining = 0;
569 size_t useful = 0;
571 struct memory_handle *m;
572 bool is_useful;
574 if (dc == NULL)
575 dc = &data_counters;
577 mutex_lock(&llist_mutex);
579 m = find_handle(base_handle_id);
580 is_useful = m == NULL;
582 m = first_handle;
583 while (m) {
584 buffered += m->available;
585 wasted += ringbuf_sub(m->ridx, m->data);
586 remaining += m->filerem;
588 if (m->id == base_handle_id)
589 is_useful = true;
591 if (is_useful)
592 useful += ringbuf_sub(m->widx, m->ridx);
594 m = m->next;
597 mutex_unlock(&llist_mutex);
599 dc->buffered = buffered;
600 dc->wasted = wasted;
601 dc->remaining = remaining;
602 dc->useful = useful;
605 static inline bool buffer_is_low(void)
/* Refresh the global data counters, then report whether the still-useful
   data has fallen below half of the configured refill watermark. */
607 update_data_counters(NULL);
608 return data_counters.useful < (conf_watermark / 2);
611 /* Q_BUFFER_HANDLE event and buffer data for the given handle.
612 Return whether or not the buffering should continue explicitly. */
613 static bool buffer_handle(int handle_id, size_t to_buffer)
615 logf("buffer_handle(%d)", handle_id);
616 struct memory_handle *h = find_handle(handle_id);
617 bool stop = false;
619 if (!h)
620 return true;
622 if (h->filerem == 0) {
623 /* nothing left to buffer */
624 return true;
627 if (h->fd < 0) { /* file closed, reopen */
628 if (*h->path)
629 h->fd = open(h->path, O_RDONLY);
631 if (h->fd < 0)
633 /* could not open the file, truncate it where it is */
634 h->filesize -= h->filerem;
635 h->filerem = 0;
636 return true;
639 if (h->offset)
640 lseek(h->fd, h->offset, SEEK_SET);
643 trigger_cpu_boost();
645 if (h->type == TYPE_ID3) {
646 if (!get_metadata((struct mp3entry *)(buffer + h->data),
647 h->fd, h->path)) {
648 /* metadata parsing failed: clear the buffer. */
649 memset(buffer + h->data, 0, sizeof(struct mp3entry));
651 close(h->fd);
652 h->fd = -1;
653 h->filerem = 0;
654 h->available = sizeof(struct mp3entry);
655 h->widx += sizeof(struct mp3entry);
656 send_event(BUFFER_EVENT_FINISHED, &handle_id);
657 return true;
660 while (h->filerem > 0 && !stop)
662 /* max amount to copy */
663 ssize_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
664 buffer_len - h->widx);
665 uintptr_t offset = h->next ? ringbuf_offset(h->next) : buf_ridx;
666 ssize_t overlap = ringbuf_add_cross(h->widx, copy_n, offset) + 1;
668 if (overlap > 0) {
669 /* read only up to available space and stop if it would overwrite
670 or be on top of the reading position or the next handle */
671 stop = true;
672 copy_n -= overlap;
675 if (copy_n <= 0)
676 return false; /* no space for read */
678 /* rc is the actual amount read */
679 int rc = read(h->fd, &buffer[h->widx], copy_n);
681 if (rc < 0) {
682 /* Some kind of filesystem error, maybe recoverable if not codec */
683 if (h->type == TYPE_CODEC) {
684 logf("Partial codec");
685 break;
688 DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
689 h->filesize -= h->filerem;
690 h->filerem = 0;
691 break;
694 /* Advance buffer */
695 h->widx = ringbuf_add(h->widx, rc);
696 if (h == cur_handle)
697 buf_widx = h->widx;
698 h->available += rc;
699 h->filerem -= rc;
701 /* If this is a large file, see if we need to break or give the codec
702 * more time */
703 if (h->type == TYPE_PACKET_AUDIO &&
704 pcmbuf_is_lowdata() && !buffer_is_low()) {
705 sleep(1);
706 } else {
707 yield();
710 if (to_buffer == 0) {
711 /* Normal buffering - check queue */
712 if(!queue_empty(&buffering_queue))
713 break;
714 } else {
715 if (to_buffer <= (size_t)rc)
716 break; /* Done */
717 to_buffer -= rc;
721 if (h->filerem == 0) {
722 /* finished buffering the file */
723 close(h->fd);
724 h->fd = -1;
725 send_event(BUFFER_EVENT_FINISHED, &handle_id);
728 return !stop;
731 /* Close the specified handle id and free its allocation. */
732 static bool close_handle(int handle_id)
734 bool retval = true;
735 struct memory_handle *h;
737 mutex_lock(&llist_mutex);
738 h = find_handle(handle_id);
740 /* If the handle is not found, it is closed */
741 if (h) {
742 if (h->fd >= 0) {
743 close(h->fd);
744 h->fd = -1;
747 /* rm_handle returns true unless the handle somehow persists after
748 exit */
749 retval = rm_handle(h);
752 mutex_unlock(&llist_mutex);
753 return retval;
756 /* Free buffer space by moving the handle struct right before the useful
757 part of its data buffer or by moving all the data. */
/* Reclaim buffer space for h: metadata-style handles are moved wholesale
   up against the next handle; streaming handles only have their struct
   moved to just before the still-unread part of their data. Relies on
   move_handle() leaving everything untouched when it returns false. */
758 static void shrink_handle(struct memory_handle *h)
760 size_t delta;
762 if (!h)
763 return;
765 if (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
766 h->type == TYPE_BITMAP || h->type == TYPE_CODEC ||
767 h->type == TYPE_ATOMIC_AUDIO)
769 /* metadata handle: we can move all of it */
770 if (!h->next || h->filerem != 0)
771 return; /* Last handle or not finished loading */
773 uintptr_t handle_distance =
774 ringbuf_sub(ringbuf_offset(h->next), h->data);
775 delta = handle_distance - h->available;
777 /* The value of delta might change for alignment reasons */
778 if (!move_handle(&h, &delta, h->available, h->type==TYPE_CODEC))
779 return;
781 size_t olddata = h->data;
782 h->data = ringbuf_add(h->data, delta);
783 h->ridx = ringbuf_add(h->ridx, delta);
784 h->widx = ringbuf_add(h->widx, delta);
786 if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
787 /* when moving an mp3entry we need to readjust its pointers. */
788 adjust_mp3entry((struct mp3entry *)&buffer[h->data],
789 (void *)&buffer[h->data],
790 (const void *)&buffer[olddata]);
791 } else if (h->type == TYPE_BITMAP) {
792 /* adjust the bitmap's pointer */
793 struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
794 bmp->data = &buffer[h->data + sizeof(struct bitmap)];
796 } else {
797 /* only move the handle struct */
/* delta = bytes already consumed between data start and read pointer */
798 delta = ringbuf_sub(h->ridx, h->data);
799 if (!move_handle(&h, &delta, 0, true))
800 return;
802 h->data = ringbuf_add(h->data, delta);
803 h->available -= delta;
804 h->offset += delta;
808 /* Fill the buffer by buffering as much data as possible for handles that still
809 have data left to buffer
810 Return whether or not to continue filling after this */
811 static bool fill_buffer(void)
813 logf("fill_buffer()");
814 struct memory_handle *m = first_handle;
/* Shrink the first handle first to free as much space as possible */
816 shrink_handle(m);
/* Buffer each handle in list order; bail out early when a queue event
   arrives or buffer_handle() reports that space ran out (m set to NULL) */
818 while (queue_empty(&buffering_queue) && m) {
819 if (m->filerem > 0) {
820 if (!buffer_handle(m->id, 0)) {
821 m = NULL;
822 break;
825 m = m->next;
828 if (m) {
829 return true;
830 } else {
831 /* only spin the disk down if the filling wasn't interrupted by an
832 event arriving in the queue. */
833 storage_sleep();
834 return false;
838 #ifdef HAVE_ALBUMART
839 /* Given a file descriptor to a bitmap file, write the bitmap data to the
840 buffer, with a struct bitmap and the actual data immediately following.
841 Return value is the total size (struct + data). */
842 static int load_image(int fd, const char *path,
843 struct bufopen_bitmap_data *data)
845 int rc;
846 struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx];
847 struct dim *dim = data->dim;
848 struct mp3_albumart *aa = data->embedded_albumart;
850 /* get the desired image size */
851 bmp->width = dim->width, bmp->height = dim->height;
852 /* FIXME: alignment may be needed for the data buffer. */
853 bmp->data = &buffer[buf_widx + sizeof(struct bitmap)];
854 #ifndef HAVE_JPEG
855 (void) path;
856 #endif
857 #if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
858 bmp->maskdata = NULL;
859 #endif
/* Space left for decoded pixel data; NOTE(review): the local name `free`
   shadows stdlib free() within this function. */
861 int free = (int)MIN(buffer_len - BUF_USED, buffer_len - buf_widx)
862 - sizeof(struct bitmap);
864 #ifdef HAVE_JPEG
/* Embedded album art: decode just the JPEG region inside the audio file */
865 if (aa != NULL) {
866 lseek(fd, aa->pos, SEEK_SET);
867 rc = clip_jpeg_fd(fd, aa->size, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
868 FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
/* NOTE(review): this extension check reads path[strlen(path)-4]; it
   assumes path is at least 4 characters long — verify callers. */
870 else if (strcmp(path + strlen(path) - 4, ".bmp"))
871 rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
872 FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
873 else
874 #endif
875 rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
876 FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
877 return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
879 #endif
883 MAIN BUFFERING API CALLS
884 ========================
886 bufopen : Request the opening of a new handle for a file
887 bufalloc : Open a new handle for data other than a file.
888 bufclose : Close an open handle
889 bufseek : Set the read pointer in a handle
890 bufadvance : Move the read pointer in a handle
891 bufread : Copy data from a handle into a given buffer
892 bufgetdata : Give a pointer to the handle's data
894 These functions are exported, to allow interaction with the buffer.
895 They take care of the content of the structs, and rely on the linked list
896 management functions for all the actual handle management work.
900 /* Reserve space in the buffer for a file.
901 filename: name of the file to open
902 offset: offset at which to start buffering the file, useful when the first
903 offset bytes of the file aren't needed.
904 type: one of the data types supported (audio, image, cuesheet, others
905 * user_data: user data possibly passed in subcalls specific to a
906 data_type (only used for image (albumart) buffering so far )
907 return value: <0 if the file cannot be opened, or one file already
908 queued to be opened, otherwise the handle for the file in the buffer
910 int bufopen(const char *file, size_t offset, enum data_type type,
911 void *user_data)
913 #ifndef HAVE_ALBUMART
914 /* currently only used for aa loading */
915 (void)user_data;
916 #endif
917 int handle_id = ERR_BUFFER_FULL;
919 /* No buffer refs until after the mutex_lock call! */
921 if (type == TYPE_ID3) {
922 /* ID3 case: allocate space, init the handle and return. */
923 mutex_lock(&llist_mutex);
925 struct memory_handle *h =
926 add_handle(sizeof(struct mp3entry), false, true);
928 if (h) {
929 handle_id = h->id;
930 h->fd = -1;
931 h->filesize = sizeof(struct mp3entry);
932 h->offset = 0;
933 h->data = buf_widx;
934 h->ridx = buf_widx;
935 h->widx = buf_widx;
936 h->available = 0;
937 h->type = type;
938 strlcpy(h->path, file, MAX_PATH);
940 buf_widx += sizeof(struct mp3entry); /* safe because the handle
941 can't wrap */
/* Setting filerem last makes the handle visible to the buffering thread */
942 h->filerem = sizeof(struct mp3entry);
944 /* Inform the buffering thread that we added a handle */
945 LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
946 queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
949 mutex_unlock(&llist_mutex);
950 return handle_id;
952 #ifdef APPLICATION
953 /* loading code from memory is not supported in application builds */
954 else if (type == TYPE_CODEC)
955 return ERR_UNSUPPORTED_TYPE;
956 #endif
957 /* Other cases: there is a little more work. */
958 int fd = open(file, O_RDONLY);
959 if (fd < 0)
960 return ERR_FILE_ERROR;
962 size_t size = 0;
963 #ifdef HAVE_ALBUMART
964 if (type == TYPE_BITMAP) {
965 /* if albumart is embedded, the complete file is not buffered,
966 * but only the jpeg part; filesize() would be wrong */
967 struct bufopen_bitmap_data *aa = (struct bufopen_bitmap_data*)user_data;
968 if (aa->embedded_albumart)
969 size = aa->embedded_albumart->size;
971 #endif
972 if (size == 0)
973 size = filesize(fd);
974 bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC;
/* An out-of-range start offset falls back to buffering from the start */
976 size_t adjusted_offset = offset;
977 if (adjusted_offset > size)
978 adjusted_offset = 0;
980 /* Reserve extra space because alignment can move data forward */
981 size_t padded_size = STORAGE_PAD(size-adjusted_offset);
983 mutex_lock(&llist_mutex);
985 struct memory_handle *h = add_handle(padded_size, can_wrap, false);
986 if (!h) {
987 DEBUGF("%s(): failed to add handle\n", __func__);
988 mutex_unlock(&llist_mutex);
989 close(fd);
990 return ERR_BUFFER_FULL;
993 handle_id = h->id;
994 strlcpy(h->path, file, MAX_PATH);
995 h->offset = adjusted_offset;
997 #ifdef STORAGE_WANTS_ALIGN
998 /* Don't bother to storage align bitmaps because they are not
999 * loaded directly into the buffer.
1001 if (type != TYPE_BITMAP) {
1002 /* Align to desired storage alignment */
1003 size_t alignment_pad = STORAGE_OVERLAP(adjusted_offset -
1004 (size_t)(&buffer[buf_widx]));
1005 buf_widx = ringbuf_add(buf_widx, alignment_pad);
1007 #endif /* STORAGE_WANTS_ALIGN */
1009 h->fd = -1;
1010 h->data = buf_widx;
1011 h->ridx = buf_widx;
1012 h->widx = buf_widx;
1013 h->available = 0;
1014 h->type = type;
1016 #ifdef HAVE_ALBUMART
1017 if (type == TYPE_BITMAP) {
1018 /* Bitmap file: we load the data instead of the file */
1019 int rc;
1020 rc = load_image(fd, file, (struct bufopen_bitmap_data*)user_data);
1021 if (rc <= 0) {
1022 rm_handle(h);
1023 handle_id = ERR_FILE_ERROR;
1024 } else {
1025 h->filesize = rc;
1026 h->available = rc;
1027 h->widx = buf_widx + rc; /* safe because the data doesn't wrap */
1028 buf_widx += rc; /* safe too */
1031 else
1032 #endif
/* Cuesheets keep their fd open so they can be buffered immediately */
1034 if (type == TYPE_CUESHEET)
1035 h->fd = fd;
1037 h->filesize = size;
1038 h->available = 0;
1039 h->widx = buf_widx;
1040 h->filerem = size - adjusted_offset;
1043 mutex_unlock(&llist_mutex);
1045 if (type == TYPE_CUESHEET) {
1046 /* Immediately start buffering those */
1047 LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
1048 queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
1049 } else {
1050 /* Other types will get buffered in the course of normal operations */
1051 close(fd);
1053 if (handle_id >= 0) {
1054 /* Inform the buffering thread that we added a handle */
1055 LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
1056 queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
1060 logf("bufopen: new hdl %d", handle_id);
1061 return handle_id;
1064 /* Open a new handle from data that needs to be copied from memory.
1065 src is the source buffer from which to copy data. It can be NULL to simply
1066 reserve buffer space.
1067 size is the requested size. The call will only be successful if the
1068 requested amount of data can entirely fit in the buffer without wrapping.
1069 Return value is the handle id for success or <0 for failure.
1071 int bufalloc(const void *src, size_t size, enum data_type type)
1073 int handle_id = ERR_BUFFER_FULL;
1075 mutex_lock(&llist_mutex);
1077 struct memory_handle *h = add_handle(size, false, true);
1079 if (h) {
1080 handle_id = h->id;
1082 if (src) {
1083 if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
1084 /* specially take care of struct mp3entry */
1085 copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
1086 (const struct mp3entry *)src);
1087 } else {
1088 memcpy(&buffer[buf_widx], src, size);
1092 h->fd = -1;
1093 *h->path = 0;
1094 h->filesize = size;
1095 h->offset = 0;
1096 h->ridx = buf_widx;
1097 h->widx = buf_widx + size; /* safe because the data doesn't wrap */
1098 h->data = buf_widx;
1099 h->available = size;
1100 h->type = type;
1102 buf_widx += size; /* safe too */
1105 mutex_unlock(&llist_mutex);
1107 logf("bufalloc: new hdl %d", handle_id);
1108 return handle_id;
1111 /* Close the handle. Return true for success and false for failure */
1112 bool bufclose(int handle_id)
1114 logf("bufclose(%d)", handle_id);
1116 LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
1117 return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
1120 /* Backend to bufseek and bufadvance. Call only in response to
1121 Q_REBUFFER_HANDLE! */
1122 static void rebuffer_handle(int handle_id, size_t newpos)
1124 struct memory_handle *h = find_handle(handle_id);
1126 if (!h) {
1127 queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND);
1128 return;
1131 /* When seeking forward off of the buffer, if it is a short seek attempt to
1132 avoid rebuffering the whole track, just read enough to satisfy */
1133 if (newpos > h->offset &&
1134 newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK) {
1136 size_t amount = newpos - h->offset;
1137 h->ridx = ringbuf_add(h->data, amount);
1139 if (buffer_handle(handle_id, amount + 1)) {
1140 queue_reply(&buffering_queue, 0);
1141 buffer_handle(handle_id, 0); /* Ok, try the rest */
1142 return;
1144 /* Data collision - must reset */
/* Full rebuffer: discard everything buffered for the handle and restart
   reading the file at newpos. */
1147 /* Reset the handle to its new position */
1148 h->offset = newpos;
1150 size_t next = h->next ? ringbuf_offset(h->next) : buf_ridx;
1152 #ifdef STORAGE_WANTS_ALIGN
1153 /* Strip alignment padding then redo */
1154 size_t new_index = ringbuf_add(ringbuf_offset(h), sizeof (*h));
1156 /* Align to desired storage alignment if space permits - handle could
1157 have been shrunken too close to the following one after a previous
1158 rebuffer. */
1159 size_t alignment_pad =
1160 STORAGE_OVERLAP(h->offset - (size_t)(&buffer[new_index]));
1162 if (ringbuf_add_cross(new_index, alignment_pad, next) >= 0)
1163 alignment_pad = 0; /* Forego storage alignment this time */
1165 new_index = ringbuf_add(new_index, alignment_pad);
1166 #else
1167 /* Just clear the data buffer */
1168 size_t new_index = h->data;
1169 #endif /* STORAGE_WANTS_ALIGN */
/* All buffered data is dropped; read, write and data pointers coincide. */
1171 h->ridx = h->widx = h->data = new_index;
1173 if (h == cur_handle)
1174 buf_widx = new_index;
1176 h->available = 0;
1177 h->filerem = h->filesize - h->offset;
1179 if (h->fd >= 0)
1180 lseek(h->fd, h->offset, SEEK_SET);
1182 if (h->next && ringbuf_sub(next, h->data) <= h->filesize - newpos) {
1183 /* There isn't enough space to rebuffer all of the track from its new
1184 offset, so we ask the user to free some */
1185 DEBUGF("%s(): space is needed\n", __func__);
1186 int hid = handle_id;
1187 send_event(BUFFER_EVENT_REBUFFER, &hid);
1190 /* Now we do the rebuffer */
1191 queue_reply(&buffering_queue, 0);
1192 buffer_handle(handle_id, 0);
1195 /* Backend to bufseek and bufadvance */
1196 static int seek_handle(struct memory_handle *h, size_t newpos)
1198 if (newpos > h->filesize) {
1199 /* access beyond the end of the file */
1200 return ERR_INVALID_VALUE;
1202 else if ((newpos < h->offset || h->offset + h->available <= newpos) &&
1203 (newpos < h->filesize || h->filerem > 0)) {
1204 /* access before or after buffered data and not to end of file or file
1205 is not buffered to the end-- a rebuffer is needed. */
1206 struct buf_message_data parm = { h->id, newpos };
1207 return queue_send(&buffering_queue, Q_REBUFFER_HANDLE,
1208 (intptr_t)&parm);
1210 else {
1211 h->ridx = ringbuf_add(h->data, newpos - h->offset);
1214 return 0;
1217 /* Set reading index in handle (relatively to the start of the file).
1218 Access before the available data will trigger a rebuffer.
1219 Return 0 for success and < 0 for failure:
1220 -1 if the handle wasn't found
1221 -2 if the new requested position was beyond the end of the file
1223 int bufseek(int handle_id, size_t newpos)
1225 struct memory_handle *h = find_handle(handle_id);
1226 if (!h)
1227 return ERR_HANDLE_NOT_FOUND;
1229 return seek_handle(h, newpos);
1232 /* Advance the reading index in a handle (relatively to its current position).
1233 Return 0 for success and < 0 for failure */
1234 int bufadvance(int handle_id, off_t offset)
1236 struct memory_handle *h = find_handle(handle_id);
1237 if (!h)
1238 return ERR_HANDLE_NOT_FOUND;
1240 size_t newpos = h->offset + ringbuf_sub(h->ridx, h->data) + offset;
1241 return seek_handle(h, newpos);
1244 /* Used by bufread and bufgetdata to prepare the buffer and retrieve the
1245 * actual amount of data available for reading. This function explicitly
1246 * does not check the validity of the input handle. It does do range checks
1247 * on size and returns a valid (and explicit) amount of data for reading */
1248 static size_t handle_size_available(const struct memory_handle *h)
1250 /* Obtain proper distances from data start */
1251 size_t rd = ringbuf_sub(h->ridx, h->data);
1252 size_t wr = ringbuf_sub(h->widx, h->data);
1254 if (LIKELY(wr > rd))
1255 return wr - rd;
1257 return 0; /* ridx is ahead of or equal to widx at this time */
1260 static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
1261 bool guardbuf_limit)
1263 struct memory_handle *h = find_handle(handle_id);
1264 size_t realsize;
1266 if (!h)
1267 return NULL;
1269 size_t avail = handle_size_available(h);
1271 if (avail == 0 && h->filerem == 0) {
1272 /* File is finished reading */
1273 *size = 0;
1274 return h;
1277 realsize = *size;
1279 if (realsize == 0 || realsize > avail + h->filerem)
1280 realsize = avail + h->filerem;
1282 if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO
1283 && realsize > GUARD_BUFSIZE) {
1284 logf("data request > guardbuf");
1285 /* If more than the size of the guardbuf is requested and this is a
1286 * bufgetdata, limit to guard_bufsize over the end of the buffer */
1287 realsize = MIN(realsize, buffer_len - h->ridx + GUARD_BUFSIZE);
1288 /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
1291 if (h->filerem > 0 && avail < realsize) {
1292 /* Data isn't ready. Request buffering */
1293 buf_request_buffer_handle(handle_id);
1294 /* Wait for the data to be ready */
1297 sleep(0);
1298 /* it is not safe for a non-buffering thread to sleep while
1299 * holding a handle */
1300 h = find_handle(handle_id);
1301 if (!h)
1302 return NULL;
1303 avail = handle_size_available(h);
1305 while (h->filerem > 0 && avail < realsize);
1308 *size = MIN(realsize, avail);
1309 return h;
1313 /* Note: It is safe for the thread responsible for handling the rebuffer
1314 * cleanup request to call bufread or bufgetdata only when the data will
1315 * be available-- not if it could be blocked waiting for it in prep_bufdata.
1316 * It should be apparent that if said thread is being forced to wait for
1317 * buffering but has not yet responded to the cleanup request, the space
1318 * can never be cleared to allow further reading of the file because it is
1319 * not listening to callbacks any longer. */
1321 /* Copy data from the given handle to the dest buffer.
1322 Return the number of bytes copied or < 0 for failure (handle not found).
1323 The caller is blocked until the requested amount of data is available.
1325 ssize_t bufread(int handle_id, size_t size, void *dest)
1327 const struct memory_handle *h;
1328 size_t adjusted_size = size;
1330 h = prep_bufdata(handle_id, &adjusted_size, false);
1331 if (!h)
1332 return ERR_HANDLE_NOT_FOUND;
1334 if (h->ridx + adjusted_size > buffer_len) {
1335 /* the data wraps around the end of the buffer */
1336 size_t read = buffer_len - h->ridx;
1337 memcpy(dest, &buffer[h->ridx], read);
1338 memcpy(dest+read, buffer, adjusted_size - read);
1339 } else {
1340 memcpy(dest, &buffer[h->ridx], adjusted_size);
1343 return adjusted_size;
1346 /* Update the "data" pointer to make the handle's data available to the caller.
1347 Return the length of the available linear data or < 0 for failure (handle
1348 not found).
1349 The caller is blocked until the requested amount of data is available.
1350 size is the amount of linear data requested. it can be 0 to get as
1351 much as possible.
1352 The guard buffer may be used to provide the requested size. This means it's
1353 unsafe to request more than the size of the guard buffer.
1355 ssize_t bufgetdata(int handle_id, size_t size, void **data)
1357 const struct memory_handle *h;
1358 size_t adjusted_size = size;
1360 h = prep_bufdata(handle_id, &adjusted_size, true);
1361 if (!h)
1362 return ERR_HANDLE_NOT_FOUND;
1364 if (h->ridx + adjusted_size > buffer_len) {
1365 /* the data wraps around the end of the buffer :
1366 use the guard buffer to provide the requested amount of data. */
1367 size_t copy_n = h->ridx + adjusted_size - buffer_len;
1368 /* prep_bufdata ensures
1369 adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
1370 so copy_n <= GUARD_BUFSIZE */
1371 memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
1374 if (data)
1375 *data = &buffer[h->ridx];
1377 return adjusted_size;
1380 ssize_t bufgettail(int handle_id, size_t size, void **data)
1382 size_t tidx;
1384 const struct memory_handle *h;
1386 h = find_handle(handle_id);
1388 if (!h)
1389 return ERR_HANDLE_NOT_FOUND;
1391 if (h->filerem)
1392 return ERR_HANDLE_NOT_DONE;
1394 /* We don't support tail requests of > guardbuf_size, for simplicity */
1395 if (size > GUARD_BUFSIZE)
1396 return ERR_INVALID_VALUE;
1398 tidx = ringbuf_sub(h->widx, size);
1400 if (tidx + size > buffer_len) {
1401 size_t copy_n = tidx + size - buffer_len;
1402 memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
1405 *data = &buffer[tidx];
1406 return size;
1409 ssize_t bufcuttail(int handle_id, size_t size)
1411 struct memory_handle *h;
1412 size_t adjusted_size = size;
1414 h = find_handle(handle_id);
1416 if (!h)
1417 return ERR_HANDLE_NOT_FOUND;
1419 if (h->filerem)
1420 return ERR_HANDLE_NOT_DONE;
1422 if (h->available < adjusted_size)
1423 adjusted_size = h->available;
1425 h->available -= adjusted_size;
1426 h->filesize -= adjusted_size;
1427 h->widx = ringbuf_sub(h->widx, adjusted_size);
1428 if (h == cur_handle)
1429 buf_widx = h->widx;
1431 return adjusted_size;
1436 SECONDARY EXPORTED FUNCTIONS
1437 ============================
1439 buf_get_offset
1440 buf_handle_offset
1441 buf_request_buffer_handle
1442 buf_set_base_handle
1443 buf_used
1444 register_buffering_callback
1445 unregister_buffering_callback
1447 These functions are exported, to allow interaction with the buffer.
1448 They take care of the content of the structs, and rely on the linked list
1449 management functions for all the actual handle management work.
1452 /* Get a handle offset from a pointer */
1453 ssize_t buf_get_offset(int handle_id, void *ptr)
1455 const struct memory_handle *h = find_handle(handle_id);
1456 if (!h)
1457 return ERR_HANDLE_NOT_FOUND;
1459 return (size_t)ptr - (size_t)&buffer[h->ridx];
1462 ssize_t buf_handle_offset(int handle_id)
1464 const struct memory_handle *h = find_handle(handle_id);
1465 if (!h)
1466 return ERR_HANDLE_NOT_FOUND;
1467 return h->offset;
1470 void buf_request_buffer_handle(int handle_id)
1472 LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id);
1473 queue_send(&buffering_queue, Q_START_FILL, handle_id);
1476 void buf_set_base_handle(int handle_id)
1478 LOGFQUEUE("buffering > Q_BASE_HANDLE %d", handle_id);
1479 queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
1482 /* Return the amount of buffer space used */
1483 size_t buf_used(void)
1485 return BUF_USED;
1488 void buf_set_watermark(size_t bytes)
1490 conf_watermark = bytes;
1493 static void shrink_buffer_inner(struct memory_handle *h)
1495 if (h == NULL)
1496 return;
1498 shrink_buffer_inner(h->next);
1500 shrink_handle(h);
1503 static void shrink_buffer(void)
1505 logf("shrink_buffer()");
1506 shrink_buffer_inner(first_handle);
/* Main loop of the buffering thread: services fill/close/rebuffer requests
   from the queue and opportunistically keeps the buffer topped up. */
1509 void buffering_thread(void)
1511 bool filling = false;
1512 struct queue_event ev;
1513 struct buf_message_data *parm;
1515 while (true)
1517 if (!filling) {
1518 cancel_cpu_boost();
/* Poll fast while filling, slowly (HZ/2) while idle. */
1521 queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);
1523 switch (ev.id)
1525 case Q_START_FILL:
1526 LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
1527 /* Call buffer callbacks here because this is one of two ways
1528 * to begin a full buffer fill */
1529 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1530 shrink_buffer();
1531 queue_reply(&buffering_queue, 1);
1532 filling |= buffer_handle((int)ev.data, 0);
1533 break;
1535 case Q_BUFFER_HANDLE:
1536 LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
1537 queue_reply(&buffering_queue, 1);
1538 buffer_handle((int)ev.data, 0);
1539 break;
1541 case Q_REBUFFER_HANDLE:
1542 parm = (struct buf_message_data *)ev.data;
1543 LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld",
1544 parm->handle_id, parm->data);
/* rebuffer_handle() sends the queue reply itself. */
1545 rebuffer_handle(parm->handle_id, parm->data);
1546 break;
1548 case Q_CLOSE_HANDLE:
1549 LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
1550 queue_reply(&buffering_queue, close_handle((int)ev.data));
1551 break;
1553 case Q_HANDLE_ADDED:
1554 LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
1555 /* A handle was added: the disk is spinning, so we can fill */
1556 filling = true;
1557 break;
1559 case Q_BASE_HANDLE:
1560 LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
1561 base_handle_id = (int)ev.data;
1562 break;
1564 #if (CONFIG_PLATFORM & PLATFORM_NATIVE)
1565 case SYS_USB_CONNECTED:
1566 LOGFQUEUE("buffering < SYS_USB_CONNECTED");
1567 usb_acknowledge(SYS_USB_CONNECTED_ACK);
1568 usb_wait_for_disconnect(&buffering_queue);
1569 break;
1570 #endif
1572 case SYS_TIMEOUT:
1573 LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
1574 break;
1577 update_data_counters(NULL);
1579 /* If the buffer is low, call the callbacks to get new data */
1580 if (num_handles > 0 && data_counters.useful <= conf_watermark)
1581 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1583 #if 0
1584 /* TODO: This needs to be fixed to use the idle callback, disable it
1585 * for simplicity until it's done right */
1586 #if MEMORYSIZE > 8
1587 /* If the disk is spinning, take advantage by filling the buffer */
1588 else if (storage_disk_is_active() && queue_empty(&buffering_queue)) {
1589 if (num_handles > 0 && data_counters.useful <= high_watermark)
1590 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1592 if (data_counters.remaining > 0 && BUF_USED <= high_watermark) {
1593 /* This is a new fill, shrink the buffer up first */
1594 if (!filling)
1595 shrink_buffer();
1596 filling = fill_buffer();
1597 update_data_counters(NULL);
1600 #endif
1601 #endif
/* With no pending messages, continue an ongoing fill, or start one on
   timeout if useful data has dropped below the watermark. */
1603 if (queue_empty(&buffering_queue)) {
1604 if (filling) {
1605 if (data_counters.remaining > 0 && BUF_USED < buffer_len)
1606 filling = fill_buffer();
1607 else if (data_counters.remaining == 0)
1608 filling = false;
1609 } else if (ev.id == SYS_TIMEOUT) {
1610 if (data_counters.remaining > 0 &&
1611 data_counters.useful <= conf_watermark) {
1612 shrink_buffer();
1613 filling = fill_buffer();
1620 void buffering_init(void)
1622 mutex_init(&llist_mutex);
1624 conf_watermark = BUFFERING_DEFAULT_WATERMARK;
1626 queue_init(&buffering_queue, true);
1627 buffering_thread_id = create_thread( buffering_thread, buffering_stack,
1628 sizeof(buffering_stack), CREATE_THREAD_FROZEN,
1629 buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
1630 IF_COP(, CPU));
1632 queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
1633 buffering_thread_id);
1636 /* Initialise the buffering subsystem */
1637 bool buffering_reset(char *buf, size_t buflen)
1639 /* Wraps of storage-aligned data must also be storage aligned,
1640 thus buf and buflen must be a aligned to an integer multiple of
1641 the storage alignment */
1642 STORAGE_ALIGN_BUFFER(buf, buflen);
1644 if (!buf || !buflen)
1645 return false;
1647 buffer = buf;
1648 buffer_len = buflen;
1649 guard_buffer = buf + buflen;
1651 buf_widx = 0;
1652 buf_ridx = 0;
1654 first_handle = NULL;
1655 cur_handle = NULL;
1656 cached_handle = NULL;
1657 num_handles = 0;
1658 base_handle_id = -1;
1660 /* Set the high watermark as 75% full...or 25% empty :) */
1661 #if MEMORYSIZE > 8
1662 high_watermark = 3*buflen / 4;
1663 #endif
1665 thread_thaw(buffering_thread_id);
1667 return true;
/* Snapshot current buffer statistics into *dbgdata for the debug screen. */
1670 void buffering_get_debugdata(struct buffering_debug *dbgdata)
1672 struct data_counters dc;
1673 update_data_counters(&dc);
1674 dbgdata->num_handles = num_handles;
1675 dbgdata->data_rem = dc.remaining;
1676 dbgdata->wasted_space = dc.wasted;
1677 dbgdata->buffered_data = dc.buffered;
1678 dbgdata->useful_data = dc.useful;
1679 dbgdata->watermark = conf_watermark;