/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2007 Nicolas Pennequin
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "buffering.h"
#include "mp3_playback.h"
#include "appevents.h"
#include "jpeg_load.h"
#define GUARD_BUFSIZE   (32*1024)

/* Define LOGF_ENABLE to enable logf output in this file */
/*#define LOGF_ENABLE*/

/* macros to enable logf for queues
   logging on SYS_TIMEOUT can be disabled */

/* Define this for logf output of all queuing except SYS_TIMEOUT */
#define BUFFERING_LOGQUEUES
/* Define this to logf SYS_TIMEOUT messages */
/*#define BUFFERING_LOGQUEUES_SYS_TIMEOUT*/

#ifdef BUFFERING_LOGQUEUES
#define LOGFQUEUE logf
#else
#define LOGFQUEUE(...)
#endif

#ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
#define LOGFQUEUE_SYS_TIMEOUT logf
#else
#define LOGFQUEUE_SYS_TIMEOUT(...)
#endif
/* default point to start buffer refill */
#define BUFFERING_DEFAULT_WATERMARK      (1024*128)
/* amount of data to read in one read() call */
#define BUFFERING_DEFAULT_FILECHUNK      (1024*32)

#define BUF_HANDLE_MASK                  0x7FFFFFFF
/* assert(sizeof(struct memory_handle)%4==0) */
struct memory_handle {
    int id;                    /* A unique ID for the handle */
    enum data_type type;       /* Type of data buffered with this handle */
    char path[MAX_PATH];       /* Path if data originated in a file */
    int fd;                    /* File descriptor to path (-1 if closed) */
    size_t data;               /* Start index of the handle's data buffer */
    volatile size_t ridx;      /* Read pointer, relative to the main buffer */
    size_t widx;               /* Write pointer, relative to the main buffer */
    size_t filesize;           /* File total length */
    size_t filerem;            /* Remaining bytes of file NOT in buffer */
    volatile size_t available; /* Available bytes to read from buffer */
    size_t offset;             /* Offset at which we started reading the file */
    struct memory_handle *next;
};
/* invariant: filesize == offset + available + filerem */
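
/* Illustrative sketch (not part of the original file): the invariant above in
   concrete numbers. Suppose a 1000-byte file is opened with offset 100, and
   650 bytes of it have been read into the buffer so far:

       filesize  = 1000                          file total length
       offset    = 100                           bytes skipped before buffering
       available = 650                           bytes of file data in buffer
       filerem   = 1000 - 100 - 650 = 250        bytes still to read from disk

   so filesize == offset + available + filerem holds at every step of
   buffer_handle() below. */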
struct buf_message_data
{
    int handle_id;
    intptr_t data;
};
static char *buffer;       /* the buffer itself, referenced throughout */
static char *guard_buffer; /* guard area placed just past the buffer's end */

static size_t buffer_len;

static volatile size_t buf_widx; /* current writing position */
static volatile size_t buf_ridx; /* current reading position */
/* buf_*idx are values relative to the buffer, not real pointers. */

static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
static size_t high_watermark = 0; /* High watermark for rebuffer */

/* current memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *cur_handle;
/* first memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *first_handle;

static int num_handles;  /* number of handles in the list */

static int base_handle_id;

/* Main lock for adding / removing handles */
static struct mutex llist_mutex SHAREDBSS_ATTR;
/* Handle cache (makes find_handle faster).
   This is global so that move_handle and rm_handle can invalidate it. */
static struct memory_handle *cached_handle = NULL;

static struct data_counters
{
    size_t remaining; /* Amount of data needing to be buffered */
    size_t wasted;    /* Amount of space available for freeing */
    size_t buffered;  /* Amount of data currently in the buffer */
    size_t useful;    /* Amount of data still useful to the user */
} data_counters;
/* Messages available to communicate with the buffering thread */
enum
{
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                            used in a low buffer situation. */
    Q_REBUFFER_HANDLE,   /* Request reset and rebuffering of a handle at a new
                            file starting position. */
    Q_CLOSE_HANDLE,      /* Request closing a handle */
    Q_BASE_HANDLE,       /* Set the reference handle for buf_useful_data */

    Q_START_FILL,        /* Request that the buffering thread initiate a buffer
                            fill at its earliest convenience */
    Q_HANDLE_ADDED,      /* Inform the buffering thread that a handle was added,
                            (which means the disk is spinning) */
};
/* Buffering thread */
static void buffering_thread(void);
static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
static const char buffering_thread_name[] = "buffering";
static unsigned int buffering_thread_id = 0;
static struct event_queue buffering_queue SHAREDBSS_ATTR;
static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR;
/* Ring buffer helper functions */

static inline uintptr_t ringbuf_offset(const void *ptr)
{
    return (uintptr_t)(ptr - (void*)buffer);
}

/* Buffer pointer (p) plus value (v), wrapped if necessary */
static inline uintptr_t ringbuf_add(uintptr_t p, size_t v)
{
    uintptr_t res = p + v;
    if (res >= buffer_len)
        res -= buffer_len; /* wrap if necessary */

    return res;
}

/* Buffer pointer (p) minus value (v), wrapped if necessary */
static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v)
{
    uintptr_t res = p;
    if (p < v)
        res += buffer_len; /* wrap */

    return res - v;
}

/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2)
{
    ssize_t res = p1 + v - p2;
    if (p1 >= p2) /* wrap if necessary */
        res -= buffer_len;

    return res;
}

/* Bytes available in the buffer */
#define BUF_USED ringbuf_sub(buf_widx, buf_ridx)
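
/* Worked example (illustrative, not part of the original file): with
   buffer_len == 100,

       ringbuf_add(95, 10)          == 5    (wraps past the end)
       ringbuf_sub(5, 10)           == 95   (wraps past the start)
       ringbuf_add_cross(90, 15, 3) == 2    (writing 15 bytes at 90 ends 2
                                             bytes past position 3)

   A negative ringbuf_add_cross() result means the write still fits without
   reaching p2, which is how the allocation checks below test for space. */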
/*
LINKED LIST MANAGEMENT
======================

add_handle  : Add a handle to the list
rm_handle   : Remove a handle from the list
find_handle : Get a handle pointer from an ID
move_handle : Move a handle in the buffer (with or without its data)

These functions only handle the linked list structure. They don't touch the
contents of the struct memory_handle headers. They also change the buf_*idx
pointers when necessary and manage the handle IDs.

The first and current (== last) handle are kept track of.
A new handle is added at buf_widx and becomes the current one.
buf_widx always points to the current writing position for the current handle
buf_ridx always points to the location of the first handle.
buf_ridx == buf_widx means the buffer is empty.
*/
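
/* Layout sketch (illustrative, not part of the original file): three handles
   in the ring, each header immediately followed by its data:

       buf_ridx                                             buf_widx
          v                                                     v
   ......[hdr0|data0........][hdr1|data1......][hdr2|data2...]...
          ^first_handle                         ^cur_handle

   first_handle->next == hdr1, hdr1->next == hdr2, hdr2->next == NULL. */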
/* Add a new handle to the linked list and return it. It will have become the
   new current handle.
   data_size must contain the size of what will be in the handle.
   can_wrap tells us whether this type of data may wrap on buffer
   alloc_all tells us if we must immediately be able to allocate data_size
   returns a valid memory handle if all conditions for allocation are met.
           NULL if the memory_handle itself cannot be allocated or if the
           data_size cannot be allocated and alloc_all is set. */
static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
                                        bool alloc_all)
{
    /* gives each handle a unique id */
    static int cur_handle_id = 0;
    size_t widx, new_widx;
    size_t shift, len;
    ssize_t overlap;

    if (num_handles >= BUF_MAX_HANDLES)
        return NULL;

    if (cur_handle && cur_handle->filerem > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        size_t req = cur_handle->filerem;
        if (ringbuf_add_cross(cur_handle->widx, req, buf_ridx) >= 0) {
            /* Not enough space to finish allocation */
            return NULL;
        }
        /* Allocate the remainder of the space for the current handle */
        widx = ringbuf_add(cur_handle->widx, cur_handle->filerem);
    }
    else {
        widx = buf_widx;
    }

    /* align to 4 bytes up always leaving a gap */
    new_widx = ringbuf_add(widx, 4) & ~3;

    len = data_size + sizeof(struct memory_handle);

    /* First, will the handle wrap? */
    /* If the handle would wrap, move to the beginning of the buffer,
     * or if the data must not but would wrap, move it to the beginning */
    if (new_widx + sizeof(struct memory_handle) > buffer_len ||
        (!can_wrap && new_widx + len > buffer_len)) {
        new_widx = 0;
    }

    /* How far we shifted the new_widx to align things, must be < buffer_len */
    shift = ringbuf_sub(new_widx, widx);

    /* How much space are we short in the actual ring buffer? */
    overlap = ringbuf_add_cross(widx, shift + len, buf_ridx);
    if (overlap >= 0 && (alloc_all || (size_t)overlap >= data_size)) {
        /* Not enough space for required allocations */
        return NULL;
    }

    /* There is enough space for the required data, advance the buf_widx and
     * initialize the struct */
    buf_widx = new_widx;

    struct memory_handle *new_handle =
        (struct memory_handle *)(&buffer[buf_widx]);

    /* Prevent buffering thread from looking at it */
    new_handle->filerem = 0;

    /* only advance the buffer write index of the size of the struct */
    buf_widx = ringbuf_add(buf_widx, sizeof(struct memory_handle));

    new_handle->id = cur_handle_id;
    /* Wrap signed int is safe and 0 doesn't happen */
    cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK;
    new_handle->next = NULL;

    num_handles++;

    if (!first_handle)
        /* the new handle is the first one */
        first_handle = new_handle;

    if (cur_handle)
        cur_handle->next = new_handle;

    cur_handle = new_handle;

    return new_handle;
}
/* Delete a given memory handle from the linked list
   and return true for success. Nothing is actually erased from memory. */
static bool rm_handle(const struct memory_handle *h)
{
    if (h == NULL)
        return true;

    if (h == first_handle) {
        first_handle = h->next;
        if (h == cur_handle) {
            /* h was the first and last handle: the buffer is now empty */
            cur_handle = NULL;
            buf_ridx = buf_widx = 0;
        } else {
            /* update buf_ridx to point to the new first handle */
            buf_ridx = (size_t)ringbuf_offset(first_handle);
        }
    } else {
        struct memory_handle *m = first_handle;
        /* Find the previous handle */
        while (m && m->next != h) {
            m = m->next;
        }
        if (m && m->next == h) {
            m->next = h->next;
            if (h == cur_handle) {
                cur_handle = m;
                buf_widx = cur_handle->widx;
            }
        } else {
            return false;
        }
    }

    /* Invalidate the cache to prevent it from keeping the old location of h */
    if (h == cached_handle)
        cached_handle = NULL;

    num_handles--;
    return true;
}
/* Return a pointer to the memory handle of given ID.
   NULL if the handle wasn't found */
static struct memory_handle *find_handle(int handle_id)
{
    if (handle_id < 0)
        return NULL;

    /* simple caching because most of the time the requested handle
       will either be the same as the last, or the one after the last */
    if (cached_handle) {
        if (cached_handle->id == handle_id) {
            return cached_handle;
        } else if (cached_handle->next &&
                   (cached_handle->next->id == handle_id)) {
            cached_handle = cached_handle->next;
            return cached_handle;
        }
    }

    struct memory_handle *m = first_handle;
    while (m && m->id != handle_id) {
        m = m->next;
    }
    /* This condition can only be reached with !m or m->id == handle_id */
    if (m)
        cached_handle = m;

    return m;
}
/* Move a memory handle and data_size of its data delta bytes along the buffer.
   delta maximum bytes available to move the handle. If the move is performed
   it is set to the actual distance moved.
   data_size is the amount of data to move along with the struct.
   returns true if the move is successful and false if the handle is NULL,
           the move would be less than the size of a memory_handle after
           correcting for wraps or if the handle is not found in the linked
           list for adjustment. This function has no side effects if false
           is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size, bool can_wrap)
{
    struct memory_handle *dest;
    const struct memory_handle *src;
    size_t final_delta = *delta, size_to_move;
    uintptr_t oldpos, newpos;
    intptr_t overlap, overlap_old;

    if (h == NULL || (src = *h) == NULL)
        return false;

    size_to_move = sizeof(struct memory_handle) + data_size;

    /* Align to four bytes, down */
    final_delta &= ~3;
    if (final_delta < sizeof(struct memory_handle)) {
        /* It's not legal to move less than the size of the struct */
        return false;
    }

    oldpos = ringbuf_offset(src);
    newpos = ringbuf_add(oldpos, final_delta);
    overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len);
    overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        ssize_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if (!can_wrap) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out. This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
        } else if ((uintptr_t)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta if the struct size is
             * aligned and the buffer is aligned */
            correction = overlap - data_size;
        }

        if (correction) {
            /* Align correction to four bytes up */
            correction = (correction + 3) & ~3;
            if (final_delta < correction + sizeof(struct memory_handle)) {
                /* Delta cannot end up less than the size of the struct */
                return false;
            }
            newpos -= correction;
            overlap -= correction;/* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    dest = (struct memory_handle *)(&buffer[newpos]);

    if (src == first_handle) {
        first_handle = dest;
        buf_ridx = newpos;
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != src) {
            m = m->next;
        }
        if (m && m->next == src) {
            m->next = dest;
        } else {
            return false;
        }
    }

    /* Update the cache to prevent it from keeping the old location of h */
    if (src == cached_handle)
        cached_handle = dest;

    /* the cur_handle pointer might need updating */
    if (src == cur_handle)
        cur_handle = dest;

    /* x = handle(s) following this one...
     * ...if last handle, unmoveable if metadata, only shrinkable if audio.
     * In other words, no legal move can be made that would have the src head
     * and dest tail of the data overlap itself. These facts reduce the
     * problem to four essential permutations:
     *
     * movement: always "clockwise" >>>>
     *
     * (src nowrap, dest nowrap)
     * (src nowrap, dest wrap)
     * (src wrap, dest nowrap)
     * (src wrap, dest wrap)
     *   move: "23", "1", "0"
     */
    if (overlap_old > 0) {
        /* Move over already wrapped data by the final delta */
        memmove(&buffer[final_delta], buffer, overlap_old);
        if (overlap <= 0)
            size_to_move -= overlap_old;
    }

    if (overlap > 0) {
        /* Move data that now wraps to the beginning */
        size_to_move -= overlap;
        memmove(buffer, SKIPBYTES(src, size_to_move),
                overlap_old > 0 ? final_delta : (size_t)overlap);
    }

    /* Move leading fragment containing handle struct */
    memmove(dest, src, size_to_move);

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    return true;
}
/*
BUFFER SPACE MANAGEMENT
=======================

update_data_counters: Updates the values in data_counters
buffer_is_low   : Returns true if the amount of useful data in the buffer is low
buffer_handle   : Buffer data for a handle
rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
shrink_handle   : Free buffer space by moving a handle
fill_buffer     : Call buffer_handle for all handles that have data to buffer

These functions are used by the buffering thread to manage buffer space.
*/
static size_t handle_size_available(const struct memory_handle *h)
{
    /* Obtain proper distances from data start */
    size_t rd = ringbuf_sub(h->ridx, h->data);
    size_t wr = ringbuf_sub(h->widx, h->data);

    if (wr > rd)
        return wr - rd;

    return 0; /* ridx is ahead of or equal to widx at this time */
}
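
/* Example (illustrative, not part of the original file): if h->data == 40,
   h->ridx == 50 and h->widx == 90, then rd == 10, wr == 50 and 40 bytes
   remain readable. Once the reader catches up (ridx == widx) this returns 0,
   even though already-read data may still sit in the buffer. */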
static void update_data_counters(struct data_counters *dc)
{
    size_t buffered = 0;
    size_t wasted = 0;
    size_t remaining = 0;
    size_t useful = 0;

    struct memory_handle *m;
    bool is_useful;

    if (dc == NULL)
        dc = &data_counters;

    mutex_lock(&llist_mutex);

    m = find_handle(base_handle_id);
    is_useful = m == NULL;

    m = first_handle;
    while (m) {
        buffered += m->available;
        /* wasted could come out larger than the buffer size if ridx's are
           overlapping data ahead of their handles' buffered data */
        wasted += ringbuf_sub(m->ridx, m->data);
        remaining += m->filerem;

        if (m->id == base_handle_id)
            is_useful = true;

        if (is_useful)
            useful += handle_size_available(m);

        m = m->next;
    }

    mutex_unlock(&llist_mutex);

    dc->buffered = buffered;
    dc->wasted = wasted;
    dc->remaining = remaining;
    dc->useful = useful;
}

static inline bool buffer_is_low(void)
{
    update_data_counters(NULL);
    return data_counters.useful < (conf_watermark / 2);
}
/* Q_BUFFER_HANDLE event and buffer data for the given handle.
   Return whether or not the buffering should continue explicitly. */
static bool buffer_handle(int handle_id, size_t to_buffer)
{
    logf("buffer_handle(%d)", handle_id);
    struct memory_handle *h = find_handle(handle_id);
    bool stop = false;

    if (!h)
        return true;

    if (h->filerem == 0) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0) { /* file closed, reopen */
        h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0) {
            /* could not open the file, truncate it where it is */
            h->filesize -= h->filerem;
            h->filerem = 0;
            return true;
        }

        if (h->offset)
            lseek(h->fd, h->offset, SEEK_SET);
    }

    if (h->type == TYPE_ID3) {
        if (!get_metadata((struct mp3entry *)(buffer + h->data),
                          h->fd, h->path)) {
            /* metadata parsing failed: clear the buffer. */
            memset(buffer + h->data, 0, sizeof(struct mp3entry));
        }
        close(h->fd);
        h->fd = -1;
        h->filerem = 0;
        h->available = sizeof(struct mp3entry);
        h->widx += sizeof(struct mp3entry);
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
        return true;
    }

    while (h->filerem > 0 && !stop)
    {
        /* max amount to copy */
        ssize_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
                              buffer_len - h->widx);
        uintptr_t offset = h->next ? ringbuf_offset(h->next) : buf_ridx;
        ssize_t overlap = ringbuf_add_cross(h->widx, copy_n, offset) + 1;

        /* read only up to available space and stop if it would overwrite
           or be on top of the reading position or the next handle */
        if (overlap > 0) {
            stop = true;
            copy_n -= overlap;
        }

        if (copy_n <= 0)
            return false; /* no space for read */

        /* rc is the actual amount read */
        int rc = read(h->fd, &buffer[h->widx], copy_n);

        if (rc < 0) {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
            h->filesize -= h->filerem;
            h->filerem = 0;
            break;
        }

        /* Advance buffer */
        h->widx = ringbuf_add(h->widx, rc);
        if (h == cur_handle)
            buf_widx = h->widx;
        h->available += rc;
        h->filerem -= rc;

        /* If this is a large file, see if we need to break or give the codec
         * more time */
        if (h->type == TYPE_PACKET_AUDIO &&
            pcmbuf_is_lowdata() && !buffer_is_low()) {
            sleep(1);
        } else {
            yield();
        }

        if (to_buffer == 0) {
            /* Normal buffering - check queue */
            if(!queue_empty(&buffering_queue))
                break;
        } else {
            if (to_buffer <= (size_t)rc)
                break; /* Done */
            to_buffer -= rc;
        }
    }

    if (h->filerem == 0) {
        /* finished buffering the file */
        close(h->fd);
        h->fd = -1;
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
    }

    return !stop;
}
/* Close the specified handle id and free its allocation. */
static bool close_handle(int handle_id)
{
    bool retval = true;
    struct memory_handle *h;

    mutex_lock(&llist_mutex);
    h = find_handle(handle_id);

    /* If the handle is not found, it is closed */
    if (h) {
        if (h->fd >= 0) {
            close(h->fd);
            h->fd = -1;
        }

        /* rm_handle returns true unless the handle somehow persists after
           exit */
        retval = rm_handle(h);
    }

    mutex_unlock(&llist_mutex);
    return retval;
}
/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data. */
static void shrink_handle(struct memory_handle *h)
{
    size_t delta;

    if (!h)
        return;

    if (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
        h->type == TYPE_BITMAP || h->type == TYPE_CODEC ||
        h->type == TYPE_ATOMIC_AUDIO)
    {
        /* metadata handle: we can move all of it */
        if (!h->next || h->filerem != 0)
            return; /* Last handle or not finished loading */

        uintptr_t handle_distance =
            ringbuf_sub(ringbuf_offset(h->next), h->data);
        delta = handle_distance - h->available;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, h->available, h->type==TYPE_CODEC))
            return;

        size_t olddata = h->data;
        h->data = ringbuf_add(h->data, delta);
        h->ridx = ringbuf_add(h->ridx, delta);
        h->widx = ringbuf_add(h->widx, delta);

        if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
            /* when moving an mp3entry we need to readjust its pointers. */
            adjust_mp3entry((struct mp3entry *)&buffer[h->data],
                            (void *)&buffer[h->data],
                            (const void *)&buffer[olddata]);
        } else if (h->type == TYPE_BITMAP) {
            /* adjust the bitmap's pointer */
            struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
            bmp->data = &buffer[h->data + sizeof(struct bitmap)];
        }
    }
    else
    {
        /* only move the handle struct */
        delta = ringbuf_sub(h->ridx, h->data);
        if (!move_handle(&h, &delta, 0, true))
            return;

        h->data = ringbuf_add(h->data, delta);
        h->available -= delta;
        h->offset += delta;
    }
}
/* Fill the buffer by buffering as much data as possible for handles that still
   have data left to buffer
   Return whether or not to continue filling after this */
static bool fill_buffer(void)
{
    logf("fill_buffer()");
    struct memory_handle *m = first_handle;

    shrink_handle(m);

    while (queue_empty(&buffering_queue) && m) {
        if (m->filerem > 0) {
            if (!buffer_handle(m->id, 0)) {
                m = NULL;
                break;
            }
        }
        m = m->next;
    }

    if (m) {
        return true;
    } else {
        /* only spin the disk down if the filling wasn't interrupted by an
           event arriving in the queue. */
        storage_sleep();
        return false;
    }
}
/* Given a file descriptor to a bitmap file, write the bitmap data to the
   buffer, with a struct bitmap and the actual data immediately following.
   Return value is the total size (struct + data). */
static int load_image(int fd, const char *path,
                      struct bufopen_bitmap_data *data)
{
    int rc;
    struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx];
    struct dim *dim = data->dim;
    struct mp3_albumart *aa = data->embedded_albumart;

    /* get the desired image size */
    bmp->width = dim->width, bmp->height = dim->height;
    /* FIXME: alignment may be needed for the data buffer. */
    bmp->data = &buffer[buf_widx + sizeof(struct bitmap)];

#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
    bmp->maskdata = NULL;
#endif

    int free = (int)MIN(buffer_len - BUF_USED, buffer_len - buf_widx)
               - sizeof(struct bitmap);

    if (aa != NULL) {
        lseek(fd, aa->pos, SEEK_SET);
        rc = clip_jpeg_fd(fd, aa->size, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    }
    else if (strcmp(path + strlen(path) - 4, ".bmp"))
        rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    else
        rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                         FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);

    return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
}
/*
MAIN BUFFERING API CALLS
========================

bufopen     : Request the opening of a new handle for a file
bufalloc    : Open a new handle for data other than a file.
bufclose    : Close an open handle
bufseek     : Set the read pointer in a handle
bufadvance  : Move the read pointer in a handle
bufread     : Copy data from a handle into a given buffer
bufgetdata  : Give a pointer to the handle's data

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/
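
/* Typical call sequence (illustrative sketch, not part of the original file;
   error handling trimmed and the path is made up):

       int id = bufopen("/music/track.mp3", 0, TYPE_PACKET_AUDIO, NULL);
       if (id >= 0) {
           buf_request_buffer_handle(id);
           char chunk[4096];
           ssize_t got = bufread(id, sizeof(chunk), chunk);
           ...
           bufclose(id);
       }
*/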
/* Reserve space in the buffer for a file.
   filename: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           offset bytes of the file aren't needed.
   type: one of the data types supported (audio, image, cuesheet, others)
   user_data: user data possibly passed in subcalls specific to a
              data_type (only used for image (albumart) buffering so far)
   return value: <0 if the file cannot be opened, or one file already
   queued to be opened, otherwise the handle for the file in the buffer
*/
int bufopen(const char *file, size_t offset, enum data_type type,
            void *user_data)
{
#ifndef HAVE_ALBUMART
    /* currently only used for aa loading */
    (void)user_data;
#endif
    int handle_id = ERR_BUFFER_FULL;

    /* No buffer refs until after the mutex_lock call! */

    if (type == TYPE_ID3) {
        /* ID3 case: allocate space, init the handle and return. */
        mutex_lock(&llist_mutex);

        struct memory_handle *h =
            add_handle(sizeof(struct mp3entry), false, true);

        if (h) {
            handle_id = h->id;
            h->fd = -1;
            h->filesize = sizeof(struct mp3entry);
            h->offset = 0;
            h->data = buf_widx;
            h->ridx = buf_widx;
            h->widx = buf_widx;
            h->available = 0;
            h->type = type;
            strlcpy(h->path, file, MAX_PATH);

            buf_widx = ringbuf_add(buf_widx, sizeof(struct mp3entry));

            h->filerem = sizeof(struct mp3entry);

            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }

        mutex_unlock(&llist_mutex);
        return handle_id;
    }
#ifdef APPLICATION
    /* loading code from memory is not supported in application builds */
    else if (type == TYPE_CODEC)
        return ERR_UNSUPPORTED_TYPE;
#endif
    /* Other cases: there is a little more work. */
    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = 0;
#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* if albumart is embedded, the complete file is not buffered,
         * but only the jpeg part; filesize() would be wrong */
        struct bufopen_bitmap_data *aa = (struct bufopen_bitmap_data *)user_data;
        if (aa->embedded_albumart)
            size = aa->embedded_albumart->size;
    }
#endif
    if (size == 0)
        size = filesize(fd);
    bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC;

    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    /* Reserve extra space because alignment can move data forward */
    size_t padded_size = STORAGE_PAD(size-adjusted_offset);

    mutex_lock(&llist_mutex);

    struct memory_handle *h = add_handle(padded_size, can_wrap, false);
    if (!h) {
        DEBUGF("%s(): failed to add handle\n", __func__);
        mutex_unlock(&llist_mutex);
        close(fd);
        return ERR_BUFFER_FULL;
    }

    handle_id = h->id;
    strlcpy(h->path, file, MAX_PATH);
    h->offset = adjusted_offset;

#ifdef STORAGE_WANTS_ALIGN
    /* Don't bother to storage align bitmaps because they are not
     * loaded directly into the buffer.
     */
    if (type != TYPE_BITMAP) {
        /* Align to desired storage alignment */
        size_t alignment_pad = STORAGE_OVERLAP(adjusted_offset -
                                               (size_t)(&buffer[buf_widx]));
        buf_widx = ringbuf_add(buf_widx, alignment_pad);
    }
#endif /* STORAGE_WANTS_ALIGN */

    h->fd = -1;
    h->data = buf_widx;
    h->ridx = buf_widx;
    h->widx = buf_widx;
    h->available = 0;
    h->type = type;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* Bitmap file: we load the data instead of the file */
        int rc;
        rc = load_image(fd, file, (struct bufopen_bitmap_data *)user_data);
        if (rc <= 0) {
            rm_handle(h);
            handle_id = ERR_FILE_ERROR;
        } else {
            h->filesize = rc;
            h->available = rc;
            buf_widx = ringbuf_add(buf_widx, rc);
            h->widx = buf_widx;
        }
    }
    else
#endif
    {
        if (type == TYPE_CUESHEET)
            h->fd = fd;

        h->filesize = size;
        h->filerem = size - adjusted_offset;
    }

    mutex_unlock(&llist_mutex);

    if (type == TYPE_CUESHEET) {
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        close(fd);

        if (handle_id >= 0) {
            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }
    }

    logf("bufopen: new hdl %d", handle_id);
    return handle_id;
}
/* Open a new handle from data that needs to be copied from memory.
   src is the source buffer from which to copy data. It can be NULL to simply
   reserve buffer space.
   size is the requested size. The call will only be successful if the
   requested amount of data can entirely fit in the buffer without wrapping.
   Return value is the handle id for success or <0 for failure.
*/
int bufalloc(const void *src, size_t size, enum data_type type)
{
    int handle_id = ERR_BUFFER_FULL;

    mutex_lock(&llist_mutex);

    struct memory_handle *h = add_handle(size, false, true);

    if (h) {
        handle_id = h->id;

        if (src) {
            if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
                /* specially take care of struct mp3entry */
                copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
                              (const struct mp3entry *)src);
            } else {
                memcpy(&buffer[buf_widx], src, size);
            }
        }

        h->fd = -1;
        *h->path = 0;
        h->filesize = size;
        h->offset = 0;
        h->ridx = buf_widx;
        h->data = buf_widx;
        buf_widx = ringbuf_add(buf_widx, size);
        h->widx = buf_widx;
        h->available = size;
        h->type = type;
    }

    mutex_unlock(&llist_mutex);

    logf("bufalloc: new hdl %d", handle_id);
    return handle_id;
}
/* Close the handle. Return true for success and false for failure */
bool bufclose(int handle_id)
{
    logf("bufclose(%d)", handle_id);

    LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
    return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
}
/* Backend to bufseek and bufadvance. Call only in response to
   Q_REBUFFER_HANDLE! */
static void rebuffer_handle(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);

    if (!h) {
        queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND);
        return;
    }

    /* When seeking forward off of the buffer, if it is a short seek attempt to
       avoid rebuffering the whole track, just read enough to satisfy */
    if (newpos > h->offset &&
        newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK) {

        size_t amount = newpos - h->offset;
        h->ridx = ringbuf_add(h->data, amount);

        if (buffer_handle(handle_id, amount + 1)) {
            size_t rd = ringbuf_sub(h->ridx, h->data);
            size_t wr = ringbuf_sub(h->widx, h->data);
            if (wr >= rd) {
                /* It really did succeed */
                queue_reply(&buffering_queue, 0);
                buffer_handle(handle_id, 0); /* Ok, try the rest */
                return;
            }
        }
        /* Data collision or other file error - must reset */

        if (newpos > h->filesize)
            newpos = h->filesize; /* file truncation happened above */
    }

    /* Reset the handle to its new position */
    h->offset = newpos;

    size_t next = h->next ? ringbuf_offset(h->next) : buf_ridx;

#ifdef STORAGE_WANTS_ALIGN
    /* Strip alignment padding then redo */
    size_t new_index = ringbuf_add(ringbuf_offset(h), sizeof (*h));

    /* Align to desired storage alignment if space permits - handle could
       have been shrunken too close to the following one after a previous
       rebuffer. */
    size_t alignment_pad =
        STORAGE_OVERLAP(h->offset - (size_t)(&buffer[new_index]));

    if (ringbuf_add_cross(new_index, alignment_pad, next) >= 0)
        alignment_pad = 0; /* Forego storage alignment this time */

    new_index = ringbuf_add(new_index, alignment_pad);
#else
    /* Just clear the data buffer */
    size_t new_index = h->data;
#endif /* STORAGE_WANTS_ALIGN */

    h->ridx = h->widx = h->data = new_index;

    if (h == cur_handle)
        buf_widx = new_index;

    h->available = 0;
    h->filerem = h->filesize - h->offset;

    if (h->fd >= 0)
        lseek(h->fd, h->offset, SEEK_SET);

    if (h->next && ringbuf_sub(next, h->data) <= h->filesize - newpos) {
        /* There isn't enough space to rebuffer all of the track from its new
           offset, so we ask the user to free some */
        DEBUGF("%s(): space is needed\n", __func__);
        int hid = handle_id;
        send_event(BUFFER_EVENT_REBUFFER, &hid);
    }

    /* Now we do the rebuffer */
    queue_reply(&buffering_queue, 0);
    buffer_handle(handle_id, 0);
}
/* Backend to bufseek and bufadvance */
static int seek_handle(struct memory_handle *h, size_t newpos)
{
    if (newpos > h->filesize) {
        /* access beyond the end of the file */
        return ERR_INVALID_VALUE;
    }
    else if ((newpos < h->offset || h->offset + h->available <= newpos) &&
             (newpos < h->filesize || h->filerem > 0)) {
        /* access before or after buffered data and not to end of file or file
           is not buffered to the end-- a rebuffer is needed. */
        struct buf_message_data parm = { h->id, newpos };
        return queue_send(&buffering_queue, Q_REBUFFER_HANDLE,
                          (intptr_t)&parm);
    }
    else {
        h->ridx = ringbuf_add(h->data, newpos - h->offset);
    }

    return 0;
}
/* Set reading index in handle (relatively to the start of the file).
   Access before the available data will trigger a rebuffer.
   Return 0 for success and < 0 for failure:
     -1 if the handle wasn't found
     -2 if the new requested position was beyond the end of the file
*/
int bufseek(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    return seek_handle(h, newpos);
}
/* Advance the reading index in a handle (relatively to its current position).
   Return 0 for success and < 0 for failure */
int bufadvance(int handle_id, off_t offset)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    size_t newpos = h->offset + ringbuf_sub(h->ridx, h->data) + offset;
    return seek_handle(h, newpos);
}
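
/* Example (illustrative, not part of the original file): the two lines below
   end at the same file position; bufseek() is absolute while bufadvance() is
   relative to the current read pointer:

       bufseek(id, 1000);
       bufseek(id, 500); bufadvance(id, 500);

   A bufadvance() outside the buffered region triggers a rebuffer via
   Q_REBUFFER_HANDLE, exactly like an out-of-range bufseek(). */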
/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
 * actual amount of data available for reading. This function explicitly
 * does not check the validity of the input handle. It does do range checks
 * on size and returns a valid (and explicit) amount of data for reading */
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
                                          bool guardbuf_limit)
{
    struct memory_handle *h = find_handle(handle_id);
    size_t realsize;

    if (!h)
        return NULL;

    size_t avail = handle_size_available(h);

    if (avail == 0 && h->filerem == 0) {
        /* File is finished reading */
        *size = 0;
        return h;
    }

    realsize = *size;

    if (realsize == 0 || realsize > avail + h->filerem)
        realsize = avail + h->filerem;

    if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO
        && realsize > GUARD_BUFSIZE) {
        logf("data request > guardbuf");
        /* If more than the size of the guardbuf is requested and this is a
         * bufgetdata, limit to guard_bufsize over the end of the buffer */
        realsize = MIN(realsize, buffer_len - h->ridx + GUARD_BUFSIZE);
        /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
    }

    if (h->filerem > 0 && avail < realsize) {
        /* Data isn't ready. Request buffering */
        buf_request_buffer_handle(handle_id);
        /* Wait for the data to be ready */
        do
        {
            sleep(1);
            /* it is not safe for a non-buffering thread to sleep while
             * holding a handle */
            h = find_handle(handle_id);
            if (!h)
                return NULL;
            avail = handle_size_available(h);
        }
        while (h->filerem > 0 && avail < realsize);
    }

    *size = MIN(realsize, avail);
    return h;
}
/* Note: It is safe for the thread responsible for handling the rebuffer
 * cleanup request to call bufread or bufgetdata only when the data will
 * be available-- not if it could be blocked waiting for it in prep_bufdata.
 * It should be apparent that if said thread is being forced to wait for
 * buffering but has not yet responded to the cleanup request, the space
 * can never be cleared to allow further reading of the file because it is
 * not listening to callbacks any longer. */
/* Copy data from the given handle to the dest buffer.
   Return the number of bytes copied or < 0 for failure (handle not found).
   The caller is blocked until the requested amount of data is available.
*/
ssize_t bufread(int handle_id, size_t size, void *dest)
{
    const struct memory_handle *h;
    size_t adjusted_size = size;

    h = prep_bufdata(handle_id, &adjusted_size, false);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + adjusted_size > buffer_len) {
        /* the data wraps around the end of the buffer */
        size_t read = buffer_len - h->ridx;
        memcpy(dest, &buffer[h->ridx], read);
        memcpy(dest+read, buffer, adjusted_size - read);
    } else {
        memcpy(dest, &buffer[h->ridx], adjusted_size);
    }

    return adjusted_size;
}
/* Update the "data" pointer to make the handle's data available to the caller.
   Return the length of the available linear data or < 0 for failure (handle
   not found).
   The caller is blocked until the requested amount of data is available.
   size is the amount of linear data requested. it can be 0 to get as
   much as possible.
   The guard buffer may be used to provide the requested size. This means it's
   unsafe to request more than the size of the guard buffer.
*/
ssize_t bufgetdata(int handle_id, size_t size, void **data)
{
    const struct memory_handle *h;
    size_t adjusted_size = size;

    h = prep_bufdata(handle_id, &adjusted_size, true);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + adjusted_size > buffer_len) {
        /* the data wraps around the end of the buffer :
           use the guard buffer to provide the requested amount of data. */
        size_t copy_n = h->ridx + adjusted_size - buffer_len;
        /* prep_bufdata ensures
           adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
           so copy_n <= GUARD_BUFSIZE */
        memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
    }

    *data = &buffer[h->ridx];
    return adjusted_size;
}
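
/* Example (illustrative, not part of the original file): a codec needing a
   contiguous view of the next 2 KB of audio, even across the buffer's wrap
   point, can rely on the guard buffer copy made above:

       void *p;
       ssize_t got = bufgetdata(id, 2048, &p);
       if (got >= 2048)
           decode_frame(p);

   decode_frame() is a made-up consumer for the sake of the sketch. Requests
   larger than GUARD_BUFSIZE (32 KB) cannot be made linear this way, which is
   why prep_bufdata() clamps them. */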
ssize_t bufgettail(int handle_id, size_t size, void **data)
{
    size_t tidx;
    const struct memory_handle *h;

    h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->filerem)
        return ERR_HANDLE_NOT_DONE;

    /* We don't support tail requests of > guardbuf_size, for simplicity */
    if (size > GUARD_BUFSIZE)
        return ERR_INVALID_VALUE;

    tidx = ringbuf_sub(h->widx, size);

    if (tidx + size > buffer_len) {
        size_t copy_n = tidx + size - buffer_len;
        memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
    }

    *data = &buffer[tidx];
    return size;
}
ssize_t bufcuttail(int handle_id, size_t size)
{
    struct memory_handle *h;
    size_t adjusted_size = size;

    h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->filerem)
        return ERR_HANDLE_NOT_DONE;

    if (h->available < adjusted_size)
        adjusted_size = h->available;

    h->available -= adjusted_size;
    h->filesize -= adjusted_size;
    h->widx = ringbuf_sub(h->widx, adjusted_size);
    if (h == cur_handle)
        buf_widx = h->widx;

    return adjusted_size;
}
/*
SECONDARY EXPORTED FUNCTIONS
============================

buf_handle_offset
buf_request_buffer_handle
buf_set_base_handle
buf_used
register_buffering_callback
unregister_buffering_callback

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/
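
/* Example (illustrative, not part of the original file): a playback engine
   might point the useful-data accounting at the track being played and raise
   the refill threshold for a high-bitrate stream:

       buf_set_base_handle(cur_track_id);
       buf_set_watermark(256*1024);

   cur_track_id is a made-up variable standing for the current handle id, and
   the watermark value is an arbitrary example. */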
ssize_t buf_handle_offset(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;
    return h->offset;
}
void buf_request_buffer_handle(int handle_id)
{
    LOGFQUEUE("buffering >| Q_START_FILL %d", handle_id);
    queue_send(&buffering_queue, Q_START_FILL, handle_id);
}

void buf_set_base_handle(int handle_id)
{
    LOGFQUEUE("buffering > Q_BASE_HANDLE %d", handle_id);
    queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
}
/* Return the amount of buffer space used */
size_t buf_used(void)
{
    return BUF_USED;
}

void buf_set_watermark(size_t bytes)
{
    conf_watermark = bytes;
}
static void shrink_buffer_inner(struct memory_handle *h)
{
    if (h == NULL)
        return;

    shrink_buffer_inner(h->next);

    shrink_handle(h);
}

static void shrink_buffer(void)
{
    logf("shrink_buffer()");
    shrink_buffer_inner(first_handle);
}
void buffering_thread(void)
{
    bool filling = false;
    struct queue_event ev;
    struct buf_message_data *parm;

    while (true)
    {
        queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);

        switch (ev.id)
        {
            case Q_START_FILL:
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                /* Call buffer callbacks here because this is one of two ways
                 * to begin a full buffer fill */
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                filling |= buffer_handle((int)ev.data, 0);
                break;

            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data, 0);
                break;

            case Q_REBUFFER_HANDLE:
                parm = (struct buf_message_data *)ev.data;
                LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld",
                          parm->handle_id, parm->data);
                rebuffer_handle(parm->handle_id, parm->data);
                break;

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case Q_BASE_HANDLE:
                LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
                base_handle_id = (int)ev.data;
                break;

#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
            case SYS_USB_CONNECTED:
                LOGFQUEUE("buffering < SYS_USB_CONNECTED");
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                usb_wait_for_disconnect(&buffering_queue);
                break;
#endif

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        update_data_counters(NULL);

        /* If the buffer is low, call the callbacks to get new data */
        if (num_handles > 0 && data_counters.useful <= conf_watermark)
            send_event(BUFFER_EVENT_BUFFER_LOW, 0);

        /* TODO: This needs to be fixed to use the idle callback, disable it
         * for simplicity until its done right */
#if 0
        /* If the disk is spinning, take advantage by filling the buffer */
        else if (storage_disk_is_active() && queue_empty(&buffering_queue)) {
            if (num_handles > 0 && data_counters.useful <= high_watermark)
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);

            if (data_counters.remaining > 0 && BUF_USED <= high_watermark) {
                /* This is a new fill, shrink the buffer up first */
                if (!filling)
                    shrink_buffer();
                filling = fill_buffer();
                update_data_counters(NULL);
            }
        }
#endif

        if (queue_empty(&buffering_queue)) {
            if (filling) {
                if (data_counters.remaining > 0 && BUF_USED < buffer_len)
                    filling = fill_buffer();
                else if (data_counters.remaining == 0)
                    filling = false;
            } else if (ev.id == SYS_TIMEOUT) {
                if (data_counters.remaining > 0 &&
                    data_counters.useful <= conf_watermark) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}
void buffering_init(void)
{
    mutex_init(&llist_mutex);

    conf_watermark = BUFFERING_DEFAULT_WATERMARK;

    queue_init(&buffering_queue, true);
    buffering_thread_id = create_thread( buffering_thread, buffering_stack,
            sizeof(buffering_stack), CREATE_THREAD_FROZEN,
            buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
            IF_COP(, CPU));

    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
                            buffering_thread_id);
}
/* Initialise the buffering subsystem */
bool buffering_reset(char *buf, size_t buflen)
{
    /* Wraps of storage-aligned data must also be storage aligned,
       thus buf and buflen must be aligned to an integer multiple of
       the storage alignment */
    STORAGE_ALIGN_BUFFER(buf, buflen);

    if (!buf || !buflen)
        return false;

    buffer = buf;
    buffer_len = buflen;
    guard_buffer = buf + buflen;

    buf_widx = 0;
    buf_ridx = 0;

    first_handle = NULL;
    cur_handle = NULL;
    cached_handle = NULL;
    num_handles = 0;
    base_handle_id = -1;

    /* Set the high watermark as 75% full...or 25% empty :) */
    high_watermark = 3*buflen / 4;

    thread_thaw(buffering_thread_id);

    return true;
}
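
/* Example boot-time sequence (illustrative, not part of the original file;
   audiobuf/audiobuflen are placeholders for whatever memory the caller owns):

       buffering_init();
       if (!buffering_reset(audiobuf, audiobuflen))
           panicf("buffering_reset failed");

   buffering_init() creates the (frozen) buffering thread once at startup;
   buffering_reset() hands it a fresh buffer and thaws it. */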
void buffering_get_debugdata(struct buffering_debug *dbgdata)
{
    struct data_counters dc;
    update_data_counters(&dc);
    dbgdata->num_handles = num_handles;
    dbgdata->data_rem = dc.remaining;
    dbgdata->wasted_space = dc.wasted;
    dbgdata->buffered_data = dc.buffered;
    dbgdata->useful_data = dc.useful;
    dbgdata->watermark = conf_watermark;
}