/***************************************************************************
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *
 * Copyright (C) 2007 Nicolas Pennequin
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "buffering.h"
#include "mp3_playback.h"

#define ata_disk_is_active() 1
#if MEM > 8
#define GUARD_BUFSIZE   (32*1024)
#else
#define GUARD_BUFSIZE   (8*1024)
#endif
/* Define LOGF_ENABLE to enable logf output in this file */
/*#define LOGF_ENABLE*/

/* Macros to enable logf for queues;
   logging on SYS_TIMEOUT can be disabled */

/* Define this for logf output of all queuing except SYS_TIMEOUT */
#define BUFFERING_LOGQUEUES
/* Define this to logf SYS_TIMEOUT messages */
/* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
#ifdef BUFFERING_LOGQUEUES
#define LOGFQUEUE  logf
#else
#define LOGFQUEUE(...)
#endif

#ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
#define LOGFQUEUE_SYS_TIMEOUT  logf
#else
#define LOGFQUEUE_SYS_TIMEOUT(...)
#endif
/* default point to start buffer refill */
#define BUFFERING_DEFAULT_WATERMARK      (1024*512)
/* amount of data to read in one read() call */
#define BUFFERING_DEFAULT_FILECHUNK      (1024*32)
/* point at which the file buffer will fight for CPU time */
#define BUFFERING_CRITICAL_LEVEL         (1024*128)
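
/* Rough orientation (illustrative arithmetic, not taken from the original
   sources): at 128 kbps a codec consumes about 16 KiB of compressed data per
   second, so the 512 KiB default watermark corresponds to roughly half a
   minute of playback and the 128 KiB critical level to about eight seconds.
   The high watermark used for opportunistic refills is set separately, in
   buffering_init(), to 3/4 of the whole buffer. */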
/* Ring buffer helper macros */
/* Buffer pointer (p) plus value (v), wrapped if necessary */
#define RINGBUF_ADD(p,v) ((p+v)<buffer_len ? p+v : p+v-buffer_len)
/* Buffer pointer (p) minus value (v), wrapped if necessary */
#define RINGBUF_SUB(p,v) ((p>=v) ? p-v : p+buffer_len-v)
/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
#define RINGBUF_ADD_CROSS(p1,v,p2) \
    ((p1<p2) ? (int)(p1+v)-(int)p2 : (int)(p1+v-p2)-(int)buffer_len)
/* Bytes available in the buffer */
#define BUF_USED RINGBUF_SUB(buf_widx, buf_ridx)
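
/* Worked example of the wrap arithmetic (illustrative values, assuming
   buffer_len == 1024):
     RINGBUF_ADD(1000, 100)           -> 76   (1100 wraps past the end)
     RINGBUF_SUB(50, 200)             -> 874  (borrows one buffer length)
     RINGBUF_ADD_CROSS(1000, 100, 40) -> 36   (advancing 100 bytes from 1000
                                               overshoots position 40 by 36)
   A non-negative RINGBUF_ADD_CROSS result means the advance would collide
   with the position given as the third argument. */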
struct memory_handle {
    int id;                    /* A unique ID for the handle */
    enum data_type type;       /* Type of data buffered with this handle */
    char path[MAX_PATH];       /* Path if data originated in a file */
    int fd;                    /* File descriptor to path (-1 if closed) */
    size_t data;               /* Start index of the handle's data buffer */
    volatile size_t ridx;      /* Current read pointer, relative to the main buffer */
    size_t widx;               /* Current write pointer */
    size_t filesize;           /* File total length */
    size_t filerem;            /* Remaining bytes of file NOT in buffer */
    volatile size_t available; /* Available bytes to read from buffer */
    size_t offset;             /* Offset at which we started reading the file */
    struct memory_handle *next;
};
/* at all times, we have: filesize == offset + available + filerem */
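
/* Example of the invariant with made-up numbers: a 2 MiB file buffered
   starting at offset 512 KiB, of which 1 MiB has been read into the buffer
   so far, leaves filerem == 2048 - 512 - 1024 == 512 KiB still to fetch. */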
static char *buffer;
static char *guard_buffer;

static size_t buffer_len;

static volatile size_t buf_widx;  /* current writing position */
static volatile size_t buf_ridx;  /* current reading position */
/* buf_*idx are values relative to the buffer, not real pointers. */

static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
static size_t conf_filechunk = 0; /* Largest chunk the codec accepts */
static size_t conf_preseek   = 0; /* Codec pre-seek margin */

static size_t high_watermark = 0; /* High watermark for rebuffer */

/* current memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *cur_handle;
/* first memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *first_handle;

static int num_handles;  /* number of handles in the list */

static int base_handle_id;

static struct mutex llist_mutex;
/* Handle cache (makes find_handle faster).
   This needs to be global so that move_handle can invalidate it. */
static struct memory_handle *cached_handle = NULL;

static buffer_low_callback buffer_low_callback_funcs[MAX_BUF_CALLBACKS];
static int buffer_callback_count = 0;
static struct data_counters
{
    size_t remaining;  /* Amount of data needing to be buffered */
    size_t wasted;     /* Amount of space available for freeing */
    size_t buffered;   /* Amount of data currently in the buffer */
    size_t useful;     /* Amount of data still useful to the user */
} data_counters;
/* Messages available to communicate with the buffering thread */
enum {
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle */
    Q_RESET_HANDLE,      /* (internal) Request resetting of a handle to its
                            offset (the offset has to be set beforehand) */
    Q_CLOSE_HANDLE,      /* Request closing a handle */
    Q_BASE_HANDLE,       /* Set the reference handle for buf_useful_data */

    /* Configuration messages, sent by buf_set_conf() */
    Q_SET_WATERMARK,
    Q_SET_CHUNKSIZE,
    Q_SET_PRESEEK,
};
/* Buffering thread */
void buffering_thread(void);
static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
static const char buffering_thread_name[] = "buffering";
static struct thread_entry *buffering_thread_p;
static struct event_queue buffering_queue;
static struct queue_sender_list buffering_queue_sender_list;
/*
LINKED LIST MANAGEMENT
======================

add_handle  : Add a handle to the list
rm_handle   : Remove a handle from the list
find_handle : Get a handle pointer from an ID
move_handle : Move a handle in the buffer (with or without its data)

These functions only handle the linked list structure. They don't touch the
contents of the struct memory_handle headers. They also change the buf_*idx
pointers when necessary and manage the handle IDs.

The first and current (== last) handles are kept track of.
A new handle is added at buf_widx and becomes the current one.
buf_widx always points to the current writing position for the current handle.
buf_ridx always points to the location of the first handle.
buf_ridx == buf_widx means the buffer is empty.
*/
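
/* Rough picture of the resulting layout (a sketch for orientation, not taken
   from the original sources):

   |<---------------------------- buffer_len ---------------------------->|
   [hdl A][ A's data ...........][hdl B][ B's data ......][ free .........]
    ^                             ^                        ^
    buf_ridx (== first_handle)    first_handle->next       buf_widx
                                                            (cur_handle writes here)

   Handles and their data are laid out back to back in allocation order and
   wrap around the end of the buffer when necessary. */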
/* Add a new handle to the linked list and return it. It will become the
   new current handle. "data_size" must contain the size of what will be in the
   handle. On return, it's the size available for the handle. */
static struct memory_handle *add_handle(size_t *data_size)
{
    mutex_lock(&llist_mutex);

    /* this will give each handle a unique id */
    static int cur_handle_id = 1;

    /* make sure buf_widx is 32-bit aligned so that the handle struct is,
       but before that we check we can actually align. */
    if (RINGBUF_ADD_CROSS(buf_widx, 3, buf_ridx) >= 0) {
        mutex_unlock(&llist_mutex);
        return NULL;
    }
    buf_widx = (RINGBUF_ADD(buf_widx, 3)) & ~3;

    size_t len = (data_size ? *data_size : 0)
                 + sizeof(struct memory_handle);

    /* check that we actually can add the handle and its data */
    int overlap = RINGBUF_ADD_CROSS(buf_widx, len, buf_ridx);
    if (overlap >= 0) {
        *data_size -= overlap;
        len -= overlap;
    }

    if (len < sizeof(struct memory_handle)) {
        /* There isn't even enough space to write the struct */
        mutex_unlock(&llist_mutex);
        return NULL;
    }

    struct memory_handle *new_handle =
        (struct memory_handle *)(&buffer[buf_widx]);

    /* only advance the buffer write index of the size of the struct */
    buf_widx = RINGBUF_ADD(buf_widx, sizeof(struct memory_handle));

    if (!first_handle)
        /* the new handle is the first one */
        first_handle = new_handle;

    if (cur_handle)
        cur_handle->next = new_handle;

    cur_handle = new_handle;
    cur_handle->id = cur_handle_id++;
    cur_handle->next = NULL;
    num_handles++;

    mutex_unlock(&llist_mutex);
    return cur_handle;
}
/* Delete a given memory handle from the linked list
   and return true for success. Nothing is actually erased from memory. */
static bool rm_handle(struct memory_handle *h)
{
    mutex_lock(&llist_mutex);

    if (h == first_handle) {
        first_handle = h->next;
        if (h == cur_handle) {
            /* h was the first and last handle: the buffer is now empty */
            cur_handle = NULL;
            buf_ridx = buf_widx = 0;
        } else {
            /* update buf_ridx to point to the new first handle */
            buf_ridx = (void *)first_handle - (void *)buffer;
        }
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != h) {
            m = m->next;
        }
        if (h && m && m->next == h) {
            m->next = h->next;
            if (h == cur_handle) {
                cur_handle = m;
                buf_widx = cur_handle->widx;
            }
        } else {
            mutex_unlock(&llist_mutex);
            return false;
        }
    }

    /* Invalidate the cache to prevent it from keeping the old location of h */
    if (h == cached_handle)
        cached_handle = NULL;

    num_handles--;

    mutex_unlock(&llist_mutex);
    return true;
}
/* Return a pointer to the memory handle of given ID.
   NULL if the handle wasn't found */
static struct memory_handle *find_handle(int handle_id)
{
    mutex_lock(&llist_mutex);

    /* simple caching because most of the time the requested handle
       will either be the same as the last, or the one after the last */
    if (cached_handle)
    {
        if (cached_handle->id == handle_id) {
            mutex_unlock(&llist_mutex);
            return cached_handle;
        } else if (cached_handle->next &&
                   (cached_handle->next->id == handle_id)) {
            cached_handle = cached_handle->next;
            mutex_unlock(&llist_mutex);
            return cached_handle;
        }
    }

    struct memory_handle *m = first_handle;
    while (m && m->id != handle_id) {
        m = m->next;
    }
    /* This condition can only be reached with !m or m->id == handle_id */
    if (m)
        cached_handle = m;

    mutex_unlock(&llist_mutex);
    return m;
}
/* Move a memory handle and data_size bytes of its data by delta.
   Return a pointer to the new location of the handle.
   delta is the value by which to move the struct and data.
   data_size is the amount of data to move along with the struct. */
static struct memory_handle *move_handle(struct memory_handle *h,
                                         size_t *delta, size_t data_size)
{
    mutex_lock(&llist_mutex);

    if (*delta < 4) {
        /* aligning backwards would yield a negative result,
           and moving the handle of such a small amount is a waste
           of time anyway. */
        mutex_unlock(&llist_mutex);
        return NULL;
    }
    /* make sure delta is 32-bit aligned so that the handle struct is. */
    *delta = (*delta - 3) & ~3;

    size_t newpos = RINGBUF_ADD((void *)h - (void *)buffer, *delta);

    struct memory_handle *dest = (struct memory_handle *)(&buffer[newpos]);

    /* Invalidate the cache to prevent it from keeping the old location of h */
    if (h == cached_handle)
        cached_handle = NULL;

    /* the cur_handle pointer might need updating */
    if (h == cur_handle) {
        cur_handle = dest;
    }

    if (h == first_handle) {
        first_handle = dest;
        buf_ridx = newpos;
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != h) {
            m = m->next;
        }
        if (h && m && m->next == h) {
            m->next = dest;
        } else {
            mutex_unlock(&llist_mutex);
            return NULL;
        }
    }

    memmove(dest, h, sizeof(struct memory_handle) + data_size);

    mutex_unlock(&llist_mutex);
    return dest;
}
/*
BUFFER SPACE MANAGEMENT
=======================

yield_codec     : Used by buffer_handle to know if it should interrupt buffering
buffer_handle   : Buffer data for a handle
reset_handle    : Reset writing position and data buffer of a handle to its
                  current offset
rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
shrink_handle   : Free buffer space by moving a handle
fill_buffer     : Call buffer_handle for all handles that have data to buffer
can_add_handle  : Indicate whether it's safe to add a handle

These functions are used by the buffering thread to manage buffer space.
*/
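
/* How these pieces fit together, as a sketch (summarised from the functions
   below, not a literal call trace):

     buffering_thread()
       -> fill_buffer()            when the disk is spinning or data runs low
            -> buffer_handle(id)   for every handle with filerem > 0; reads
                                   conf_filechunk bytes per read() call and
                                   consults yield_codec() between chunks so
                                   buffering backs off while the codec side
                                   is short of PCM data
       -> shrink_handle(id)        on idle timeouts, to reclaim space already
                                   read (or to move finished metadata handles)
*/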
static inline bool filebuf_is_lowdata(void)
{
    return BUF_USED < BUFFERING_CRITICAL_LEVEL;
}

/* Yield to the codec thread for as long as possible if it is in need of data.
   Return true if the caller should break to let the buffering thread process
   new queue events. */
static bool yield_codec(void)
{
    yield();

    if (!queue_empty(&buffering_queue))
        return true;

    while (pcmbuf_is_lowdata() && !filebuf_is_lowdata())
    {
        sleep(2);

        if (!queue_empty(&buffering_queue))
            return true;
    }

    return false;
}
/* Buffer data for the given handle. Return the amount of data buffered
   or -1 if the handle wasn't found */
static ssize_t buffer_handle(int handle_id)
{
    logf("buffer_handle(%d)", handle_id);
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return -1;

    if (h->filerem == 0) {
        /* nothing left to buffer */
        return 0;
    }

    if (h->fd < 0)  /* file closed, reopen */
    {
        h->fd = open(h->path, O_RDONLY);
        if (h->fd < 0)
            return -1;

        if (h->offset)
            lseek(h->fd, h->offset, SEEK_SET);
    }

    ssize_t ret = 0;
    while (h->filerem > 0)
    {
        /* max amount to copy */
        size_t copy_n = MIN( MIN(h->filerem, conf_filechunk),
                             buffer_len - h->widx);

        /* stop copying if it would overwrite the reading position
           or the next handle */
        if (RINGBUF_ADD_CROSS(h->widx, copy_n, buf_ridx) >= 0 || (h->next &&
            RINGBUF_ADD_CROSS(h->widx, copy_n, (unsigned)
            ((void *)h->next - (void *)buffer)) > 0))
            break;

        /* rc is the actual amount read */
        int rc = read(h->fd, &buffer[h->widx], copy_n);

        if (rc <= 0)
        {
            /* read error, or the file ended sooner than expected */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
            h->filesize -= h->filerem;
            h->filerem = 0;
            break;
        }

        /* advance the write positions */
        h->widx = RINGBUF_ADD(h->widx, rc);
        if (h == cur_handle)
            buf_widx = h->widx;
        h->available += rc;
        h->filerem -= rc;
        ret += rc;

        /* Stop buffering if new queue events have arrived */
        if (yield_codec())
            break;
    }

    if (h->filerem == 0) {
        /* finished buffering the file */
        close(h->fd);
        h->fd = -1;
    }

    return ret;
}
/* Reset writing position and data buffer of a handle to its current offset.
   Use this after having set the new offset to use. */
static void reset_handle(int handle_id)
{
    logf("reset_handle(%d)", handle_id);

    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return;

    h->widx = h->data;
    if (h == cur_handle)
        buf_widx = h->widx;
    h->available = 0;
    h->filerem = h->filesize - h->offset;

    if (h->fd >= 0)
        lseek(h->fd, h->offset, SEEK_SET);
}
/* Seek to a nonbuffered part of a handle by rebuffering the data. */
static void rebuffer_handle(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return;

    /* Set the new offset first: reset_handle requires it to be in place. */
    h->offset = newpos;

    LOGFQUEUE("? >| buffering Q_RESET_HANDLE");
    queue_send(&buffering_queue, Q_RESET_HANDLE, handle_id);

    LOGFQUEUE("? >| buffering Q_BUFFER_HANDLE");
    queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);

    h->ridx = h->data;
}
static bool close_handle(int handle_id)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return false;

    if (h->fd >= 0) {
        close(h->fd);
        h->fd = -1;
    }

    return rm_handle(h);
}
/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data. */
static void shrink_handle(int handle_id)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return;

    size_t delta;
    /* The value of delta might change for alignment reasons */

    if (h->next && (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
        h->type == TYPE_IMAGE) && h->filerem == 0 )
    {
        /* metadata handle: we can move all of it */
        delta = RINGBUF_SUB( (unsigned)((void *)h->next - (void *)buffer),
                             h->data) - h->available;
        h = move_handle(h, &delta, h->available);
        if (!h)
            return;

        size_t olddata = h->data;
        h->data = RINGBUF_ADD(h->data, delta);
        h->ridx = RINGBUF_ADD(h->ridx, delta);
        h->widx = RINGBUF_ADD(h->widx, delta);

        /* when moving a struct mp3entry we need to readjust its pointers. */
        if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
            adjust_mp3entry((struct mp3entry *)&buffer[h->data],
                            (void *)&buffer[h->data],
                            (void *)&buffer[olddata]);
        }
    }
    else
    {
        /* only move the handle struct */
        delta = RINGBUF_SUB(h->ridx, h->data);
        h = move_handle(h, &delta, 0);
        if (!h)
            return;

        h->data = RINGBUF_ADD(h->data, delta);
        h->available -= delta;
        h->offset += delta;
    }
}
/* Fill the buffer by buffering as much data as possible for handles that still
   have data left to buffer */
static void fill_buffer(void)
{
    logf("fill_buffer()");
    struct memory_handle *m = first_handle;
    while (queue_empty(&buffering_queue) && m) {
        if (m->filerem > 0) {
            buffer_handle(m->id);
        }
        m = m->next;
    }

    if (queue_empty(&buffering_queue)) {
        /* only spin the disk down if the filling wasn't interrupted by an
           event arriving in the queue. */
        ata_sleep();
    }
}
/* Check whether it's safe to add a new handle and reserve space to let the
   current one finish buffering its data. Used by bufopen and bufalloc as
   a preliminary check before even trying to physically add the handle.
   Returns true if it's ok to add a new handle, false if not.
*/
static bool can_add_handle(void)
{
    if (cur_handle && cur_handle->filerem > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        if (cur_handle->filerem < (buffer_len - BUF_USED)) {
            /* Before adding the new handle we reserve some space for the
               current one to finish buffering its data. */
            buf_widx = RINGBUF_ADD(buf_widx, cur_handle->filerem);
        } else {
            return false;
        }
    }

    return true;
}
void update_data_counters(void)
{
    struct memory_handle *m = find_handle(base_handle_id);

    memset(&data_counters, 0, sizeof(data_counters));

    m = first_handle;
    while (m) {
        data_counters.buffered += m->available;
        data_counters.wasted += RINGBUF_SUB(m->ridx, m->data);
        data_counters.remaining += m->filerem;

        if (m->id >= base_handle_id)
            data_counters.useful += RINGBUF_SUB(m->widx, m->ridx);

        m = m->next;
    }
}
/*
MAIN BUFFERING API CALLS
========================

bufopen     : Request the opening of a new handle for a file
bufalloc    : Open a new handle for data other than a file
bufclose    : Close an open handle
bufseek     : Set the read pointer in a handle
bufadvance  : Move the read pointer in a handle
bufread     : Copy data from a handle into a given buffer
bufgetdata  : Give a pointer to the handle's data

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/
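
/* Caller-side sketch of a handle's typical life cycle (illustrative only:
   the path, sizes and error handling are made up, not taken from callers of
   this API):

     int id = bufopen("/music/track.mp3", 0, TYPE_AUDIO);
     if (id >= 0) {
         buf_request_buffer_handle(id);  // ask the buffering thread to fill it
         char chunk[512];
         ssize_t got = bufread(id, sizeof(chunk), chunk);
         if (got > 0)
             bufadvance(id, got);        // consume what was copied
         bufclose(id);
     }
*/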
/* Reserve space in the buffer for a file.
   filename: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           (offset-1) bytes of the file aren't needed.
   return value: <0 if the file cannot be opened or if a file is already
   queued to be opened, otherwise the handle for the file in the buffer
*/
int bufopen(const char *file, size_t offset, enum data_type type)
{
    if (!can_add_handle())
        return -2;

    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return -1;

    size_t size = filesize(fd) - offset;

    if (type != TYPE_AUDIO &&
        size + sizeof(struct memory_handle) > buffer_len - buf_widx)
    {
        /* for types other than audio, the data can't wrap, so we force it
           to start at the beginning of the buffer */
        buf_widx = 0;
    }

    struct memory_handle *h = add_handle(&size);
    if (!h)
    {
        DEBUGF("bufopen: failed to add handle\n");
        close(fd);
        return -2;
    }

    strncpy(h->path, file, MAX_PATH);
    h->fd = fd;
    h->filesize = filesize(fd);
    h->filerem = h->filesize - offset;
    h->offset = offset;
    h->ridx = buf_widx;
    h->widx = buf_widx;
    h->data = buf_widx;
    h->available = 0;
    h->type = type;

    if (type == TYPE_CODEC || type == TYPE_CUESHEET || type == TYPE_IMAGE) {
        /* Immediately buffer those */
        LOGFQUEUE("? >| buffering Q_BUFFER_HANDLE");
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id);
    }

    logf("bufopen: new hdl %d", h->id);
    return h->id;
}
/* Open a new handle from data that needs to be copied from memory.
   src is the source buffer from which to copy data. It can be NULL to simply
   reserve buffer space.
   size is the requested size. The call will only be successful if the
   requested amount of data can entirely fit in the buffer without wrapping.
   Return value is the handle id for success or <0 for failure.
*/
int bufalloc(const void *src, size_t size, enum data_type type)
{
    if (!can_add_handle())
        return -2;

    if (buf_widx + size + sizeof(struct memory_handle) > buffer_len) {
        /* The data would need to wrap. */
        DEBUGF("bufalloc: data wrap\n");
        return -2;
    }

    size_t allocsize = size;
    struct memory_handle *h = add_handle(&allocsize);

    if (!h || allocsize != size)
        return -2;

    if (src) {
        if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
            /* specially take care of struct mp3entry */
            copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
                          (struct mp3entry *)src);
        } else {
            memcpy(&buffer[buf_widx], src, size);
        }
    }

    h->fd = -1;
    h->filesize = size;
    h->filerem = 0;
    h->offset = 0;
    h->ridx = buf_widx;
    h->widx = buf_widx + size; /* this is safe because the data doesn't wrap */
    h->data = buf_widx;
    h->available = size;
    h->type = type;

    buf_widx += size;  /* safe too */

    logf("bufalloc: new hdl %d", h->id);
    return h->id;
}
/* Close the handle. Return true for success and false for failure */
bool bufclose(int handle_id)
{
    logf("bufclose(%d)", handle_id);

    LOGFQUEUE("buffering >| Q_CLOSE_HANDLE");
    return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
}
/* Set the reading index in a handle (relative to the start of the file).
   Access before the available data will trigger a rebuffer.
   Return 0 for success and < 0 for failure:
     -1 if the handle wasn't found
     -2 if the new requested position was beyond the end of the file
*/
int bufseek(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return -1;

    if (newpos > h->filesize) {
        /* access beyond the end of the file */
        return -2;
    }
    else if (newpos < h->offset || h->offset + h->available < newpos) {
        /* access before or after buffered data. A rebuffer is needed. */
        rebuffer_handle(handle_id, newpos);
    }
    else {
        h->ridx = RINGBUF_ADD(h->data, newpos - h->offset);
    }

    return 0;
}
/* Advance the reading index in a handle (relative to its current position).
   Return 0 for success and < 0 for failure */
int bufadvance(int handle_id, off_t offset)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return -1;

    size_t newpos = h->offset + RINGBUF_SUB(h->ridx, h->data) + offset;
    return bufseek(handle_id, newpos);
}
/* Copy data from the given handle to the dest buffer.
   Return the number of bytes copied or < 0 for failure. */
ssize_t bufread(int handle_id, size_t size, void *dest)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return -1;

    size_t ret;
    size_t copy_n = RINGBUF_SUB(h->widx, h->ridx);

    if (size == 0 && h->filerem > 0 && copy_n == 0)
        /* Data isn't ready */
        return -2;

    if (copy_n < size && h->filerem > 0)
        /* Data isn't ready */
        return -2;

    if (copy_n == 0 && h->filerem == 0)
        /* File is finished reading */
        return 0;

    ret = MIN(size, copy_n);

    if (h->ridx + ret > buffer_len)
    {
        /* the data wraps around the end of the buffer */
        size_t read = buffer_len - h->ridx;
        memcpy(dest, &buffer[h->ridx], read);
        memcpy(dest+read, buffer, ret - read);
    }
    else
    {
        memcpy(dest, &buffer[h->ridx], ret);
    }

    return ret;
}
/* Update the "data" pointer to make the handle's data available to the caller.
   Return the length of the available linear data or < 0 for failure.
   size is the amount of linear data requested. It can be 0 to get as
   much data as possible.
   The guard buffer may be used to provide the requested size. */
ssize_t bufgetdata(int handle_id, size_t size, void **data)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return -1;

    ssize_t ret;
    size_t copy_n = RINGBUF_SUB(h->widx, h->ridx);

    if (size == 0 && h->filerem > 0 && copy_n == 0)
        /* Data isn't ready */
        return -2;

    if (copy_n < size && h->filerem > 0)
        /* Data isn't ready */
        return -2;

    if (copy_n == 0 && h->filerem == 0)
        /* File is finished reading */
        return 0;

    if (h->ridx + size > buffer_len && copy_n >= size)
    {
        /* the data wraps around the end of the buffer:
           use the guard buffer to provide the requested amount of data. */
        size_t copy_n = MIN(h->ridx + size - buffer_len, GUARD_BUFSIZE);
        memcpy(guard_buffer, (unsigned char *)buffer, copy_n);
        ret = buffer_len - h->ridx + copy_n;
    }
    else
    {
        ret = MIN(copy_n, buffer_len - h->ridx);
    }

    *data = &buffer[h->ridx];
    return ret;
}
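
/* Worked example of the guard-buffer path above (illustrative numbers only):
   with buffer_len == 1024, h->ridx == 1000 and a request of size == 100, the
   24 bytes at the end of the ring and the 76 bytes at its start form one
   logical run. The first 76 bytes of the buffer are copied into guard_buffer,
   which sits directly after the main buffer in memory, so the caller sees
   100 linearly addressable bytes starting at &buffer[1000]. */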
/*
SECONDARY EXPORTED FUNCTIONS
============================

buf_get_offset
buf_handle_offset
buf_request_buffer_handle
buf_set_base_handle
buf_used
register_buffer_low_callback
unregister_buffer_low_callback

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/
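
/* Sketch of how a client might use the buffer-low callback hook (the callback
   body is invented for illustration and is not part of this file):

     static void low_buffer_cb(void)
     {
         // Called from the buffering thread when useful data drops below
         // the watermark; a typical reaction is to queue more handles,
         // e.g. by calling bufopen() for the next track's files.
     }

     register_buffer_low_callback(low_buffer_cb);
     ...
     unregister_buffer_low_callback(low_buffer_cb);

   Note that call_buffer_low_callbacks() below clears each entry after calling
   it, so a client re-registers its callback when it wants to be notified
   again. */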
/* Get a handle offset from a pointer */
ssize_t buf_get_offset(int handle_id, void *ptr)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return -1;

    return (size_t)ptr - (size_t)&buffer[h->ridx];
}
ssize_t buf_handle_offset(int handle_id)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return -1;

    return h->offset;
}
void buf_request_buffer_handle(int handle_id)
{
    LOGFQUEUE("buffering >| buffering Q_BUFFER_HANDLE");
    queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
}

void buf_set_base_handle(int handle_id)
{
    LOGFQUEUE("buffering > buffering Q_BASE_HANDLE");
    queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
}
/* Return the amount of buffer space used */
size_t buf_used(void)
{
    return BUF_USED;
}
void buf_set_conf(int setting, size_t value)
{
    int msg;
    switch (setting)
    {
        case BUFFERING_SET_WATERMARK:
            msg = Q_SET_WATERMARK;
            break;

        case BUFFERING_SET_CHUNKSIZE:
            msg = Q_SET_CHUNKSIZE;
            break;

        case BUFFERING_SET_PRESEEK:
            msg = Q_SET_PRESEEK;
            break;

        default:
            return;
    }
    queue_post(&buffering_queue, msg, value);
}
bool register_buffer_low_callback(buffer_low_callback func)
{
    int i;
    if (buffer_callback_count >= MAX_BUF_CALLBACKS)
        return false;

    for (i = 0; i < MAX_BUF_CALLBACKS; i++)
    {
        if (buffer_low_callback_funcs[i] == NULL)
        {
            buffer_low_callback_funcs[i] = func;
            buffer_callback_count++;
            return true;
        }
        else if (buffer_low_callback_funcs[i] == func)
            return true;
    }
    return false;
}

void unregister_buffer_low_callback(buffer_low_callback func)
{
    int i;
    for (i = 0; i < MAX_BUF_CALLBACKS; i++)
    {
        if (buffer_low_callback_funcs[i] == func)
        {
            buffer_low_callback_funcs[i] = NULL;
            buffer_callback_count--;
        }
    }
}
static void call_buffer_low_callbacks(void)
{
    int i;
    for (i = 0; i < MAX_BUF_CALLBACKS; i++)
    {
        if (buffer_low_callback_funcs[i])
        {
            buffer_low_callback_funcs[i]();
            buffer_low_callback_funcs[i] = NULL;
            buffer_callback_count--;
        }
    }
}
void buffering_thread(void)
{
    struct queue_event ev;

    while (true)
    {
        queue_wait_w_tmo(&buffering_queue, &ev, HZ/2);

        switch (ev.id)
        {
            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE");
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data);
                break;

            case Q_RESET_HANDLE:
                LOGFQUEUE("buffering < Q_RESET_HANDLE");
                queue_reply(&buffering_queue, 1);
                reset_handle((int)ev.data);
                break;

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE");
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_BASE_HANDLE:
                LOGFQUEUE("buffering < Q_BASE_HANDLE");
                base_handle_id = (int)ev.data;
                break;

            case Q_SET_WATERMARK:
                LOGFQUEUE("buffering < Q_SET_WATERMARK");
                conf_watermark = (size_t)ev.data;
                break;

            case Q_SET_CHUNKSIZE:
                LOGFQUEUE("buffering < Q_SET_CHUNKSIZE");
                conf_filechunk = (size_t)ev.data;
                break;

            case Q_SET_PRESEEK:
                LOGFQUEUE("buffering < Q_SET_PRESEEK");
                conf_preseek = (size_t)ev.data;
                break;

            case SYS_USB_CONNECTED:
                LOGFQUEUE("buffering < SYS_USB_CONNECTED");
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                usb_wait_for_disconnect(&buffering_queue);
                break;

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        update_data_counters();

        /* If the buffer is low, call the callbacks to get new data */
        if (num_handles > 0 && data_counters.useful < conf_watermark)
        {
            call_buffer_low_callbacks();
        }

        /* If the disk is spinning, take advantage by filling the buffer */
        if (ata_disk_is_active() && queue_empty(&buffering_queue) &&
            data_counters.remaining > 0 &&
            data_counters.buffered < high_watermark)
        {
            fill_buffer();
        }

        if (ata_disk_is_active() && queue_empty(&buffering_queue) &&
            num_handles > 0 && data_counters.useful < high_watermark)
        {
            call_buffer_low_callbacks();
        }

        if (ev.id == SYS_TIMEOUT && queue_empty(&buffering_queue))
        {
            if (data_counters.remaining > 0 &&
                data_counters.wasted > data_counters.buffered/2)
            {
                /* free buffer from outdated audio data */
                struct memory_handle *m = first_handle;
                while (m) {
                    if (m->type == TYPE_AUDIO)
                        shrink_handle(m->id);
                    m = m->next;
                }

                /* free buffer by moving metadata */
                m = first_handle;
                while (m) {
                    if (m->type != TYPE_AUDIO)
                        shrink_handle(m->id);
                    m = m->next;
                }

                update_data_counters();
            }

            if (data_counters.remaining > 0 &&
                data_counters.buffered < conf_watermark)
            {
                fill_buffer();
            }
        }
    }
}
/* Initialise the buffering subsystem */
bool buffering_init(char *buf, size_t buflen)
{
    if (!buf || !buflen)
        return false;

    buffer = buf;
    buffer_len = buflen;
    guard_buffer = buf + buflen;

    buf_widx = 0;
    buf_ridx = 0;

    first_handle = NULL;
    cur_handle = NULL;
    cached_handle = NULL;
    num_handles = 0;

    buffer_callback_count = 0;
    memset(buffer_low_callback_funcs, 0, sizeof(buffer_low_callback_funcs));

    mutex_init(&llist_mutex);

    conf_filechunk = BUFFERING_DEFAULT_FILECHUNK;
    conf_watermark = BUFFERING_DEFAULT_WATERMARK;

    /* Set the high watermark as 75% full...or 25% empty :) */
    high_watermark = 3*buflen / 4;

    if (buffering_thread_p == NULL)
    {
        buffering_thread_p = create_thread( buffering_thread, buffering_stack,
                                            sizeof(buffering_stack), 0,
                                            buffering_thread_name
                                            IF_PRIO(, PRIORITY_BUFFERING));

        queue_init(&buffering_queue, true);
        queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list);
    }

    return true;
}
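
/* Sketch of how a caller might hand its buffer to this subsystem (the names
   and sizes are illustrative, not taken from actual callers). Because
   bufgetdata() copies wrapped data into guard_buffer == buf + buflen, the
   caller is expected to keep GUARD_BUFSIZE bytes available immediately after
   the length it reports:

     static char filebuf[512*1024 + GUARD_BUFSIZE];
     ...
     buffering_init(filebuf, sizeof(filebuf) - GUARD_BUFSIZE);
*/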
void buffering_get_debugdata(struct buffering_debug *dbgdata)
{
    update_data_counters();
    dbgdata->num_handles = num_handles;
    dbgdata->data_rem = data_counters.remaining;
    dbgdata->wasted_space = data_counters.wasted;
    dbgdata->buffered_data = data_counters.buffered;
    dbgdata->useful_data = data_counters.useful;
}