/*
 * [kugel-rb.git] / apps / buffering.c
 * blob 22ec821a308125152cc7f2b1406fcc8161a09a2f
 * (imported snapshot; original commit: "fix the charging icon so it will
 *  actually display correctly")
 */
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2007 Nicolas Pennequin
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
22 #include "config.h"
23 #include <stdio.h>
24 #include <string.h>
25 #include <stdlib.h>
26 #include <ctype.h>
27 #include "buffering.h"
29 #include "storage.h"
30 #include "system.h"
31 #include "thread.h"
32 #include "file.h"
33 #include "panic.h"
34 #include "memory.h"
35 #include "lcd.h"
36 #include "font.h"
37 #include "button.h"
38 #include "kernel.h"
39 #include "tree.h"
40 #include "debug.h"
41 #include "sprintf.h"
42 #include "settings.h"
43 #include "codecs.h"
44 #include "audio.h"
45 #include "mp3_playback.h"
46 #include "usb.h"
47 #include "screens.h"
48 #include "playlist.h"
49 #include "pcmbuf.h"
50 #include "buffer.h"
51 #include "bmp.h"
52 #include "appevents.h"
53 #include "metadata.h"
54 #ifdef HAVE_ALBUMART
55 #include "albumart.h"
56 #include "jpeg_load.h"
57 #include "bmp.h"
58 #endif
60 #define GUARD_BUFSIZE (32*1024)
62 /* Define LOGF_ENABLE to enable logf output in this file */
63 /*#define LOGF_ENABLE*/
64 #include "logf.h"
66 /* macros to enable logf for queues
67 logging on SYS_TIMEOUT can be disabled */
68 #ifdef SIMULATOR
69 /* Define this for logf output of all queuing except SYS_TIMEOUT */
70 #define BUFFERING_LOGQUEUES
71 /* Define this to logf SYS_TIMEOUT messages */
72 /* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
73 #endif
75 #ifdef BUFFERING_LOGQUEUES
76 #define LOGFQUEUE logf
77 #else
78 #define LOGFQUEUE(...)
79 #endif
81 #ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
82 #define LOGFQUEUE_SYS_TIMEOUT logf
83 #else
84 #define LOGFQUEUE_SYS_TIMEOUT(...)
85 #endif
87 /* default point to start buffer refill */
88 #define BUFFERING_DEFAULT_WATERMARK (1024*128)
89 /* amount of data to read in one read() call */
90 #define BUFFERING_DEFAULT_FILECHUNK (1024*32)
92 #define BUF_HANDLE_MASK 0x7FFFFFFF
95 /* Ring buffer helper macros */
96 /* Buffer pointer (p) plus value (v), wrapped if necessary */
97 #define RINGBUF_ADD(p,v) (((p)+(v))<buffer_len ? (p)+(v) : (p)+(v)-buffer_len)
98 /* Buffer pointer (p) minus value (v), wrapped if necessary */
99 #define RINGBUF_SUB(p,v) ((p>=v) ? (p)-(v) : (p)+buffer_len-(v))
100 /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
101 #define RINGBUF_ADD_CROSS(p1,v,p2) \
102 ((p1<p2) ? (int)((p1)+(v))-(int)(p2) : (int)((p1)+(v)-(p2))-(int)buffer_len)
103 /* Bytes available in the buffer */
104 #define BUF_USED RINGBUF_SUB(buf_widx, buf_ridx)
106 /* assert(sizeof(struct memory_handle)%4==0) */
107 struct memory_handle {
108 int id; /* A unique ID for the handle */
109 enum data_type type; /* Type of data buffered with this handle */
110 char path[MAX_PATH]; /* Path if data originated in a file */
111 int fd; /* File descriptor to path (-1 if closed) */
112 size_t data; /* Start index of the handle's data buffer */
113 volatile size_t ridx; /* Read pointer, relative to the main buffer */
114 size_t widx; /* Write pointer */
115 size_t filesize; /* File total length */
116 size_t filerem; /* Remaining bytes of file NOT in buffer */
117 volatile size_t available; /* Available bytes to read from buffer */
118 size_t offset; /* Offset at which we started reading the file */
119 struct memory_handle *next;
121 /* invariant: filesize == offset + available + filerem */
123 static char *buffer;
124 static char *guard_buffer;
126 static size_t buffer_len;
128 static volatile size_t buf_widx; /* current writing position */
129 static volatile size_t buf_ridx; /* current reading position */
130 /* buf_*idx are values relative to the buffer, not real pointers. */
132 /* Configuration */
133 static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
134 #if MEM > 8
135 static size_t high_watermark = 0; /* High watermark for rebuffer */
136 #endif
138 /* current memory handle in the linked list. NULL when the list is empty. */
139 static struct memory_handle *cur_handle;
140 /* first memory handle in the linked list. NULL when the list is empty. */
141 static struct memory_handle *first_handle;
143 static int num_handles; /* number of handles in the list */
145 static int base_handle_id;
147 static struct mutex llist_mutex;
148 static struct mutex llist_mod_mutex;
150 /* Handle cache (makes find_handle faster).
151 This is global so that move_handle and rm_handle can invalidate it. */
152 static struct memory_handle *cached_handle = NULL;
154 static struct {
155 size_t remaining; /* Amount of data needing to be buffered */
156 size_t wasted; /* Amount of space available for freeing */
157 size_t buffered; /* Amount of data currently in the buffer */
158 size_t useful; /* Amount of data still useful to the user */
159 } data_counters;
/* Messages available to communicate with the buffering thread */
enum {
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                            used in a low buffer situation. */
    Q_RESET_HANDLE,      /* (internal) Request resetting of a handle to its
                            offset (the offset has to be set beforehand) */
    Q_CLOSE_HANDLE,      /* Request closing a handle */
    Q_BASE_HANDLE,       /* Set the reference handle for buf_useful_data */

    /* Configuration: */
    Q_START_FILL,        /* Request that the buffering thread initiate a buffer
                            fill at its earliest convenience */
    Q_HANDLE_ADDED,      /* Inform the buffering thread that a handle was added,
                            (which means the disk is spinning) */
};
178 /* Buffering thread */
179 static void buffering_thread(void);
180 static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
181 static const char buffering_thread_name[] = "buffering";
182 static unsigned int buffering_thread_id = 0;
183 static struct event_queue buffering_queue;
184 static struct queue_sender_list buffering_queue_sender_list;
189 LINKED LIST MANAGEMENT
190 ======================
192 add_handle : Add a handle to the list
193 rm_handle : Remove a handle from the list
194 find_handle : Get a handle pointer from an ID
195 move_handle : Move a handle in the buffer (with or without its data)
197 These functions only handle the linked list structure. They don't touch the
198 contents of the struct memory_handle headers. They also change the buf_*idx
199 pointers when necessary and manage the handle IDs.
201 The first and current (== last) handle are kept track of.
202 A new handle is added at buf_widx and becomes the current one.
203 buf_widx always points to the current writing position for the current handle
204 buf_ridx always points to the location of the first handle.
205 buf_ridx == buf_widx means the buffer is empty.
209 /* Add a new handle to the linked list and return it. It will have become the
210 new current handle.
211 data_size must contain the size of what will be in the handle.
212 can_wrap tells us whether this type of data may wrap on buffer
213 alloc_all tells us if we must immediately be able to allocate data_size
214 returns a valid memory handle if all conditions for allocation are met.
215 NULL if there memory_handle itself cannot be allocated or if the
216 data_size cannot be allocated and alloc_all is set. This function's
217 only potential side effect is to allocate space for the cur_handle
218 if it returns NULL.
220 static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
221 bool alloc_all)
223 /* gives each handle a unique id */
224 static int cur_handle_id = 0;
225 size_t shift;
226 size_t new_widx;
227 size_t len;
228 int overlap;
230 if (num_handles >= BUF_MAX_HANDLES)
231 return NULL;
233 mutex_lock(&llist_mutex);
234 mutex_lock(&llist_mod_mutex);
236 if (cur_handle && cur_handle->filerem > 0) {
237 /* the current handle hasn't finished buffering. We can only add
238 a new one if there is already enough free space to finish
239 the buffering. */
240 size_t req = cur_handle->filerem + sizeof(struct memory_handle);
241 if (RINGBUF_ADD_CROSS(cur_handle->widx, req, buf_ridx) >= 0) {
242 /* Not enough space */
243 mutex_unlock(&llist_mod_mutex);
244 mutex_unlock(&llist_mutex);
245 return NULL;
246 } else {
247 /* Allocate the remainder of the space for the current handle */
248 buf_widx = RINGBUF_ADD(cur_handle->widx, cur_handle->filerem);
252 /* align to 4 bytes up */
253 new_widx = RINGBUF_ADD(buf_widx, 3) & ~3;
255 len = data_size + sizeof(struct memory_handle);
257 /* First, will the handle wrap? */
258 /* If the handle would wrap, move to the beginning of the buffer,
259 * or if the data must not but would wrap, move it to the beginning */
260 if( (new_widx + sizeof(struct memory_handle) > buffer_len) ||
261 (!can_wrap && (new_widx + len > buffer_len)) ) {
262 new_widx = 0;
265 /* How far we shifted buf_widx to align things, must be < buffer_len */
266 shift = RINGBUF_SUB(new_widx, buf_widx);
268 /* How much space are we short in the actual ring buffer? */
269 overlap = RINGBUF_ADD_CROSS(buf_widx, shift + len, buf_ridx);
270 if (overlap >= 0 && (alloc_all || (unsigned)overlap > data_size)) {
271 /* Not enough space for required allocations */
272 mutex_unlock(&llist_mod_mutex);
273 mutex_unlock(&llist_mutex);
274 return NULL;
277 /* There is enough space for the required data, advance the buf_widx and
278 * initialize the struct */
279 buf_widx = new_widx;
281 struct memory_handle *new_handle =
282 (struct memory_handle *)(&buffer[buf_widx]);
284 /* only advance the buffer write index of the size of the struct */
285 buf_widx = RINGBUF_ADD(buf_widx, sizeof(struct memory_handle));
287 new_handle->id = cur_handle_id;
288 /* Wrap signed int is safe and 0 doesn't happen */
289 cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK;
290 new_handle->next = NULL;
291 num_handles++;
293 if (!first_handle)
294 /* the new handle is the first one */
295 first_handle = new_handle;
297 if (cur_handle)
298 cur_handle->next = new_handle;
300 cur_handle = new_handle;
302 mutex_unlock(&llist_mod_mutex);
303 mutex_unlock(&llist_mutex);
304 return new_handle;
307 /* Delete a given memory handle from the linked list
308 and return true for success. Nothing is actually erased from memory. */
309 static bool rm_handle(const struct memory_handle *h)
311 if (h == NULL)
312 return true;
314 mutex_lock(&llist_mutex);
315 mutex_lock(&llist_mod_mutex);
317 if (h == first_handle) {
318 first_handle = h->next;
319 if (h == cur_handle) {
320 /* h was the first and last handle: the buffer is now empty */
321 cur_handle = NULL;
322 buf_ridx = buf_widx = 0;
323 } else {
324 /* update buf_ridx to point to the new first handle */
325 buf_ridx = (void *)first_handle - (void *)buffer;
327 } else {
328 struct memory_handle *m = first_handle;
329 /* Find the previous handle */
330 while (m && m->next != h) {
331 m = m->next;
333 if (m && m->next == h) {
334 m->next = h->next;
335 if (h == cur_handle) {
336 cur_handle = m;
337 buf_widx = cur_handle->widx;
339 } else {
340 mutex_unlock(&llist_mod_mutex);
341 mutex_unlock(&llist_mutex);
342 return false;
346 /* Invalidate the cache to prevent it from keeping the old location of h */
347 if (h == cached_handle)
348 cached_handle = NULL;
350 num_handles--;
352 mutex_unlock(&llist_mod_mutex);
353 mutex_unlock(&llist_mutex);
354 return true;
357 /* Return a pointer to the memory handle of given ID.
358 NULL if the handle wasn't found */
359 static struct memory_handle *find_handle(int handle_id)
361 if (handle_id < 0)
362 return NULL;
364 mutex_lock(&llist_mutex);
366 /* simple caching because most of the time the requested handle
367 will either be the same as the last, or the one after the last */
368 if (cached_handle)
370 if (cached_handle->id == handle_id) {
371 mutex_unlock(&llist_mutex);
372 return cached_handle;
373 } else if (cached_handle->next &&
374 (cached_handle->next->id == handle_id)) {
375 cached_handle = cached_handle->next;
376 mutex_unlock(&llist_mutex);
377 return cached_handle;
381 struct memory_handle *m = first_handle;
382 while (m && m->id != handle_id) {
383 m = m->next;
385 /* This condition can only be reached with !m or m->id == handle_id */
386 if (m)
387 cached_handle = m;
389 mutex_unlock(&llist_mutex);
390 return m;
393 /* Move a memory handle and data_size of its data delta bytes along the buffer.
394 delta maximum bytes available to move the handle. If the move is performed
395 it is set to the actual distance moved.
396 data_size is the amount of data to move along with the struct.
397 returns a valid memory_handle if the move is successful
398 NULL if the handle is NULL, the move would be less than the size of
399 a memory_handle after correcting for wraps or if the handle is not
400 found in the linked list for adjustment. This function has no side
401 effects if NULL is returned. */
402 static bool move_handle(struct memory_handle **h, size_t *delta,
403 size_t data_size, bool can_wrap)
405 struct memory_handle *dest;
406 const struct memory_handle *src;
407 size_t newpos;
408 size_t size_to_move;
409 size_t final_delta = *delta;
410 int overlap;
412 if (h == NULL || (src = *h) == NULL)
413 return false;
415 size_to_move = sizeof(struct memory_handle) + data_size;
417 /* Align to four bytes, down */
418 final_delta &= ~3;
419 if (final_delta < sizeof(struct memory_handle)) {
420 /* It's not legal to move less than the size of the struct */
421 return false;
424 mutex_lock(&llist_mutex);
425 mutex_lock(&llist_mod_mutex);
427 newpos = RINGBUF_ADD((void *)src - (void *)buffer, final_delta);
428 overlap = RINGBUF_ADD_CROSS(newpos, size_to_move, buffer_len - 1);
430 if (overlap > 0) {
431 /* Some part of the struct + data would wrap, maybe ok */
432 size_t correction = 0;
433 /* If the overlap lands inside the memory_handle */
434 if ((unsigned)overlap > data_size) {
435 /* Correct the position and real delta to prevent the struct from
436 * wrapping, this guarantees an aligned delta, I think */
437 correction = overlap - data_size;
438 } else if (!can_wrap) {
439 /* Otherwise the overlap falls in the data area and must all be
440 * backed out. This may become conditional if ever we move
441 * data that is allowed to wrap (ie audio) */
442 correction = overlap;
443 /* Align correction to four bytes, up */
444 correction = (correction+3) & ~3;
446 if (correction) {
447 if (final_delta < correction + sizeof(struct memory_handle)) {
448 /* Delta cannot end up less than the size of the struct */
449 mutex_unlock(&llist_mod_mutex);
450 mutex_unlock(&llist_mutex);
451 return false;
454 newpos -= correction;
455 overlap -= correction;/* Used below to know how to split the data */
456 final_delta -= correction;
460 dest = (struct memory_handle *)(&buffer[newpos]);
462 if (src == first_handle) {
463 first_handle = dest;
464 buf_ridx = newpos;
465 } else {
466 struct memory_handle *m = first_handle;
467 while (m && m->next != src) {
468 m = m->next;
470 if (m && m->next == src) {
471 m->next = dest;
472 } else {
473 mutex_unlock(&llist_mod_mutex);
474 mutex_unlock(&llist_mutex);
475 return false;
480 /* Update the cache to prevent it from keeping the old location of h */
481 if (src == cached_handle)
482 cached_handle = dest;
484 /* the cur_handle pointer might need updating */
485 if (src == cur_handle)
486 cur_handle = dest;
488 if (overlap > 0) {
489 /* FIXME : this code is broken and can leave the data corrupted when
490 * the amount of data to move is close to the whole buffer size.
492 * Example : ('S' is the source data, '-' is empty buffer)
493 * Size of the buffer is 8 bytes, starts at 0.
494 * Size of the data to move is 7 bytes.
496 * -SSSSSSS
497 * ^-------- start of source data == 1
499 * DD-DDDDD ('D' is desired destination data)
500 * ^------ start of destination data == 3
502 * memmove(3, 1, 5);
503 * memmove(0, 7, 2);
505 * First memmove() call will leave the buffer in this state:
507 * -SSDDDDD
508 * ^^
509 * \--- data to be moved by the second memmove() call, but
510 * overwritten by the first call.
512 * See FS#10605 for more details
514 size_t first_part = size_to_move - overlap;
515 memmove(dest, src, first_part);
516 memmove(buffer, (const char *)src + first_part, overlap);
517 } else {
518 memmove(dest, src, size_to_move);
521 /* Update the caller with the new location of h and the distance moved */
522 *h = dest;
523 *delta = final_delta;
524 mutex_unlock(&llist_mod_mutex);
525 mutex_unlock(&llist_mutex);
526 return dest;
531 BUFFER SPACE MANAGEMENT
532 =======================
534 update_data_counters: Updates the values in data_counters
535 buffer_is_low : Returns true if the amount of useful data in the buffer is low
536 buffer_handle : Buffer data for a handle
537 reset_handle : Reset write position and data buffer of a handle to its offset
538 rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
539 shrink_handle : Free buffer space by moving a handle
540 fill_buffer : Call buffer_handle for all handles that have data to buffer
542 These functions are used by the buffering thread to manage buffer space.
545 static void update_data_counters(void)
547 struct memory_handle *m = find_handle(base_handle_id);
548 bool is_useful = m==NULL;
550 size_t buffered = 0;
551 size_t wasted = 0;
552 size_t remaining = 0;
553 size_t useful = 0;
555 mutex_lock(&llist_mutex);
557 m = first_handle;
558 while (m) {
559 buffered += m->available;
560 wasted += RINGBUF_SUB(m->ridx, m->data);
561 remaining += m->filerem;
563 if (m->id == base_handle_id)
564 is_useful = true;
566 if (is_useful)
567 useful += RINGBUF_SUB(m->widx, m->ridx);
569 m = m->next;
572 mutex_unlock(&llist_mutex);
574 data_counters.buffered = buffered;
575 data_counters.wasted = wasted;
576 data_counters.remaining = remaining;
577 data_counters.useful = useful;
580 static inline bool buffer_is_low(void)
582 update_data_counters();
583 return data_counters.useful < (conf_watermark / 2);
586 /* Buffer data for the given handle.
587 Return whether or not the buffering should continue explicitly. */
588 static bool buffer_handle(int handle_id)
590 logf("buffer_handle(%d)", handle_id);
591 struct memory_handle *h = find_handle(handle_id);
592 if (!h)
593 return true;
595 if (h->filerem == 0) {
596 /* nothing left to buffer */
597 return true;
600 if (h->fd < 0) /* file closed, reopen */
602 if (*h->path)
603 h->fd = open(h->path, O_RDONLY);
605 if (h->fd < 0)
607 /* could not open the file, truncate it where it is */
608 h->filesize -= h->filerem;
609 h->filerem = 0;
610 return true;
613 if (h->offset)
614 lseek(h->fd, h->offset, SEEK_SET);
617 trigger_cpu_boost();
619 if (h->type == TYPE_ID3)
621 if (!get_metadata((struct mp3entry *)(buffer + h->data), h->fd, h->path))
623 /* metadata parsing failed: clear the buffer. */
624 memset(buffer + h->data, 0, sizeof(struct mp3entry));
626 close(h->fd);
627 h->fd = -1;
628 h->filerem = 0;
629 h->available = sizeof(struct mp3entry);
630 h->widx += sizeof(struct mp3entry);
631 send_event(BUFFER_EVENT_FINISHED, &h->id);
632 return true;
635 while (h->filerem > 0)
637 /* max amount to copy */
638 size_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
639 buffer_len - h->widx);
641 /* stop copying if it would overwrite the reading position */
642 if (RINGBUF_ADD_CROSS(h->widx, copy_n, buf_ridx) >= 0)
643 return false;
645 /* This would read into the next handle, this is broken */
646 if (h->next && RINGBUF_ADD_CROSS(h->widx, copy_n,
647 (unsigned)((void *)h->next - (void *)buffer)) > 0) {
648 /* Try to recover by truncating this file */
649 copy_n = RINGBUF_ADD_CROSS(h->widx, copy_n,
650 (unsigned)((void *)h->next - (void *)buffer));
651 h->filerem -= copy_n;
652 h->filesize -= copy_n;
653 logf("buf alloc short %ld", (long)copy_n);
654 if (h->filerem)
655 continue;
656 else
657 break;
660 /* rc is the actual amount read */
661 int rc = read(h->fd, &buffer[h->widx], copy_n);
663 if (rc < 0)
665 /* Some kind of filesystem error, maybe recoverable if not codec */
666 if (h->type == TYPE_CODEC) {
667 logf("Partial codec");
668 break;
671 DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
672 h->filesize -= h->filerem;
673 h->filerem = 0;
674 break;
677 /* Advance buffer */
678 h->widx = RINGBUF_ADD(h->widx, rc);
679 if (h == cur_handle)
680 buf_widx = h->widx;
681 h->available += rc;
682 h->filerem -= rc;
684 /* If this is a large file, see if we need to break or give the codec
685 * more time */
686 if (h->type == TYPE_PACKET_AUDIO &&
687 pcmbuf_is_lowdata() && !buffer_is_low())
689 sleep(1);
691 else
693 yield();
696 if (!queue_empty(&buffering_queue))
697 break;
700 if (h->filerem == 0) {
701 /* finished buffering the file */
702 close(h->fd);
703 h->fd = -1;
704 send_event(BUFFER_EVENT_FINISHED, &h->id);
707 return true;
710 /* Reset writing position and data buffer of a handle to its current offset.
711 Use this after having set the new offset to use. */
712 static void reset_handle(int handle_id)
714 logf("reset_handle(%d)", handle_id);
716 struct memory_handle *h = find_handle(handle_id);
717 if (!h)
718 return;
720 h->ridx = h->widx = h->data;
721 if (h == cur_handle)
722 buf_widx = h->widx;
723 h->available = 0;
724 h->filerem = h->filesize - h->offset;
726 if (h->fd >= 0) {
727 lseek(h->fd, h->offset, SEEK_SET);
731 /* Seek to a nonbuffered part of a handle by rebuffering the data. */
732 static void rebuffer_handle(int handle_id, size_t newpos)
734 struct memory_handle *h = find_handle(handle_id);
735 if (!h)
736 return;
738 /* When seeking foward off of the buffer, if it is a short seek don't
739 rebuffer the whole track, just read enough to satisfy */
740 if (newpos > h->offset && newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK)
742 LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
743 queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
744 h->ridx = h->data + newpos;
745 return;
748 h->offset = newpos;
750 /* Reset the handle to its new offset */
751 LOGFQUEUE("buffering >| Q_RESET_HANDLE %d", handle_id);
752 queue_send(&buffering_queue, Q_RESET_HANDLE, handle_id);
754 size_t next = (unsigned)((void *)h->next - (void *)buffer);
755 if (RINGBUF_SUB(next, h->data) < h->filesize - newpos)
757 /* There isn't enough space to rebuffer all of the track from its new
758 offset, so we ask the user to free some */
759 DEBUGF("rebuffer_handle: space is needed\n");
760 send_event(BUFFER_EVENT_REBUFFER, &handle_id);
763 /* Now we ask for a rebuffer */
764 LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
765 queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
768 static bool close_handle(int handle_id)
770 struct memory_handle *h = find_handle(handle_id);
772 /* If the handle is not found, it is closed */
773 if (!h)
774 return true;
776 if (h->fd >= 0) {
777 close(h->fd);
778 h->fd = -1;
781 /* rm_handle returns true unless the handle somehow persists after exit */
782 return rm_handle(h);
785 /* Free buffer space by moving the handle struct right before the useful
786 part of its data buffer or by moving all the data. */
787 static void shrink_handle(struct memory_handle *h)
789 size_t delta;
791 if (!h)
792 return;
794 if (h->next && h->filerem == 0 &&
795 (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
796 h->type == TYPE_BITMAP || h->type == TYPE_CODEC ||
797 h->type == TYPE_ATOMIC_AUDIO))
799 /* metadata handle: we can move all of it */
800 size_t handle_distance =
801 RINGBUF_SUB((unsigned)((void *)h->next - (void*)buffer), h->data);
802 delta = handle_distance - h->available;
804 /* The value of delta might change for alignment reasons */
805 if (!move_handle(&h, &delta, h->available, h->type==TYPE_CODEC))
806 return;
808 size_t olddata = h->data;
809 h->data = RINGBUF_ADD(h->data, delta);
810 h->ridx = RINGBUF_ADD(h->ridx, delta);
811 h->widx = RINGBUF_ADD(h->widx, delta);
813 if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
814 /* when moving an mp3entry we need to readjust its pointers. */
815 adjust_mp3entry((struct mp3entry *)&buffer[h->data],
816 (void *)&buffer[h->data],
817 (const void *)&buffer[olddata]);
818 } else if (h->type == TYPE_BITMAP) {
819 /* adjust the bitmap's pointer */
820 struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
821 bmp->data = &buffer[h->data + sizeof(struct bitmap)];
824 else
826 /* only move the handle struct */
827 delta = RINGBUF_SUB(h->ridx, h->data);
828 if (!move_handle(&h, &delta, 0, true))
829 return;
831 h->data = RINGBUF_ADD(h->data, delta);
832 h->available -= delta;
833 h->offset += delta;
837 /* Fill the buffer by buffering as much data as possible for handles that still
838 have data left to buffer
839 Return whether or not to continue filling after this */
840 static bool fill_buffer(void)
842 logf("fill_buffer()");
843 struct memory_handle *m;
844 shrink_handle(first_handle);
845 m = first_handle;
846 while (queue_empty(&buffering_queue) && m) {
847 if (m->filerem > 0) {
848 if (!buffer_handle(m->id)) {
849 m = NULL;
850 break;
853 m = m->next;
856 if (m) {
857 return true;
859 else
861 /* only spin the disk down if the filling wasn't interrupted by an
862 event arriving in the queue. */
863 storage_sleep();
864 return false;
868 #ifdef HAVE_ALBUMART
869 /* Given a file descriptor to a bitmap file, write the bitmap data to the
870 buffer, with a struct bitmap and the actual data immediately following.
871 Return value is the total size (struct + data). */
872 static int load_image(int fd, const char *path, struct dim *dim)
874 int rc;
875 struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx];
877 /* get the desired image size */
878 bmp->width = dim->width, bmp->height = dim->height;
879 /* FIXME: alignment may be needed for the data buffer. */
880 bmp->data = &buffer[buf_widx + sizeof(struct bitmap)];
881 #ifndef HAVE_JPEG
882 (void) path;
883 #endif
884 #if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
885 bmp->maskdata = NULL;
886 #endif
888 int free = (int)MIN(buffer_len - BUF_USED, buffer_len - buf_widx)
889 - sizeof(struct bitmap);
891 #ifdef HAVE_JPEG
892 int pathlen = strlen(path);
893 if (strcmp(path + pathlen - 4, ".bmp"))
894 rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
895 FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
896 else
897 #endif
898 rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
899 FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
900 return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
902 #endif
906 MAIN BUFFERING API CALLS
907 ========================
909 bufopen : Request the opening of a new handle for a file
910 bufalloc : Open a new handle for data other than a file.
911 bufclose : Close an open handle
912 bufseek : Set the read pointer in a handle
913 bufadvance : Move the read pointer in a handle
914 bufread : Copy data from a handle into a given buffer
915 bufgetdata : Give a pointer to the handle's data
917 These functions are exported, to allow interaction with the buffer.
918 They take care of the content of the structs, and rely on the linked list
919 management functions for all the actual handle management work.
923 /* Reserve space in the buffer for a file.
924 filename: name of the file to open
925 offset: offset at which to start buffering the file, useful when the first
926 (offset-1) bytes of the file aren't needed.
927 type: one of the data types supported (audio, image, cuesheet, others
928 user_data: user data passed possibly passed in subcalls specific to a
929 data_type (only used for image (albumart) buffering so far )
930 return value: <0 if the file cannot be opened, or one file already
931 queued to be opened, otherwise the handle for the file in the buffer
933 int bufopen(const char *file, size_t offset, enum data_type type,
934 void *user_data)
936 #ifndef HAVE_ALBUMART
937 /* currently only used for aa loading */
938 (void)user_data;
939 #endif
940 if (type == TYPE_ID3)
942 /* ID3 case: allocate space, init the handle and return. */
944 struct memory_handle *h = add_handle(sizeof(struct mp3entry), false, true);
945 if (!h)
946 return ERR_BUFFER_FULL;
948 h->fd = -1;
949 h->filesize = sizeof(struct mp3entry);
950 h->filerem = sizeof(struct mp3entry);
951 h->offset = 0;
952 h->data = buf_widx;
953 h->ridx = buf_widx;
954 h->widx = buf_widx;
955 h->available = 0;
956 h->type = type;
957 strlcpy(h->path, file, MAX_PATH);
959 buf_widx += sizeof(struct mp3entry); /* safe because the handle
960 can't wrap */
962 /* Inform the buffering thread that we added a handle */
963 LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
964 queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);
966 return h->id;
969 /* Other cases: there is a little more work. */
971 int fd = open(file, O_RDONLY);
972 if (fd < 0)
973 return ERR_FILE_ERROR;
975 size_t size = filesize(fd);
976 bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC;
978 size_t adjusted_offset = offset;
979 if (adjusted_offset > size)
980 adjusted_offset = 0;
982 struct memory_handle *h = add_handle(size-adjusted_offset, can_wrap, false);
983 if (!h)
985 DEBUGF("bufopen: failed to add handle\n");
986 close(fd);
987 return ERR_BUFFER_FULL;
990 strlcpy(h->path, file, MAX_PATH);
991 h->offset = adjusted_offset;
992 h->ridx = buf_widx;
993 h->widx = buf_widx;
994 h->data = buf_widx;
995 h->available = 0;
996 h->filerem = 0;
997 h->type = type;
999 #ifdef HAVE_ALBUMART
1000 if (type == TYPE_BITMAP)
1002 /* Bitmap file: we load the data instead of the file */
1003 int rc;
1004 mutex_lock(&llist_mod_mutex); /* Lock because load_bitmap yields */
1005 rc = load_image(fd, file, (struct dim*)user_data);
1006 mutex_unlock(&llist_mod_mutex);
1007 if (rc <= 0)
1009 rm_handle(h);
1010 close(fd);
1011 return ERR_FILE_ERROR;
1013 h->filerem = 0;
1014 h->filesize = rc;
1015 h->available = rc;
1016 h->widx = buf_widx + rc; /* safe because the data doesn't wrap */
1017 buf_widx += rc; /* safe too */
1019 else
1020 #endif
1022 h->filerem = size - adjusted_offset;
1023 h->filesize = size;
1024 h->available = 0;
1025 h->widx = buf_widx;
1028 if (type == TYPE_CUESHEET) {
1029 h->fd = fd;
1030 /* Immediately start buffering those */
1031 LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", h->id);
1032 queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id);
1033 } else {
1034 /* Other types will get buffered in the course of normal operations */
1035 h->fd = -1;
1036 close(fd);
1038 /* Inform the buffering thread that we added a handle */
1039 LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
1040 queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);
1043 logf("bufopen: new hdl %d", h->id);
1044 return h->id;
1047 /* Open a new handle from data that needs to be copied from memory.
1048 src is the source buffer from which to copy data. It can be NULL to simply
1049 reserve buffer space.
1050 size is the requested size. The call will only be successful if the
1051 requested amount of data can entirely fit in the buffer without wrapping.
1052 Return value is the handle id for success or <0 for failure.
1054 int bufalloc(const void *src, size_t size, enum data_type type)
1056 struct memory_handle *h = add_handle(size, false, true);
1058 if (!h)
1059 return ERR_BUFFER_FULL;
1061 if (src) {
1062 if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
1063 /* specially take care of struct mp3entry */
1064 copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
1065 (const struct mp3entry *)src);
1066 } else {
1067 memcpy(&buffer[buf_widx], src, size);
1071 h->fd = -1;
1072 *h->path = 0;
1073 h->filesize = size;
1074 h->filerem = 0;
1075 h->offset = 0;
1076 h->ridx = buf_widx;
1077 h->widx = buf_widx + size; /* this is safe because the data doesn't wrap */
1078 h->data = buf_widx;
1079 h->available = size;
1080 h->type = type;
1082 buf_widx += size; /* safe too */
1084 logf("bufalloc: new hdl %d", h->id);
1085 return h->id;
1088 /* Close the handle. Return true for success and false for failure */
1089 bool bufclose(int handle_id)
1091 logf("bufclose(%d)", handle_id);
1093 LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
1094 return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
1097 /* Set reading index in handle (relatively to the start of the file).
1098 Access before the available data will trigger a rebuffer.
1099 Return 0 for success and < 0 for failure:
1100 -1 if the handle wasn't found
1101 -2 if the new requested position was beyond the end of the file
1103 int bufseek(int handle_id, size_t newpos)
1105 struct memory_handle *h = find_handle(handle_id);
1106 if (!h)
1107 return ERR_HANDLE_NOT_FOUND;
1109 if (newpos > h->filesize) {
1110 /* access beyond the end of the file */
1111 return ERR_INVALID_VALUE;
1113 else if (newpos < h->offset || h->offset + h->available < newpos) {
1114 /* access before or after buffered data. A rebuffer is needed. */
1115 rebuffer_handle(handle_id, newpos);
1117 else {
1118 h->ridx = RINGBUF_ADD(h->data, newpos - h->offset);
1120 return 0;
1123 /* Advance the reading index in a handle (relatively to its current position).
1124 Return 0 for success and < 0 for failure */
1125 int bufadvance(int handle_id, off_t offset)
1127 const struct memory_handle *h = find_handle(handle_id);
1128 if (!h)
1129 return ERR_HANDLE_NOT_FOUND;
1131 size_t newpos = h->offset + RINGBUF_SUB(h->ridx, h->data) + offset;
1132 return bufseek(handle_id, newpos);
1135 /* Used by bufread and bufgetdata to prepare the buffer and retrieve the
1136 * actual amount of data available for reading. This function explicitly
1137 * does not check the validity of the input handle. It does do range checks
1138 * on size and returns a valid (and explicit) amount of data for reading */
1139 static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
1140 bool guardbuf_limit)
1142 struct memory_handle *h = find_handle(handle_id);
1143 if (!h)
1144 return NULL;
1146 size_t avail = RINGBUF_SUB(h->widx, h->ridx);
1148 if (avail == 0 && h->filerem == 0)
1150 /* File is finished reading */
1151 *size = 0;
1152 return h;
1155 if (*size == 0 || *size > avail + h->filerem)
1156 *size = avail + h->filerem;
1158 if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO && *size > GUARD_BUFSIZE)
1160 logf("data request > guardbuf");
1161 /* If more than the size of the guardbuf is requested and this is a
1162 * bufgetdata, limit to guard_bufsize over the end of the buffer */
1163 *size = MIN(*size, buffer_len - h->ridx + GUARD_BUFSIZE);
1164 /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
1167 if (h->filerem > 0 && avail < *size)
1169 /* Data isn't ready. Request buffering */
1170 buf_request_buffer_handle(handle_id);
1171 /* Wait for the data to be ready */
1174 sleep(1);
1175 /* it is not safe for a non-buffering thread to sleep while
1176 * holding a handle */
1177 h = find_handle(handle_id);
1178 if (!h)
1179 return NULL;
1180 avail = RINGBUF_SUB(h->widx, h->ridx);
1182 while (h->filerem > 0 && avail < *size);
1185 *size = MIN(*size,avail);
1186 return h;
1189 /* Copy data from the given handle to the dest buffer.
1190 Return the number of bytes copied or < 0 for failure (handle not found).
1191 The caller is blocked until the requested amount of data is available.
1193 ssize_t bufread(int handle_id, size_t size, void *dest)
1195 const struct memory_handle *h;
1196 size_t adjusted_size = size;
1198 h = prep_bufdata(handle_id, &adjusted_size, false);
1199 if (!h)
1200 return ERR_HANDLE_NOT_FOUND;
1202 if (h->ridx + adjusted_size > buffer_len)
1204 /* the data wraps around the end of the buffer */
1205 size_t read = buffer_len - h->ridx;
1206 memcpy(dest, &buffer[h->ridx], read);
1207 memcpy(dest+read, buffer, adjusted_size - read);
1209 else
1211 memcpy(dest, &buffer[h->ridx], adjusted_size);
1214 return adjusted_size;
1217 /* Update the "data" pointer to make the handle's data available to the caller.
1218 Return the length of the available linear data or < 0 for failure (handle
1219 not found).
1220 The caller is blocked until the requested amount of data is available.
1221 size is the amount of linear data requested. it can be 0 to get as
1222 much as possible.
1223 The guard buffer may be used to provide the requested size. This means it's
1224 unsafe to request more than the size of the guard buffer.
1226 ssize_t bufgetdata(int handle_id, size_t size, void **data)
1228 const struct memory_handle *h;
1229 size_t adjusted_size = size;
1231 h = prep_bufdata(handle_id, &adjusted_size, true);
1232 if (!h)
1233 return ERR_HANDLE_NOT_FOUND;
1235 if (h->ridx + adjusted_size > buffer_len)
1237 /* the data wraps around the end of the buffer :
1238 use the guard buffer to provide the requested amount of data. */
1239 size_t copy_n = h->ridx + adjusted_size - buffer_len;
1240 /* prep_bufdata ensures adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
1241 so copy_n <= GUARD_BUFSIZE */
1242 memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
1245 if (data)
1246 *data = &buffer[h->ridx];
1248 return adjusted_size;
1251 ssize_t bufgettail(int handle_id, size_t size, void **data)
1253 size_t tidx;
1255 const struct memory_handle *h;
1257 h = find_handle(handle_id);
1259 if (!h)
1260 return ERR_HANDLE_NOT_FOUND;
1262 if (h->filerem)
1263 return ERR_HANDLE_NOT_DONE;
1265 /* We don't support tail requests of > guardbuf_size, for simplicity */
1266 if (size > GUARD_BUFSIZE)
1267 return ERR_INVALID_VALUE;
1269 tidx = RINGBUF_SUB(h->widx, size);
1271 if (tidx + size > buffer_len)
1273 size_t copy_n = tidx + size - buffer_len;
1274 memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
1277 *data = &buffer[tidx];
1278 return size;
1281 ssize_t bufcuttail(int handle_id, size_t size)
1283 struct memory_handle *h;
1284 size_t adjusted_size = size;
1286 h = find_handle(handle_id);
1288 if (!h)
1289 return ERR_HANDLE_NOT_FOUND;
1291 if (h->filerem)
1292 return ERR_HANDLE_NOT_DONE;
1294 if (h->available < adjusted_size)
1295 adjusted_size = h->available;
1297 h->available -= adjusted_size;
1298 h->filesize -= adjusted_size;
1299 h->widx = RINGBUF_SUB(h->widx, adjusted_size);
1300 if (h == cur_handle)
1301 buf_widx = h->widx;
1303 return adjusted_size;
/*
SECONDARY EXPORTED FUNCTIONS
============================

buf_get_offset
buf_handle_offset
buf_request_buffer_handle
buf_set_base_handle
buf_used
register_buffering_callback
unregister_buffering_callback

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/
1324 /* Get a handle offset from a pointer */
1325 ssize_t buf_get_offset(int handle_id, void *ptr)
1327 const struct memory_handle *h = find_handle(handle_id);
1328 if (!h)
1329 return ERR_HANDLE_NOT_FOUND;
1331 return (size_t)ptr - (size_t)&buffer[h->ridx];
1334 ssize_t buf_handle_offset(int handle_id)
1336 const struct memory_handle *h = find_handle(handle_id);
1337 if (!h)
1338 return ERR_HANDLE_NOT_FOUND;
1339 return h->offset;
1342 void buf_request_buffer_handle(int handle_id)
1344 LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id);
1345 queue_send(&buffering_queue, Q_START_FILL, handle_id);
1348 void buf_set_base_handle(int handle_id)
1350 LOGFQUEUE("buffering > Q_BASE_HANDLE %d", handle_id);
1351 queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
1354 /* Return the amount of buffer space used */
1355 size_t buf_used(void)
1357 return BUF_USED;
1360 void buf_set_watermark(size_t bytes)
1362 conf_watermark = bytes;
1365 static void shrink_buffer_inner(struct memory_handle *h)
1367 if (h == NULL)
1368 return;
1370 shrink_buffer_inner(h->next);
1372 shrink_handle(h);
1375 static void shrink_buffer(void)
1377 logf("shrink_buffer()");
1378 shrink_buffer_inner(first_handle);
1381 void buffering_thread(void)
1383 bool filling = false;
1384 struct queue_event ev;
1386 while (true)
1388 if (!filling) {
1389 cancel_cpu_boost();
1392 queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);
1394 switch (ev.id)
1396 case Q_START_FILL:
1397 LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
1398 /* Call buffer callbacks here because this is one of two ways
1399 * to begin a full buffer fill */
1400 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1401 shrink_buffer();
1402 queue_reply(&buffering_queue, 1);
1403 filling |= buffer_handle((int)ev.data);
1404 break;
1406 case Q_BUFFER_HANDLE:
1407 LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
1408 queue_reply(&buffering_queue, 1);
1409 buffer_handle((int)ev.data);
1410 break;
1412 case Q_RESET_HANDLE:
1413 LOGFQUEUE("buffering < Q_RESET_HANDLE %d", (int)ev.data);
1414 queue_reply(&buffering_queue, 1);
1415 reset_handle((int)ev.data);
1416 break;
1418 case Q_CLOSE_HANDLE:
1419 LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
1420 queue_reply(&buffering_queue, close_handle((int)ev.data));
1421 break;
1423 case Q_HANDLE_ADDED:
1424 LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
1425 /* A handle was added: the disk is spinning, so we can fill */
1426 filling = true;
1427 break;
1429 case Q_BASE_HANDLE:
1430 LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
1431 base_handle_id = (int)ev.data;
1432 break;
1434 #ifndef SIMULATOR
1435 case SYS_USB_CONNECTED:
1436 LOGFQUEUE("buffering < SYS_USB_CONNECTED");
1437 usb_acknowledge(SYS_USB_CONNECTED_ACK);
1438 usb_wait_for_disconnect(&buffering_queue);
1439 break;
1440 #endif
1442 case SYS_TIMEOUT:
1443 LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
1444 break;
1447 update_data_counters();
1449 /* If the buffer is low, call the callbacks to get new data */
1450 if (num_handles > 0 && data_counters.useful <= conf_watermark)
1451 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1453 #if 0
1454 /* TODO: This needs to be fixed to use the idle callback, disable it
1455 * for simplicity until its done right */
1456 #if MEM > 8
1457 /* If the disk is spinning, take advantage by filling the buffer */
1458 else if (storage_disk_is_active() && queue_empty(&buffering_queue))
1460 if (num_handles > 0 && data_counters.useful <= high_watermark)
1461 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
1463 if (data_counters.remaining > 0 && BUF_USED <= high_watermark)
1465 /* This is a new fill, shrink the buffer up first */
1466 if (!filling)
1467 shrink_buffer();
1468 filling = fill_buffer();
1469 update_data_counters();
1472 #endif
1473 #endif
1475 if (queue_empty(&buffering_queue)) {
1476 if (filling) {
1477 if (data_counters.remaining > 0 && BUF_USED < buffer_len)
1478 filling = fill_buffer();
1479 else if (data_counters.remaining == 0)
1480 filling = false;
1482 else if (ev.id == SYS_TIMEOUT)
1484 if (data_counters.remaining > 0 &&
1485 data_counters.useful <= conf_watermark) {
1486 shrink_buffer();
1487 filling = fill_buffer();
1494 void buffering_init(void)
1496 mutex_init(&llist_mutex);
1497 mutex_init(&llist_mod_mutex);
1498 #ifdef HAVE_PRIORITY_SCHEDULING
1499 /* This behavior not safe atm */
1500 mutex_set_preempt(&llist_mutex, false);
1501 mutex_set_preempt(&llist_mod_mutex, false);
1502 #endif
1504 conf_watermark = BUFFERING_DEFAULT_WATERMARK;
1506 queue_init(&buffering_queue, true);
1507 buffering_thread_id = create_thread( buffering_thread, buffering_stack,
1508 sizeof(buffering_stack), CREATE_THREAD_FROZEN,
1509 buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
1510 IF_COP(, CPU));
1512 queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
1513 buffering_thread_id);
1516 /* Initialise the buffering subsystem */
1517 bool buffering_reset(char *buf, size_t buflen)
1519 if (!buf || !buflen)
1520 return false;
1522 buffer = buf;
1523 buffer_len = buflen;
1524 guard_buffer = buf + buflen;
1526 buf_widx = 0;
1527 buf_ridx = 0;
1529 first_handle = NULL;
1530 cur_handle = NULL;
1531 cached_handle = NULL;
1532 num_handles = 0;
1533 base_handle_id = -1;
1535 /* Set the high watermark as 75% full...or 25% empty :) */
1536 #if MEM > 8
1537 high_watermark = 3*buflen / 4;
1538 #endif
1540 thread_thaw(buffering_thread_id);
1542 return true;
1545 void buffering_get_debugdata(struct buffering_debug *dbgdata)
1547 update_data_counters();
1548 dbgdata->num_handles = num_handles;
1549 dbgdata->data_rem = data_counters.remaining;
1550 dbgdata->wasted_space = data_counters.wasted;
1551 dbgdata->buffered_data = data_counters.buffered;
1552 dbgdata->useful_data = data_counters.useful;
1553 dbgdata->watermark = conf_watermark;