1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2007 Nicolas Pennequin
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
28 #include "buffering.h"
44 #include "mp3_playback.h"
49 #include "appevents.h"
53 #include "jpeg_load.h"
58 #define GUARD_BUFSIZE (32*1024)
60 /* Define LOGF_ENABLE to enable logf output in this file */
61 /*#define LOGF_ENABLE*/
64 /* macros to enable logf for queues
65 logging on SYS_TIMEOUT can be disabled */
67 /* Define this for logf output of all queuing except SYS_TIMEOUT */
68 #define BUFFERING_LOGQUEUES
69 /* Define this to logf SYS_TIMEOUT messages */
70 /* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
73 #ifdef BUFFERING_LOGQUEUES
74 #define LOGFQUEUE logf
76 #define LOGFQUEUE(...)
79 #ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
80 #define LOGFQUEUE_SYS_TIMEOUT logf
82 #define LOGFQUEUE_SYS_TIMEOUT(...)
85 /* default point to start buffer refill */
86 #define BUFFERING_DEFAULT_WATERMARK (1024*128)
87 /* amount of data to read in one read() call */
88 #define BUFFERING_DEFAULT_FILECHUNK (1024*32)
90 #define BUF_HANDLE_MASK 0x7FFFFFFF
93 /* assert(sizeof(struct memory_handle)%4==0) */
94 struct memory_handle
{
95 int id
; /* A unique ID for the handle */
96 enum data_type type
; /* Type of data buffered with this handle */
97 char path
[MAX_PATH
]; /* Path if data originated in a file */
98 int fd
; /* File descriptor to path (-1 if closed) */
99 size_t data
; /* Start index of the handle's data buffer */
100 volatile size_t ridx
; /* Read pointer, relative to the main buffer */
101 size_t widx
; /* Write pointer, relative to the main buffer */
102 size_t filesize
; /* File total length */
103 size_t filerem
; /* Remaining bytes of file NOT in buffer */
104 volatile size_t available
; /* Available bytes to read from buffer */
105 size_t offset
; /* Offset at which we started reading the file */
106 struct memory_handle
*next
;
108 /* invariant: filesize == offset + available + filerem */
111 struct buf_message_data
118 static char *guard_buffer
;
120 static size_t buffer_len
;
122 static volatile size_t buf_widx
; /* current writing position */
123 static volatile size_t buf_ridx
; /* current reading position */
124 /* buf_*idx are values relative to the buffer, not real pointers. */
127 static size_t conf_watermark
= 0; /* Level to trigger filebuf fill */
129 static size_t high_watermark
= 0; /* High watermark for rebuffer */
132 /* current memory handle in the linked list. NULL when the list is empty. */
133 static struct memory_handle
*cur_handle
;
134 /* first memory handle in the linked list. NULL when the list is empty. */
135 static struct memory_handle
*first_handle
;
137 static int num_handles
; /* number of handles in the list */
139 static int base_handle_id
;
141 /* Main lock for adding / removing handles */
142 static struct mutex llist_mutex SHAREDBSS_ATTR
;
144 /* Handle cache (makes find_handle faster).
145 This is global so that move_handle and rm_handle can invalidate it. */
146 static struct memory_handle
*cached_handle
= NULL
;
148 static struct data_counters
150 size_t remaining
; /* Amount of data needing to be buffered */
151 size_t wasted
; /* Amount of space available for freeing */
152 size_t buffered
; /* Amount of data currently in the buffer */
153 size_t useful
; /* Amount of data still useful to the user */
157 /* Messages available to communicate with the buffering thread */
160 Q_BUFFER_HANDLE
= 1, /* Request buffering of a handle, this should not be
161 used in a low buffer situation. */
162 Q_REBUFFER_HANDLE
, /* Request reset and rebuffering of a handle at a new
163 file starting position. */
164 Q_CLOSE_HANDLE
, /* Request closing a handle */
165 Q_BASE_HANDLE
, /* Set the reference handle for buf_useful_data */
168 Q_START_FILL
, /* Request that the buffering thread initiate a buffer
169 fill at its earliest convenience */
170 Q_HANDLE_ADDED
, /* Inform the buffering thread that a handle was added,
171 (which means the disk is spinning) */
174 /* Buffering thread */
175 static void buffering_thread(void);
176 static long buffering_stack
[(DEFAULT_STACK_SIZE
+ 0x2000)/sizeof(long)];
177 static const char buffering_thread_name
[] = "buffering";
178 static unsigned int buffering_thread_id
= 0;
179 static struct event_queue buffering_queue SHAREDBSS_ATTR
;
180 static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR
;
184 /* Ring buffer helper functions */
186 static inline uintptr_t ringbuf_offset(const void *ptr
)
188 return (uintptr_t)(ptr
- (void*)buffer
);
191 /* Buffer pointer (p) plus value (v), wrapped if necessary */
192 static inline uintptr_t ringbuf_add(uintptr_t p
, size_t v
)
194 uintptr_t res
= p
+ v
;
195 if (res
>= buffer_len
)
196 res
-= buffer_len
; /* wrap if necssary */
201 /* Buffer pointer (p) minus value (v), wrapped if necessary */
202 static inline uintptr_t ringbuf_sub(uintptr_t p
, size_t v
)
206 res
+= buffer_len
; /* wrap */
212 /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
213 static inline ssize_t
ringbuf_add_cross(uintptr_t p1
, size_t v
, uintptr_t p2
)
215 ssize_t res
= p1
+ v
- p2
;
216 if (p1
>= p2
) /* wrap if necessary */
222 /* Bytes available in the buffer */
223 #define BUF_USED ringbuf_sub(buf_widx, buf_ridx)
226 LINKED LIST MANAGEMENT
227 ======================
229 add_handle : Add a handle to the list
230 rm_handle : Remove a handle from the list
231 find_handle : Get a handle pointer from an ID
232 move_handle : Move a handle in the buffer (with or without its data)
234 These functions only handle the linked list structure. They don't touch the
235 contents of the struct memory_handle headers. They also change the buf_*idx
236 pointers when necessary and manage the handle IDs.
238 The first and current (== last) handle are kept track of.
239 A new handle is added at buf_widx and becomes the current one.
240 buf_widx always points to the current writing position for the current handle
241 buf_ridx always points to the location of the first handle.
242 buf_ridx == buf_widx means the buffer is empty.
246 /* Add a new handle to the linked list and return it. It will have become the
248 data_size must contain the size of what will be in the handle.
249 can_wrap tells us whether this type of data may wrap on buffer
250 alloc_all tells us if we must immediately be able to allocate data_size
251 returns a valid memory handle if all conditions for allocation are met.
252 NULL if there memory_handle itself cannot be allocated or if the
253 data_size cannot be allocated and alloc_all is set. */
254 static struct memory_handle
*add_handle(size_t data_size
, bool can_wrap
,
257 /* gives each handle a unique id */
258 static int cur_handle_id
= 0;
260 size_t widx
, new_widx
;
264 if (num_handles
>= BUF_MAX_HANDLES
)
269 if (cur_handle
&& cur_handle
->filerem
> 0) {
270 /* the current handle hasn't finished buffering. We can only add
271 a new one if there is already enough free space to finish
273 size_t req
= cur_handle
->filerem
;
274 if (ringbuf_add_cross(cur_handle
->widx
, req
, buf_ridx
) >= 0) {
275 /* Not enough space to finish allocation */
278 /* Allocate the remainder of the space for the current handle */
279 widx
= ringbuf_add(cur_handle
->widx
, cur_handle
->filerem
);
283 /* align to 4 bytes up always leaving a gap */
284 new_widx
= ringbuf_add(widx
, 4) & ~3;
286 len
= data_size
+ sizeof(struct memory_handle
);
288 /* First, will the handle wrap? */
289 /* If the handle would wrap, move to the beginning of the buffer,
290 * or if the data must not but would wrap, move it to the beginning */
291 if (new_widx
+ sizeof(struct memory_handle
) > buffer_len
||
292 (!can_wrap
&& new_widx
+ len
> buffer_len
)) {
296 /* How far we shifted the new_widx to align things, must be < buffer_len */
297 shift
= ringbuf_sub(new_widx
, widx
);
299 /* How much space are we short in the actual ring buffer? */
300 overlap
= ringbuf_add_cross(widx
, shift
+ len
, buf_ridx
);
301 if (overlap
>= 0 && (alloc_all
|| (size_t)overlap
>= data_size
)) {
302 /* Not enough space for required allocations */
306 /* There is enough space for the required data, advance the buf_widx and
307 * initialize the struct */
310 struct memory_handle
*new_handle
=
311 (struct memory_handle
*)(&buffer
[buf_widx
]);
313 /* Prevent buffering thread from looking at it */
314 new_handle
->filerem
= 0;
316 /* only advance the buffer write index of the size of the struct */
317 buf_widx
= ringbuf_add(buf_widx
, sizeof(struct memory_handle
));
319 new_handle
->id
= cur_handle_id
;
320 /* Wrap signed int is safe and 0 doesn't happen */
321 cur_handle_id
= (cur_handle_id
+ 1) & BUF_HANDLE_MASK
;
322 new_handle
->next
= NULL
;
326 /* the new handle is the first one */
327 first_handle
= new_handle
;
330 cur_handle
->next
= new_handle
;
332 cur_handle
= new_handle
;
337 /* Delete a given memory handle from the linked list
338 and return true for success. Nothing is actually erased from memory. */
339 static bool rm_handle(const struct memory_handle
*h
)
344 if (h
== first_handle
) {
345 first_handle
= h
->next
;
346 if (h
== cur_handle
) {
347 /* h was the first and last handle: the buffer is now empty */
349 buf_ridx
= buf_widx
= 0;
351 /* update buf_ridx to point to the new first handle */
352 buf_ridx
= (size_t)ringbuf_offset(first_handle
);
355 struct memory_handle
*m
= first_handle
;
356 /* Find the previous handle */
357 while (m
&& m
->next
!= h
) {
360 if (m
&& m
->next
== h
) {
362 if (h
== cur_handle
) {
364 buf_widx
= cur_handle
->widx
;
371 /* Invalidate the cache to prevent it from keeping the old location of h */
372 if (h
== cached_handle
)
373 cached_handle
= NULL
;
379 /* Return a pointer to the memory handle of given ID.
380 NULL if the handle wasn't found */
381 static struct memory_handle
*find_handle(int handle_id
)
386 /* simple caching because most of the time the requested handle
387 will either be the same as the last, or the one after the last */
390 if (cached_handle
->id
== handle_id
) {
391 return cached_handle
;
392 } else if (cached_handle
->next
&&
393 (cached_handle
->next
->id
== handle_id
)) {
394 cached_handle
= cached_handle
->next
;
395 return cached_handle
;
399 struct memory_handle
*m
= first_handle
;
400 while (m
&& m
->id
!= handle_id
) {
403 /* This condition can only be reached with !m or m->id == handle_id */
410 /* Move a memory handle and data_size of its data delta bytes along the buffer.
411 delta maximum bytes available to move the handle. If the move is performed
412 it is set to the actual distance moved.
413 data_size is the amount of data to move along with the struct.
414 returns true if the move is successful and false if the handle is NULL,
415 the move would be less than the size of a memory_handle after
416 correcting for wraps or if the handle is not found in the linked
417 list for adjustment. This function has no side effects if false
419 static bool move_handle(struct memory_handle
**h
, size_t *delta
,
420 size_t data_size
, bool can_wrap
)
422 struct memory_handle
*dest
;
423 const struct memory_handle
*src
;
424 size_t final_delta
= *delta
, size_to_move
;
425 uintptr_t oldpos
, newpos
;
426 intptr_t overlap
, overlap_old
;
428 if (h
== NULL
|| (src
= *h
) == NULL
)
431 size_to_move
= sizeof(struct memory_handle
) + data_size
;
433 /* Align to four bytes, down */
435 if (final_delta
< sizeof(struct memory_handle
)) {
436 /* It's not legal to move less than the size of the struct */
440 oldpos
= ringbuf_offset(src
);
441 newpos
= ringbuf_add(oldpos
, final_delta
);
442 overlap
= ringbuf_add_cross(newpos
, size_to_move
, buffer_len
);
443 overlap_old
= ringbuf_add_cross(oldpos
, size_to_move
, buffer_len
);
446 /* Some part of the struct + data would wrap, maybe ok */
447 ssize_t correction
= 0;
448 /* If the overlap lands inside the memory_handle */
450 /* Otherwise the overlap falls in the data area and must all be
451 * backed out. This may become conditional if ever we move
452 * data that is allowed to wrap (ie audio) */
453 correction
= overlap
;
454 } else if ((uintptr_t)overlap
> data_size
) {
455 /* Correct the position and real delta to prevent the struct from
456 * wrapping, this guarantees an aligned delta if the struct size is
457 * aligned and the buffer is aligned */
458 correction
= overlap
- data_size
;
461 /* Align correction to four bytes up */
462 correction
= (correction
+ 3) & ~3;
463 if (final_delta
< correction
+ sizeof(struct memory_handle
)) {
464 /* Delta cannot end up less than the size of the struct */
467 newpos
-= correction
;
468 overlap
-= correction
;/* Used below to know how to split the data */
469 final_delta
-= correction
;
473 dest
= (struct memory_handle
*)(&buffer
[newpos
]);
475 if (src
== first_handle
) {
479 struct memory_handle
*m
= first_handle
;
480 while (m
&& m
->next
!= src
) {
483 if (m
&& m
->next
== src
) {
490 /* Update the cache to prevent it from keeping the old location of h */
491 if (src
== cached_handle
)
492 cached_handle
= dest
;
494 /* the cur_handle pointer might need updating */
495 if (src
== cur_handle
)
498 /* x = handle(s) following this one...
499 * ...if last handle, unmoveable if metadata, only shrinkable if audio.
500 * In other words, no legal move can be made that would have the src head
501 * and dest tail of the data overlap itself. These facts reduce the
502 * problem to four essential permutations.
504 * movement: always "clockwise" >>>>
506 * (src nowrap, dest nowrap)
511 * (src nowrap, dest wrap)
516 * (src wrap, dest nowrap)
521 * (src wrap, dest wrap)
524 * move: "23", "1", "0"
526 if (overlap_old
> 0) {
527 /* Move over already wrapped data by the final delta */
528 memmove(&buffer
[final_delta
], buffer
, overlap_old
);
530 size_to_move
-= overlap_old
;
534 /* Move data that now wraps to the beginning */
535 size_to_move
-= overlap
;
536 memmove(buffer
, SKIPBYTES(src
, size_to_move
),
537 overlap_old
> 0 ? final_delta
: (size_t)overlap
);
540 /* Move leading fragment containing handle struct */
541 memmove(dest
, src
, size_to_move
);
543 /* Update the caller with the new location of h and the distance moved */
545 *delta
= final_delta
;
551 BUFFER SPACE MANAGEMENT
552 =======================
554 update_data_counters: Updates the values in data_counters
555 buffer_is_low : Returns true if the amount of useful data in the buffer is low
556 buffer_handle : Buffer data for a handle
557 rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
558 shrink_handle : Free buffer space by moving a handle
559 fill_buffer : Call buffer_handle for all handles that have data to buffer
561 These functions are used by the buffering thread to manage buffer space.
563 static size_t handle_size_available(const struct memory_handle
*h
)
565 /* Obtain proper distances from data start */
566 size_t rd
= ringbuf_sub(h
->ridx
, h
->data
);
567 size_t wr
= ringbuf_sub(h
->widx
, h
->data
);
572 return 0; /* ridx is ahead of or equal to widx at this time */
575 static void update_data_counters(struct data_counters
*dc
)
579 size_t remaining
= 0;
582 struct memory_handle
*m
;
588 mutex_lock(&llist_mutex
);
590 m
= find_handle(base_handle_id
);
591 is_useful
= m
== NULL
;
595 buffered
+= m
->available
;
596 /* wasted could come out larger than the buffer size if ridx's are
597 overlapping data ahead of their handles' buffered data */
598 wasted
+= ringbuf_sub(m
->ridx
, m
->data
);
599 remaining
+= m
->filerem
;
601 if (m
->id
== base_handle_id
)
605 useful
+= handle_size_available(m
);
610 mutex_unlock(&llist_mutex
);
612 dc
->buffered
= buffered
;
614 dc
->remaining
= remaining
;
618 static inline bool buffer_is_low(void)
620 update_data_counters(NULL
);
621 return data_counters
.useful
< (conf_watermark
/ 2);
624 /* Q_BUFFER_HANDLE event and buffer data for the given handle.
625 Return whether or not the buffering should continue explicitly. */
626 static bool buffer_handle(int handle_id
, size_t to_buffer
)
628 logf("buffer_handle(%d)", handle_id
);
629 struct memory_handle
*h
= find_handle(handle_id
);
635 if (h
->filerem
== 0) {
636 /* nothing left to buffer */
640 if (h
->fd
< 0) { /* file closed, reopen */
642 h
->fd
= open(h
->path
, O_RDONLY
);
646 /* could not open the file, truncate it where it is */
647 h
->filesize
-= h
->filerem
;
653 lseek(h
->fd
, h
->offset
, SEEK_SET
);
658 if (h
->type
== TYPE_ID3
) {
659 if (!get_metadata((struct mp3entry
*)(buffer
+ h
->data
),
661 /* metadata parsing failed: clear the buffer. */
662 memset(buffer
+ h
->data
, 0, sizeof(struct mp3entry
));
667 h
->available
= sizeof(struct mp3entry
);
668 h
->widx
+= sizeof(struct mp3entry
);
669 send_event(BUFFER_EVENT_FINISHED
, &handle_id
);
673 while (h
->filerem
> 0 && !stop
)
675 /* max amount to copy */
676 ssize_t copy_n
= MIN( MIN(h
->filerem
, BUFFERING_DEFAULT_FILECHUNK
),
677 buffer_len
- h
->widx
);
678 uintptr_t offset
= h
->next
? ringbuf_offset(h
->next
) : buf_ridx
;
679 ssize_t overlap
= ringbuf_add_cross(h
->widx
, copy_n
, offset
) + 1;
682 /* read only up to available space and stop if it would overwrite
683 or be on top of the reading position or the next handle */
689 return false; /* no space for read */
691 /* rc is the actual amount read */
692 int rc
= read(h
->fd
, &buffer
[h
->widx
], copy_n
);
695 /* Some kind of filesystem error, maybe recoverable if not codec */
696 if (h
->type
== TYPE_CODEC
) {
697 logf("Partial codec");
701 DEBUGF("File ended %ld bytes early\n", (long)h
->filerem
);
702 h
->filesize
-= h
->filerem
;
708 h
->widx
= ringbuf_add(h
->widx
, rc
);
714 /* If this is a large file, see if we need to break or give the codec
716 if (h
->type
== TYPE_PACKET_AUDIO
&&
717 pcmbuf_is_lowdata() && !buffer_is_low()) {
723 if (to_buffer
== 0) {
724 /* Normal buffering - check queue */
725 if(!queue_empty(&buffering_queue
))
728 if (to_buffer
<= (size_t)rc
)
734 if (h
->filerem
== 0) {
735 /* finished buffering the file */
738 send_event(BUFFER_EVENT_FINISHED
, &handle_id
);
744 /* Close the specified handle id and free its allocation. */
745 static bool close_handle(int handle_id
)
748 struct memory_handle
*h
;
750 mutex_lock(&llist_mutex
);
751 h
= find_handle(handle_id
);
753 /* If the handle is not found, it is closed */
760 /* rm_handle returns true unless the handle somehow persists after
762 retval
= rm_handle(h
);
765 mutex_unlock(&llist_mutex
);
769 /* Free buffer space by moving the handle struct right before the useful
770 part of its data buffer or by moving all the data. */
771 static void shrink_handle(struct memory_handle
*h
)
778 if (h
->type
== TYPE_ID3
|| h
->type
== TYPE_CUESHEET
||
779 h
->type
== TYPE_BITMAP
|| h
->type
== TYPE_CODEC
||
780 h
->type
== TYPE_ATOMIC_AUDIO
)
782 /* metadata handle: we can move all of it */
783 if (!h
->next
|| h
->filerem
!= 0)
784 return; /* Last handle or not finished loading */
786 uintptr_t handle_distance
=
787 ringbuf_sub(ringbuf_offset(h
->next
), h
->data
);
788 delta
= handle_distance
- h
->available
;
790 /* The value of delta might change for alignment reasons */
791 if (!move_handle(&h
, &delta
, h
->available
, h
->type
==TYPE_CODEC
))
794 size_t olddata
= h
->data
;
795 h
->data
= ringbuf_add(h
->data
, delta
);
796 h
->ridx
= ringbuf_add(h
->ridx
, delta
);
797 h
->widx
= ringbuf_add(h
->widx
, delta
);
799 if (h
->type
== TYPE_ID3
&& h
->filesize
== sizeof(struct mp3entry
)) {
800 /* when moving an mp3entry we need to readjust its pointers. */
801 adjust_mp3entry((struct mp3entry
*)&buffer
[h
->data
],
802 (void *)&buffer
[h
->data
],
803 (const void *)&buffer
[olddata
]);
804 } else if (h
->type
== TYPE_BITMAP
) {
805 /* adjust the bitmap's pointer */
806 struct bitmap
*bmp
= (struct bitmap
*)&buffer
[h
->data
];
807 bmp
->data
= &buffer
[h
->data
+ sizeof(struct bitmap
)];
810 /* only move the handle struct */
811 delta
= ringbuf_sub(h
->ridx
, h
->data
);
812 if (!move_handle(&h
, &delta
, 0, true))
815 h
->data
= ringbuf_add(h
->data
, delta
);
816 h
->available
-= delta
;
821 /* Fill the buffer by buffering as much data as possible for handles that still
822 have data left to buffer
823 Return whether or not to continue filling after this */
824 static bool fill_buffer(void)
826 logf("fill_buffer()");
827 struct memory_handle
*m
= first_handle
;
831 while (queue_empty(&buffering_queue
) && m
) {
832 if (m
->filerem
> 0) {
833 if (!buffer_handle(m
->id
, 0)) {
844 /* only spin the disk down if the filling wasn't interrupted by an
845 event arriving in the queue. */
852 /* Given a file descriptor to a bitmap file, write the bitmap data to the
853 buffer, with a struct bitmap and the actual data immediately following.
854 Return value is the total size (struct + data). */
855 static int load_image(int fd
, const char *path
,
856 struct bufopen_bitmap_data
*data
)
859 struct bitmap
*bmp
= (struct bitmap
*)&buffer
[buf_widx
];
860 struct dim
*dim
= data
->dim
;
861 struct mp3_albumart
*aa
= data
->embedded_albumart
;
863 /* get the desired image size */
864 bmp
->width
= dim
->width
, bmp
->height
= dim
->height
;
865 /* FIXME: alignment may be needed for the data buffer. */
866 bmp
->data
= &buffer
[buf_widx
+ sizeof(struct bitmap
)];
870 #if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
871 bmp
->maskdata
= NULL
;
874 int free
= (int)MIN(buffer_len
- BUF_USED
, buffer_len
- buf_widx
)
875 - sizeof(struct bitmap
);
879 lseek(fd
, aa
->pos
, SEEK_SET
);
880 rc
= clip_jpeg_fd(fd
, aa
->size
, bmp
, free
, FORMAT_NATIVE
|FORMAT_DITHER
|
881 FORMAT_RESIZE
|FORMAT_KEEP_ASPECT
, NULL
);
883 else if (strcmp(path
+ strlen(path
) - 4, ".bmp"))
884 rc
= read_jpeg_fd(fd
, bmp
, free
, FORMAT_NATIVE
|FORMAT_DITHER
|
885 FORMAT_RESIZE
|FORMAT_KEEP_ASPECT
, NULL
);
888 rc
= read_bmp_fd(fd
, bmp
, free
, FORMAT_NATIVE
|FORMAT_DITHER
|
889 FORMAT_RESIZE
|FORMAT_KEEP_ASPECT
, NULL
);
890 return rc
+ (rc
> 0 ? sizeof(struct bitmap
) : 0);
896 MAIN BUFFERING API CALLS
897 ========================
899 bufopen : Request the opening of a new handle for a file
900 bufalloc : Open a new handle for data other than a file.
901 bufclose : Close an open handle
902 bufseek : Set the read pointer in a handle
903 bufadvance : Move the read pointer in a handle
904 bufread : Copy data from a handle into a given buffer
905 bufgetdata : Give a pointer to the handle's data
907 These functions are exported, to allow interaction with the buffer.
908 They take care of the content of the structs, and rely on the linked list
909 management functions for all the actual handle management work.
913 /* Reserve space in the buffer for a file.
914 filename: name of the file to open
915 offset: offset at which to start buffering the file, useful when the first
916 offset bytes of the file aren't needed.
917 type: one of the data types supported (audio, image, cuesheet, others
918 user_data: user data possibly passed in subcalls specific to a
919 data_type (only used for image (albumart) buffering so far )
920 return value: <0 if the file cannot be opened, or one file already
921 queued to be opened, otherwise the handle for the file in the buffer
923 int bufopen(const char *file
, size_t offset
, enum data_type type
,
926 #ifndef HAVE_ALBUMART
927 /* currently only used for aa loading */
930 int handle_id
= ERR_BUFFER_FULL
;
932 /* No buffer refs until after the mutex_lock call! */
934 if (type
== TYPE_ID3
) {
935 /* ID3 case: allocate space, init the handle and return. */
936 mutex_lock(&llist_mutex
);
938 struct memory_handle
*h
=
939 add_handle(sizeof(struct mp3entry
), false, true);
944 h
->filesize
= sizeof(struct mp3entry
);
951 strlcpy(h
->path
, file
, MAX_PATH
);
953 buf_widx
+= sizeof(struct mp3entry
); /* safe because the handle
955 h
->filerem
= sizeof(struct mp3entry
);
957 /* Inform the buffering thread that we added a handle */
958 LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id
);
959 queue_post(&buffering_queue
, Q_HANDLE_ADDED
, handle_id
);
962 mutex_unlock(&llist_mutex
);
966 /* loading code from memory is not supported in application builds */
967 else if (type
== TYPE_CODEC
)
968 return ERR_UNSUPPORTED_TYPE
;
970 /* Other cases: there is a little more work. */
971 int fd
= open(file
, O_RDONLY
);
973 return ERR_FILE_ERROR
;
977 if (type
== TYPE_BITMAP
) {
978 /* if albumart is embedded, the complete file is not buffered,
979 * but only the jpeg part; filesize() would be wrong */
980 struct bufopen_bitmap_data
*aa
= (struct bufopen_bitmap_data
*)user_data
;
981 if (aa
->embedded_albumart
)
982 size
= aa
->embedded_albumart
->size
;
987 bool can_wrap
= type
==TYPE_PACKET_AUDIO
|| type
==TYPE_CODEC
;
989 size_t adjusted_offset
= offset
;
990 if (adjusted_offset
> size
)
993 /* Reserve extra space because alignment can move data forward */
994 size_t padded_size
= STORAGE_PAD(size
-adjusted_offset
);
996 mutex_lock(&llist_mutex
);
998 struct memory_handle
*h
= add_handle(padded_size
, can_wrap
, false);
1000 DEBUGF("%s(): failed to add handle\n", __func__
);
1001 mutex_unlock(&llist_mutex
);
1003 return ERR_BUFFER_FULL
;
1007 strlcpy(h
->path
, file
, MAX_PATH
);
1008 h
->offset
= adjusted_offset
;
1010 #ifdef STORAGE_WANTS_ALIGN
1011 /* Don't bother to storage align bitmaps because they are not
1012 * loaded directly into the buffer.
1014 if (type
!= TYPE_BITMAP
) {
1015 /* Align to desired storage alignment */
1016 size_t alignment_pad
= STORAGE_OVERLAP(adjusted_offset
-
1017 (size_t)(&buffer
[buf_widx
]));
1018 buf_widx
= ringbuf_add(buf_widx
, alignment_pad
);
1020 #endif /* STORAGE_WANTS_ALIGN */
1029 #ifdef HAVE_ALBUMART
1030 if (type
== TYPE_BITMAP
) {
1031 /* Bitmap file: we load the data instead of the file */
1033 rc
= load_image(fd
, file
, (struct bufopen_bitmap_data
*)user_data
);
1036 handle_id
= ERR_FILE_ERROR
;
1040 h
->widx
= buf_widx
+ rc
; /* safe because the data doesn't wrap */
1041 buf_widx
+= rc
; /* safe too */
1047 if (type
== TYPE_CUESHEET
)
1053 h
->filerem
= size
- adjusted_offset
;
1056 mutex_unlock(&llist_mutex
);
1058 if (type
== TYPE_CUESHEET
) {
1059 /* Immediately start buffering those */
1060 LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id
);
1061 queue_send(&buffering_queue
, Q_BUFFER_HANDLE
, handle_id
);
1063 /* Other types will get buffered in the course of normal operations */
1066 if (handle_id
>= 0) {
1067 /* Inform the buffering thread that we added a handle */
1068 LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id
);
1069 queue_post(&buffering_queue
, Q_HANDLE_ADDED
, handle_id
);
1073 logf("bufopen: new hdl %d", handle_id
);
1077 /* Open a new handle from data that needs to be copied from memory.
1078 src is the source buffer from which to copy data. It can be NULL to simply
1079 reserve buffer space.
1080 size is the requested size. The call will only be successful if the
1081 requested amount of data can entirely fit in the buffer without wrapping.
1082 Return value is the handle id for success or <0 for failure.
1084 int bufalloc(const void *src
, size_t size
, enum data_type type
)
1086 int handle_id
= ERR_BUFFER_FULL
;
1088 mutex_lock(&llist_mutex
);
1090 struct memory_handle
*h
= add_handle(size
, false, true);
1096 if (type
== TYPE_ID3
&& size
== sizeof(struct mp3entry
)) {
1097 /* specially take care of struct mp3entry */
1098 copy_mp3entry((struct mp3entry
*)&buffer
[buf_widx
],
1099 (const struct mp3entry
*)src
);
1101 memcpy(&buffer
[buf_widx
], src
, size
);
1110 h
->widx
= buf_widx
+ size
; /* safe because the data doesn't wrap */
1112 h
->available
= size
;
1115 buf_widx
+= size
; /* safe too */
1118 mutex_unlock(&llist_mutex
);
1120 logf("bufalloc: new hdl %d", handle_id
);
1124 /* Close the handle. Return true for success and false for failure */
1125 bool bufclose(int handle_id
)
1127 logf("bufclose(%d)", handle_id
);
1129 LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id
);
1130 return queue_send(&buffering_queue
, Q_CLOSE_HANDLE
, handle_id
);
1133 /* Backend to bufseek and bufadvance. Call only in response to
1134 Q_REBUFFER_HANDLE! */
1135 static void rebuffer_handle(int handle_id
, size_t newpos
)
1137 struct memory_handle
*h
= find_handle(handle_id
);
1140 queue_reply(&buffering_queue
, ERR_HANDLE_NOT_FOUND
);
1144 /* When seeking forward off of the buffer, if it is a short seek attempt to
1145 avoid rebuffering the whole track, just read enough to satisfy */
1146 if (newpos
> h
->offset
&&
1147 newpos
- h
->offset
< BUFFERING_DEFAULT_FILECHUNK
) {
1149 size_t amount
= newpos
- h
->offset
;
1150 h
->ridx
= ringbuf_add(h
->data
, amount
);
1152 if (buffer_handle(handle_id
, amount
+ 1)) {
1153 size_t rd
= ringbuf_sub(h
->ridx
, h
->data
);
1154 size_t wr
= ringbuf_sub(h
->widx
, h
->data
);
1156 /* It really did succeed */
1157 queue_reply(&buffering_queue
, 0);
1158 buffer_handle(handle_id
, 0); /* Ok, try the rest */
1162 /* Data collision or other file error - must reset */
1164 if (newpos
> h
->filesize
)
1165 newpos
= h
->filesize
; /* file truncation happened above */
1168 /* Reset the handle to its new position */
1171 size_t next
= h
->next
? ringbuf_offset(h
->next
) : buf_ridx
;
1173 #ifdef STORAGE_WANTS_ALIGN
1174 /* Strip alignment padding then redo */
1175 size_t new_index
= ringbuf_add(ringbuf_offset(h
), sizeof (*h
));
1177 /* Align to desired storage alignment if space permits - handle could
1178 have been shrunken too close to the following one after a previous
1180 size_t alignment_pad
=
1181 STORAGE_OVERLAP(h
->offset
- (size_t)(&buffer
[new_index
]));
1183 if (ringbuf_add_cross(new_index
, alignment_pad
, next
) >= 0)
1184 alignment_pad
= 0; /* Forego storage alignment this time */
1186 new_index
= ringbuf_add(new_index
, alignment_pad
);
1188 /* Just clear the data buffer */
1189 size_t new_index
= h
->data
;
1190 #endif /* STORAGE_WANTS_ALIGN */
1192 h
->ridx
= h
->widx
= h
->data
= new_index
;
1194 if (h
== cur_handle
)
1195 buf_widx
= new_index
;
1198 h
->filerem
= h
->filesize
- h
->offset
;
1201 lseek(h
->fd
, h
->offset
, SEEK_SET
);
1203 if (h
->next
&& ringbuf_sub(next
, h
->data
) <= h
->filesize
- newpos
) {
1204 /* There isn't enough space to rebuffer all of the track from its new
1205 offset, so we ask the user to free some */
1206 DEBUGF("%s(): space is needed\n", __func__
);
1207 int hid
= handle_id
;
1208 send_event(BUFFER_EVENT_REBUFFER
, &hid
);
1211 /* Now we do the rebuffer */
1212 queue_reply(&buffering_queue
, 0);
1213 buffer_handle(handle_id
, 0);
1216 /* Backend to bufseek and bufadvance */
1217 static int seek_handle(struct memory_handle
*h
, size_t newpos
)
1219 if (newpos
> h
->filesize
) {
1220 /* access beyond the end of the file */
1221 return ERR_INVALID_VALUE
;
1223 else if ((newpos
< h
->offset
|| h
->offset
+ h
->available
<= newpos
) &&
1224 (newpos
< h
->filesize
|| h
->filerem
> 0)) {
1225 /* access before or after buffered data and not to end of file or file
1226 is not buffered to the end-- a rebuffer is needed. */
1227 struct buf_message_data parm
= { h
->id
, newpos
};
1228 return queue_send(&buffering_queue
, Q_REBUFFER_HANDLE
,
1232 h
->ridx
= ringbuf_add(h
->data
, newpos
- h
->offset
);
1238 /* Set reading index in handle (relatively to the start of the file).
1239 Access before the available data will trigger a rebuffer.
1240 Return 0 for success and < 0 for failure:
1241 -1 if the handle wasn't found
1242 -2 if the new requested position was beyond the end of the file
1244 int bufseek(int handle_id
, size_t newpos
)
1246 struct memory_handle
*h
= find_handle(handle_id
);
1248 return ERR_HANDLE_NOT_FOUND
;
1250 return seek_handle(h
, newpos
);
1253 /* Advance the reading index in a handle (relatively to its current position).
1254 Return 0 for success and < 0 for failure */
1255 int bufadvance(int handle_id
, off_t offset
)
1257 struct memory_handle
*h
= find_handle(handle_id
);
1259 return ERR_HANDLE_NOT_FOUND
;
1261 size_t newpos
= h
->offset
+ ringbuf_sub(h
->ridx
, h
->data
) + offset
;
1262 return seek_handle(h
, newpos
);
/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
 * actual amount of data available for reading. This function explicitly
 * does not check the validity of the input handle. It does do range checks
 * on size and returns a valid (and explicit) amount of data for reading */
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
                                          bool guardbuf_limit)
{
    struct memory_handle *h = find_handle(handle_id);
    size_t realsize;

    if (!h)
        return NULL;

    size_t avail = handle_size_available(h);

    if (avail == 0 && h->filerem == 0) {
        /* File is finished reading */
        *size = 0;
        return h;
    }

    /* *size == 0 means "as much as possible"; also clamp to everything the
       file can still deliver (already buffered + not yet read from disk). */
    realsize = *size;
    if (realsize == 0 || realsize > avail + h->filerem)
        realsize = avail + h->filerem;

    if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO
        && realsize > GUARD_BUFSIZE) {
        logf("data request > guardbuf");
        /* If more than the size of the guardbuf is requested and this is a
         * bufgetdata, limit to guard_bufsize over the end of the buffer */
        realsize = MIN(realsize, buffer_len - h->ridx + GUARD_BUFSIZE);
        /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
    }

    if (h->filerem > 0 && avail < realsize) {
        /* Data isn't ready. Request buffering */
        buf_request_buffer_handle(handle_id);
        /* Wait for the data to be ready */
        do
        {
            sleep(0);
            /* it is not safe for a non-buffering thread to sleep while
             * holding a handle */
            h = find_handle(handle_id);
            if (!h)
                return NULL;
            avail = handle_size_available(h);
        }
        while (h->filerem > 0 && avail < realsize);
    }

    /* Report only what is actually buffered; never more than requested. */
    *size = MIN(realsize, avail);
    return h;
}
1322 /* Note: It is safe for the thread responsible for handling the rebuffer
1323 * cleanup request to call bufread or bufgetdata only when the data will
1324 * be available-- not if it could be blocked waiting for it in prep_bufdata.
1325 * It should be apparent that if said thread is being forced to wait for
1326 * buffering but has not yet responded to the cleanup request, the space
1327 * can never be cleared to allow further reading of the file because it is
1328 * not listening to callbacks any longer. */
1330 /* Copy data from the given handle to the dest buffer.
1331 Return the number of bytes copied or < 0 for failure (handle not found).
1332 The caller is blocked until the requested amount of data is available.
1334 ssize_t
bufread(int handle_id
, size_t size
, void *dest
)
1336 const struct memory_handle
*h
;
1337 size_t adjusted_size
= size
;
1339 h
= prep_bufdata(handle_id
, &adjusted_size
, false);
1341 return ERR_HANDLE_NOT_FOUND
;
1343 if (h
->ridx
+ adjusted_size
> buffer_len
) {
1344 /* the data wraps around the end of the buffer */
1345 size_t read
= buffer_len
- h
->ridx
;
1346 memcpy(dest
, &buffer
[h
->ridx
], read
);
1347 memcpy(dest
+read
, buffer
, adjusted_size
- read
);
1349 memcpy(dest
, &buffer
[h
->ridx
], adjusted_size
);
1352 return adjusted_size
;
1355 /* Update the "data" pointer to make the handle's data available to the caller.
1356 Return the length of the available linear data or < 0 for failure (handle
1358 The caller is blocked until the requested amount of data is available.
1359 size is the amount of linear data requested. it can be 0 to get as
1361 The guard buffer may be used to provide the requested size. This means it's
1362 unsafe to request more than the size of the guard buffer.
1364 ssize_t
bufgetdata(int handle_id
, size_t size
, void **data
)
1366 const struct memory_handle
*h
;
1367 size_t adjusted_size
= size
;
1369 h
= prep_bufdata(handle_id
, &adjusted_size
, true);
1371 return ERR_HANDLE_NOT_FOUND
;
1373 if (h
->ridx
+ adjusted_size
> buffer_len
) {
1374 /* the data wraps around the end of the buffer :
1375 use the guard buffer to provide the requested amount of data. */
1376 size_t copy_n
= h
->ridx
+ adjusted_size
- buffer_len
;
1377 /* prep_bufdata ensures
1378 adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
1379 so copy_n <= GUARD_BUFSIZE */
1380 memcpy(guard_buffer
, (const unsigned char *)buffer
, copy_n
);
1384 *data
= &buffer
[h
->ridx
];
1386 return adjusted_size
;
1389 ssize_t
bufgettail(int handle_id
, size_t size
, void **data
)
1393 const struct memory_handle
*h
;
1395 h
= find_handle(handle_id
);
1398 return ERR_HANDLE_NOT_FOUND
;
1401 return ERR_HANDLE_NOT_DONE
;
1403 /* We don't support tail requests of > guardbuf_size, for simplicity */
1404 if (size
> GUARD_BUFSIZE
)
1405 return ERR_INVALID_VALUE
;
1407 tidx
= ringbuf_sub(h
->widx
, size
);
1409 if (tidx
+ size
> buffer_len
) {
1410 size_t copy_n
= tidx
+ size
- buffer_len
;
1411 memcpy(guard_buffer
, (const unsigned char *)buffer
, copy_n
);
1414 *data
= &buffer
[tidx
];
1418 ssize_t
bufcuttail(int handle_id
, size_t size
)
1420 struct memory_handle
*h
;
1421 size_t adjusted_size
= size
;
1423 h
= find_handle(handle_id
);
1426 return ERR_HANDLE_NOT_FOUND
;
1429 return ERR_HANDLE_NOT_DONE
;
1431 if (h
->available
< adjusted_size
)
1432 adjusted_size
= h
->available
;
1434 h
->available
-= adjusted_size
;
1435 h
->filesize
-= adjusted_size
;
1436 h
->widx
= ringbuf_sub(h
->widx
, adjusted_size
);
1437 if (h
== cur_handle
)
1440 return adjusted_size
;
1445 SECONDARY EXPORTED FUNCTIONS
1446 ============================
1450 buf_request_buffer_handle
1453 register_buffering_callback
1454 unregister_buffering_callback
1456 These functions are exported, to allow interaction with the buffer.
1457 They take care of the content of the structs, and rely on the linked list
1458 management functions for all the actual handle management work.
1461 /* Get a handle offset from a pointer */
1462 ssize_t
buf_get_offset(int handle_id
, void *ptr
)
1464 const struct memory_handle
*h
= find_handle(handle_id
);
1466 return ERR_HANDLE_NOT_FOUND
;
1468 return (size_t)ptr
- (size_t)&buffer
[h
->ridx
];
1471 ssize_t
buf_handle_offset(int handle_id
)
1473 const struct memory_handle
*h
= find_handle(handle_id
);
1475 return ERR_HANDLE_NOT_FOUND
;
/* Ask the buffering thread to start filling the given handle.
   Sent synchronously (queue_send): the buffering thread replies once it has
   begun processing the fill request. */
void buf_request_buffer_handle(int handle_id)
{
    LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id);
    queue_send(&buffering_queue, Q_START_FILL, handle_id);
}
/* Tell the buffering thread which handle to treat as the base handle.
   Posted asynchronously; the thread stores it in base_handle_id. */
void buf_set_base_handle(int handle_id)
{
    LOGFQUEUE("buffering > Q_BASE_HANDLE %d", handle_id);
    queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
}
/* Return the amount of buffer space used */
size_t buf_used(void)
{
    return BUF_USED;
}
/* Set the watermark: the amount of useful data below which the buffering
   thread starts refilling (see the conf_watermark checks in
   buffering_thread). */
void buf_set_watermark(size_t bytes)
{
    conf_watermark = bytes;
}
/* Recursively shrink every handle in the linked list, visiting h->next
   before h so handles are processed from the last toward the first. */
static void shrink_buffer_inner(struct memory_handle *h)
{
    if (h == NULL)
        return;

    /* Shrink the rest of the list first */
    shrink_buffer_inner(h->next);

    shrink_handle(h);
}
/* Shrink all handles in the list, starting from the head, to reclaim
   unused buffer space. */
static void shrink_buffer(void)
{
    logf("shrink_buffer()");
    shrink_buffer_inner(first_handle);
}
/* Main loop of the buffering thread: services queued requests (fill, close,
   rebuffer, base-handle changes) and opportunistically keeps the buffer
   topped up whenever the queue is idle. */
void buffering_thread(void)
{
    bool filling = false;
    struct queue_event ev;
    struct buf_message_data *parm;

    while (true)
    {
        /* Poll quickly while a fill is in progress, otherwise wake up at
           HZ/2 to check the watermark. */
        queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);

        switch (ev.id)
        {
            case Q_START_FILL:
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                /* Call buffer callbacks here because this is one of two ways
                 * to begin a full buffer fill */
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                filling |= buffer_handle((int)ev.data, 0);
                break;

            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data, 0);
                break;

            case Q_REBUFFER_HANDLE:
                parm = (struct buf_message_data *)ev.data;
                LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld",
                          parm->handle_id, parm->data);
                rebuffer_handle(parm->handle_id, parm->data);
                break;

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case Q_BASE_HANDLE:
                LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
                base_handle_id = (int)ev.data;
                break;

#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
            case SYS_USB_CONNECTED:
                LOGFQUEUE("buffering < SYS_USB_CONNECTED");
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                usb_wait_for_disconnect(&buffering_queue);
                break;
#endif

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        update_data_counters(NULL);

        /* If the buffer is low, call the callbacks to get new data */
        if (num_handles > 0 && data_counters.useful <= conf_watermark)
            send_event(BUFFER_EVENT_BUFFER_LOW, 0);

#if 0
        /* TODO: This needs to be fixed to use the idle callback, disable it
         * for simplicity until its done right */
#if MEMORYSIZE > 8
        /* If the disk is spinning, take advantage by filling the buffer */
        else if (storage_disk_is_active() && queue_empty(&buffering_queue)) {
            if (num_handles > 0 && data_counters.useful <= high_watermark)
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);

            if (data_counters.remaining > 0 && BUF_USED <= high_watermark) {
                /* This is a new fill, shrink the buffer up first */
                if (!filling)
                    shrink_buffer();
                filling = fill_buffer();
                update_data_counters(NULL);
            }
        }
#endif
#endif

        if (queue_empty(&buffering_queue)) {
            if (filling) {
                /* Keep filling while there is data left and buffer space */
                if (data_counters.remaining > 0 && BUF_USED < buffer_len)
                    filling = fill_buffer();
                else if (data_counters.remaining == 0)
                    filling = false;
            } else if (ev.id == SYS_TIMEOUT) {
                /* Idle timeout: start a fresh fill if below the watermark */
                if (data_counters.remaining > 0 &&
                    data_counters.useful <= conf_watermark) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}
/* One-time setup of the buffering subsystem: mutex, default watermark,
   message queue, and the (initially frozen) buffering thread. */
void buffering_init(void)
{
    mutex_init(&llist_mutex);

    conf_watermark = BUFFERING_DEFAULT_WATERMARK;

    queue_init(&buffering_queue, true);
    /* Thread is created frozen; buffering_reset() thaws it once a buffer
       has been assigned. */
    buffering_thread_id = create_thread( buffering_thread, buffering_stack,
            sizeof(buffering_stack), CREATE_THREAD_FROZEN,
            buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
            IF_COP(, CPU));

    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
                            buffering_thread_id);
}
/* Initialise the buffering subsystem */
bool buffering_reset(char *buf, size_t buflen)
{
    /* Wraps of storage-aligned data must also be storage aligned,
       thus buf and buflen must be a aligned to an integer multiple of
       the storage alignment */
    STORAGE_ALIGN_BUFFER(buf, buflen);

    if (!buf || !buflen)
        return false;

    buffer = buf;
    buffer_len = buflen;
    /* The guard region lives immediately past the end of the main buffer */
    guard_buffer = buf + buflen;

    buf_widx = 0;
    buf_ridx = 0;

    first_handle = NULL;
    cur_handle = NULL;
    cached_handle = NULL;
    num_handles = 0;
    base_handle_id = -1;

    /* Set the high watermark as 75% full...or 25% empty :) */
    high_watermark = 3*buflen / 4;

    thread_thaw(buffering_thread_id);

    return true;
}
1679 void buffering_get_debugdata(struct buffering_debug
*dbgdata
)
1681 struct data_counters dc
;
1682 update_data_counters(&dc
);
1683 dbgdata
->num_handles
= num_handles
;
1684 dbgdata
->data_rem
= dc
.remaining
;
1685 dbgdata
->wasted_space
= dc
.wasted
;
1686 dbgdata
->buffered_data
= dc
.buffered
;
1687 dbgdata
->useful_data
= dc
.useful
;
1688 dbgdata
->watermark
= conf_watermark
;