/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 Nicolas Pennequin
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "config.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include "buffering.h"

#include "storage.h"
#include "system.h"
#include "thread.h"
#include "file.h"
#include "panic.h"
#include "memory.h"
#include "lcd.h"
#include "font.h"
#include "button.h"
#include "kernel.h"
#include "tree.h"
#include "debug.h"
#include "settings.h"
#include "codecs.h"
#include "audio.h"
#include "mp3_playback.h"
#include "usb.h"
#include "screens.h"
#include "playlist.h"
#include "pcmbuf.h"
#include "appevents.h"
#include "metadata.h"
#ifdef HAVE_ALBUMART
#include "albumart.h"
#include "jpeg_load.h"
#include "bmp.h"
#endif

#define GUARD_BUFSIZE (32*1024)

/* Define LOGF_ENABLE to enable logf output in this file */
/*#define LOGF_ENABLE*/
#include "logf.h"

/* macros to enable logf for queues
   logging on SYS_TIMEOUT can be disabled */
#ifdef SIMULATOR
/* Define this for logf output of all queuing except SYS_TIMEOUT */
#define BUFFERING_LOGQUEUES
/* Define this to logf SYS_TIMEOUT messages */
/* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
#endif

#ifdef BUFFERING_LOGQUEUES
#define LOGFQUEUE logf
#else
#define LOGFQUEUE(...)
#endif

#ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
#define LOGFQUEUE_SYS_TIMEOUT logf
#else
#define LOGFQUEUE_SYS_TIMEOUT(...)
#endif

/* default point to start buffer refill */
#define BUFFERING_DEFAULT_WATERMARK (1024*128)
/* amount of data to read in one read() call */
#define BUFFERING_DEFAULT_FILECHUNK (1024*32)

#define BUF_HANDLE_MASK 0x7FFFFFFF


/* assert(sizeof(struct memory_handle)%4==0) */
struct memory_handle {
    int id;                    /* A unique ID for the handle */
    enum data_type type;       /* Type of data buffered with this handle */
    char path[MAX_PATH];       /* Path if data originated in a file */
    int fd;                    /* File descriptor to path (-1 if closed) */
    size_t start;              /* Start index of the handle's data buffer,
                                  for use by reset_handle. */
    size_t data;               /* Start index of the handle's data */
    volatile size_t ridx;      /* Read pointer, relative to the main buffer */
    size_t widx;               /* Write pointer */
    size_t filesize;           /* File total length */
    size_t filerem;            /* Remaining bytes of file NOT in buffer */
    volatile size_t available; /* Available bytes to read from buffer */
    size_t offset;             /* Offset at which we started reading the file */
    struct memory_handle *next;
};
/* invariant: filesize == offset + available + filerem */
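/* Worked example of the invariant (the numbers are hypothetical): a handle
   for a 1,000,000-byte file opened at offset 100,000, with 400,000 bytes
   currently available in the buffer and 500,000 bytes still to read,
   satisfies 1,000,000 == 100,000 + 400,000 + 500,000. */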

static char *buffer;
static char *guard_buffer;

static size_t buffer_len;

static volatile size_t buf_widx; /* current writing position */
static volatile size_t buf_ridx; /* current reading position */
/* buf_*idx are values relative to the buffer, not real pointers. */

/* Configuration */
static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
#if MEM > 8
static size_t high_watermark = 0; /* High watermark for rebuffer */
#endif

/* current memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *cur_handle;
/* first memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *first_handle;

static int num_handles; /* number of handles in the list */

static int base_handle_id;

static struct mutex llist_mutex;
static struct mutex llist_mod_mutex;

/* Handle cache (makes find_handle faster).
   This is global so that move_handle and rm_handle can invalidate it. */
static struct memory_handle *cached_handle = NULL;

static struct {
    size_t remaining; /* Amount of data needing to be buffered */
    size_t wasted;    /* Amount of space available for freeing */
    size_t buffered;  /* Amount of data currently in the buffer */
    size_t useful;    /* Amount of data still useful to the user */
} data_counters;


/* Messages available to communicate with the buffering thread */
enum {
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                            used in a low buffer situation. */
    Q_RESET_HANDLE,      /* (internal) Request resetting of a handle to its
                            offset (the offset has to be set beforehand) */
    Q_CLOSE_HANDLE,      /* Request closing a handle */
    Q_BASE_HANDLE,       /* Set the reference handle for buf_useful_data */

    /* Configuration: */
    Q_START_FILL,        /* Request that the buffering thread initiate a buffer
                            fill at its earliest convenience */
    Q_HANDLE_ADDED,      /* Inform the buffering thread that a handle was added,
                            (which means the disk is spinning) */
};

/* Buffering thread */
static void buffering_thread(void);
static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
static const char buffering_thread_name[] = "buffering";
static unsigned int buffering_thread_id = 0;
static struct event_queue buffering_queue;
static struct queue_sender_list buffering_queue_sender_list;



/* Ring buffer helper functions */

static inline uintptr_t ringbuf_offset(const void *ptr)
{
    return (uintptr_t)(ptr - (void*)buffer);
}

/* Buffer pointer (p) plus value (v), wrapped if necessary */
static inline uintptr_t ringbuf_add(uintptr_t p, size_t v)
{
    uintptr_t res = p + v;
    if (res >= buffer_len)
        res -= buffer_len; /* wrap if necessary */
    return res;
}


/* Buffer pointer (p) minus value (v), wrapped if necessary */
static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v)
{
    uintptr_t res = p;
    if (p < v)
        res += buffer_len; /* wrap */

    return res - v;
}


/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2)
{
    ssize_t res = p1 + v - p2;
    if (p1 >= p2) /* wrap if necessary */
        res -= buffer_len;

    return res;
}

/* Bytes available in the buffer */
#define BUF_USED ringbuf_sub(buf_widx, buf_ridx)
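
/* Worked example of the helpers above (illustrative only; assumes
   buffer_len == 1000):
     ringbuf_add(990, 20)           == 10   (990 + 20 wraps past the end)
     ringbuf_sub(10, 20)            == 990  (10 - 20 wraps below zero)
     ringbuf_add_cross(990, 30, 10) == 10   (writing 30 bytes at 990 would
                                             cross position 10 by 10 bytes;
                                             a negative result means p2 is
                                             not crossed)
   BUF_USED is then the number of bytes between buf_ridx and buf_widx. */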

/*
LINKED LIST MANAGEMENT
======================

add_handle  : Add a handle to the list
rm_handle   : Remove a handle from the list
find_handle : Get a handle pointer from an ID
move_handle : Move a handle in the buffer (with or without its data)

These functions only handle the linked list structure. They don't touch the
contents of the struct memory_handle headers. They also change the buf_*idx
pointers when necessary and manage the handle IDs.

The first and current (== last) handles are kept track of.
A new handle is added at buf_widx and becomes the current one.
buf_widx always points to the current writing position for the current handle.
buf_ridx always points to the location of the first handle.
buf_ridx == buf_widx means the buffer is empty.
*/
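
/* Illustrative layout sketch with three handles (not to scale, and only
   meant to visualise the rules above):

   buf_ridx                                                      buf_widx
      v                                                              v
   ...[hdr|data of first_handle][hdr|data][hdr|data of cur_handle]...free...

   Each [hdr|data] block is a struct memory_handle immediately followed by
   the data it describes; the blocks are chained through their 'next'
   pointers from first_handle to cur_handle. */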


/* Add a new handle to the linked list and return it. It will have become the
   new current handle.
   data_size must contain the size of what will be in the handle.
   can_wrap tells us whether this type of data may wrap on buffer
   alloc_all tells us if we must immediately be able to allocate data_size
   returns a valid memory handle if all conditions for allocation are met.
           NULL if the memory_handle itself cannot be allocated or if the
           data_size cannot be allocated and alloc_all is set.  This function's
           only potential side effect is to allocate space for the cur_handle
           if it returns NULL.
*/
static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
                                        bool alloc_all)
{
    /* gives each handle a unique id */
    static int cur_handle_id = 0;
    size_t shift;
    size_t new_widx;
    size_t len;
    int overlap;

    if (num_handles >= BUF_MAX_HANDLES)
        return NULL;

    mutex_lock(&llist_mutex);
    mutex_lock(&llist_mod_mutex);

    if (cur_handle && cur_handle->filerem > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        size_t req = cur_handle->filerem + sizeof(struct memory_handle);
        if (ringbuf_add_cross(cur_handle->widx, req, buf_ridx) >= 0) {
            /* Not enough space */
            mutex_unlock(&llist_mod_mutex);
            mutex_unlock(&llist_mutex);
            return NULL;
        } else {
            /* Allocate the remainder of the space for the current handle */
            buf_widx = ringbuf_add(cur_handle->widx, cur_handle->filerem);
        }
    }

    /* align to 4 bytes up */
    new_widx = ringbuf_add(buf_widx, 3) & ~3;

    len = data_size + sizeof(struct memory_handle);

    /* First, will the handle wrap? */
    /* If the handle would wrap, move to the beginning of the buffer,
     * or if the data must not but would wrap, move it to the beginning */
    if( (new_widx + sizeof(struct memory_handle) > buffer_len) ||
        (!can_wrap && (new_widx + len > buffer_len)) ) {
        new_widx = 0;
    }

    /* How far we shifted buf_widx to align things, must be < buffer_len */
    shift = ringbuf_sub(new_widx, buf_widx);

    /* How much space are we short in the actual ring buffer? */
    overlap = ringbuf_add_cross(buf_widx, shift + len, buf_ridx);
    if (overlap >= 0 && (alloc_all || (unsigned)overlap > data_size)) {
        /* Not enough space for required allocations */
        mutex_unlock(&llist_mod_mutex);
        mutex_unlock(&llist_mutex);
        return NULL;
    }

    /* There is enough space for the required data, advance the buf_widx and
     * initialize the struct */
    buf_widx = new_widx;

    struct memory_handle *new_handle =
        (struct memory_handle *)(&buffer[buf_widx]);

    /* only advance the buffer write index of the size of the struct */
    buf_widx = ringbuf_add(buf_widx, sizeof(struct memory_handle));

    new_handle->id = cur_handle_id;
    /* Wrapping the signed int is safe and 0 doesn't happen */
    cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK;
    new_handle->next = NULL;
    num_handles++;

    if (!first_handle)
        /* the new handle is the first one */
        first_handle = new_handle;

    if (cur_handle)
        cur_handle->next = new_handle;

    cur_handle = new_handle;

    mutex_unlock(&llist_mod_mutex);
    mutex_unlock(&llist_mutex);
    return new_handle;
}

/* Delete a given memory handle from the linked list
   and return true for success. Nothing is actually erased from memory. */
static bool rm_handle(const struct memory_handle *h)
{
    if (h == NULL)
        return true;

    mutex_lock(&llist_mutex);
    mutex_lock(&llist_mod_mutex);

    if (h == first_handle) {
        first_handle = h->next;
        if (h == cur_handle) {
            /* h was the first and last handle: the buffer is now empty */
            cur_handle = NULL;
            buf_ridx = buf_widx = 0;
        } else {
            /* update buf_ridx to point to the new first handle */
            buf_ridx = (size_t)ringbuf_offset(first_handle);
        }
    } else {
        struct memory_handle *m = first_handle;
        /* Find the previous handle */
        while (m && m->next != h) {
            m = m->next;
        }
        if (m && m->next == h) {
            m->next = h->next;
            if (h == cur_handle) {
                cur_handle = m;
                buf_widx = cur_handle->widx;
            }
        } else {
            mutex_unlock(&llist_mod_mutex);
            mutex_unlock(&llist_mutex);
            return false;
        }
    }

    /* Invalidate the cache to prevent it from keeping the old location of h */
    if (h == cached_handle)
        cached_handle = NULL;

    num_handles--;

    mutex_unlock(&llist_mod_mutex);
    mutex_unlock(&llist_mutex);
    return true;
}

/* Return a pointer to the memory handle of given ID.
   NULL if the handle wasn't found */
static struct memory_handle *find_handle(int handle_id)
{
    if (handle_id < 0)
        return NULL;

    mutex_lock(&llist_mutex);

    /* simple caching because most of the time the requested handle
       will either be the same as the last, or the one after the last */
    if (cached_handle)
    {
        if (cached_handle->id == handle_id) {
            mutex_unlock(&llist_mutex);
            return cached_handle;
        } else if (cached_handle->next &&
                   (cached_handle->next->id == handle_id)) {
            cached_handle = cached_handle->next;
            mutex_unlock(&llist_mutex);
            return cached_handle;
        }
    }

    struct memory_handle *m = first_handle;
    while (m && m->id != handle_id) {
        m = m->next;
    }
    /* This condition can only be reached with !m or m->id == handle_id */
    if (m)
        cached_handle = m;

    mutex_unlock(&llist_mutex);
    return m;
}

/* Move a memory handle and data_size of its data delta bytes along the buffer.
   delta maximum bytes available to move the handle.  If the move is performed
         it is set to the actual distance moved.
   data_size is the amount of data to move along with the struct.
   returns true if the move is successful and false if the handle is NULL,
           the move would be less than the size of a memory_handle after
           correcting for wraps, or the handle is not found in the linked
           list for adjustment.  This function has no side effects if false
           is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size, bool can_wrap)
{
    struct memory_handle *dest;
    const struct memory_handle *src;
    int32_t *here;
    int32_t *there;
    int32_t *end;
    int32_t *begin;
    size_t final_delta = *delta, size_to_move, n;
    uintptr_t oldpos, newpos;
    intptr_t overlap, overlap_old;

    if (h == NULL || (src = *h) == NULL)
        return false;

    size_to_move = sizeof(struct memory_handle) + data_size;

    /* Align to four bytes, down */
    final_delta &= ~3;
    if (final_delta < sizeof(struct memory_handle)) {
        /* It's not legal to move less than the size of the struct */
        return false;
    }

    mutex_lock(&llist_mutex);
    mutex_lock(&llist_mod_mutex);

    oldpos = ringbuf_offset(src);
    newpos = ringbuf_add(oldpos, final_delta);
    overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len - 1);
    overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len - 1);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        size_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if (!can_wrap) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out.  This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
        } else if ((uintptr_t)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta, I think */
            correction = overlap - data_size;
        }
        if (correction) {
            /* Align correction to four bytes up */
            correction = (correction + 3) & ~3;
            if (final_delta < correction + sizeof(struct memory_handle)) {
                /* Delta cannot end up less than the size of the struct */
                mutex_unlock(&llist_mod_mutex);
                mutex_unlock(&llist_mutex);
                return false;
            }
            newpos -= correction;
            overlap -= correction; /* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    dest = (struct memory_handle *)(&buffer[newpos]);

    if (src == first_handle) {
        first_handle = dest;
        buf_ridx = newpos;
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != src) {
            m = m->next;
        }
        if (m && m->next == src) {
            m->next = dest;
        } else {
            mutex_unlock(&llist_mod_mutex);
            mutex_unlock(&llist_mutex);
            return false;
        }
    }

    /* Update the cache to prevent it from keeping the old location of h */
    if (src == cached_handle)
        cached_handle = dest;

    /* the cur_handle pointer might need updating */
    if (src == cur_handle)
        cur_handle = dest;

    /* Copying routine takes into account that the handles have a
     * distance between each other which is a multiple of four.  Faster 2 word
     * copy may be ok but do this for safety and because wrapped copies should
     * be fairly uncommon */

    here  = (int32_t *)((ringbuf_add(oldpos, size_to_move - 1) & ~3) + (intptr_t)buffer);
    there = (int32_t *)((ringbuf_add(newpos, size_to_move - 1) & ~3) + (intptr_t)buffer);
    end   = (int32_t *)((intptr_t)buffer + buffer_len - 4);
    begin = (int32_t *)buffer;

    n = (size_to_move & ~3)/4;

    if (overlap_old > 0 || overlap > 0) {
        /* Old or moved handle wraps */
        while (n--) {
            if (here < begin)
                here = end;
            if (there < begin)
                there = end;
            *there-- = *here--;
        }
    } else {
        /* both handles do not wrap */
        memmove(dest, src, size_to_move);
    }

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    mutex_unlock(&llist_mod_mutex);
    mutex_unlock(&llist_mutex);
    return true;
}


/*
BUFFER SPACE MANAGEMENT
=======================

update_data_counters: Updates the values in data_counters
buffer_is_low   : Returns true if the amount of useful data in the buffer is low
buffer_handle   : Buffer data for a handle
reset_handle    : Reset write position and data buffer of a handle to its offset
rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
shrink_handle   : Free buffer space by moving a handle
fill_buffer     : Call buffer_handle for all handles that have data to buffer

These functions are used by the buffering thread to manage buffer space.
*/

static void update_data_counters(void)
{
    struct memory_handle *m = find_handle(base_handle_id);
    bool is_useful = m == NULL;

    size_t buffered = 0;
    size_t wasted = 0;
    size_t remaining = 0;
    size_t useful = 0;

    mutex_lock(&llist_mutex);

    m = first_handle;
    while (m) {
        buffered += m->available;
        wasted += ringbuf_sub(m->ridx, m->data);
        remaining += m->filerem;

        if (m->id == base_handle_id)
            is_useful = true;

        if (is_useful)
            useful += ringbuf_sub(m->widx, m->ridx);

        m = m->next;
    }

    mutex_unlock(&llist_mutex);

    data_counters.buffered = buffered;
    data_counters.wasted = wasted;
    data_counters.remaining = remaining;
    data_counters.useful = useful;
}

static inline bool buffer_is_low(void)
{
    update_data_counters();
    return data_counters.useful < (conf_watermark / 2);
}

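/* Example (assuming conf_watermark has been set to
   BUFFERING_DEFAULT_WATERMARK, i.e. 128 KiB): buffer_is_low() reports true
   once less than 64 KiB of still-useful data remains in the buffer. */
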
/* Buffer data for the given handle.
   Return whether or not the buffering should continue explicitly. */
static bool buffer_handle(int handle_id)
{
    logf("buffer_handle(%d)", handle_id);
    struct memory_handle *h = find_handle(handle_id);
    bool stop = false;

    if (!h)
        return true;

    if (h->filerem == 0) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0) /* file closed, reopen */
    {
        if (*h->path)
            h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0)
        {
            /* could not open the file, truncate it where it is */
            h->filesize -= h->filerem;
            h->filerem = 0;
            return true;
        }

        if (h->offset)
            lseek(h->fd, h->offset, SEEK_SET);
    }

    trigger_cpu_boost();

    if (h->type == TYPE_ID3)
    {
        if (!get_metadata((struct mp3entry *)(buffer + h->data), h->fd, h->path))
        {
            /* metadata parsing failed: clear the buffer. */
            memset(buffer + h->data, 0, sizeof(struct mp3entry));
        }
        close(h->fd);
        h->fd = -1;
        h->filerem = 0;
        h->available = sizeof(struct mp3entry);
        h->widx += sizeof(struct mp3entry);
        send_event(BUFFER_EVENT_FINISHED, &h->id);
        return true;
    }

    while (h->filerem > 0 && !stop)
    {
        /* max amount to copy */
        size_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
                             buffer_len - h->widx);

        ssize_t overlap;
        uintptr_t next_handle = ringbuf_offset(h->next);

        /* stop copying if it would overwrite the reading position */
        if (ringbuf_add_cross(h->widx, copy_n, buf_ridx) >= 0)
            return false;

        /* FIXME: This would overwrite the next handle.
         * If this is true, then there's a next handle even though we still
         * have data to buffer. This should NEVER EVER happen! (but it does :( ) */
        if (h->next && (overlap
                = ringbuf_add_cross(h->widx, copy_n, next_handle)) > 0)
        {
            /* stop buffering data for now and post-pone buffering the rest */
            stop = true;
            DEBUGF("%s(): Preventing handle corruption: h1.id:%d h2.id:%d"
                   " copy_n:%lu overlap:%ld h1.filerem:%lu\n", __func__,
                   h->id, h->next->id, (unsigned long)copy_n, (long)overlap,
                   (unsigned long)h->filerem);
            copy_n -= overlap;
        }

        /* rc is the actual amount read */
        int rc = read(h->fd, &buffer[h->widx], copy_n);

        if (rc < 0)
        {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
            h->filesize -= h->filerem;
            h->filerem = 0;
            break;
        }

        /* Advance buffer */
        h->widx = ringbuf_add(h->widx, rc);
        if (h == cur_handle)
            buf_widx = h->widx;
        h->available += rc;
        h->filerem -= rc;

        /* If this is a large file, see if we need to break or give the codec
         * more time */
        if (h->type == TYPE_PACKET_AUDIO &&
            pcmbuf_is_lowdata() && !buffer_is_low())
        {
            sleep(1);
        }
        else
        {
            yield();
        }

        if (!queue_empty(&buffering_queue))
            break;
    }

    if (h->filerem == 0) {
        /* finished buffering the file */
        close(h->fd);
        h->fd = -1;
        send_event(BUFFER_EVENT_FINISHED, &h->id);
    }

    return !stop;
}

/* Reset writing position and data buffer of a handle to its current offset.
   Use this after having set the new offset to use. */
static void reset_handle(int handle_id)
{
    size_t alignment_pad;

    logf("reset_handle(%d)", handle_id);

    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return;

    /* Align to desired storage alignment */
    alignment_pad = STORAGE_OVERLAP(h->offset - (size_t)(&buffer[h->start]));
    h->ridx = h->widx = h->data = ringbuf_add(h->start, alignment_pad);

    if (h == cur_handle)
        buf_widx = h->widx;
    h->available = 0;
    h->filerem = h->filesize - h->offset;

    if (h->fd >= 0) {
        lseek(h->fd, h->offset, SEEK_SET);
    }
}

/* Seek to a nonbuffered part of a handle by rebuffering the data. */
static void rebuffer_handle(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return;

    /* When seeking forward off of the buffer, if it is a short seek don't
       rebuffer the whole track, just read enough to satisfy */
    if (newpos > h->offset && newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK)
    {
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
        h->ridx = h->data + newpos;
        return;
    }

    h->offset = newpos;

    /* Reset the handle to its new offset */
    LOGFQUEUE("buffering >| Q_RESET_HANDLE %d", handle_id);
    queue_send(&buffering_queue, Q_RESET_HANDLE, handle_id);

    uintptr_t next = ringbuf_offset(h->next);
    if (ringbuf_sub(next, h->data) < h->filesize - newpos)
    {
        /* There isn't enough space to rebuffer all of the track from its new
           offset, so we ask the user to free some */
        DEBUGF("%s(): space is needed\n", __func__);
        send_event(BUFFER_EVENT_REBUFFER, &handle_id);
    }

    /* Now we ask for a rebuffer */
    LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
    queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
}

static bool close_handle(int handle_id)
{
    struct memory_handle *h = find_handle(handle_id);

    /* If the handle is not found, it is closed */
    if (!h)
        return true;

    if (h->fd >= 0) {
        close(h->fd);
        h->fd = -1;
    }

    /* rm_handle returns true unless the handle somehow persists after exit */
    return rm_handle(h);
}

/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data. */
static void shrink_handle(struct memory_handle *h)
{
    size_t delta;

    if (!h)
        return;

    if (h->next && h->filerem == 0 &&
        (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
         h->type == TYPE_BITMAP || h->type == TYPE_CODEC ||
         h->type == TYPE_ATOMIC_AUDIO))
    {
        /* metadata handle: we can move all of it */
        uintptr_t handle_distance =
            ringbuf_sub(ringbuf_offset(h->next), h->data);
        delta = handle_distance - h->available;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, h->available, h->type == TYPE_CODEC))
            return;

        size_t olddata = h->data;
        h->data = ringbuf_add(h->data, delta);
        h->ridx = ringbuf_add(h->ridx, delta);
        h->widx = ringbuf_add(h->widx, delta);

        if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
            /* when moving an mp3entry we need to readjust its pointers. */
            adjust_mp3entry((struct mp3entry *)&buffer[h->data],
                            (void *)&buffer[h->data],
                            (const void *)&buffer[olddata]);
        } else if (h->type == TYPE_BITMAP) {
            /* adjust the bitmap's pointer */
            struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
            bmp->data = &buffer[h->data + sizeof(struct bitmap)];
        }
    }
    else
    {
        /* only move the handle struct */
        delta = ringbuf_sub(h->ridx, h->data);
        if (!move_handle(&h, &delta, 0, true))
            return;

        h->data = ringbuf_add(h->data, delta);
        h->start = ringbuf_add(h->start, delta);
        h->available -= delta;
        h->offset += delta;
    }
}

/* Fill the buffer by buffering as much data as possible for handles that
   still have data left to buffer.
   Return whether or not to continue filling after this. */
static bool fill_buffer(void)
{
    logf("fill_buffer()");
    struct memory_handle *m;
    shrink_handle(first_handle);
    m = first_handle;
    while (queue_empty(&buffering_queue) && m) {
        if (m->filerem > 0) {
            if (!buffer_handle(m->id)) {
                m = NULL;
                break;
            }
        }
        m = m->next;
    }

    if (m) {
        return true;
    }
    else
    {
        /* only spin the disk down if the filling wasn't interrupted by an
           event arriving in the queue. */
        storage_sleep();
        return false;
    }
}

#ifdef HAVE_ALBUMART
/* Given a file descriptor to a bitmap file, write the bitmap data to the
   buffer, with a struct bitmap and the actual data immediately following.
   Return value is the total size (struct + data). */
static int load_image(int fd, const char *path, struct dim *dim)
{
    int rc;
    struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx];

    /* get the desired image size */
    bmp->width = dim->width, bmp->height = dim->height;
    /* FIXME: alignment may be needed for the data buffer. */
    bmp->data = &buffer[buf_widx + sizeof(struct bitmap)];
#ifndef HAVE_JPEG
    (void) path;
#endif
#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
    bmp->maskdata = NULL;
#endif

    int free = (int)MIN(buffer_len - BUF_USED, buffer_len - buf_widx)
               - sizeof(struct bitmap);

#ifdef HAVE_JPEG
    int pathlen = strlen(path);
    if (strcmp(path + pathlen - 4, ".bmp"))
        rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    else
#endif
        rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                         FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
}
#endif


/*
MAIN BUFFERING API CALLS
========================

bufopen     : Request the opening of a new handle for a file
bufalloc    : Open a new handle for data other than a file.
bufclose    : Close an open handle
bufseek     : Set the read pointer in a handle
bufadvance  : Move the read pointer in a handle
bufread     : Copy data from a handle into a given buffer
bufgetdata  : Give a pointer to the handle's data

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/
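
/* Illustrative usage sketch of this API (the calling code and path are
   hypothetical, and error handling is omitted):

       int id = bufopen("/music/track.mp3", 0, TYPE_PACKET_AUDIO, NULL);
       if (id >= 0) {
           bufadvance(id, 4096);   // skip the first 4 KiB of the file
           // ... read the data via bufread()/bufgetdata() ...
           bufclose(id);
       }

   bufread() and bufgetdata() share the helper prep_bufdata() defined further
   below. */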


/* Reserve space in the buffer for a file.
   filename: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           offset bytes of the file aren't needed.
   type: one of the supported data types (audio, image, cuesheet, others)
   user_data: user data possibly passed in subcalls specific to a
              data_type (only used for image (albumart) buffering so far)
   return value: <0 if the file cannot be opened or a file is already
                 queued to be opened; otherwise the handle for the file in
                 the buffer
*/
int bufopen(const char *file, size_t offset, enum data_type type,
            void *user_data)
{
#ifndef HAVE_ALBUMART
    /* currently only used for aa loading */
    (void)user_data;
#endif
    if (type == TYPE_ID3)
    {
        /* ID3 case: allocate space, init the handle and return. */

        struct memory_handle *h = add_handle(sizeof(struct mp3entry), false, true);
        if (!h)
            return ERR_BUFFER_FULL;

        h->fd = -1;
        h->filesize = sizeof(struct mp3entry);
        h->filerem = sizeof(struct mp3entry);
        h->offset = 0;
        h->data = buf_widx;
        h->ridx = buf_widx;
        h->widx = buf_widx;
        h->available = 0;
        h->type = type;
        strlcpy(h->path, file, MAX_PATH);

        buf_widx += sizeof(struct mp3entry); /* safe because the handle
                                                can't wrap */

        /* Inform the buffering thread that we added a handle */
        LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
        queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);

        return h->id;
    }

    /* Other cases: there is a little more work. */
    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = filesize(fd);
    bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC;

    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    /* Reserve extra space because alignment can move data forward */
    size_t padded_size = STORAGE_PAD(size-adjusted_offset);
    struct memory_handle *h = add_handle(padded_size, can_wrap, false);
    if (!h)
    {
        DEBUGF("%s(): failed to add handle\n", __func__);
        close(fd);
        return ERR_BUFFER_FULL;
    }

    strlcpy(h->path, file, MAX_PATH);
    h->offset = adjusted_offset;

    /* Don't bother to storage align bitmaps because they are not
     * loaded directly into the buffer.
     */
    if (type != TYPE_BITMAP)
    {
        size_t alignment_pad;

        /* Remember where data area starts, for use by reset_handle */
        h->start = buf_widx;

        /* Align to desired storage alignment */
        alignment_pad = STORAGE_OVERLAP(adjusted_offset - (size_t)(&buffer[buf_widx]));
        buf_widx = ringbuf_add(buf_widx, alignment_pad);
    }

    h->ridx = buf_widx;
    h->widx = buf_widx;
    h->data = buf_widx;
    h->available = 0;
    h->filerem = 0;
    h->type = type;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP)
    {
        /* Bitmap file: we load the data instead of the file */
        int rc;
        mutex_lock(&llist_mod_mutex); /* Lock because load_image yields */
        rc = load_image(fd, file, (struct dim*)user_data);
        mutex_unlock(&llist_mod_mutex);
        if (rc <= 0)
        {
            rm_handle(h);
            close(fd);
            return ERR_FILE_ERROR;
        }
        h->filerem = 0;
        h->filesize = rc;
        h->available = rc;
        h->widx = buf_widx + rc; /* safe because the data doesn't wrap */
        buf_widx += rc; /* safe too */
    }
    else
#endif
    {
        h->filerem = size - adjusted_offset;
        h->filesize = size;
        h->available = 0;
        h->widx = buf_widx;
    }

    if (type == TYPE_CUESHEET) {
        h->fd = fd;
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", h->id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        h->fd = -1;
        close(fd);

        /* Inform the buffering thread that we added a handle */
        LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
        queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);
    }

    logf("bufopen: new hdl %d", h->id);
    return h->id;
}

/* Open a new handle from data that needs to be copied from memory.
   src is the source buffer from which to copy data. It can be NULL to simply
   reserve buffer space.
   size is the requested size. The call will only be successful if the
   requested amount of data can entirely fit in the buffer without wrapping.
   Return value is the handle id for success or <0 for failure.
*/
int bufalloc(const void *src, size_t size, enum data_type type)
{
    struct memory_handle *h = add_handle(size, false, true);

    if (!h)
        return ERR_BUFFER_FULL;

    if (src) {
        if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
            /* specially take care of struct mp3entry */
            copy_mp3entry((struct mp3entry *)&buffer[buf_widx],
                          (const struct mp3entry *)src);
        } else {
            memcpy(&buffer[buf_widx], src, size);
        }
    }

    h->fd = -1;
    *h->path = 0;
    h->filesize = size;
    h->filerem = 0;
    h->offset = 0;
    h->ridx = buf_widx;
    h->widx = buf_widx + size; /* this is safe because the data doesn't wrap */
    h->data = buf_widx;
    h->available = size;
    h->type = type;

    buf_widx += size; /* safe too */

    logf("bufalloc: new hdl %d", h->id);
    return h->id;
}
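
/* Illustrative use of bufalloc() (the caller is hypothetical): copying an
   id3 tag into the buffer so it outlives its original storage:

       struct mp3entry id3;   // filled in elsewhere
       int id = bufalloc(&id3, sizeof(struct mp3entry), TYPE_ID3);
       if (id < 0)
           ;                  // ERR_BUFFER_FULL: no contiguous space left

   With TYPE_ID3 and exactly sizeof(struct mp3entry), the copy goes through
   copy_mp3entry() so the entry's internal pointers stay valid. */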
1143
1144/* Close the handle. Return true for success and false for failure */
Steve Bavin135cc752008-03-28 12:51:33 +00001145bool bufclose(int handle_id)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001146{
1147 logf("bufclose(%d)", handle_id);
1148
Nicolas Pennequind08131a2007-10-27 01:25:47 +00001149 LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001150 return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
1151}
1152
/* Set the reading index in a handle (relative to the start of the file).
   Access outside the currently buffered data will trigger a rebuffer.
   Return 0 for success and < 0 for failure:
     ERR_HANDLE_NOT_FOUND if the handle wasn't found
     ERR_INVALID_VALUE if the requested position is beyond the end of the file
*/
int bufseek(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (newpos > h->filesize) {
        /* access beyond the end of the file */
        return ERR_INVALID_VALUE;
    }
    else if (newpos < h->offset || h->offset + h->available < newpos) {
        /* access before or after buffered data. A rebuffer is needed. */
        rebuffer_handle(handle_id, newpos);
    }
    else {
        h->ridx = ringbuf_add(h->data, newpos - h->offset);
    }
    return 0;
}

/* Advance the reading index in a handle (relative to its current position).
   Return 0 for success and < 0 for failure */
int bufadvance(int handle_id, off_t offset)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    size_t newpos = h->offset + ringbuf_sub(h->ridx, h->data) + offset;
    return bufseek(handle_id, newpos);
}

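/* Usage sketch (illustrative only): the two calls express the same movement
 * of the read index, one in absolute file positions and one relative to the
 * current position.  "h_id" is a hypothetical handle id.
 *
 *     bufseek(h_id, 1024);       jump to absolute file offset 1024
 *     bufadvance(h_id, 512);     now at offset 1536
 *     bufadvance(h_id, -256);    back to offset 1280
 *
 * Seeking outside the currently buffered range triggers a rebuffer, so both
 * calls can block while the buffering thread refills the handle.
 */
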
/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
 * actual amount of data available for reading. This function explicitly
 * does not check the validity of the input handle. It does do range checks
 * on size and returns a valid (and explicit) amount of data for reading. */
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
                                          bool guardbuf_limit)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return NULL;

    size_t avail = ringbuf_sub(h->widx, h->ridx);

    if (avail == 0 && h->filerem == 0)
    {
        /* File is finished reading */
        *size = 0;
        return h;
    }

    if (*size == 0 || *size > avail + h->filerem)
        *size = avail + h->filerem;

    if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO && *size > GUARD_BUFSIZE)
    {
        logf("data request > guardbuf");
        /* If more than the size of the guardbuf is requested and this is a
         * bufgetdata, limit to GUARD_BUFSIZE over the end of the buffer */
        *size = MIN(*size, buffer_len - h->ridx + GUARD_BUFSIZE);
        /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
    }

    if (h->filerem > 0 && avail < *size)
    {
        /* Data isn't ready. Request buffering */
        buf_request_buffer_handle(handle_id);
        /* Wait for the data to be ready */
        do
        {
            sleep(1);
            /* it is not safe for a non-buffering thread to sleep while
             * holding a handle */
            h = find_handle(handle_id);
            if (!h)
                return NULL;
            avail = ringbuf_sub(h->widx, h->ridx);
        }
        while (h->filerem > 0 && avail < *size);
    }

    *size = MIN(*size, avail);
    return h;
}

/* Copy data from the given handle to the dest buffer.
   Return the number of bytes copied or < 0 for failure (handle not found).
   The caller is blocked until the requested amount of data is available.
*/
ssize_t bufread(int handle_id, size_t size, void *dest)
{
    const struct memory_handle *h;
    size_t adjusted_size = size;

    h = prep_bufdata(handle_id, &adjusted_size, false);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + adjusted_size > buffer_len)
    {
        /* the data wraps around the end of the buffer */
        size_t read = buffer_len - h->ridx;
        memcpy(dest, &buffer[h->ridx], read);
        memcpy(dest + read, buffer, adjusted_size - read);
    }
    else
    {
        memcpy(dest, &buffer[h->ridx], adjusted_size);
    }

    return adjusted_size;
}

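/* Usage sketch (illustrative only): draining a handle into a caller-owned
 * buffer in fixed-size chunks.  "h_id" and process() are hypothetical; a
 * real caller would also distinguish error codes (< 0) from end of data (0).
 *
 *     unsigned char chunk[1024];
 *     ssize_t got;
 *     while ((got = bufread(h_id, sizeof(chunk), chunk)) > 0)
 *         process(chunk, got);
 *
 * Each call blocks until the requested amount is buffered or the file has
 * been read completely, and returns the number of bytes actually copied.
 */
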
/* Update the "data" pointer to make the handle's data available to the caller.
   Return the length of the available linear data or < 0 for failure (handle
   not found).
   The caller is blocked until the requested amount of data is available.
   size is the amount of linear data requested. It can be 0 to get as
   much as possible.
   The guard buffer may be used to provide the requested size. This means it's
   unsafe to request more than the size of the guard buffer.
*/
ssize_t bufgetdata(int handle_id, size_t size, void **data)
{
    const struct memory_handle *h;
    size_t adjusted_size = size;

    h = prep_bufdata(handle_id, &adjusted_size, true);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + adjusted_size > buffer_len)
    {
        /* the data wraps around the end of the buffer:
           use the guard buffer to provide the requested amount of data. */
        size_t copy_n = h->ridx + adjusted_size - buffer_len;
        /* prep_bufdata ensures adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
           so copy_n <= GUARD_BUFSIZE */
        memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
    }

    if (data)
        *data = &buffer[h->ridx];

    return adjusted_size;
}

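/* Usage sketch (illustrative only): the zero-copy pattern for readers that
 * can consume data in place.  "h_id" and consume() are hypothetical; as
 * noted above, no more than GUARD_BUFSIZE of linear data should be requested.
 *
 *     void *p;
 *     ssize_t len = bufgetdata(h_id, 0, &p);   0 = as much as possible
 *     if (len > 0) {
 *         consume(p, len);                     read directly from the buffer
 *         bufadvance(h_id, len);               then move the read index
 *     }
 */
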
ssize_t bufgettail(int handle_id, size_t size, void **data)
{
    size_t tidx;

    const struct memory_handle *h;

    h = find_handle(handle_id);

    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->filerem)
        return ERR_HANDLE_NOT_DONE;

    /* We don't support tail requests of > GUARD_BUFSIZE, for simplicity */
    if (size > GUARD_BUFSIZE)
        return ERR_INVALID_VALUE;

    tidx = ringbuf_sub(h->widx, size);

    if (tidx + size > buffer_len)
    {
        size_t copy_n = tidx + size - buffer_len;
        memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
    }

    *data = &buffer[tidx];
    return size;
}

ssize_t bufcuttail(int handle_id, size_t size)
{
    struct memory_handle *h;
    size_t adjusted_size = size;

    h = find_handle(handle_id);

    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->filerem)
        return ERR_HANDLE_NOT_DONE;

    if (h->available < adjusted_size)
        adjusted_size = h->available;

    h->available -= adjusted_size;
    h->filesize -= adjusted_size;
    h->widx = ringbuf_sub(h->widx, adjusted_size);
    if (h == cur_handle)
        buf_widx = h->widx;

    return adjusted_size;
}


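/* Usage sketch (illustrative only): peeking at and then dropping unwanted
 * trailing bytes of a fully buffered handle.  Both calls fail with
 * ERR_HANDLE_NOT_DONE while filerem is non-zero.  "h_id", the 128-byte tail
 * and wanted() are hypothetical.
 *
 *     void *tail;
 *     if (bufgettail(h_id, 128, &tail) == 128 && !wanted(tail, 128))
 *         bufcuttail(h_id, 128);    shrinks available data and filesize
 */
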
/*
SECONDARY EXPORTED FUNCTIONS
============================

buf_get_offset
buf_handle_offset
buf_request_buffer_handle
buf_set_base_handle
buf_used
buf_set_watermark

These functions are exported to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/

/* Get a handle offset from a pointer */
ssize_t buf_get_offset(int handle_id, void *ptr)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    return (size_t)ptr - (size_t)&buffer[h->ridx];
}

ssize_t buf_handle_offset(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;
    return h->offset;
}

void buf_request_buffer_handle(int handle_id)
{
    LOGFQUEUE("buffering >| Q_START_FILL %d", handle_id);
    queue_send(&buffering_queue, Q_START_FILL, handle_id);
}

void buf_set_base_handle(int handle_id)
{
    LOGFQUEUE("buffering > Q_BASE_HANDLE %d", handle_id);
    queue_post(&buffering_queue, Q_BASE_HANDLE, handle_id);
}

/* Return the amount of buffer space used */
size_t buf_used(void)
{
    return BUF_USED;
}

void buf_set_watermark(size_t bytes)
{
    conf_watermark = bytes;
}

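/* Usage sketch (illustrative only): a client tuning the refill threshold.
 * The watermark is compared against the useful (not yet consumed) bytes in
 * the buffering thread below, so it is expressed in bytes.  The five-second
 * figure, "bitrate" and "bytes_per_sec" are hypothetical.
 *
 *     size_t bytes_per_sec = bitrate / 8;
 *     buf_set_watermark(5 * bytes_per_sec);   ask for a refill when roughly
 *                                             five seconds remain buffered
 */
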
static void shrink_buffer_inner(struct memory_handle *h)
{
    if (h == NULL)
        return;

    shrink_buffer_inner(h->next);

    shrink_handle(h);
}

static void shrink_buffer(void)
{
    logf("shrink_buffer()");
    shrink_buffer_inner(first_handle);
}

void buffering_thread(void)
{
    bool filling = false;
    struct queue_event ev;

    while (true)
    {
        if (!filling) {
            cancel_cpu_boost();
        }

        queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);

        switch (ev.id)
        {
            case Q_START_FILL:
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                /* Call buffer callbacks here because this is one of two ways
                 * to begin a full buffer fill */
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                filling |= buffer_handle((int)ev.data);
                break;

            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data);
                break;

            case Q_RESET_HANDLE:
                LOGFQUEUE("buffering < Q_RESET_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                reset_handle((int)ev.data);
                break;

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case Q_BASE_HANDLE:
                LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
                base_handle_id = (int)ev.data;
                break;

#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
            case SYS_USB_CONNECTED:
                LOGFQUEUE("buffering < SYS_USB_CONNECTED");
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                usb_wait_for_disconnect(&buffering_queue);
                break;
#endif

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        update_data_counters();

        /* If the buffer is low, call the callbacks to get new data */
        if (num_handles > 0 && data_counters.useful <= conf_watermark)
            send_event(BUFFER_EVENT_BUFFER_LOW, 0);

#if 0
        /* TODO: This needs to be fixed to use the idle callback; disable it
         * for simplicity until it's done right */
#if MEM > 8
        /* If the disk is spinning, take advantage by filling the buffer */
        else if (storage_disk_is_active() && queue_empty(&buffering_queue))
        {
            if (num_handles > 0 && data_counters.useful <= high_watermark)
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);

            if (data_counters.remaining > 0 && BUF_USED <= high_watermark)
            {
                /* This is a new fill, so shrink the buffer first */
                if (!filling)
                    shrink_buffer();
                filling = fill_buffer();
                update_data_counters();
            }
        }
#endif
#endif

        if (queue_empty(&buffering_queue)) {
            if (filling) {
                if (data_counters.remaining > 0 && BUF_USED < buffer_len)
                    filling = fill_buffer();
                else if (data_counters.remaining == 0)
                    filling = false;
            }
            else if (ev.id == SYS_TIMEOUT)
            {
                if (data_counters.remaining > 0 &&
                    data_counters.useful <= conf_watermark) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}

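/* Sketch of a client reacting to the low-buffer notification sent above
 * (illustrative only).  It assumes the add_event() registration helper used
 * with the appevents ids; the handler name and its body are hypothetical.
 *
 *     static void buffer_low_handler(void *data)
 *     {
 *         (void)data;
 *         queue more data here, e.g. bufopen() the next tracks
 *     }
 *
 *     add_event(BUFFER_EVENT_BUFFER_LOW, false, buffer_low_handler);
 */
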
void buffering_init(void)
{
    mutex_init(&llist_mutex);
    mutex_init(&llist_mod_mutex);
#ifdef HAVE_PRIORITY_SCHEDULING
    /* This behavior is not safe at the moment */
    mutex_set_preempt(&llist_mutex, false);
    mutex_set_preempt(&llist_mod_mutex, false);
#endif

    conf_watermark = BUFFERING_DEFAULT_WATERMARK;

    queue_init(&buffering_queue, true);
    buffering_thread_id = create_thread(buffering_thread, buffering_stack,
            sizeof(buffering_stack), CREATE_THREAD_FROZEN,
            buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
            IF_COP(, CPU));

    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
                            buffering_thread_id);
}

/* Reset the buffering subsystem and hand it a new buffer to manage */
bool buffering_reset(char *buf, size_t buflen)
{
    if (!buf || !buflen)
        return false;

    buffer = buf;
    buffer_len = buflen;
    guard_buffer = buf + buflen;

    buf_widx = 0;
    buf_ridx = 0;

    first_handle = NULL;
    cur_handle = NULL;
    cached_handle = NULL;
    num_handles = 0;
    base_handle_id = -1;

    /* Set the high watermark as 75% full...or 25% empty :) */
#if MEM > 8
    high_watermark = 3*buflen / 4;
#endif

    thread_thaw(buffering_thread_id);

    return true;
}

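/* Start-up sketch (illustrative only): the expected call order is
 * buffering_init() once at boot, then buffering_reset() whenever the buffer
 * memory is (re)assigned.  "audiobuf" and "audiobuflen" are hypothetical
 * names for the memory handed to the subsystem.
 *
 *     buffering_init();                          creates the frozen thread
 *     buffering_reset(audiobuf, audiobuflen);    installs the buffer and
 *                                                thaws the thread
 */
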
void buffering_get_debugdata(struct buffering_debug *dbgdata)
{
    update_data_counters();
    dbgdata->num_handles = num_handles;
    dbgdata->data_rem = data_counters.remaining;
    dbgdata->wasted_space = data_counters.wasted;
    dbgdata->buffered_data = data_counters.buffered;
    dbgdata->useful_data = data_counters.useful;
    dbgdata->watermark = conf_watermark;
}
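
/* Debug-screen sketch (illustrative only): a caller fills a local struct and
 * reports whichever counters it cares about.  The formatting below is
 * hypothetical.
 *
 *     struct buffering_debug d;
 *     buffering_get_debugdata(&d);
 *     logf("handles: %d useful: %ld", d.num_handles, (long)d.useful_data);
 */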