blob: 9ffd35714c7a1b4a8be399b3b943eff6aeb74e5c [file] [log] [blame]
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2007 Nicolas Pennequin
11 *
Daniel Stenberg2acc0ac2008-06-28 18:10:04 +000012 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000016 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000021#include "config.h"
Michael Sevakisf9d60e12014-04-02 21:03:30 -040022#include <string.h>
Michael Sevakis8375b692014-04-03 18:49:16 -040023#include "strlcpy.h"
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000024#include "system.h"
Michael Sevakis36615812013-08-26 16:49:53 -040025#include "storage.h"
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000026#include "thread.h"
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000027#include "kernel.h"
Michael Sevakis36615812013-08-26 16:49:53 -040028#include "panic.h"
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000029#include "debug.h"
Michael Sevakis36615812013-08-26 16:49:53 -040030#include "file.h"
Jonathan Gordon71898e52008-10-16 10:38:03 +000031#include "appevents.h"
Nicolas Pennequin4e2de442008-04-14 16:17:47 +000032#include "metadata.h"
Michael Sevakis36615812013-08-26 16:49:53 -040033#include "bmp.h"
Andrew Mahone781421a2008-12-09 23:07:59 +000034#ifdef HAVE_ALBUMART
35#include "albumart.h"
Andrew Mahone54e6eb32009-05-01 23:31:43 +000036#include "jpeg_load.h"
Thomas Martitzf577a6a2011-02-09 20:13:13 +000037#include "playback.h"
Andrew Mahone781421a2008-12-09 23:07:59 +000038#endif
Michael Sevakis36615812013-08-26 16:49:53 -040039#include "buffering.h"
Michael Sevakis65c6a142017-04-13 18:53:17 -040040#include "linked_list.h"
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000041
42/* Define LOGF_ENABLE to enable logf output in this file */
Michael Sevakisc537d592011-04-27 03:08:23 +000043/* #define LOGF_ENABLE */
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000044#include "logf.h"
45
Michael Sevakisc1a01be2017-12-08 13:01:25 -050046#define BUF_MAX_HANDLES 384
47
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000048/* macros to enable logf for queues
49 logging on SYS_TIMEOUT can be disabled */
50#ifdef SIMULATOR
51/* Define this for logf output of all queuing except SYS_TIMEOUT */
52#define BUFFERING_LOGQUEUES
53/* Define this to logf SYS_TIMEOUT messages */
54/* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
55#endif
56
57#ifdef BUFFERING_LOGQUEUES
58#define LOGFQUEUE logf
59#else
60#define LOGFQUEUE(...)
61#endif
62
63#ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
64#define LOGFQUEUE_SYS_TIMEOUT logf
65#else
66#define LOGFQUEUE_SYS_TIMEOUT(...)
67#endif
68
Michael Sevakis36615812013-08-26 16:49:53 -040069#define GUARD_BUFSIZE (32*1024)
70
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000071/* amount of data to read in one read() call */
Michael Sevakis9120c852008-03-29 20:52:56 +000072#define BUFFERING_DEFAULT_FILECHUNK (1024*32)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000073
Brandon Low31c11642007-11-04 19:01:02 +000074#define BUF_HANDLE_MASK 0x7FFFFFFF
75
/* Per-handle property flags, stored in struct memory_handle::flags */
enum handle_flags
{
    H_CANWRAP   = 0x1, /* Handle data may wrap in buffer */
    H_ALLOCALL  = 0x2, /* All data must be allocated up front */
    H_FIXEDDATA = 0x4, /* Data is fixed in position */
};
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +000082
/* Header describing one buffered item. The struct lives inside the ring
   buffer itself, immediately followed by the path string (flexible array
   member) and then the handle's data area. The first two members must
   stay first/second: list nodes are adjusted by address when the whole
   struct is memmove'd (see move_handle/adjust_handle_node). */
struct memory_handle {
    struct lld_node hnode;  /* Handle list node (first!) */
    struct lld_node mrunode;/* MRU list node (second!) */
    size_t size;            /* Size of this structure + its auxiliary data */
    int id;                 /* A unique ID for the handle */
    enum data_type type;    /* Type of data buffered with this handle */
    uint8_t flags;          /* Handle property flags (enum handle_flags) */
    int8_t pinned;          /* Count of pinnings */
    int8_t signaled;        /* Stop any attempt at waiting to get the data */
    int fd;                 /* File descriptor to path (-1 if closed) */
    size_t data;            /* Start index of the handle's data buffer */
    size_t ridx;            /* Read pointer, relative to the main buffer */
    size_t widx;            /* Write pointer, relative to the main buffer */
    off_t filesize;         /* File total length (possibly trimmed at tail) */
    off_t start;            /* Offset at which we started reading the file */
    off_t pos;              /* Read position in file */
    off_t volatile end;     /* Offset at which we stopped reading the file */
    char path[];            /* Path if data originated in a file */
};
Michael Sevakis69382552011-02-14 08:36:29 +0000102
Michael Sevakiscd3ea082017-12-09 09:41:34 -0500103/* Minimum allowed handle movement */
104#define MIN_MOVE_DELTA sizeof(struct memory_handle)
105
/* Payload for queue messages that target a specific handle */
struct buf_message_data
{
    int handle_id;  /* ID of the handle the message refers to */
    intptr_t data;  /* Message-dependent argument */
};
111
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000112static char *buffer;
113static char *guard_buffer;
114
115static size_t buffer_len;
116
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000117/* Configuration */
118static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000119static size_t high_watermark = 0; /* High watermark for rebuffer */
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000120
Michael Sevakis65c6a142017-04-13 18:53:17 -0400121static struct lld_head handle_list; /* buffer-order handle list */
122static struct lld_head mru_cache; /* MRU-ordered list of handles */
123static int num_handles; /* number of handles in the lists */
Brandon Low9784f6b2007-11-03 22:06:56 +0000124static int base_handle_id;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000125
Michael Sevakis69382552011-02-14 08:36:29 +0000126/* Main lock for adding / removing handles */
127static struct mutex llist_mutex SHAREDBSS_ATTR;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000128
Michael Sevakis65c6a142017-04-13 18:53:17 -0400129#define HLIST_HANDLE(node) \
130 ({ struct lld_node *__node = (node); \
131 (struct memory_handle *)__node; })
132
133#define HLIST_FIRST \
134 HLIST_HANDLE(handle_list.head)
135
136#define HLIST_LAST \
137 HLIST_HANDLE(handle_list.tail)
138
Michael Sevakis8be40742017-12-09 21:57:01 -0500139#define HLIST_PREV(h) \
140 HLIST_HANDLE((h)->hnode.prev)
141
Michael Sevakis65c6a142017-04-13 18:53:17 -0400142#define HLIST_NEXT(h) \
143 HLIST_HANDLE((h)->hnode.next)
144
145#define MRU_HANDLE(m) \
146 container_of((m), struct memory_handle, mrunode)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000147
/* Global tallies of buffer usage, recomputed by update_data_counters() */
static struct data_counters
{
    size_t remaining; /* Amount of data needing to be buffered */
    size_t buffered;  /* Amount of data currently in the buffer */
    size_t useful;    /* Amount of data still useful to the user */
} data_counters;
154
155
/* Messages available to communicate with the buffering thread */
enum
{
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                            used in a low buffer situation. */
    Q_REBUFFER_HANDLE,   /* Request reset and rebuffering of a handle at a new
                            file starting position. */
    Q_CLOSE_HANDLE,      /* Request closing a handle */

    /* Configuration: */
    Q_START_FILL,        /* Request that the buffering thread initiate a buffer
                            fill at its earliest convenience */
    Q_HANDLE_ADDED,      /* Inform the buffering thread that a handle was added,
                            (which means the disk is spinning) */
};
171
172/* Buffering thread */
Steve Bavin73f98632008-03-26 08:57:25 +0000173static void buffering_thread(void);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000174static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
175static const char buffering_thread_name[] = "buffering";
Michael Sevakis8cfbd362008-12-10 08:57:10 +0000176static unsigned int buffering_thread_id = 0;
Michael Sevakisb15aa472011-02-14 11:27:45 +0000177static struct event_queue buffering_queue SHAREDBSS_ATTR;
178static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000179
/* Close the file descriptor pointed to by fd_p if it is open, and mark
   it closed by writing -1 back through the pointer. Safe to call on an
   already-closed (-1) descriptor. */
static void close_fd(int *fd_p)
{
    if (*fd_p >= 0) {
        close(*fd_p);
        *fd_p = -1;
    }
}
Nicolas Pennequine24454f2007-11-26 21:13:08 +0000188
Thomas Martitzb11c8192010-02-12 13:12:59 +0000189/* Ring buffer helper functions */
Michael Sevakis36615812013-08-26 16:49:53 -0400190static inline void * ringbuf_ptr(uintptr_t p)
191{
192 return buffer + p;
193}
Thomas Martitzabb3dd42010-02-20 15:13:53 +0000194
195static inline uintptr_t ringbuf_offset(const void *ptr)
196{
Michael Sevakis36615812013-08-26 16:49:53 -0400197 return (uintptr_t)(ptr - (void *)buffer);
Thomas Martitzabb3dd42010-02-20 15:13:53 +0000198}
199
Thomas Martitzb11c8192010-02-12 13:12:59 +0000200/* Buffer pointer (p) plus value (v), wrapped if necessary */
201static inline uintptr_t ringbuf_add(uintptr_t p, size_t v)
202{
203 uintptr_t res = p + v;
204 if (res >= buffer_len)
205 res -= buffer_len; /* wrap if necssary */
206 return res;
207}
208
Thomas Martitzb11c8192010-02-12 13:12:59 +0000209/* Buffer pointer (p) minus value (v), wrapped if necessary */
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400210/* Interprets p == v as empty */
211static inline uintptr_t ringbuf_sub_empty(uintptr_t p, size_t v)
Thomas Martitzb11c8192010-02-12 13:12:59 +0000212{
213 uintptr_t res = p;
214 if (p < v)
215 res += buffer_len; /* wrap */
Michael Sevakis89b05af2013-06-29 22:18:17 -0400216
Thomas Martitzb11c8192010-02-12 13:12:59 +0000217 return res - v;
218}
219
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400220/* Buffer pointer (p) minus value (v), wrapped if necessary */
221/* Interprets p == v as full */
222static inline uintptr_t ringbuf_sub_full(uintptr_t p, size_t v)
223{
224 uintptr_t res = p;
225 if (p <= v)
226 res += buffer_len; /* wrap */
227
228 return res - v;
229}
230
Thomas Martitzb11c8192010-02-12 13:12:59 +0000231/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400232/* Interprets p1 == p2 as empty */
233static inline ssize_t ringbuf_add_cross_empty(uintptr_t p1, size_t v,
234 uintptr_t p2)
Thomas Martitzb11c8192010-02-12 13:12:59 +0000235{
236 ssize_t res = p1 + v - p2;
237 if (p1 >= p2) /* wrap if necessary */
238 res -= buffer_len;
239
240 return res;
241}
242
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400243/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
244/* Interprets p1 == p2 as full */
245static inline ssize_t ringbuf_add_cross_full(uintptr_t p1, size_t v,
246 uintptr_t p2)
247{
248 ssize_t res = p1 + v - p2;
249 if (p1 > p2) /* wrap if necessary */
250 res -= buffer_len;
251
252 return res;
253}
254
Michael Sevakisc537d592011-04-27 03:08:23 +0000255/* Real buffer watermark */
256#define BUF_WATERMARK MIN(conf_watermark, high_watermark)
257
/* Total bytes occupied in the ring buffer: from the first (oldest)
   handle's struct to the last handle's write pointer. Returns 0 when no
   handles exist. Uses the "full" subtraction so that a zero distance
   with handles present reads as a completely full buffer. */
static size_t bytes_used(void)
{
    struct memory_handle *first = HLIST_FIRST;
    if (!first) {
        /* No handles: buffer is entirely free */
        return 0;
    }

    return ringbuf_sub_full(HLIST_LAST->widx, ringbuf_offset(first));
}
267
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000268/*
269LINKED LIST MANAGEMENT
270======================
271
Michael Sevakis65c6a142017-04-13 18:53:17 -0400272add_handle : Create a new handle
273link_handle : Add a handle to the list
274unlink_handle : Remove a handle from the list
275find_handle : Get a handle pointer from an ID
276move_handle : Move a handle in the buffer (with or without its data)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000277
278These functions only handle the linked list structure. They don't touch the
Michael Sevakis36615812013-08-26 16:49:53 -0400279contents of the struct memory_handle headers.
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000280
Michael Sevakis65c6a142017-04-13 18:53:17 -0400281Doubly-linked list, not circular.
282New handles are added at the tail.
Michael Sevakis36615812013-08-26 16:49:53 -0400283
284num_handles = N
Michael Sevakis65c6a142017-04-13 18:53:17 -0400285 NULL <- h0 <-> h1 <-> h2 -> ... <- hN-1 -> NULL
286head=> --------^ ^
287tail=> -----------------------------------+
288
289MRU cache is similar except new handles are added at the head and the most-
290recently-accessed handle is always moved to the head (if not already there).
291
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000292*/
293
Michael Sevakis36615812013-08-26 16:49:53 -0400294static int next_handle_id(void)
295{
296 static int cur_handle_id = 0;
297
298 /* Wrap signed int is safe and 0 doesn't happen */
299 int next_hid = (cur_handle_id + 1) & BUF_HANDLE_MASK;
300 if (next_hid == 0)
301 next_hid = 1;
302
303 cur_handle_id = next_hid;
304
305 return next_hid;
306}
307
/* Adds the handle to the linked lists: appended to the tail of the
   buffer-order list (new handles are allocated at the end of the buffer)
   and prepended to the MRU cache (a brand-new handle is by definition
   the most recently used). */
static void link_handle(struct memory_handle *h)
{
    lld_insert_last(&handle_list, &h->hnode);
    lld_insert_first(&mru_cache, &h->mrunode);
    num_handles++;
}
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000315
/* Delete a given memory handle from the linked lists (both the
   buffer-order list and the MRU cache) and update the handle count. */
static void unlink_handle(struct memory_handle *h)
{
    lld_remove(&handle_list, &h->hnode);
    lld_remove(&mru_cache, &h->mrunode);
    num_handles--;
}
323
324/* Adjusts handle list pointers _before_ it's actually moved */
325static void adjust_handle_node(struct lld_head *list,
326 struct lld_node *srcnode,
327 struct lld_node *destnode)
328{
329 if (srcnode->prev) {
330 srcnode->prev->next = destnode;
331 } else {
332 list->head = destnode;
333 }
334
335 if (srcnode->next) {
336 srcnode->next->prev = destnode;
337 } else {
338 list->tail = destnode;
339 }
340}
341
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000342/* Add a new handle to the linked list and return it. It will have become the
Brandon Low18c9aba2007-10-27 04:49:04 +0000343 new current handle.
Michael Sevakis36615812013-08-26 16:49:53 -0400344 flags contains information on how this may be allocated
Brandon Low18c9aba2007-10-27 04:49:04 +0000345 data_size must contain the size of what will be in the handle.
Michael Sevakis36615812013-08-26 16:49:53 -0400346 widx_out points to variable to receive first available byte of data area
Brandon Low94b133a2007-10-28 19:19:54 +0000347 returns a valid memory handle if all conditions for allocation are met.
348 NULL if there memory_handle itself cannot be allocated or if the
Michael Sevakisb474d0d2011-02-13 10:44:13 +0000349 data_size cannot be allocated and alloc_all is set. */
Michael Sevakis36615812013-08-26 16:49:53 -0400350static struct memory_handle *
Michael Sevakiscd3ea082017-12-09 09:41:34 -0500351add_handle(unsigned int flags, size_t data_size, const char *path,
352 size_t *data_out)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000353{
Michael Sevakis36615812013-08-26 16:49:53 -0400354 /* Gives each handle a unique id */
Brandon Lowa042c722007-11-03 02:54:34 +0000355 if (num_handles >= BUF_MAX_HANDLES)
356 return NULL;
357
Michael Sevakis36615812013-08-26 16:49:53 -0400358 size_t ridx = 0, widx = 0;
359 off_t cur_total = 0;
Michael Sevakisb474d0d2011-02-13 10:44:13 +0000360
Michael Sevakis65c6a142017-04-13 18:53:17 -0400361 struct memory_handle *first = HLIST_FIRST;
362 if (first) {
Michael Sevakis36615812013-08-26 16:49:53 -0400363 /* Buffer is not empty */
Michael Sevakis65c6a142017-04-13 18:53:17 -0400364 struct memory_handle *last = HLIST_LAST;
365 ridx = ringbuf_offset(first);
366 widx = last->data;
367 cur_total = last->filesize - last->start;
Michael Sevakis36615812013-08-26 16:49:53 -0400368 }
369
370 if (cur_total > 0) {
Nicolas Pennequin4ff2f9f2007-10-30 14:11:03 +0000371 /* the current handle hasn't finished buffering. We can only add
372 a new one if there is already enough free space to finish
373 the buffering. */
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400374 if (ringbuf_add_cross_full(widx, cur_total, ridx) > 0) {
Michael Sevakis0fde6352011-02-14 02:14:26 +0000375 /* Not enough space to finish allocation */
Nicolas Pennequin4ff2f9f2007-10-30 14:11:03 +0000376 return NULL;
377 } else {
Michael Sevakis36615812013-08-26 16:49:53 -0400378 /* Apply all the needed reserve */
379 widx = ringbuf_add(widx, cur_total);
Nicolas Pennequin4ff2f9f2007-10-30 14:11:03 +0000380 }
381 }
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000382
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400383 /* Align to align size up */
Michael Sevakiscd3ea082017-12-09 09:41:34 -0500384 size_t pathsize = path ? strlen(path) + 1 : 0;
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400385 size_t adjust = ALIGN_UP(widx, alignof(struct memory_handle)) - widx;
Michael Sevakis36615812013-08-26 16:49:53 -0400386 size_t index = ringbuf_add(widx, adjust);
Michael Sevakiscd3ea082017-12-09 09:41:34 -0500387 size_t handlesize = ALIGN_UP(sizeof(struct memory_handle) + pathsize,
388 alignof(struct memory_handle));
389 size_t len = handlesize + data_size;
Brandon Low18c9aba2007-10-27 04:49:04 +0000390
391 /* First, will the handle wrap? */
Brandon Low18c9aba2007-10-27 04:49:04 +0000392 /* If the handle would wrap, move to the beginning of the buffer,
Antonius Hellmann0055f132009-02-22 10:12:34 +0000393 * or if the data must not but would wrap, move it to the beginning */
Michael Sevakiscd3ea082017-12-09 09:41:34 -0500394 if (index + handlesize > buffer_len ||
Michael Sevakis36615812013-08-26 16:49:53 -0400395 (!(flags & H_CANWRAP) && index + len > buffer_len)) {
396 index = 0;
Brandon Low18c9aba2007-10-27 04:49:04 +0000397 }
398
Michael Sevakis36615812013-08-26 16:49:53 -0400399 /* How far we shifted index to align things, must be < buffer_len */
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400400 size_t shift = ringbuf_sub_empty(index, widx);
Nicolas Pennequin78072792007-10-28 15:54:10 +0000401
Brandon Low18c9aba2007-10-27 04:49:04 +0000402 /* How much space are we short in the actual ring buffer? */
Michael Sevakis65c6a142017-04-13 18:53:17 -0400403 ssize_t overlap = first ?
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400404 ringbuf_add_cross_full(widx, shift + len, ridx) :
405 ringbuf_add_cross_empty(widx, shift + len, ridx);
406
407 if (overlap > 0 &&
408 ((flags & H_ALLOCALL) || (size_t)overlap > data_size)) {
Brandon Low18c9aba2007-10-27 04:49:04 +0000409 /* Not enough space for required allocations */
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000410 return NULL;
411 }
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000412
Michael Sevakis36615812013-08-26 16:49:53 -0400413 /* There is enough space for the required data, initialize the struct */
414 struct memory_handle *h = ringbuf_ptr(index);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000415
Michael Sevakiscd3ea082017-12-09 09:41:34 -0500416 h->size = handlesize;
Michael Sevakis36615812013-08-26 16:49:53 -0400417 h->id = next_handle_id();
418 h->flags = flags;
419 h->pinned = 0; /* Can be moved */
420 h->signaled = 0; /* Data can be waited for */
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000421
Michael Sevakiscd3ea082017-12-09 09:41:34 -0500422 /* Save the provided path */
423 memcpy(h->path, path, pathsize);
424
Michael Sevakis36615812013-08-26 16:49:53 -0400425 /* Return the start of the data area */
Michael Sevakiscd3ea082017-12-09 09:41:34 -0500426 *data_out = ringbuf_add(index, handlesize);
Michael Sevakis69382552011-02-14 08:36:29 +0000427
Michael Sevakis36615812013-08-26 16:49:53 -0400428 return h;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000429}
430
/* Return a pointer to the memory handle of given ID.
   NULL if the handle wasn't found.
   A found handle is moved to the head of the MRU cache, so repeated
   lookups of the same ID terminate on the first node. */
static struct memory_handle * find_handle(int handle_id)
{
    struct memory_handle *h = NULL;
    struct lld_node *mru = mru_cache.head;
    struct lld_node *m = mru;

    /* Linear scan of the MRU-ordered cache */
    while (m && MRU_HANDLE(m)->id != handle_id) {
        m = m->next;
    }

    if (m) {
        if (m != mru) {
            /* Promote the found node to the front of the MRU cache */
            lld_remove(&mru_cache, m);
            lld_insert_first(&mru_cache, m);
        }

        h = MRU_HANDLE(m);
    }

    return h;
}
454
/* Move a memory handle and data_size of its data delta bytes along the buffer.
   delta maximum bytes available to move the handle. If the move is performed
          it is set to the actual distance moved.
   data_size is the amount of data to move along with the struct.
   returns true if the move is successful and false if the handle is NULL,
           the move would be less than the size of a memory_handle after
           correcting for wraps or if the handle is not found in the linked
           list for adjustment. This function has no side effects if false
           is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size)
{
    struct memory_handle *src;

    if (h == NULL || (src = *h) == NULL)
        return false;

    /* Bytes to relocate: struct + path area plus the requested data */
    size_t size_to_move = src->size + data_size;

    /* Align to align size down */
    size_t final_delta = *delta;
    final_delta = ALIGN_DOWN(final_delta, alignof(struct memory_handle));
    if (final_delta < MIN_MOVE_DELTA) {
        /* It's not legal to move less than MIN_MOVE_DELTA */
        return false;
    }

    uintptr_t oldpos = ringbuf_offset(src);
    uintptr_t newpos = ringbuf_add(oldpos, final_delta);
    /* overlap: bytes of the moved span that would spill past the buffer end
       at the new position; overlap_old: same for the current position */
    intptr_t overlap = ringbuf_add_cross_full(newpos, size_to_move, buffer_len);
    intptr_t overlap_old = ringbuf_add_cross_full(oldpos, size_to_move, buffer_len);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        ssize_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if (!(src->flags & H_CANWRAP)) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out. This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
        } else if ((uintptr_t)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta if the struct size is
             * aligned and the buffer is aligned */
            correction = overlap - data_size;
        }
        if (correction) {
            /* Align correction to align size up */
            correction = ALIGN_UP(correction, alignof(struct memory_handle));
            if (final_delta < correction + MIN_MOVE_DELTA) {
                /* Delta cannot end up less than MIN_MOVE_DELTA */
                return false;
            }
            newpos -= correction;
            overlap -= correction;/* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    struct memory_handle *dest = ringbuf_ptr(newpos);

    /* Adjust list pointers (repointed before the bytes actually move) */
    adjust_handle_node(&handle_list, &src->hnode, &dest->hnode);
    adjust_handle_node(&mru_cache, &src->mrunode, &dest->mrunode);

    /* x = handle(s) following this one...
     * ...if last handle, unmoveable if metadata, only shrinkable if audio.
     * In other words, no legal move can be made that would have the src head
     * and dest tail of the data overlap itself. These facts reduce the
     * problem to four essential permutations.
     *
     * movement: always "clockwise" >>>>
     *
     * (src nowrap, dest nowrap)
     * |0123 x |
     * | 0123x | etc...
     * move: "0123"
     *
     * (src nowrap, dest wrap)
     * | x0123 |
     * |23x 01|
     * move: "23", "01"
     *
     * (src wrap, dest nowrap)
     * |23 x01|
     * | 0123x |
     * move: "23", "01"
     *
     * (src wrap, dest wrap)
     * |23 x 01|
     * |123x 0|
     * move: "23", "1", "0"
     */
    if (overlap_old > 0) {
        /* Move over already wrapped data by the final delta */
        memmove(ringbuf_ptr(final_delta), ringbuf_ptr(0), overlap_old);
        if (overlap <= 0)
            size_to_move -= overlap_old;
    }

    if (overlap > 0) {
        /* Move data that now wraps to the beginning */
        size_to_move -= overlap;
        memmove(ringbuf_ptr(0), SKIPBYTES(src, size_to_move),
                overlap_old > 0 ? final_delta : (size_t)overlap);
    }

    /* Move leading fragment containing handle struct */
    memmove(dest, src, size_to_move);

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    return true;
}
571
572
573/*
574BUFFER SPACE MANAGEMENT
575=======================
576
Nicolas Pennequin0c7b26d2007-11-05 21:11:54 +0000577update_data_counters: Updates the values in data_counters
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000578buffer_handle : Buffer data for a handle
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000579rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
580shrink_handle : Free buffer space by moving a handle
581fill_buffer : Call buffer_handle for all handles that have data to buffer
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000582
583These functions are used by the buffering thread to manage buffer space.
584*/
Michael Sevakis36615812013-08-26 16:49:53 -0400585
/* Recompute the buffered/remaining/useful byte tallies over all handles.
   dc  destination for the results; NULL selects the global data_counters.
   Returns the number of handles that existed during the scan.
   "useful" counts only data at or after the base handle in buffer order;
   when the base handle is not found, all unconsumed buffered data counts. */
static int update_data_counters(struct data_counters *dc)
{
    size_t buffered = 0;
    size_t remaining = 0;
    size_t useful = 0;

    if (dc == NULL)
        dc = &data_counters;

    /* Hold the list lock so handles cannot be added/removed mid-scan */
    mutex_lock(&llist_mutex);

    int num = num_handles;
    struct memory_handle *m = find_handle(base_handle_id);
    bool is_useful = m == NULL; /* no base handle => everything is useful */

    for (m = HLIST_FIRST; m; m = HLIST_NEXT(m))
    {
        /* Read each field once; 'end' is declared volatile */
        off_t pos = m->pos;
        off_t end = m->end;

        buffered += end - m->start;     /* data fetched so far */
        remaining += m->filesize - end; /* data still left to buffer */

        if (m->id == base_handle_id)
            is_useful = true;

        if (is_useful)
            useful += end - pos;        /* buffered but not yet consumed */
    }

    mutex_unlock(&llist_mutex);

    dc->buffered = buffered;
    dc->remaining = remaining;
    dc->useful = useful;

    return num;
}
624
/* Q_BUFFER_HANDLE event and buffer data for the given handle.
   Return whether or not the buffering should continue explicitly.

   handle_id: id of the handle to fill from its file
   to_buffer: 0 = normal filling (stop when a queue event arrives),
              otherwise buffer at least this many bytes, then stop.

   Returns false only when the ring buffer ran out of space mid-read
   (caller should make room and retry); true in every other case,
   including "handle gone", "file could not be reopened" and "done". */
static bool buffer_handle(int handle_id, size_t to_buffer)
{
    logf("buffer_handle(%d, %lu)", handle_id, (unsigned long)to_buffer);
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return true;

    logf(" type: %d", (int)h->type);

    if (h->end >= h->filesize) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0) { /* file closed, reopen */
        if (h->path[0] != '\0')
            h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0) {
            /* could not open the file, truncate it where it is */
            h->filesize = h->end;
            return true;
        }

        if (h->start)
            lseek(h->fd, h->start, SEEK_SET);
    }

    trigger_cpu_boost();

    if (h->type == TYPE_ID3) {
        /* Metadata handles are filled in one shot by the metadata parser
           rather than by raw file reads. */
        if (!get_metadata(ringbuf_ptr(h->data), h->fd, h->path)) {
            /* metadata parsing failed: clear the buffer. */
            wipe_mp3entry(ringbuf_ptr(h->data));
        }
        close_fd(&h->fd);
        h->widx = ringbuf_add(h->data, h->filesize);
        h->end = h->filesize;
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
        return true;
    }

    bool stop = false;
    while (h->end < h->filesize && !stop)
    {
        /* max amount to copy */
        size_t widx = h->widx;
        ssize_t copy_n = h->filesize - h->end;
        copy_n = MIN(copy_n, BUFFERING_DEFAULT_FILECHUNK);
        /* clamp to the end of the linear buffer; wrap happens via
           ringbuf_add below */
        copy_n = MIN(copy_n, (off_t)(buffer_len - widx));

        /* only the list walk below needs the lock; the read itself is
           done unlocked */
        mutex_lock(&llist_mutex);

        /* read only up to available space and stop if it would overwrite
           the next handle; stop one byte early to avoid empty/full alias
           (or else do more complicated arithmetic to differentiate) */
        size_t next = ringbuf_offset(HLIST_NEXT(h) ?: HLIST_FIRST);
        ssize_t overlap = ringbuf_add_cross_full(widx, copy_n, next);

        mutex_unlock(&llist_mutex);

        if (overlap > 0) {
            stop = true;
            copy_n -= overlap;
        }

        if (copy_n <= 0)
            return false; /* no space for read */

        /* rc is the actual amount read */
        ssize_t rc = read(h->fd, ringbuf_ptr(widx), copy_n);

        if (rc <= 0) {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            logf("File ended %lu bytes early\n",
                 (unsigned long)(h->filesize - h->end));
            h->filesize = h->end; /* truncate to what we actually got */
            break;
        }

        /* Advance buffer and make data available to users */
        h->widx = ringbuf_add(widx, rc);
        h->end += rc;

        yield();

        if (to_buffer == 0) {
            /* Normal buffering - check queue */
            if (!queue_empty(&buffering_queue))
                break;
        } else {
            if (to_buffer <= (size_t)rc)
                break; /* Done */
            to_buffer -= rc;
        }
    }

    if (h->end >= h->filesize) {
        /* finished buffering the file */
        close_fd(&h->fd);
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
    }

    return !stop;
}
737
Michael Sevakis69382552011-02-14 08:36:29 +0000738/* Close the specified handle id and free its allocation. */
Michael Sevakis36615812013-08-26 16:49:53 -0400739/* Q_CLOSE_HANDLE */
Steve Bavin135cc752008-03-28 12:51:33 +0000740static bool close_handle(int handle_id)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000741{
Michael Sevakis69382552011-02-14 08:36:29 +0000742 mutex_lock(&llist_mutex);
Michael Sevakis36615812013-08-26 16:49:53 -0400743 struct memory_handle *h = find_handle(handle_id);
Brandon Low31c11642007-11-04 19:01:02 +0000744
745 /* If the handle is not found, it is closed */
Michael Sevakis69382552011-02-14 08:36:29 +0000746 if (h) {
Michael Sevakis36615812013-08-26 16:49:53 -0400747 close_fd(&h->fd);
Michael Sevakis65c6a142017-04-13 18:53:17 -0400748 unlink_handle(h);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000749 }
750
Michael Sevakis69382552011-02-14 08:36:29 +0000751 mutex_unlock(&llist_mutex);
Michael Sevakis65c6a142017-04-13 18:53:17 -0400752 return true;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000753}
754
/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data.

   h: handle to compact (may be NULL).
   Returns the handle pointer after any move (move_handle may relocate the
   struct itself), or NULL if h was NULL. */
static struct memory_handle * shrink_handle(struct memory_handle *h)
{
    if (!h)
        return NULL;

    if (h->type == TYPE_PACKET_AUDIO) {
        /* only move the handle struct */
        /* data is pinned by default - if we start moving packet audio,
           the semantics will determine whether or not data is movable
           but the handle will remain movable in either case */
        size_t delta = ringbuf_sub_empty(h->ridx, h->data);

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, 0))
            return h;

        h->data = ringbuf_add(h->data, delta);
        h->start += delta;
    } else {
        /* metadata handle: we can move all of it */
        if (h->pinned || !HLIST_NEXT(h))
            return h; /* Pinned, last handle */

        /* close the gap between this handle's data and the next handle */
        size_t data_size = h->filesize - h->start;
        uintptr_t handle_distance =
            ringbuf_sub_empty(ringbuf_offset(HLIST_NEXT(h)), h->data);
        size_t delta = handle_distance - data_size;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, data_size))
            return h;

        size_t olddata = h->data;
        h->data = ringbuf_add(h->data, delta);
        h->ridx = ringbuf_add(h->ridx, delta);
        h->widx = ringbuf_add(h->widx, delta);

        /* some payloads contain absolute pointers that must be rebased
           after the data moved */
        switch (h->type)
        {
        case TYPE_ID3:
            if (h->filesize != sizeof(struct mp3entry))
                break;
            /* when moving an mp3entry we need to readjust its pointers */
            adjust_mp3entry(ringbuf_ptr(h->data), ringbuf_ptr(h->data),
                            ringbuf_ptr(olddata));
            break;

        case TYPE_BITMAP:
            /* adjust the bitmap's pointer */
            ((struct bitmap *)ringbuf_ptr(h->data))->data =
                ringbuf_ptr(h->data + sizeof(struct bitmap));
            break;

        default:
            break;
        }
    }

    return h;
}
817
818/* Fill the buffer by buffering as much data as possible for handles that still
Brandon Low11a36612007-11-03 06:21:32 +0000819 have data left to buffer
820 Return whether or not to continue filling after this */
821static bool fill_buffer(void)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000822{
823 logf("fill_buffer()");
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400824 mutex_lock(&llist_mutex);
Michael Sevakis69382552011-02-14 08:36:29 +0000825
Michael Sevakis6ee3b6f2017-12-09 23:26:05 -0500826 struct memory_handle *m = shrink_handle(HLIST_FIRST);
Michael Sevakis69382552011-02-14 08:36:29 +0000827
Michael Sevakiseefc7c72017-04-08 18:11:25 -0400828 mutex_unlock(&llist_mutex);
829
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000830 while (queue_empty(&buffering_queue) && m) {
Michael Sevakis36615812013-08-26 16:49:53 -0400831 if (m->end < m->filesize && !buffer_handle(m->id, 0)) {
832 m = NULL;
833 break;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000834 }
Michael Sevakis65c6a142017-04-13 18:53:17 -0400835 m = HLIST_NEXT(m);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000836 }
837
Brandon Low11a36612007-11-03 06:21:32 +0000838 if (m) {
839 return true;
Michael Sevakis8f143572011-02-14 09:18:58 +0000840 } else {
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000841 /* only spin the disk down if the filling wasn't interrupted by an
842 event arriving in the queue. */
Frank Gevaerts2f8a0082008-11-01 16:14:28 +0000843 storage_sleep();
Brandon Low11a36612007-11-03 06:21:32 +0000844 return false;
845 }
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000846}
847
#ifdef HAVE_ALBUMART
/* Given a file descriptor to a bitmap file, write the bitmap data to the
   buffer, with a struct bitmap and the actual data immediately following.
   Return value is the total size (struct + data).

   fd:     open descriptor of the image file
   path:   used only to sniff a ".bmp" extension when HAVE_JPEG is set
           (NOTE(review): assumes strlen(path) >= 4 - confirm callers)
   data:   desired dimensions plus optional embedded-albumart location
   bufidx: ring-buffer index where the struct bitmap is placed */
static int load_image(int fd, const char *path,
                      struct bufopen_bitmap_data *data,
                      size_t bufidx)
{
    int rc;
    struct bitmap *bmp = ringbuf_ptr(bufidx);
    struct dim *dim = data->dim;
    struct mp3_albumart *aa = data->embedded_albumart;

    /* get the desired image size */
    bmp->width = dim->width, bmp->height = dim->height;
    /* FIXME: alignment may be needed for the data buffer. */
    bmp->data = ringbuf_ptr(bufidx + sizeof(struct bitmap));

#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
    bmp->maskdata = NULL;
#endif
    /* space available for pixel data (shadows libc free(); kept as-is
       to preserve the original code) */
    int free = (int)MIN(buffer_len - bytes_used(), buffer_len - bufidx)
               - sizeof(struct bitmap);

#ifdef HAVE_JPEG
    if (aa != NULL) {
        /* embedded albumart: decode only the jpeg region of the file */
        lseek(fd, aa->pos, SEEK_SET);
        rc = clip_jpeg_fd(fd, aa->size, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    }
    else if (strcmp(path + strlen(path) - 4, ".bmp"))
        rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    else
#endif
        rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                         FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);

    return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
    /* unreachable; silences unused-parameter warning when HAVE_JPEG is
       not defined and path is otherwise unused */
    (void)path;
}
#endif /* HAVE_ALBUMART */
Nicolas Pennequin9d4bed72007-11-11 12:29:37 +0000890
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +0000891
892/*
893MAIN BUFFERING API CALLS
894========================
895
896bufopen : Request the opening of a new handle for a file
897bufalloc : Open a new handle for data other than a file.
898bufclose : Close an open handle
899bufseek : Set the read pointer in a handle
900bufadvance : Move the read pointer in a handle
901bufread : Copy data from a handle into a given buffer
902bufgetdata : Give a pointer to the handle's data
903
904These functions are exported, to allow interaction with the buffer.
905They take care of the content of the structs, and rely on the linked list
906management functions for all the actual handle management work.
907*/
908
909
/* Reserve space in the buffer for a file.
   file: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           offset bytes of the file aren't needed.
   type: one of the supported data types (audio, image, cuesheet, ...)
   user_data: data passed to subcalls specific to a data_type
              (only used for image (albumart) buffering so far)
   return value: <0 if the file cannot be opened, or one file already
   queued to be opened, otherwise the handle for the file in the buffer
*/
int bufopen(const char *file, off_t offset, enum data_type type,
            void *user_data)
{
    int handle_id = ERR_BUFFER_FULL;
    size_t data;
    struct memory_handle *h;

    /* No buffer refs until after the mutex_lock call! */

    if (type == TYPE_ID3) {
        /* ID3 case: allocate space, init the handle and return. */
        mutex_lock(&llist_mutex);

        h = add_handle(H_ALLOCALL, sizeof(struct mp3entry), file, &data);

        if (h) {
            handle_id = h->id;

            h->type = type;
            h->fd   = -1;
            h->data = data;
            h->ridx = data;
            h->widx = data;
            h->filesize = sizeof(struct mp3entry);
            h->start = 0;
            h->pos = 0;
            h->end = 0;

            link_handle(h);

            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }

        mutex_unlock(&llist_mutex);
        return handle_id;
    }
    else if (type == TYPE_UNKNOWN)
        return ERR_UNSUPPORTED_TYPE;
#ifdef APPLICATION
    /* Loading code from memory is not supported in application builds */
    else if (type == TYPE_CODEC)
        return ERR_UNSUPPORTED_TYPE;
#endif
    /* Other cases: there is a little more work. */
    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = 0;
#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* If albumart is embedded, the complete file is not buffered,
         * but only the jpeg part; filesize() would be wrong */
        struct bufopen_bitmap_data *aa = user_data;
        if (aa->embedded_albumart)
            size = aa->embedded_albumart->size;
    }
#endif

    if (size == 0)
        size = filesize(fd);

    /* only stream-like types may wrap around the ring buffer */
    unsigned int hflags = 0;
    if (type == TYPE_PACKET_AUDIO || type == TYPE_CODEC)
        hflags = H_CANWRAP;

    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    /* Reserve extra space because alignment can move data forward */
    size_t padded_size = STORAGE_PAD(size - adjusted_offset);

    mutex_lock(&llist_mutex);

    h = add_handle(hflags, padded_size, file, &data);
    if (!h) {
        DEBUGF("%s(): failed to add handle\n", __func__);
        mutex_unlock(&llist_mutex);
        close(fd);
        return ERR_BUFFER_FULL;
    }

    handle_id = h->id;

    h->type = type;
    h->fd   = -1;

#ifdef STORAGE_WANTS_ALIGN
    /* Don't bother to storage align bitmaps because they are not
     * loaded directly into the buffer.
     */
    if (type != TYPE_BITMAP) {
        /* Align to desired storage alignment */
        size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)adjusted_offset -
                                               (uintptr_t)ringbuf_ptr(data));
        data = ringbuf_add(data, alignment_pad);
    }
#endif /* STORAGE_WANTS_ALIGN */

    h->data = data;
    h->ridx = data;
    h->start = adjusted_offset;
    h->pos = adjusted_offset;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* Bitmap file: we load the data instead of the file */
        int rc = load_image(fd, file, user_data, data);
        if (rc <= 0) {
            /* NOTE(review): the handle added above is never linked on this
               path - confirm add_handle space is reclaimed elsewhere */
            handle_id = ERR_FILE_ERROR;
        } else {
            data = ringbuf_add(data, rc);
            size = rc;
            adjusted_offset = rc;
        }
    }
    else
#endif
    if (type == TYPE_CUESHEET) {
        /* cuesheets keep the fd; they are buffered synchronously below */
        h->fd = fd;
    }

    if (handle_id >= 0) {
        h->widx = data;
        h->filesize = size;
        h->end = adjusted_offset;
        link_handle(h);
    }

    mutex_unlock(&llist_mutex);

    if (type == TYPE_CUESHEET) {
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        close(fd);

        if (handle_id >= 0) {
            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }
    }

    logf("bufopen: new hdl %d", handle_id);
    return handle_id;

    /* Currently only used for aa loading; unreachable statement keeps
       user_data "used" in builds without HAVE_ALBUMART */
    (void)user_data;
}
1075
1076/* Open a new handle from data that needs to be copied from memory.
1077 src is the source buffer from which to copy data. It can be NULL to simply
1078 reserve buffer space.
1079 size is the requested size. The call will only be successful if the
1080 requested amount of data can entirely fit in the buffer without wrapping.
1081 Return value is the handle id for success or <0 for failure.
1082*/
Steve Bavin135cc752008-03-28 12:51:33 +00001083int bufalloc(const void *src, size_t size, enum data_type type)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001084{
Michael Sevakisc537d592011-04-27 03:08:23 +00001085 if (type == TYPE_UNKNOWN)
1086 return ERR_UNSUPPORTED_TYPE;
1087
Michael Sevakis36615812013-08-26 16:49:53 -04001088 int handle_id = ERR_BUFFER_FULL;
Michael Sevakis69382552011-02-14 08:36:29 +00001089
1090 mutex_lock(&llist_mutex);
1091
Michael Sevakis36615812013-08-26 16:49:53 -04001092 size_t data;
Michael Sevakiscd3ea082017-12-09 09:41:34 -05001093 struct memory_handle *h = add_handle(H_ALLOCALL, size, NULL, &data);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001094
Michael Sevakis8f143572011-02-14 09:18:58 +00001095 if (h) {
Michael Sevakis69382552011-02-14 08:36:29 +00001096 handle_id = h->id;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001097
Michael Sevakis69382552011-02-14 08:36:29 +00001098 if (src) {
1099 if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
1100 /* specially take care of struct mp3entry */
Michael Sevakis36615812013-08-26 16:49:53 -04001101 copy_mp3entry(ringbuf_ptr(data), src);
Michael Sevakis69382552011-02-14 08:36:29 +00001102 } else {
Michael Sevakis36615812013-08-26 16:49:53 -04001103 memcpy(ringbuf_ptr(data), src, size);
Michael Sevakis69382552011-02-14 08:36:29 +00001104 }
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001105 }
Michael Sevakis89b05af2013-06-29 22:18:17 -04001106
Michael Sevakis36615812013-08-26 16:49:53 -04001107 h->type = type;
Michael Sevakis36615812013-08-26 16:49:53 -04001108 h->fd = -1;
1109 h->data = data;
1110 h->ridx = data;
1111 h->widx = ringbuf_add(data, size);
1112 h->filesize = size;
1113 h->start = 0;
1114 h->pos = 0;
1115 h->end = size;
1116
Michael Sevakis65c6a142017-04-13 18:53:17 -04001117 link_handle(h);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001118 }
1119
Michael Sevakis69382552011-02-14 08:36:29 +00001120 mutex_unlock(&llist_mutex);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001121
Michael Sevakis69382552011-02-14 08:36:29 +00001122 logf("bufalloc: new hdl %d", handle_id);
1123 return handle_id;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001124}
1125
1126/* Close the handle. Return true for success and false for failure */
Steve Bavin135cc752008-03-28 12:51:33 +00001127bool bufclose(int handle_id)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001128{
1129 logf("bufclose(%d)", handle_id);
Michael Sevakis786fbbf2017-12-17 18:49:24 -05001130
Michael Sevakisc1a01be2017-12-08 13:01:25 -05001131 if (handle_id <= 0) {
1132 return true;
1133 }
1134
Nicolas Pennequind08131a2007-10-27 01:25:47 +00001135 LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001136 return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
1137}
1138
/* Backend to bufseek and bufadvance. Call only in response to
   Q_REBUFFER_HANDLE!

   Repositions handle_id to absolute file position newpos, replying to the
   buffering queue with 0 on success or ERR_HANDLE_NOT_FOUND. Tries, in
   order: (1) satisfy from already-buffered data, (2) a short forward read,
   (3) a full reset-and-rebuffer from newpos. */
static void rebuffer_handle(int handle_id, off_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h) {
        queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND);
        return;
    }

    /* Check that we still need to do this since the request could have
       possibly been met by this time */
    if (newpos >= h->start && newpos <= h->end) {
        h->ridx = ringbuf_add(h->data, newpos - h->start);
        h->pos = newpos;
        queue_reply(&buffering_queue, 0);
        return;
    }

    /* When seeking foward off of the buffer, if it is a short seek attempt to
       avoid rebuffering the whole track, just read enough to satisfy */
    off_t amount = newpos - h->pos;

    if (amount > 0 && amount <= BUFFERING_DEFAULT_FILECHUNK) {
        h->ridx = ringbuf_add(h->data, newpos - h->start);
        h->pos = newpos;

        /* +1 so the read position itself is covered, not just up to it */
        if (buffer_handle(handle_id, amount + 1) && h->end >= h->pos) {
            /* It really did succeed */
            queue_reply(&buffering_queue, 0);
            buffer_handle(handle_id, 0); /* Ok, try the rest */
            return;
        }
        /* Data collision or other file error - must reset */

        if (newpos > h->filesize)
            newpos = h->filesize; /* file truncation happened above */
    }

    mutex_lock(&llist_mutex);

    size_t next = ringbuf_offset(HLIST_NEXT(h) ?: HLIST_FIRST);

#ifdef STORAGE_WANTS_ALIGN
    /* Strip alignment padding then redo */
    size_t new_index = ringbuf_add(ringbuf_offset(h), h->size);

    /* Align to desired storage alignment if space permits - handle could
       have been shrunken too close to the following one after a previous
       rebuffer. */
    size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)newpos -
                                           (uintptr_t)ringbuf_ptr(new_index));

    if (ringbuf_add_cross_full(new_index, alignment_pad, next) > 0)
        alignment_pad = 0; /* Forego storage alignment this time */

    new_index = ringbuf_add(new_index, alignment_pad);
#else
    /* Just clear the data buffer */
    size_t new_index = h->data;
#endif /* STORAGE_WANTS_ALIGN */

    /* Reset the handle to its new position */
    h->ridx = h->widx = h->data = new_index;
    h->start = h->pos = h->end = newpos;

    if (h->fd >= 0)
        lseek(h->fd, newpos, SEEK_SET);

    off_t filerem = h->filesize - newpos;
    /* decide under the lock whether the remainder fits before the next
       handle; the event itself is sent after unlocking */
    bool send = HLIST_NEXT(h) &&
                ringbuf_add_cross_full(new_index, filerem, next) > 0;

    mutex_unlock(&llist_mutex);

    if (send) {
        /* There isn't enough space to rebuffer all of the track from its new
           offset, so we ask the user to free some */
        DEBUGF("%s(): space is needed\n", __func__);
        send_event(BUFFER_EVENT_REBUFFER, &(int){ handle_id });
    }

    /* Now we do the rebuffer */
    queue_reply(&buffering_queue, 0);
    buffer_handle(handle_id, 0);
}
1225
1226/* Backend to bufseek and bufadvance */
Michael Sevakis36615812013-08-26 16:49:53 -04001227static int seek_handle(struct memory_handle *h, off_t newpos)
Michael Sevakis69382552011-02-14 08:36:29 +00001228{
Michael Sevakis36615812013-08-26 16:49:53 -04001229 if ((newpos < h->start || newpos >= h->end) &&
1230 (newpos < h->filesize || h->end < h->filesize)) {
Michael Sevakis69382552011-02-14 08:36:29 +00001231 /* access before or after buffered data and not to end of file or file
1232 is not buffered to the end-- a rebuffer is needed. */
Michael Sevakis69382552011-02-14 08:36:29 +00001233 return queue_send(&buffering_queue, Q_REBUFFER_HANDLE,
Michael Sevakis36615812013-08-26 16:49:53 -04001234 (intptr_t)&(struct buf_message_data){ h->id, newpos });
Michael Sevakis69382552011-02-14 08:36:29 +00001235 }
1236 else {
Michael Sevakis36615812013-08-26 16:49:53 -04001237 h->ridx = ringbuf_add(h->data, newpos - h->start);
1238 h->pos = newpos;
1239 return 0;
Michael Sevakis69382552011-02-14 08:36:29 +00001240 }
Michael Sevakis69382552011-02-14 08:36:29 +00001241}
1242
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001243/* Set reading index in handle (relatively to the start of the file).
1244 Access before the available data will trigger a rebuffer.
Michael Sevakisc537d592011-04-27 03:08:23 +00001245 Return 0 for success and for failure:
1246 ERR_HANDLE_NOT_FOUND if the handle wasn't found
1247 ERR_INVALID_VALUE if the new requested position was beyond the end of
1248 the file
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001249*/
Steve Bavin135cc752008-03-28 12:51:33 +00001250int bufseek(int handle_id, size_t newpos)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001251{
1252 struct memory_handle *h = find_handle(handle_id);
1253 if (!h)
Nicolas Pennequind400e232007-10-29 14:15:59 +00001254 return ERR_HANDLE_NOT_FOUND;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001255
Michael Sevakis36615812013-08-26 16:49:53 -04001256 if (newpos > (size_t)h->filesize)
1257 return ERR_INVALID_VALUE;
1258
Michael Sevakis69382552011-02-14 08:36:29 +00001259 return seek_handle(h, newpos);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001260}
1261
1262/* Advance the reading index in a handle (relatively to its current position).
Michael Sevakisc537d592011-04-27 03:08:23 +00001263 Return 0 for success and for failure:
1264 ERR_HANDLE_NOT_FOUND if the handle wasn't found
Michael Sevakis36615812013-08-26 16:49:53 -04001265 ERR_INVALID_VALUE if the new requested position was before the beginning
1266 or beyond the end of the file
Michael Sevakisc537d592011-04-27 03:08:23 +00001267 */
Steve Bavin135cc752008-03-28 12:51:33 +00001268int bufadvance(int handle_id, off_t offset)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001269{
Michael Sevakis69382552011-02-14 08:36:29 +00001270 struct memory_handle *h = find_handle(handle_id);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001271 if (!h)
Nicolas Pennequind400e232007-10-29 14:15:59 +00001272 return ERR_HANDLE_NOT_FOUND;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001273
Michael Sevakis36615812013-08-26 16:49:53 -04001274 off_t pos = h->pos;
1275
1276 if ((offset < 0 && offset < -pos) ||
1277 (offset >= 0 && offset > h->filesize - pos))
1278 return ERR_INVALID_VALUE;
1279
1280 return seek_handle(h, pos + offset);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001281}
1282
Michael Sevakisc537d592011-04-27 03:08:23 +00001283/* Get the read position from the start of the file
1284 Returns the offset from byte 0 of the file and for failure:
1285 ERR_HANDLE_NOT_FOUND if the handle wasn't found
1286 */
1287off_t bufftell(int handle_id)
1288{
1289 const struct memory_handle *h = find_handle(handle_id);
1290 if (!h)
1291 return ERR_HANDLE_NOT_FOUND;
Michael Sevakis36615812013-08-26 16:49:53 -04001292
1293 return h->pos;
Michael Sevakisc537d592011-04-27 03:08:23 +00001294}
1295
Brandon Low9821cce2007-11-06 16:49:30 +00001296/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
Michael Sevakis36615812013-08-26 16:49:53 -04001297 * actual amount of data available for reading. It does range checks on
1298 * size and returns a valid (and explicit) amount of data for reading */
Steve Bavin135cc752008-03-28 12:51:33 +00001299static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
1300 bool guardbuf_limit)
Brandon Low9821cce2007-11-06 16:49:30 +00001301{
Brandon Low6e8ee402007-11-08 15:34:23 +00001302 struct memory_handle *h = find_handle(handle_id);
1303 if (!h)
1304 return NULL;
1305
Michael Sevakis36615812013-08-26 16:49:53 -04001306 if (h->pos >= h->filesize) {
Brandon Low9821cce2007-11-06 16:49:30 +00001307 /* File is finished reading */
Nicolas Pennequinb50473f2007-11-08 18:27:19 +00001308 *size = 0;
1309 return h;
1310 }
Brandon Low9821cce2007-11-06 16:49:30 +00001311
Michael Sevakis36615812013-08-26 16:49:53 -04001312 off_t realsize = *size;
1313 off_t filerem = h->filesize - h->pos;
Brandon Low9821cce2007-11-06 16:49:30 +00001314
Michael Sevakis36615812013-08-26 16:49:53 -04001315 if (realsize <= 0 || realsize > filerem)
1316 realsize = filerem; /* clip to eof */
Michael Sevakis69382552011-02-14 08:36:29 +00001317
Michael Sevakis36615812013-08-26 16:49:53 -04001318 if (guardbuf_limit && realsize > GUARD_BUFSIZE) {
Brandon Low1db42432007-11-08 15:52:10 +00001319 logf("data request > guardbuf");
1320 /* If more than the size of the guardbuf is requested and this is a
1321 * bufgetdata, limit to guard_bufsize over the end of the buffer */
Michael Sevakis36615812013-08-26 16:49:53 -04001322 realsize = MIN((size_t)realsize, buffer_len - h->ridx + GUARD_BUFSIZE);
Nicolas Pennequinca4771b2007-11-08 16:12:28 +00001323 /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
Brandon Low9821cce2007-11-06 16:49:30 +00001324 }
1325
Michael Sevakis36615812013-08-26 16:49:53 -04001326 off_t end = h->end;
1327 off_t wait_end = h->pos + realsize;
1328
1329 if (end < wait_end && end < h->filesize) {
Brandon Low9821cce2007-11-06 16:49:30 +00001330 /* Wait for the data to be ready */
Michael Sevakis36615812013-08-26 16:49:53 -04001331 unsigned int request = 1;
1332
Brandon Low9821cce2007-11-06 16:49:30 +00001333 do
1334 {
Michael Sevakis36615812013-08-26 16:49:53 -04001335 if (--request == 0) {
1336 request = 100;
1337 /* Data (still) isn't ready; ping buffering thread */
1338 LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id);
1339 queue_send(&buffering_queue, Q_START_FILL, handle_id);
1340 }
1341
Michael Sevakis69382552011-02-14 08:36:29 +00001342 sleep(0);
Brandon Low551db402007-11-08 16:06:24 +00001343 /* it is not safe for a non-buffering thread to sleep while
1344 * holding a handle */
Brandon Low6e8ee402007-11-08 15:34:23 +00001345 h = find_handle(handle_id);
Michael Sevakis36615812013-08-26 16:49:53 -04001346 if (!h)
Brandon Low551db402007-11-08 16:06:24 +00001347 return NULL;
Michael Sevakis36615812013-08-26 16:49:53 -04001348
1349 if (h->signaled != 0)
1350 return NULL; /* Wait must be abandoned */
1351
1352 end = h->end;
Brandon Low9821cce2007-11-06 16:49:30 +00001353 }
Michael Sevakis36615812013-08-26 16:49:53 -04001354 while (end < wait_end && end < h->filesize);
1355
1356 filerem = h->filesize - h->pos;
1357 if (realsize > filerem)
1358 realsize = filerem;
Brandon Low9821cce2007-11-06 16:49:30 +00001359 }
1360
Michael Sevakis36615812013-08-26 16:49:53 -04001361 *size = realsize;
Brandon Low6e8ee402007-11-08 15:34:23 +00001362 return h;
Brandon Low9821cce2007-11-06 16:49:30 +00001363}
1364
Michael Sevakis69382552011-02-14 08:36:29 +00001365
1366/* Note: It is safe for the thread responsible for handling the rebuffer
1367 * cleanup request to call bufread or bufgetdata only when the data will
1368 * be available-- not if it could be blocked waiting for it in prep_bufdata.
1369 * It should be apparent that if said thread is being forced to wait for
1370 * buffering but has not yet responded to the cleanup request, the space
1371 * can never be cleared to allow further reading of the file because it is
1372 * not listening to callbacks any longer. */
1373
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001374/* Copy data from the given handle to the dest buffer.
Nicolas Pennequinb838a622007-11-02 19:13:03 +00001375 Return the number of bytes copied or < 0 for failure (handle not found).
1376 The caller is blocked until the requested amount of data is available.
1377*/
Steve Bavin135cc752008-03-28 12:51:33 +00001378ssize_t bufread(int handle_id, size_t size, void *dest)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001379{
Michael Sevakis36615812013-08-26 16:49:53 -04001380 const struct memory_handle *h =
1381 prep_bufdata(handle_id, &size, false);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001382 if (!h)
Nicolas Pennequind400e232007-10-29 14:15:59 +00001383 return ERR_HANDLE_NOT_FOUND;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001384
Michael Sevakis36615812013-08-26 16:49:53 -04001385 if (h->ridx + size > buffer_len) {
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001386 /* the data wraps around the end of the buffer */
1387 size_t read = buffer_len - h->ridx;
Michael Sevakis36615812013-08-26 16:49:53 -04001388 memcpy(dest, ringbuf_ptr(h->ridx), read);
1389 memcpy(dest + read, ringbuf_ptr(0), size - read);
Michael Sevakis8f143572011-02-14 09:18:58 +00001390 } else {
Michael Sevakis36615812013-08-26 16:49:53 -04001391 memcpy(dest, ringbuf_ptr(h->ridx), size);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001392 }
1393
Michael Sevakis36615812013-08-26 16:49:53 -04001394 return size;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001395}
1396
1397/* Update the "data" pointer to make the handle's data available to the caller.
Nicolas Pennequinb838a622007-11-02 19:13:03 +00001398 Return the length of the available linear data or < 0 for failure (handle
1399 not found).
1400 The caller is blocked until the requested amount of data is available.
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001401 size is the amount of linear data requested. it can be 0 to get as
1402 much as possible.
Nicolas Pennequinb838a622007-11-02 19:13:03 +00001403 The guard buffer may be used to provide the requested size. This means it's
1404 unsafe to request more than the size of the guard buffer.
1405*/
Steve Bavin135cc752008-03-28 12:51:33 +00001406ssize_t bufgetdata(int handle_id, size_t size, void **data)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001407{
Michael Sevakis36615812013-08-26 16:49:53 -04001408 struct memory_handle *h =
1409 prep_bufdata(handle_id, &size, true);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001410 if (!h)
Nicolas Pennequind400e232007-10-29 14:15:59 +00001411 return ERR_HANDLE_NOT_FOUND;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001412
Michael Sevakis36615812013-08-26 16:49:53 -04001413 if (h->ridx + size > buffer_len) {
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001414 /* the data wraps around the end of the buffer :
1415 use the guard buffer to provide the requested amount of data. */
Michael Sevakis36615812013-08-26 16:49:53 -04001416 size_t copy_n = h->ridx + size - buffer_len;
Michael Sevakis8f143572011-02-14 09:18:58 +00001417 /* prep_bufdata ensures
1418 adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
Nicolas Pennequinca4771b2007-11-08 16:12:28 +00001419 so copy_n <= GUARD_BUFSIZE */
Michael Sevakis36615812013-08-26 16:49:53 -04001420 memcpy(guard_buffer, ringbuf_ptr(0), copy_n);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001421 }
1422
Nicolas Pennequinecec9402007-12-16 01:38:56 +00001423 if (data)
Michael Sevakis36615812013-08-26 16:49:53 -04001424 *data = ringbuf_ptr(h->ridx);
Nicolas Pennequinecec9402007-12-16 01:38:56 +00001425
Michael Sevakis36615812013-08-26 16:49:53 -04001426 return size;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001427}
1428
Steve Bavin135cc752008-03-28 12:51:33 +00001429ssize_t bufgettail(int handle_id, size_t size, void **data)
Brandon Low3386dd72007-11-28 04:58:16 +00001430{
Michael Sevakis36615812013-08-26 16:49:53 -04001431 if (thread_self() != buffering_thread_id)
1432 return ERR_WRONG_THREAD; /* only from buffering thread */
Brandon Low3386dd72007-11-28 04:58:16 +00001433
1434 /* We don't support tail requests of > guardbuf_size, for simplicity */
1435 if (size > GUARD_BUFSIZE)
1436 return ERR_INVALID_VALUE;
1437
Michael Sevakis36615812013-08-26 16:49:53 -04001438 const struct memory_handle *h = find_handle(handle_id);
1439 if (!h)
1440 return ERR_HANDLE_NOT_FOUND;
Brandon Low3386dd72007-11-28 04:58:16 +00001441
Michael Sevakis36615812013-08-26 16:49:53 -04001442 if (h->end >= h->filesize) {
Michael Sevakiseefc7c72017-04-08 18:11:25 -04001443 size_t tidx = ringbuf_sub_empty(h->widx, size);
Michael Sevakis36615812013-08-26 16:49:53 -04001444
1445 if (tidx + size > buffer_len) {
1446 size_t copy_n = tidx + size - buffer_len;
1447 memcpy(guard_buffer, ringbuf_ptr(0), copy_n);
1448 }
1449
1450 *data = ringbuf_ptr(tidx);
1451 }
1452 else {
1453 size = ERR_HANDLE_NOT_DONE;
Brandon Low3386dd72007-11-28 04:58:16 +00001454 }
1455
Brandon Low3386dd72007-11-28 04:58:16 +00001456 return size;
1457}
1458
Steve Bavin135cc752008-03-28 12:51:33 +00001459ssize_t bufcuttail(int handle_id, size_t size)
Brandon Low3386dd72007-11-28 04:58:16 +00001460{
Michael Sevakis36615812013-08-26 16:49:53 -04001461 if (thread_self() != buffering_thread_id)
1462 return ERR_WRONG_THREAD; /* only from buffering thread */
Nicolas Pennequinecec9402007-12-16 01:38:56 +00001463
Michael Sevakis36615812013-08-26 16:49:53 -04001464 struct memory_handle *h = find_handle(handle_id);
Brandon Low3386dd72007-11-28 04:58:16 +00001465 if (!h)
1466 return ERR_HANDLE_NOT_FOUND;
1467
Michael Sevakis36615812013-08-26 16:49:53 -04001468 if (h->end >= h->filesize) {
1469 /* Cannot trim to before read position */
1470 size_t available = h->end - MAX(h->start, h->pos);
1471 if (available < size)
1472 size = available;
Brandon Low3386dd72007-11-28 04:58:16 +00001473
Michael Sevakiseefc7c72017-04-08 18:11:25 -04001474 h->widx = ringbuf_sub_empty(h->widx, size);
Michael Sevakis36615812013-08-26 16:49:53 -04001475 h->filesize -= size;
1476 h->end -= size;
1477 } else {
1478 size = ERR_HANDLE_NOT_DONE;
1479 }
Brandon Low3386dd72007-11-28 04:58:16 +00001480
Michael Sevakis36615812013-08-26 16:49:53 -04001481 return size;
Brandon Low3386dd72007-11-28 04:58:16 +00001482}
1483
1484
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001485/*
1486SECONDARY EXPORTED FUNCTIONS
1487============================
1488
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001489buf_handle_offset
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001490buf_set_base_handle
Michael Sevakisc537d592011-04-27 03:08:23 +00001491buf_handle_data_type
1492buf_is_handle
1493buf_pin_handle
1494buf_signal_handle
1495buf_length
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001496buf_used
Michael Sevakisc537d592011-04-27 03:08:23 +00001497buf_set_watermark
1498buf_get_watermark
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001499
1500These functions are exported, to allow interaction with the buffer.
1501They take care of the content of the structs, and rely on the linked list
1502management functions for all the actual handle management work.
1503*/
Michael Sevakisdfff9382017-12-17 16:12:10 -05001504bool buf_is_handle(int handle_id)
1505{
1506 return find_handle(handle_id) != NULL;
1507}
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001508
Michael Sevakisdfff9382017-12-17 16:12:10 -05001509int buf_handle_data_type(int handle_id)
1510{
1511 const struct memory_handle *h = find_handle(handle_id);
1512 if (!h)
1513 return ERR_HANDLE_NOT_FOUND;
1514 return h->type;
1515}
1516
1517off_t buf_filesize(int handle_id)
1518{
1519 const struct memory_handle *h = find_handle(handle_id);
1520 if (!h)
1521 return ERR_HANDLE_NOT_FOUND;
1522 return h->filesize;
1523}
1524
1525off_t buf_handle_offset(int handle_id)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001526{
Brandon Low404c6fb2007-10-27 01:37:33 +00001527 const struct memory_handle *h = find_handle(handle_id);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001528 if (!h)
Nicolas Pennequind400e232007-10-29 14:15:59 +00001529 return ERR_HANDLE_NOT_FOUND;
Michael Sevakis36615812013-08-26 16:49:53 -04001530 return h->start;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001531}
1532
Michael Sevakisdfff9382017-12-17 16:12:10 -05001533off_t buf_handle_remaining(int handle_id)
Michael Sevakisc537d592011-04-27 03:08:23 +00001534{
1535 const struct memory_handle *h = find_handle(handle_id);
1536 if (!h)
1537 return ERR_HANDLE_NOT_FOUND;
Michael Sevakis36615812013-08-26 16:49:53 -04001538 return h->filesize - h->end;
Michael Sevakisc537d592011-04-27 03:08:23 +00001539}
1540
Michael Sevakisc537d592011-04-27 03:08:23 +00001541bool buf_pin_handle(int handle_id, bool pin)
1542{
1543 struct memory_handle *h = find_handle(handle_id);
1544 if (!h)
1545 return false;
1546
1547 if (pin) {
1548 h->pinned++;
1549 } else if (h->pinned > 0) {
1550 h->pinned--;
1551 }
1552
Michael Sevakis89b05af2013-06-29 22:18:17 -04001553 return true;
Michael Sevakisc537d592011-04-27 03:08:23 +00001554}
1555
1556bool buf_signal_handle(int handle_id, bool signal)
1557{
1558 struct memory_handle *h = find_handle(handle_id);
1559 if (!h)
1560 return false;
1561
1562 h->signaled = signal ? 1 : 0;
Michael Sevakis89b05af2013-06-29 22:18:17 -04001563 return true;
Michael Sevakisc537d592011-04-27 03:08:23 +00001564}
1565
/* Return the total size of the ring buffer in bytes */
size_t buf_length(void)
{
    return buffer_len;
}
1571
Michael Sevakisdfff9382017-12-17 16:12:10 -05001572/* Set the handle from which useful data is counted */
1573void buf_set_base_handle(int handle_id)
1574{
1575 mutex_lock(&llist_mutex);
1576 base_handle_id = handle_id;
1577 mutex_unlock(&llist_mutex);
1578}
1579
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001580/* Return the amount of buffer space used */
1581size_t buf_used(void)
1582{
Michael Sevakiseefc7c72017-04-08 18:11:25 -04001583 mutex_lock(&llist_mutex);
1584 size_t used = bytes_used();
1585 mutex_unlock(&llist_mutex);
1586 return used;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001587}
1588
Steve Bavin135cc752008-03-28 12:51:33 +00001589void buf_set_watermark(size_t bytes)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001590{
Björn Stenberg6427d122009-01-10 21:10:56 +00001591 conf_watermark = bytes;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001592}
1593
Michael Sevakisc537d592011-04-27 03:08:23 +00001594size_t buf_get_watermark(void)
1595{
1596 return BUF_WATERMARK;
1597}
1598
Michael Sevakisc537d592011-04-27 03:08:23 +00001599/** -- buffer thread helpers -- **/
Nicolas Pennequin4fd27742008-03-29 14:09:14 +00001600static void shrink_buffer(void)
Steve Bavin73f98632008-03-26 08:57:25 +00001601{
Brandon Low555a7642007-11-05 15:24:21 +00001602 logf("shrink_buffer()");
Michael Sevakis8be40742017-12-09 21:57:01 -05001603
Michael Sevakiseefc7c72017-04-08 18:11:25 -04001604 mutex_lock(&llist_mutex);
Michael Sevakis8be40742017-12-09 21:57:01 -05001605
1606 for (struct memory_handle *h = HLIST_LAST; h; h = HLIST_PREV(h)) {
Michael Sevakis6ee3b6f2017-12-09 23:26:05 -05001607 h = shrink_handle(h);
Michael Sevakis8be40742017-12-09 21:57:01 -05001608 }
1609
Michael Sevakiseefc7c72017-04-08 18:11:25 -04001610 mutex_unlock(&llist_mutex);
Brandon Low555a7642007-11-05 15:24:21 +00001611}
1612
Michael Sevakisc537d592011-04-27 03:08:23 +00001613static void NORETURN_ATTR buffering_thread(void)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001614{
Brandon Low11a36612007-11-03 06:21:32 +00001615 bool filling = false;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001616 struct queue_event ev;
1617
1618 while (true)
1619 {
Michael Sevakisc537d592011-04-27 03:08:23 +00001620 if (num_handles > 0) {
1621 if (!filling) {
1622 cancel_cpu_boost();
1623 }
1624 queue_wait_w_tmo(&buffering_queue, &ev, filling ? 1 : HZ/2);
1625 } else {
1626 filling = false;
Nicolas Pennequin732df382008-03-29 17:28:30 +00001627 cancel_cpu_boost();
Michael Sevakisc537d592011-04-27 03:08:23 +00001628 queue_wait(&buffering_queue, &ev);
Nicolas Pennequin12b6c842008-03-29 17:40:04 +00001629 }
Nicolas Pennequin732df382008-03-29 17:28:30 +00001630
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001631 switch (ev.id)
1632 {
Brandon Low86830b62007-11-05 17:51:55 +00001633 case Q_START_FILL:
Nicolas Pennequincf369572008-07-18 23:42:47 +00001634 LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
Brandon Low555a7642007-11-05 15:24:21 +00001635 shrink_buffer();
Brandon Low47eb5692007-11-05 03:11:58 +00001636 queue_reply(&buffering_queue, 1);
Michael Sevakis5a8f5b82011-05-09 21:19:11 +00001637 if (buffer_handle((int)ev.data, 0)) {
1638 filling = true;
1639 }
1640 else if (num_handles > 0 && conf_watermark > 0) {
1641 update_data_counters(NULL);
1642 if (data_counters.useful >= BUF_WATERMARK) {
1643 send_event(BUFFER_EVENT_BUFFER_LOW, NULL);
1644 }
1645 }
Brandon Low47eb5692007-11-05 03:11:58 +00001646 break;
Brandon Low555a7642007-11-05 15:24:21 +00001647
Brandon Low47eb5692007-11-05 03:11:58 +00001648 case Q_BUFFER_HANDLE:
Nicolas Pennequincf369572008-07-18 23:42:47 +00001649 LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
Brandon Low47eb5692007-11-05 03:11:58 +00001650 queue_reply(&buffering_queue, 1);
Michael Sevakis69382552011-02-14 08:36:29 +00001651 buffer_handle((int)ev.data, 0);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001652 break;
1653
Michael Sevakis69382552011-02-14 08:36:29 +00001654 case Q_REBUFFER_HANDLE:
Michael Sevakis36615812013-08-26 16:49:53 -04001655 {
1656 struct buf_message_data *parm =
1657 (struct buf_message_data *)ev.data;
Michael Sevakis69382552011-02-14 08:36:29 +00001658 LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld",
1659 parm->handle_id, parm->data);
1660 rebuffer_handle(parm->handle_id, parm->data);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001661 break;
Michael Sevakis36615812013-08-26 16:49:53 -04001662 }
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001663
1664 case Q_CLOSE_HANDLE:
Nicolas Pennequincf369572008-07-18 23:42:47 +00001665 LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001666 queue_reply(&buffering_queue, close_handle((int)ev.data));
1667 break;
1668
Nicolas Pennequin483c4022008-02-12 23:15:59 +00001669 case Q_HANDLE_ADDED:
1670 LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
1671 /* A handle was added: the disk is spinning, so we can fill */
1672 filling = true;
1673 break;
1674
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001675 case SYS_TIMEOUT:
1676 LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
1677 break;
1678 }
1679
Michael Sevakisc537d592011-04-27 03:08:23 +00001680 if (num_handles == 0 || !queue_empty(&buffering_queue))
1681 continue;
1682
Michael Sevakis69382552011-02-14 08:36:29 +00001683 update_data_counters(NULL);
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001684
Michael Sevakisc537d592011-04-27 03:08:23 +00001685 if (filling) {
Michael Sevakis5a8f5b82011-05-09 21:19:11 +00001686 filling = data_counters.remaining > 0 ? fill_buffer() : false;
Michael Sevakisc537d592011-04-27 03:08:23 +00001687 } else if (ev.id == SYS_TIMEOUT) {
1688 if (data_counters.useful < BUF_WATERMARK) {
1689 /* The buffer is low and we're idle, just watching the levels
1690 - call the callbacks to get new data */
1691 send_event(BUFFER_EVENT_BUFFER_LOW, NULL);
1692
1693 /* Continue anything else we haven't finished - it might
1694 get booted off or stop early because the receiver hasn't
1695 had a chance to clear anything yet */
1696 if (data_counters.remaining > 0) {
Brandon Low555a7642007-11-05 15:24:21 +00001697 shrink_buffer();
Brandon Low60d4e7c2007-11-03 17:55:45 +00001698 filling = fill_buffer();
Brandon Low47eb5692007-11-05 03:11:58 +00001699 }
Brandon Low60d4e7c2007-11-03 17:55:45 +00001700 }
Brandon Low11a36612007-11-03 06:21:32 +00001701 }
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001702 }
1703}
1704
Michael Sevakis9b990bd2013-06-29 22:19:59 -04001705void INIT_ATTR buffering_init(void)
Steve Bavin73f98632008-03-26 08:57:25 +00001706{
Brandon Low14b6f432007-10-27 04:16:41 +00001707 mutex_init(&llist_mutex);
1708
Michael Sevakisc537d592011-04-27 03:08:23 +00001709 /* Thread should absolutely not respond to USB because if it waits first,
1710 then it cannot properly service the handles and leaks will happen -
1711 this is a worker thread and shouldn't need to care about any system
1712 notifications.
1713 ***
1714 Whoever is using buffering should be responsible enough to clear all
1715 the handles at the right time. */
1716 queue_init(&buffering_queue, false);
Michael Sevakis8cfbd362008-12-10 08:57:10 +00001717 buffering_thread_id = create_thread( buffering_thread, buffering_stack,
Brandon Low7104ad52007-10-27 04:29:46 +00001718 sizeof(buffering_stack), CREATE_THREAD_FROZEN,
Michael Sevakis8a6fd3f2008-03-29 23:21:19 +00001719 buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
Brandon Low14b6f432007-10-27 04:16:41 +00001720 IF_COP(, CPU));
Michael Sevakis27cf6772008-03-25 02:34:12 +00001721
1722 queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
Michael Sevakis8cfbd362008-12-10 08:57:10 +00001723 buffering_thread_id);
Brandon Low14b6f432007-10-27 04:16:41 +00001724}
1725
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001726/* Initialise the buffering subsystem */
Steve Bavin135cc752008-03-28 12:51:33 +00001727bool buffering_reset(char *buf, size_t buflen)
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001728{
Michael Sevakis0d902c82011-02-09 09:30:09 +00001729 /* Wraps of storage-aligned data must also be storage aligned,
1730 thus buf and buflen must be a aligned to an integer multiple of
1731 the storage alignment */
Michael Sevakisc537d592011-04-27 03:08:23 +00001732
Michael Sevakis0ebfb932012-05-21 02:18:46 -04001733 if (buf) {
1734 buflen -= MIN(buflen, GUARD_BUFSIZE);
Michael Sevakis89b05af2013-06-29 22:18:17 -04001735
Michael Sevakis0ebfb932012-05-21 02:18:46 -04001736 STORAGE_ALIGN_BUFFER(buf, buflen);
Michael Sevakisc537d592011-04-27 03:08:23 +00001737
Michael Sevakis0ebfb932012-05-21 02:18:46 -04001738 if (!buf || !buflen)
1739 return false;
1740 } else {
1741 buflen = 0;
1742 }
Michael Sevakis0d902c82011-02-09 09:30:09 +00001743
Michael Sevakis0ebfb932012-05-21 02:18:46 -04001744 send_event(BUFFER_EVENT_BUFFER_RESET, NULL);
1745
1746 /* If handles weren't closed above, just do it */
Michael Sevakis65c6a142017-04-13 18:53:17 -04001747 struct memory_handle *h;
1748 while ((h = HLIST_FIRST)) {
1749 bufclose(h->id);
1750 }
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001751
1752 buffer = buf;
Rafaël Carré2494afc2010-06-23 04:34:18 +00001753 buffer_len = buflen;
Nicolas Pennequin3e3c43c2007-10-25 21:27:45 +00001754 guard_buffer = buf + buflen;
1755
Michael Sevakis65c6a142017-04-13 18:53:17 -04001756