Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1 | /*************************************************************************** |
| 2 | * __________ __ ___. |
| 3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ |
| 4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / |
| 5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < |
| 6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ |
| 7 | * \/ \/ \/ \/ \/ |
| 8 | * $Id$ |
| 9 | * |
| 10 | * Copyright (C) 2007 Nicolas Pennequin |
| 11 | * |
Daniel Stenberg | 2acc0ac | 2008-06-28 18:10:04 +0000 | [diff] [blame] | 12 | * This program is free software; you can redistribute it and/or |
| 13 | * modify it under the terms of the GNU General Public License |
| 14 | * as published by the Free Software Foundation; either version 2 |
| 15 | * of the License, or (at your option) any later version. |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 16 | * |
| 17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY |
| 18 | * KIND, either express or implied. |
| 19 | * |
| 20 | ****************************************************************************/ |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 21 | #include "config.h" |
Michael Sevakis | f9d60e1 | 2014-04-02 21:03:30 -0400 | [diff] [blame] | 22 | #include <string.h> |
Michael Sevakis | 8375b69 | 2014-04-03 18:49:16 -0400 | [diff] [blame] | 23 | #include "strlcpy.h" |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 24 | #include "system.h" |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 25 | #include "storage.h" |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 26 | #include "thread.h" |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 27 | #include "kernel.h" |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 28 | #include "panic.h" |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 29 | #include "debug.h" |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 30 | #include "file.h" |
Jonathan Gordon | 71898e5 | 2008-10-16 10:38:03 +0000 | [diff] [blame] | 31 | #include "appevents.h" |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 32 | #include "metadata.h" |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 33 | #include "bmp.h" |
Andrew Mahone | 781421a | 2008-12-09 23:07:59 +0000 | [diff] [blame] | 34 | #ifdef HAVE_ALBUMART |
| 35 | #include "albumart.h" |
Andrew Mahone | 54e6eb3 | 2009-05-01 23:31:43 +0000 | [diff] [blame] | 36 | #include "jpeg_load.h" |
Thomas Martitz | f577a6a | 2011-02-09 20:13:13 +0000 | [diff] [blame] | 37 | #include "playback.h" |
Andrew Mahone | 781421a | 2008-12-09 23:07:59 +0000 | [diff] [blame] | 38 | #endif |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 39 | #include "buffering.h" |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 40 | #include "linked_list.h" |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 41 | |
| 42 | /* Define LOGF_ENABLE to enable logf output in this file */ |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 43 | /* #define LOGF_ENABLE */ |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 44 | #include "logf.h" |
| 45 | |
Michael Sevakis | c1a01be | 2017-12-08 13:01:25 -0500 | [diff] [blame] | 46 | #define BUF_MAX_HANDLES 384 |
| 47 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 48 | /* macros to enable logf for queues |
| 49 | logging on SYS_TIMEOUT can be disabled */ |
| 50 | #ifdef SIMULATOR |
| 51 | /* Define this for logf output of all queuing except SYS_TIMEOUT */ |
| 52 | #define BUFFERING_LOGQUEUES |
| 53 | /* Define this to logf SYS_TIMEOUT messages */ |
| 54 | /* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */ |
| 55 | #endif |
| 56 | |
| 57 | #ifdef BUFFERING_LOGQUEUES |
| 58 | #define LOGFQUEUE logf |
| 59 | #else |
| 60 | #define LOGFQUEUE(...) |
| 61 | #endif |
| 62 | |
| 63 | #ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT |
| 64 | #define LOGFQUEUE_SYS_TIMEOUT logf |
| 65 | #else |
| 66 | #define LOGFQUEUE_SYS_TIMEOUT(...) |
| 67 | #endif |
| 68 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 69 | #define GUARD_BUFSIZE (32*1024) |
| 70 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 71 | /* amount of data to read in one read() call */ |
Michael Sevakis | 9120c85 | 2008-03-29 20:52:56 +0000 | [diff] [blame] | 72 | #define BUFFERING_DEFAULT_FILECHUNK (1024*32) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 73 | |
Brandon Low | 31c1164 | 2007-11-04 19:01:02 +0000 | [diff] [blame] | 74 | #define BUF_HANDLE_MASK 0x7FFFFFFF |
| 75 | |
/* Per-handle property flags, stored in memory_handle.flags */
enum handle_flags
{
    H_CANWRAP   = 0x1, /* Handle data may wrap in buffer */
    H_ALLOCALL  = 0x2, /* All data must be allocated up front */
    H_FIXEDDATA = 0x4, /* Data is fixed in position */
};
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 82 | |
/* Header describing one buffered item. The struct lives inside the ring
   buffer itself, followed by the variable-length path and then (after
   alignment) by the handle's data area (see add_handle). */
struct memory_handle {
    struct lld_node hnode;  /* Handle list node (first!) */
    struct lld_node mrunode;/* MRU list node (second!) */
    size_t size;            /* Size of this structure + its auxiliary data */
    int id;                 /* A unique ID for the handle */
    enum data_type type;    /* Type of data buffered with this handle */
    uint8_t flags;          /* Handle property flags (enum handle_flags) */
    int8_t pinned;          /* Count of pinnings */
    int8_t signaled;        /* Stop any attempt at waiting to get the data */
    int fd;                 /* File descriptor to path (-1 if closed) */
    size_t data;            /* Start index of the handle's data buffer */
    size_t ridx;            /* Read pointer, relative to the main buffer */
    size_t widx;            /* Write pointer, relative to the main buffer */
    off_t filesize;         /* File total length (possibly trimmed at tail) */
    off_t start;            /* Offset at which we started reading the file */
    off_t pos;              /* Read position in file */
    off_t volatile end;     /* Offset at which we stopped reading the file */
    char path[];            /* Path if data originated in a file */
};
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 102 | |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 103 | /* Minimum allowed handle movement */ |
| 104 | #define MIN_MOVE_DELTA sizeof(struct memory_handle) |
| 105 | |
/* Payload for queue messages sent to the buffering thread: the target
   handle's ID plus one message-specific value. */
struct buf_message_data
{
    int handle_id;
    intptr_t data;
};
| 111 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 112 | static char *buffer; |
| 113 | static char *guard_buffer; |
| 114 | |
| 115 | static size_t buffer_len; |
| 116 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 117 | /* Configuration */ |
| 118 | static size_t conf_watermark = 0; /* Level to trigger filebuf fill */ |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 119 | static size_t high_watermark = 0; /* High watermark for rebuffer */ |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 120 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 121 | static struct lld_head handle_list; /* buffer-order handle list */ |
| 122 | static struct lld_head mru_cache; /* MRU-ordered list of handles */ |
| 123 | static int num_handles; /* number of handles in the lists */ |
Brandon Low | 9784f6b | 2007-11-03 22:06:56 +0000 | [diff] [blame] | 124 | static int base_handle_id; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 125 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 126 | /* Main lock for adding / removing handles */ |
| 127 | static struct mutex llist_mutex SHAREDBSS_ATTR; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 128 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 129 | #define HLIST_HANDLE(node) \ |
| 130 | ({ struct lld_node *__node = (node); \ |
| 131 | (struct memory_handle *)__node; }) |
| 132 | |
| 133 | #define HLIST_FIRST \ |
| 134 | HLIST_HANDLE(handle_list.head) |
| 135 | |
| 136 | #define HLIST_LAST \ |
| 137 | HLIST_HANDLE(handle_list.tail) |
| 138 | |
Michael Sevakis | 8be4074 | 2017-12-09 21:57:01 -0500 | [diff] [blame] | 139 | #define HLIST_PREV(h) \ |
| 140 | HLIST_HANDLE((h)->hnode.prev) |
| 141 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 142 | #define HLIST_NEXT(h) \ |
| 143 | HLIST_HANDLE((h)->hnode.next) |
| 144 | |
| 145 | #define MRU_HANDLE(m) \ |
| 146 | container_of((m), struct memory_handle, mrunode) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 147 | |
/* Aggregate accounting of all buffered data, kept as a single file-scope
   instance */
static struct data_counters
{
    size_t remaining; /* Amount of data needing to be buffered */
    size_t buffered;  /* Amount of data currently in the buffer */
    size_t useful;    /* Amount of data still useful to the user */
} data_counters;
| 154 | |
| 155 | |
| 156 | /* Messages available to communicate with the buffering thread */ |
enum
{
    /* Handle operations: */
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                            used in a low buffer situation. */
    Q_REBUFFER_HANDLE,   /* Request reset and rebuffering of a handle at a new
                            file starting position. */
    Q_CLOSE_HANDLE,      /* Request closing a handle */

    /* Configuration: */
    Q_START_FILL,        /* Request that the buffering thread initiate a buffer
                            fill at its earliest convenience */
    Q_HANDLE_ADDED,      /* Inform the buffering thread that a handle was added,
                            (which means the disk is spinning) */
};
| 171 | |
| 172 | /* Buffering thread */ |
Steve Bavin | 73f9863 | 2008-03-26 08:57:25 +0000 | [diff] [blame] | 173 | static void buffering_thread(void); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 174 | static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)]; |
| 175 | static const char buffering_thread_name[] = "buffering"; |
Michael Sevakis | 8cfbd36 | 2008-12-10 08:57:10 +0000 | [diff] [blame] | 176 | static unsigned int buffering_thread_id = 0; |
Michael Sevakis | b15aa47 | 2011-02-14 11:27:45 +0000 | [diff] [blame] | 177 | static struct event_queue buffering_queue SHAREDBSS_ATTR; |
| 178 | static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 179 | |
/* Close the descriptor in *fd_p if it is open, then mark it closed (-1).
   Safe to call on an already-closed (-1) slot. */
static void close_fd(int *fd_p)
{
    if (*fd_p < 0)
        return;

    close(*fd_p);
    *fd_p = -1;
}
Nicolas Pennequin | e24454f | 2007-11-26 21:13:08 +0000 | [diff] [blame] | 188 | |
Thomas Martitz | b11c819 | 2010-02-12 13:12:59 +0000 | [diff] [blame] | 189 | /* Ring buffer helper functions */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 190 | static inline void * ringbuf_ptr(uintptr_t p) |
| 191 | { |
| 192 | return buffer + p; |
| 193 | } |
Thomas Martitz | abb3dd4 | 2010-02-20 15:13:53 +0000 | [diff] [blame] | 194 | |
| 195 | static inline uintptr_t ringbuf_offset(const void *ptr) |
| 196 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 197 | return (uintptr_t)(ptr - (void *)buffer); |
Thomas Martitz | abb3dd4 | 2010-02-20 15:13:53 +0000 | [diff] [blame] | 198 | } |
| 199 | |
Thomas Martitz | b11c819 | 2010-02-12 13:12:59 +0000 | [diff] [blame] | 200 | /* Buffer pointer (p) plus value (v), wrapped if necessary */ |
| 201 | static inline uintptr_t ringbuf_add(uintptr_t p, size_t v) |
| 202 | { |
| 203 | uintptr_t res = p + v; |
| 204 | if (res >= buffer_len) |
| 205 | res -= buffer_len; /* wrap if necssary */ |
| 206 | return res; |
| 207 | } |
| 208 | |
Thomas Martitz | b11c819 | 2010-02-12 13:12:59 +0000 | [diff] [blame] | 209 | /* Buffer pointer (p) minus value (v), wrapped if necessary */ |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 210 | /* Interprets p == v as empty */ |
| 211 | static inline uintptr_t ringbuf_sub_empty(uintptr_t p, size_t v) |
Thomas Martitz | b11c819 | 2010-02-12 13:12:59 +0000 | [diff] [blame] | 212 | { |
| 213 | uintptr_t res = p; |
| 214 | if (p < v) |
| 215 | res += buffer_len; /* wrap */ |
Michael Sevakis | 89b05af | 2013-06-29 22:18:17 -0400 | [diff] [blame] | 216 | |
Thomas Martitz | b11c819 | 2010-02-12 13:12:59 +0000 | [diff] [blame] | 217 | return res - v; |
| 218 | } |
| 219 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 220 | /* Buffer pointer (p) minus value (v), wrapped if necessary */ |
| 221 | /* Interprets p == v as full */ |
| 222 | static inline uintptr_t ringbuf_sub_full(uintptr_t p, size_t v) |
| 223 | { |
| 224 | uintptr_t res = p; |
| 225 | if (p <= v) |
| 226 | res += buffer_len; /* wrap */ |
| 227 | |
| 228 | return res - v; |
| 229 | } |
| 230 | |
Thomas Martitz | b11c819 | 2010-02-12 13:12:59 +0000 | [diff] [blame] | 231 | /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */ |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 232 | /* Interprets p1 == p2 as empty */ |
| 233 | static inline ssize_t ringbuf_add_cross_empty(uintptr_t p1, size_t v, |
| 234 | uintptr_t p2) |
Thomas Martitz | b11c819 | 2010-02-12 13:12:59 +0000 | [diff] [blame] | 235 | { |
| 236 | ssize_t res = p1 + v - p2; |
| 237 | if (p1 >= p2) /* wrap if necessary */ |
| 238 | res -= buffer_len; |
| 239 | |
| 240 | return res; |
| 241 | } |
| 242 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 243 | /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */ |
| 244 | /* Interprets p1 == p2 as full */ |
| 245 | static inline ssize_t ringbuf_add_cross_full(uintptr_t p1, size_t v, |
| 246 | uintptr_t p2) |
| 247 | { |
| 248 | ssize_t res = p1 + v - p2; |
| 249 | if (p1 > p2) /* wrap if necessary */ |
| 250 | res -= buffer_len; |
| 251 | |
| 252 | return res; |
| 253 | } |
| 254 | |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 255 | /* Real buffer watermark */ |
| 256 | #define BUF_WATERMARK MIN(conf_watermark, high_watermark) |
| 257 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 258 | static size_t bytes_used(void) |
| 259 | { |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 260 | struct memory_handle *first = HLIST_FIRST; |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 261 | if (!first) { |
| 262 | return 0; |
| 263 | } |
| 264 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 265 | return ringbuf_sub_full(HLIST_LAST->widx, ringbuf_offset(first)); |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 266 | } |
| 267 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 268 | /* |
| 269 | LINKED LIST MANAGEMENT |
| 270 | ====================== |
| 271 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 272 | add_handle : Create a new handle |
| 273 | link_handle : Add a handle to the list |
| 274 | unlink_handle : Remove a handle from the list |
| 275 | find_handle : Get a handle pointer from an ID |
| 276 | move_handle : Move a handle in the buffer (with or without its data) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 277 | |
| 278 | These functions only handle the linked list structure. They don't touch the |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 279 | contents of the struct memory_handle headers. |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 280 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 281 | Doubly-linked list, not circular. |
| 282 | New handles are added at the tail. |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 283 | |
| 284 | num_handles = N |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 285 | NULL <- h0 <-> h1 <-> h2 -> ... <- hN-1 -> NULL |
| 286 | head=> --------^ ^ |
| 287 | tail=> -----------------------------------+ |
| 288 | |
| 289 | MRU cache is similar except new handles are added at the head and the most- |
| 290 | recently-accessed handle is always moved to the head (if not already there). |
| 291 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 292 | */ |
| 293 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 294 | static int next_handle_id(void) |
| 295 | { |
| 296 | static int cur_handle_id = 0; |
| 297 | |
| 298 | /* Wrap signed int is safe and 0 doesn't happen */ |
| 299 | int next_hid = (cur_handle_id + 1) & BUF_HANDLE_MASK; |
| 300 | if (next_hid == 0) |
| 301 | next_hid = 1; |
| 302 | |
| 303 | cur_handle_id = next_hid; |
| 304 | |
| 305 | return next_hid; |
| 306 | } |
| 307 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 308 | /* Adds the handle to the linked list */ |
| 309 | static void link_handle(struct memory_handle *h) |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 310 | { |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 311 | lld_insert_last(&handle_list, &h->hnode); |
| 312 | lld_insert_first(&mru_cache, &h->mrunode); |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 313 | num_handles++; |
| 314 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 315 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 316 | /* Delete a given memory handle from the linked list */ |
| 317 | static void unlink_handle(struct memory_handle *h) |
| 318 | { |
| 319 | lld_remove(&handle_list, &h->hnode); |
| 320 | lld_remove(&mru_cache, &h->mrunode); |
| 321 | num_handles--; |
| 322 | } |
| 323 | |
| 324 | /* Adjusts handle list pointers _before_ it's actually moved */ |
| 325 | static void adjust_handle_node(struct lld_head *list, |
| 326 | struct lld_node *srcnode, |
| 327 | struct lld_node *destnode) |
| 328 | { |
| 329 | if (srcnode->prev) { |
| 330 | srcnode->prev->next = destnode; |
| 331 | } else { |
| 332 | list->head = destnode; |
| 333 | } |
| 334 | |
| 335 | if (srcnode->next) { |
| 336 | srcnode->next->prev = destnode; |
| 337 | } else { |
| 338 | list->tail = destnode; |
| 339 | } |
| 340 | } |
| 341 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 342 | /* Add a new handle to the linked list and return it. It will have become the |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 343 | new current handle. |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 344 | flags contains information on how this may be allocated |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 345 | data_size must contain the size of what will be in the handle. |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 346 | widx_out points to variable to receive first available byte of data area |
Brandon Low | 94b133a | 2007-10-28 19:19:54 +0000 | [diff] [blame] | 347 | returns a valid memory handle if all conditions for allocation are met. |
| 348 | NULL if there memory_handle itself cannot be allocated or if the |
Michael Sevakis | b474d0d | 2011-02-13 10:44:13 +0000 | [diff] [blame] | 349 | data_size cannot be allocated and alloc_all is set. */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 350 | static struct memory_handle * |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 351 | add_handle(unsigned int flags, size_t data_size, const char *path, |
| 352 | size_t *data_out) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 353 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 354 | /* Gives each handle a unique id */ |
Brandon Low | a042c72 | 2007-11-03 02:54:34 +0000 | [diff] [blame] | 355 | if (num_handles >= BUF_MAX_HANDLES) |
| 356 | return NULL; |
| 357 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 358 | size_t ridx = 0, widx = 0; |
| 359 | off_t cur_total = 0; |
Michael Sevakis | b474d0d | 2011-02-13 10:44:13 +0000 | [diff] [blame] | 360 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 361 | struct memory_handle *first = HLIST_FIRST; |
| 362 | if (first) { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 363 | /* Buffer is not empty */ |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 364 | struct memory_handle *last = HLIST_LAST; |
| 365 | ridx = ringbuf_offset(first); |
| 366 | widx = last->data; |
| 367 | cur_total = last->filesize - last->start; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 368 | } |
| 369 | |
| 370 | if (cur_total > 0) { |
Nicolas Pennequin | 4ff2f9f | 2007-10-30 14:11:03 +0000 | [diff] [blame] | 371 | /* the current handle hasn't finished buffering. We can only add |
| 372 | a new one if there is already enough free space to finish |
| 373 | the buffering. */ |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 374 | if (ringbuf_add_cross_full(widx, cur_total, ridx) > 0) { |
Michael Sevakis | 0fde635 | 2011-02-14 02:14:26 +0000 | [diff] [blame] | 375 | /* Not enough space to finish allocation */ |
Nicolas Pennequin | 4ff2f9f | 2007-10-30 14:11:03 +0000 | [diff] [blame] | 376 | return NULL; |
| 377 | } else { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 378 | /* Apply all the needed reserve */ |
| 379 | widx = ringbuf_add(widx, cur_total); |
Nicolas Pennequin | 4ff2f9f | 2007-10-30 14:11:03 +0000 | [diff] [blame] | 380 | } |
| 381 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 382 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 383 | /* Align to align size up */ |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 384 | size_t pathsize = path ? strlen(path) + 1 : 0; |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 385 | size_t adjust = ALIGN_UP(widx, alignof(struct memory_handle)) - widx; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 386 | size_t index = ringbuf_add(widx, adjust); |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 387 | size_t handlesize = ALIGN_UP(sizeof(struct memory_handle) + pathsize, |
| 388 | alignof(struct memory_handle)); |
| 389 | size_t len = handlesize + data_size; |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 390 | |
| 391 | /* First, will the handle wrap? */ |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 392 | /* If the handle would wrap, move to the beginning of the buffer, |
Antonius Hellmann | 0055f13 | 2009-02-22 10:12:34 +0000 | [diff] [blame] | 393 | * or if the data must not but would wrap, move it to the beginning */ |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 394 | if (index + handlesize > buffer_len || |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 395 | (!(flags & H_CANWRAP) && index + len > buffer_len)) { |
| 396 | index = 0; |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 397 | } |
| 398 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 399 | /* How far we shifted index to align things, must be < buffer_len */ |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 400 | size_t shift = ringbuf_sub_empty(index, widx); |
Nicolas Pennequin | 7807279 | 2007-10-28 15:54:10 +0000 | [diff] [blame] | 401 | |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 402 | /* How much space are we short in the actual ring buffer? */ |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 403 | ssize_t overlap = first ? |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 404 | ringbuf_add_cross_full(widx, shift + len, ridx) : |
| 405 | ringbuf_add_cross_empty(widx, shift + len, ridx); |
| 406 | |
| 407 | if (overlap > 0 && |
| 408 | ((flags & H_ALLOCALL) || (size_t)overlap > data_size)) { |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 409 | /* Not enough space for required allocations */ |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 410 | return NULL; |
| 411 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 412 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 413 | /* There is enough space for the required data, initialize the struct */ |
| 414 | struct memory_handle *h = ringbuf_ptr(index); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 415 | |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 416 | h->size = handlesize; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 417 | h->id = next_handle_id(); |
| 418 | h->flags = flags; |
| 419 | h->pinned = 0; /* Can be moved */ |
| 420 | h->signaled = 0; /* Data can be waited for */ |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 421 | |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 422 | /* Save the provided path */ |
| 423 | memcpy(h->path, path, pathsize); |
| 424 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 425 | /* Return the start of the data area */ |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 426 | *data_out = ringbuf_add(index, handlesize); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 427 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 428 | return h; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 429 | } |
| 430 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 431 | /* Return a pointer to the memory handle of given ID. |
| 432 | NULL if the handle wasn't found */ |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 433 | static struct memory_handle * find_handle(int handle_id) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 434 | { |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 435 | struct memory_handle *h = NULL; |
| 436 | struct lld_node *mru = mru_cache.head; |
| 437 | struct lld_node *m = mru; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 438 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 439 | while (m && MRU_HANDLE(m)->id != handle_id) { |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 440 | m = m->next; |
| 441 | } |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 442 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 443 | if (m) { |
| 444 | if (m != mru) { |
| 445 | lld_remove(&mru_cache, m); |
| 446 | lld_insert_first(&mru_cache, m); |
| 447 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 448 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 449 | h = MRU_HANDLE(m); |
| 450 | } |
| 451 | |
| 452 | return h; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 453 | } |
| 454 | |
Brandon Low | 4feab10 | 2007-10-28 20:18:59 +0000 | [diff] [blame] | 455 | /* Move a memory handle and data_size of its data delta bytes along the buffer. |
| 456 | delta maximum bytes available to move the handle. If the move is performed |
| 457 | it is set to the actual distance moved. |
| 458 | data_size is the amount of data to move along with the struct. |
Yoshihisa Uchida | 9c13b6e | 2010-05-24 10:49:36 +0000 | [diff] [blame] | 459 | returns true if the move is successful and false if the handle is NULL, |
| 460 | the move would be less than the size of a memory_handle after |
| 461 | correcting for wraps or if the handle is not found in the linked |
| 462 | list for adjustment. This function has no side effects if false |
| 463 | is returned. */ |
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size)
{
    struct memory_handle *src;

    /* Reject a NULL handle pointer or a NULL handle up front */
    if (h == NULL || (src = *h) == NULL)
        return false;

    /* Total bytes to relocate: the handle struct itself plus its payload */
    size_t size_to_move = src->size + data_size;

    /* Align to align size down */
    size_t final_delta = *delta;
    final_delta = ALIGN_DOWN(final_delta, alignof(struct memory_handle));
    if (final_delta < MIN_MOVE_DELTA) {
        /* It's not legal to move less than MIN_MOVE_DELTA */
        return false;
    }

    /* Source/destination offsets within the ring buffer; 'overlap' is how
       far the region would extend past the buffer end at each position
       (positive means part of it wraps around to the start) */
    uintptr_t oldpos = ringbuf_offset(src);
    uintptr_t newpos = ringbuf_add(oldpos, final_delta);
    intptr_t overlap = ringbuf_add_cross_full(newpos, size_to_move, buffer_len);
    intptr_t overlap_old = ringbuf_add_cross_full(oldpos, size_to_move, buffer_len);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        ssize_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if (!(src->flags & H_CANWRAP)) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out. This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
        } else if ((uintptr_t)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta if the struct size is
             * aligned and the buffer is aligned */
            correction = overlap - data_size;
        }
        if (correction) {
            /* Align correction to align size up */
            correction = ALIGN_UP(correction, alignof(struct memory_handle));
            if (final_delta < correction + MIN_MOVE_DELTA) {
                /* Delta cannot end up less than MIN_MOVE_DELTA */
                return false;
            }
            /* Pull the destination back so the struct stays unwrapped */
            newpos -= correction;
            overlap -= correction;/* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    struct memory_handle *dest = ringbuf_ptr(newpos);

    /* Adjust list pointers */
    adjust_handle_node(&handle_list, &src->hnode, &dest->hnode);
    adjust_handle_node(&mru_cache, &src->mrunode, &dest->mrunode);

    /* x = handle(s) following this one...
     * ...if last handle, unmoveable if metadata, only shrinkable if audio.
     * In other words, no legal move can be made that would have the src head
     * and dest tail of the data overlap itself. These facts reduce the
     * problem to four essential permutations.
     *
     * movement: always "clockwise" >>>>
     *
     * (src nowrap, dest nowrap)
     * |0123   x  |
     * |  0123x   | etc...
     * move: "0123"
     *
     * (src nowrap, dest wrap)
     * |     x0123|
     * |23x    01|
     * move: "23", "01"
     *
     * (src wrap, dest nowrap)
     * |23   x01|
     * | 0123x  |
     * move: "23", "01"
     *
     * (src wrap, dest wrap)
     * |23   x  01|
     * |123x    0|
     * move: "23", "1", "0"
     */
    if (overlap_old > 0) {
        /* Move over already wrapped data by the final delta */
        memmove(ringbuf_ptr(final_delta), ringbuf_ptr(0), overlap_old);
        /* If the destination no longer wraps, that fragment is fully moved */
        if (overlap <= 0)
            size_to_move -= overlap_old;
    }

    if (overlap > 0) {
        /* Move data that now wraps to the beginning */
        size_to_move -= overlap;
        memmove(ringbuf_ptr(0), SKIPBYTES(src, size_to_move),
                overlap_old > 0 ? final_delta : (size_t)overlap);
    }

    /* Move leading fragment containing handle struct */
    memmove(dest, src, size_to_move);

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    return true;
}
| 571 | |
| 572 | |
| 573 | /* |
| 574 | BUFFER SPACE MANAGEMENT |
| 575 | ======================= |
| 576 | |
Nicolas Pennequin | 0c7b26d | 2007-11-05 21:11:54 +0000 | [diff] [blame] | 577 | update_data_counters: Updates the values in data_counters |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 578 | buffer_handle : Buffer data for a handle |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 579 | rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data |
| 580 | shrink_handle : Free buffer space by moving a handle |
| 581 | fill_buffer : Call buffer_handle for all handles that have data to buffer |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 582 | |
| 583 | These functions are used by the buffering thread to manage buffer space. |
| 584 | */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 585 | |
| 586 | static int update_data_counters(struct data_counters *dc) |
Michael Sevakis | 64647f3 | 2011-03-02 04:41:29 +0000 | [diff] [blame] | 587 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 588 | size_t buffered = 0; |
Nicolas Pennequin | 0c7b26d | 2007-11-05 21:11:54 +0000 | [diff] [blame] | 589 | size_t remaining = 0; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 590 | size_t useful = 0; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 591 | |
| 592 | if (dc == NULL) |
| 593 | dc = &data_counters; |
| 594 | |
Nicolas Pennequin | f7e0e6b | 2008-05-13 20:51:06 +0000 | [diff] [blame] | 595 | mutex_lock(&llist_mutex); |
| 596 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 597 | int num = num_handles; |
| 598 | struct memory_handle *m = find_handle(base_handle_id); |
| 599 | bool is_useful = m == NULL; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 600 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 601 | for (m = HLIST_FIRST; m; m = HLIST_NEXT(m)) |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 602 | { |
| 603 | off_t pos = m->pos; |
| 604 | off_t end = m->end; |
| 605 | |
| 606 | buffered += end - m->start; |
| 607 | remaining += m->filesize - end; |
Nicolas Pennequin | 0c7b26d | 2007-11-05 21:11:54 +0000 | [diff] [blame] | 608 | |
| 609 | if (m->id == base_handle_id) |
| 610 | is_useful = true; |
| 611 | |
| 612 | if (is_useful) |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 613 | useful += end - pos; |
Nicolas Pennequin | 0c7b26d | 2007-11-05 21:11:54 +0000 | [diff] [blame] | 614 | } |
| 615 | |
Nicolas Pennequin | f7e0e6b | 2008-05-13 20:51:06 +0000 | [diff] [blame] | 616 | mutex_unlock(&llist_mutex); |
| 617 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 618 | dc->buffered = buffered; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 619 | dc->remaining = remaining; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 620 | dc->useful = useful; |
| 621 | |
| 622 | return num; |
Nicolas Pennequin | 0c7b26d | 2007-11-05 21:11:54 +0000 | [diff] [blame] | 623 | } |
| 624 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 625 | /* Q_BUFFER_HANDLE event and buffer data for the given handle. |
Brandon Low | 60d4e7c | 2007-11-03 17:55:45 +0000 | [diff] [blame] | 626 | Return whether or not the buffering should continue explicitly. */ |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 627 | static bool buffer_handle(int handle_id, size_t to_buffer) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 628 | { |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 629 | logf("buffer_handle(%d, %lu)", handle_id, (unsigned long)to_buffer); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 630 | struct memory_handle *h = find_handle(handle_id); |
| 631 | if (!h) |
Brandon Low | 7b74dd7 | 2007-11-03 21:57:27 +0000 | [diff] [blame] | 632 | return true; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 633 | |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 634 | logf(" type: %d", (int)h->type); |
| 635 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 636 | if (h->end >= h->filesize) { |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 637 | /* nothing left to buffer */ |
Brandon Low | 7b74dd7 | 2007-11-03 21:57:27 +0000 | [diff] [blame] | 638 | return true; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 639 | } |
| 640 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 641 | if (h->fd < 0) { /* file closed, reopen */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 642 | if (h->path[0] != '\0') |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 643 | h->fd = open(h->path, O_RDONLY); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 644 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 645 | if (h->fd < 0) { |
Brandon Low | ebc981b | 2007-11-04 05:57:48 +0000 | [diff] [blame] | 646 | /* could not open the file, truncate it where it is */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 647 | h->filesize = h->end; |
Brandon Low | 7b74dd7 | 2007-11-03 21:57:27 +0000 | [diff] [blame] | 648 | return true; |
Brandon Low | ebc981b | 2007-11-04 05:57:48 +0000 | [diff] [blame] | 649 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 650 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 651 | if (h->start) |
| 652 | lseek(h->fd, h->start, SEEK_SET); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 653 | } |
| 654 | |
| 655 | trigger_cpu_boost(); |
| 656 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 657 | if (h->type == TYPE_ID3) { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 658 | if (!get_metadata(ringbuf_ptr(h->data), h->fd, h->path)) { |
Nicolas Pennequin | de026dc | 2008-04-16 16:18:05 +0000 | [diff] [blame] | 659 | /* metadata parsing failed: clear the buffer. */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 660 | wipe_mp3entry(ringbuf_ptr(h->data)); |
Nicolas Pennequin | de026dc | 2008-04-16 16:18:05 +0000 | [diff] [blame] | 661 | } |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 662 | close_fd(&h->fd); |
| 663 | h->widx = ringbuf_add(h->data, h->filesize); |
| 664 | h->end = h->filesize; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 665 | send_event(BUFFER_EVENT_FINISHED, &handle_id); |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 666 | return true; |
| 667 | } |
| 668 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 669 | bool stop = false; |
| 670 | while (h->end < h->filesize && !stop) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 671 | { |
| 672 | /* max amount to copy */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 673 | size_t widx = h->widx; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 674 | ssize_t copy_n = h->filesize - h->end; |
| 675 | copy_n = MIN(copy_n, BUFFERING_DEFAULT_FILECHUNK); |
| 676 | copy_n = MIN(copy_n, (off_t)(buffer_len - widx)); |
| 677 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 678 | mutex_lock(&llist_mutex); |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 679 | |
| 680 | /* read only up to available space and stop if it would overwrite |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 681 | the next handle; stop one byte early to avoid empty/full alias |
| 682 | (or else do more complicated arithmetic to differentiate) */ |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 683 | size_t next = ringbuf_offset(HLIST_NEXT(h) ?: HLIST_FIRST); |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 684 | ssize_t overlap = ringbuf_add_cross_full(widx, copy_n, next); |
| 685 | |
| 686 | mutex_unlock(&llist_mutex); |
Thomas Martitz | df79ac2 | 2010-02-18 15:38:30 +0000 | [diff] [blame] | 687 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 688 | if (overlap > 0) { |
Thomas Martitz | df79ac2 | 2010-02-18 15:38:30 +0000 | [diff] [blame] | 689 | stop = true; |
Thomas Martitz | df79ac2 | 2010-02-18 15:38:30 +0000 | [diff] [blame] | 690 | copy_n -= overlap; |
| 691 | } |
Brandon Low | ff9cdb4 | 2007-11-01 05:12:55 +0000 | [diff] [blame] | 692 | |
Michael Sevakis | 88d91fa | 2011-02-10 05:56:21 +0000 | [diff] [blame] | 693 | if (copy_n <= 0) |
| 694 | return false; /* no space for read */ |
| 695 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 696 | /* rc is the actual amount read */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 697 | ssize_t rc = read(h->fd, ringbuf_ptr(widx), copy_n); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 698 | |
Michael Sevakis | 4823b2b | 2011-03-21 15:57:07 +0000 | [diff] [blame] | 699 | if (rc <= 0) { |
Brandon Low | 60d4e7c | 2007-11-03 17:55:45 +0000 | [diff] [blame] | 700 | /* Some kind of filesystem error, maybe recoverable if not codec */ |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 701 | if (h->type == TYPE_CODEC) { |
| 702 | logf("Partial codec"); |
| 703 | break; |
| 704 | } |
| 705 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 706 | logf("File ended %lu bytes early\n", |
| 707 | (unsigned long)(h->filesize - h->end)); |
| 708 | h->filesize = h->end; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 709 | break; |
| 710 | } |
| 711 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 712 | /* Advance buffer and make data available to users */ |
| 713 | h->widx = ringbuf_add(widx, rc); |
| 714 | h->end += rc; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 715 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 716 | yield(); |
Michael Sevakis | 398d9fd | 2007-11-20 22:45:46 +0000 | [diff] [blame] | 717 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 718 | if (to_buffer == 0) { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 719 | /* Normal buffering - check queue */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 720 | if (!queue_empty(&buffering_queue)) |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 721 | break; |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 722 | } else { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 723 | if (to_buffer <= (size_t)rc) |
| 724 | break; /* Done */ |
| 725 | to_buffer -= rc; |
| 726 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 727 | } |
| 728 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 729 | if (h->end >= h->filesize) { |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 730 | /* finished buffering the file */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 731 | close_fd(&h->fd); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 732 | send_event(BUFFER_EVENT_FINISHED, &handle_id); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 733 | } |
| 734 | |
Thomas Martitz | df79ac2 | 2010-02-18 15:38:30 +0000 | [diff] [blame] | 735 | return !stop; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 736 | } |
| 737 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 738 | /* Close the specified handle id and free its allocation. */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 739 | /* Q_CLOSE_HANDLE */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 740 | static bool close_handle(int handle_id) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 741 | { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 742 | mutex_lock(&llist_mutex); |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 743 | struct memory_handle *h = find_handle(handle_id); |
Brandon Low | 31c1164 | 2007-11-04 19:01:02 +0000 | [diff] [blame] | 744 | |
| 745 | /* If the handle is not found, it is closed */ |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 746 | if (h) { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 747 | close_fd(&h->fd); |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 748 | unlink_handle(h); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 749 | } |
| 750 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 751 | mutex_unlock(&llist_mutex); |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 752 | return true; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 753 | } |
| 754 | |
| 755 | /* Free buffer space by moving the handle struct right before the useful |
| 756 | part of its data buffer or by moving all the data. */ |
static struct memory_handle * shrink_handle(struct memory_handle *h)
{
    if (!h)
        return NULL;

    if (h->type == TYPE_PACKET_AUDIO) {
        /* only move the handle struct */
        /* data is pinned by default - if we start moving packet audio,
           the semantics will determine whether or not data is movable
           but the handle will remain movable in either case */
        size_t delta = ringbuf_sub_empty(h->ridx, h->data);

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, 0))
            return h;

        /* Struct moved forward past consumed data; advance data start too */
        h->data = ringbuf_add(h->data, delta);
        h->start += delta;
    } else {
        /* metadata handle: we can move all of it */
        if (h->pinned || !HLIST_NEXT(h))
            return h; /* Pinned, last handle */

        /* Slide struct + data forward into the gap before the next handle */
        size_t data_size = h->filesize - h->start;
        uintptr_t handle_distance =
            ringbuf_sub_empty(ringbuf_offset(HLIST_NEXT(h)), h->data);
        size_t delta = handle_distance - data_size;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, data_size))
            return h;

        /* Everything moved by delta; shift all buffer indices to match */
        size_t olddata = h->data;
        h->data = ringbuf_add(h->data, delta);
        h->ridx = ringbuf_add(h->ridx, delta);
        h->widx = ringbuf_add(h->widx, delta);

        switch (h->type)
        {
        case TYPE_ID3:
            if (h->filesize != sizeof(struct mp3entry))
                break;
            /* when moving an mp3entry we need to readjust its pointers */
            adjust_mp3entry(ringbuf_ptr(h->data), ringbuf_ptr(h->data),
                            ringbuf_ptr(olddata));
            break;

        case TYPE_BITMAP:
            /* adjust the bitmap's pointer */
            ((struct bitmap *)ringbuf_ptr(h->data))->data =
                ringbuf_ptr(h->data + sizeof(struct bitmap));
            break;

        default:
            break;
        }
    }

    /* Return the (possibly relocated) handle */
    return h;
}
| 817 | |
| 818 | /* Fill the buffer by buffering as much data as possible for handles that still |
Brandon Low | 11a3661 | 2007-11-03 06:21:32 +0000 | [diff] [blame] | 819 | have data left to buffer |
| 820 | Return whether or not to continue filling after this */ |
| 821 | static bool fill_buffer(void) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 822 | { |
| 823 | logf("fill_buffer()"); |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 824 | mutex_lock(&llist_mutex); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 825 | |
Michael Sevakis | 6ee3b6f | 2017-12-09 23:26:05 -0500 | [diff] [blame] | 826 | struct memory_handle *m = shrink_handle(HLIST_FIRST); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 827 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 828 | mutex_unlock(&llist_mutex); |
| 829 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 830 | while (queue_empty(&buffering_queue) && m) { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 831 | if (m->end < m->filesize && !buffer_handle(m->id, 0)) { |
| 832 | m = NULL; |
| 833 | break; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 834 | } |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 835 | m = HLIST_NEXT(m); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 836 | } |
| 837 | |
Brandon Low | 11a3661 | 2007-11-03 06:21:32 +0000 | [diff] [blame] | 838 | if (m) { |
| 839 | return true; |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 840 | } else { |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 841 | /* only spin the disk down if the filling wasn't interrupted by an |
| 842 | event arriving in the queue. */ |
Frank Gevaerts | 2f8a008 | 2008-11-01 16:14:28 +0000 | [diff] [blame] | 843 | storage_sleep(); |
Brandon Low | 11a3661 | 2007-11-03 06:21:32 +0000 | [diff] [blame] | 844 | return false; |
| 845 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 846 | } |
| 847 | |
Nicolas Pennequin | a384fb6 | 2007-11-11 13:15:36 +0000 | [diff] [blame] | 848 | #ifdef HAVE_ALBUMART |
Nicolas Pennequin | 9d4bed7 | 2007-11-11 12:29:37 +0000 | [diff] [blame] | 849 | /* Given a file descriptor to a bitmap file, write the bitmap data to the |
| 850 | buffer, with a struct bitmap and the actual data immediately following. |
| 851 | Return value is the total size (struct + data). */ |
static int load_image(int fd, const char *path,
                      struct bufopen_bitmap_data *data,
                      size_t bufidx)
{
    int rc;
    /* Bitmap header lives at bufidx; pixel data follows immediately */
    struct bitmap *bmp = ringbuf_ptr(bufidx);
    struct dim *dim = data->dim;
    struct mp3_albumart *aa = data->embedded_albumart;

    /* get the desired image size */
    bmp->width = dim->width, bmp->height = dim->height;
    /* FIXME: alignment may be needed for the data buffer. */
    bmp->data = ringbuf_ptr(bufidx + sizeof(struct bitmap));

#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
    bmp->maskdata = NULL;
#endif
    /* Space available for decoded pixels: bounded both by total free buffer
       space and by the distance from bufidx to the end of the buffer */
    int free = (int)MIN(buffer_len - bytes_used(), buffer_len - bufidx)
               - sizeof(struct bitmap);

#ifdef HAVE_JPEG
    if (aa != NULL) {
        /* Embedded album art: decode the JPEG region inside the audio file */
        lseek(fd, aa->pos, SEEK_SET);
        rc = clip_jpeg_fd(fd, aa->size, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    }
    /* NOTE(review): suffix check assumes path is at least 4 chars long and
       relies on extension alone — confirm callers guarantee this */
    else if (strcmp(path + strlen(path) - 4, ".bmp"))
        rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    else
#endif
        rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                         FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);

    /* Total size consumed in the buffer: header + pixel data on success */
    return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
    /* Unreachable, but marks 'path' as used when HAVE_JPEG is undefined */
    (void)path;
}
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 889 | #endif /* HAVE_ALBUMART */ |
Nicolas Pennequin | 9d4bed7 | 2007-11-11 12:29:37 +0000 | [diff] [blame] | 890 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 891 | |
| 892 | /* |
| 893 | MAIN BUFFERING API CALLS |
| 894 | ======================== |
| 895 | |
| 896 | bufopen : Request the opening of a new handle for a file |
| 897 | bufalloc : Open a new handle for data other than a file. |
| 898 | bufclose : Close an open handle |
| 899 | bufseek : Set the read pointer in a handle |
| 900 | bufadvance : Move the read pointer in a handle |
| 901 | bufread : Copy data from a handle into a given buffer |
| 902 | bufgetdata : Give a pointer to the handle's data |
| 903 | |
| 904 | These functions are exported, to allow interaction with the buffer. |
| 905 | They take care of the content of the structs, and rely on the linked list |
| 906 | management functions for all the actual handle management work. |
| 907 | */ |
| 908 | |
| 909 | |
| 910 | /* Reserve space in the buffer for a file. |
| 911 | filename: name of the file to open |
| 912 | offset: offset at which to start buffering the file, useful when the first |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 913 | offset bytes of the file aren't needed. |
Thomas Martitz | e9c1018 | 2009-10-16 19:14:41 +0000 | [diff] [blame] | 914 | type: one of the data types supported (audio, image, cuesheet, others |
| 915 | user_data: user data passed possibly passed in subcalls specific to a |
| 916 | data_type (only used for image (albumart) buffering so far ) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 917 | return value: <0 if the file cannot be opened, or one file already |
| 918 | queued to be opened, otherwise the handle for the file in the buffer |
| 919 | */ |
Michael Sevakis | dfff938 | 2017-12-17 16:12:10 -0500 | [diff] [blame] | 920 | int bufopen(const char *file, off_t offset, enum data_type type, |
Thomas Martitz | e9c1018 | 2009-10-16 19:14:41 +0000 | [diff] [blame] | 921 | void *user_data) |
Nicolas Pennequin | 4fd2774 | 2008-03-29 14:09:14 +0000 | [diff] [blame] | 922 | { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 923 | int handle_id = ERR_BUFFER_FULL; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 924 | size_t data; |
| 925 | struct memory_handle *h; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 926 | |
| 927 | /* No buffer refs until after the mutex_lock call! */ |
| 928 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 929 | if (type == TYPE_ID3) { |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 930 | /* ID3 case: allocate space, init the handle and return. */ |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 931 | mutex_lock(&llist_mutex); |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 932 | |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 933 | h = add_handle(H_ALLOCALL, sizeof(struct mp3entry), file, &data); |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 934 | |
| 935 | if (h) { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 936 | handle_id = h->id; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 937 | |
| 938 | h->type = type; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 939 | h->fd = -1; |
| 940 | h->data = data; |
| 941 | h->ridx = data; |
| 942 | h->widx = data; |
| 943 | h->filesize = sizeof(struct mp3entry); |
| 944 | h->start = 0; |
| 945 | h->pos = 0; |
| 946 | h->end = 0; |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 947 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 948 | link_handle(h); |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 949 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 950 | /* Inform the buffering thread that we added a handle */ |
| 951 | LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id); |
| 952 | queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id); |
| 953 | } |
Nicolas Pennequin | 3625be4 | 2008-12-02 21:07:12 +0000 | [diff] [blame] | 954 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 955 | mutex_unlock(&llist_mutex); |
| 956 | return handle_id; |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 957 | } |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 958 | else if (type == TYPE_UNKNOWN) |
| 959 | return ERR_UNSUPPORTED_TYPE; |
Thomas Martitz | 86cab2e | 2011-02-09 20:27:23 +0000 | [diff] [blame] | 960 | #ifdef APPLICATION |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 961 | /* Loading code from memory is not supported in application builds */ |
Thomas Martitz | 86cab2e | 2011-02-09 20:27:23 +0000 | [diff] [blame] | 962 | else if (type == TYPE_CODEC) |
| 963 | return ERR_UNSUPPORTED_TYPE; |
| 964 | #endif |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 965 | /* Other cases: there is a little more work. */ |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 966 | int fd = open(file, O_RDONLY); |
| 967 | if (fd < 0) |
Nicolas Pennequin | d400e23 | 2007-10-29 14:15:59 +0000 | [diff] [blame] | 968 | return ERR_FILE_ERROR; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 969 | |
Thomas Martitz | f577a6a | 2011-02-09 20:13:13 +0000 | [diff] [blame] | 970 | size_t size = 0; |
| 971 | #ifdef HAVE_ALBUMART |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 972 | if (type == TYPE_BITMAP) { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 973 | /* If albumart is embedded, the complete file is not buffered, |
Thomas Martitz | f577a6a | 2011-02-09 20:13:13 +0000 | [diff] [blame] | 974 | * but only the jpeg part; filesize() would be wrong */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 975 | struct bufopen_bitmap_data *aa = user_data; |
Thomas Martitz | f577a6a | 2011-02-09 20:13:13 +0000 | [diff] [blame] | 976 | if (aa->embedded_albumart) |
| 977 | size = aa->embedded_albumart->size; |
| 978 | } |
| 979 | #endif |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 980 | |
Thomas Martitz | f577a6a | 2011-02-09 20:13:13 +0000 | [diff] [blame] | 981 | if (size == 0) |
| 982 | size = filesize(fd); |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 983 | |
| 984 | unsigned int hflags = 0; |
| 985 | if (type == TYPE_PACKET_AUDIO || type == TYPE_CODEC) |
| 986 | hflags = H_CANWRAP; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 987 | |
Nicolas Pennequin | 4e2de44 | 2008-04-14 16:17:47 +0000 | [diff] [blame] | 988 | size_t adjusted_offset = offset; |
Steve Bavin | c9df8fd | 2008-03-28 11:24:24 +0000 | [diff] [blame] | 989 | if (adjusted_offset > size) |
| 990 | adjusted_offset = 0; |
Nicolas Pennequin | 659fe5a | 2008-01-08 23:48:51 +0000 | [diff] [blame] | 991 | |
Torne Wuff | c4e051b | 2010-02-01 17:16:39 +0000 | [diff] [blame] | 992 | /* Reserve extra space because alignment can move data forward */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 993 | size_t padded_size = STORAGE_PAD(size - adjusted_offset); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 994 | |
| 995 | mutex_lock(&llist_mutex); |
| 996 | |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 997 | h = add_handle(hflags, padded_size, file, &data); |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 998 | if (!h) { |
Thomas Martitz | df79ac2 | 2010-02-18 15:38:30 +0000 | [diff] [blame] | 999 | DEBUGF("%s(): failed to add handle\n", __func__); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1000 | mutex_unlock(&llist_mutex); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1001 | close(fd); |
Nicolas Pennequin | d400e23 | 2007-10-29 14:15:59 +0000 | [diff] [blame] | 1002 | return ERR_BUFFER_FULL; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1003 | } |
| 1004 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1005 | handle_id = h->id; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1006 | |
| 1007 | h->type = type; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1008 | h->fd = -1; |
Torne Wuff | c4e051b | 2010-02-01 17:16:39 +0000 | [diff] [blame] | 1009 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1010 | #ifdef STORAGE_WANTS_ALIGN |
Torne Wuff | c4e051b | 2010-02-01 17:16:39 +0000 | [diff] [blame] | 1011 | /* Don't bother to storage align bitmaps because they are not |
| 1012 | * loaded directly into the buffer. |
| 1013 | */ |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1014 | if (type != TYPE_BITMAP) { |
Torne Wuff | c4e051b | 2010-02-01 17:16:39 +0000 | [diff] [blame] | 1015 | /* Align to desired storage alignment */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1016 | size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)adjusted_offset - |
| 1017 | (uintptr_t)ringbuf_ptr(data)); |
| 1018 | data = ringbuf_add(data, alignment_pad); |
Torne Wuff | c4e051b | 2010-02-01 17:16:39 +0000 | [diff] [blame] | 1019 | } |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1020 | #endif /* STORAGE_WANTS_ALIGN */ |
Torne Wuff | c4e051b | 2010-02-01 17:16:39 +0000 | [diff] [blame] | 1021 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1022 | h->data = data; |
| 1023 | h->ridx = data; |
| 1024 | h->start = adjusted_offset; |
| 1025 | h->pos = adjusted_offset; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1026 | |
Nicolas Pennequin | 5e0e239 | 2007-11-11 13:28:24 +0000 | [diff] [blame] | 1027 | #ifdef HAVE_ALBUMART |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1028 | if (type == TYPE_BITMAP) { |
Nicolas Pennequin | 9d4bed7 | 2007-11-11 12:29:37 +0000 | [diff] [blame] | 1029 | /* Bitmap file: we load the data instead of the file */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1030 | int rc = load_image(fd, file, user_data, data); |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1031 | if (rc <= 0) { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1032 | handle_id = ERR_FILE_ERROR; |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1033 | } else { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1034 | data = ringbuf_add(data, rc); |
| 1035 | size = rc; |
| 1036 | adjusted_offset = rc; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1037 | } |
Nicolas Pennequin | 9d4bed7 | 2007-11-11 12:29:37 +0000 | [diff] [blame] | 1038 | } |
Nicolas Pennequin | 87e5b11 | 2007-11-12 15:16:41 +0000 | [diff] [blame] | 1039 | else |
Nicolas Pennequin | 9d4bed7 | 2007-11-11 12:29:37 +0000 | [diff] [blame] | 1040 | #endif |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1041 | if (type == TYPE_CUESHEET) { |
| 1042 | h->fd = fd; |
| 1043 | } |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1044 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1045 | if (handle_id >= 0) { |
| 1046 | h->widx = data; |
Nicolas Pennequin | 87e5b11 | 2007-11-12 15:16:41 +0000 | [diff] [blame] | 1047 | h->filesize = size; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1048 | h->end = adjusted_offset; |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 1049 | link_handle(h); |
Nicolas Pennequin | 87e5b11 | 2007-11-12 15:16:41 +0000 | [diff] [blame] | 1050 | } |
Nicolas Pennequin | 9d4bed7 | 2007-11-11 12:29:37 +0000 | [diff] [blame] | 1051 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1052 | mutex_unlock(&llist_mutex); |
| 1053 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1054 | if (type == TYPE_CUESHEET) { |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 1055 | /* Immediately start buffering those */ |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1056 | LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id); |
| 1057 | queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id); |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1058 | } else { |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 1059 | /* Other types will get buffered in the course of normal operations */ |
Brandon Low | 18c9aba | 2007-10-27 04:49:04 +0000 | [diff] [blame] | 1060 | close(fd); |
Nicolas Pennequin | 483c402 | 2008-02-12 23:15:59 +0000 | [diff] [blame] | 1061 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1062 | if (handle_id >= 0) { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1063 | /* Inform the buffering thread that we added a handle */ |
| 1064 | LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id); |
| 1065 | queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id); |
| 1066 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1067 | } |
| 1068 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1069 | logf("bufopen: new hdl %d", handle_id); |
| 1070 | return handle_id; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1071 | |
| 1072 | /* Currently only used for aa loading */ |
| 1073 | (void)user_data; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1074 | } |
| 1075 | |
| 1076 | /* Open a new handle from data that needs to be copied from memory. |
| 1077 | src is the source buffer from which to copy data. It can be NULL to simply |
| 1078 | reserve buffer space. |
| 1079 | size is the requested size. The call will only be successful if the |
| 1080 | requested amount of data can entirely fit in the buffer without wrapping. |
| 1081 | Return value is the handle id for success or <0 for failure. |
| 1082 | */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1083 | int bufalloc(const void *src, size_t size, enum data_type type) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1084 | { |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1085 | if (type == TYPE_UNKNOWN) |
| 1086 | return ERR_UNSUPPORTED_TYPE; |
| 1087 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1088 | int handle_id = ERR_BUFFER_FULL; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1089 | |
| 1090 | mutex_lock(&llist_mutex); |
| 1091 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1092 | size_t data; |
Michael Sevakis | cd3ea08 | 2017-12-09 09:41:34 -0500 | [diff] [blame] | 1093 | struct memory_handle *h = add_handle(H_ALLOCALL, size, NULL, &data); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1094 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1095 | if (h) { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1096 | handle_id = h->id; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1097 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1098 | if (src) { |
| 1099 | if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) { |
| 1100 | /* specially take care of struct mp3entry */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1101 | copy_mp3entry(ringbuf_ptr(data), src); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1102 | } else { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1103 | memcpy(ringbuf_ptr(data), src, size); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1104 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1105 | } |
Michael Sevakis | 89b05af | 2013-06-29 22:18:17 -0400 | [diff] [blame] | 1106 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1107 | h->type = type; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1108 | h->fd = -1; |
| 1109 | h->data = data; |
| 1110 | h->ridx = data; |
| 1111 | h->widx = ringbuf_add(data, size); |
| 1112 | h->filesize = size; |
| 1113 | h->start = 0; |
| 1114 | h->pos = 0; |
| 1115 | h->end = size; |
| 1116 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 1117 | link_handle(h); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1118 | } |
| 1119 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1120 | mutex_unlock(&llist_mutex); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1121 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1122 | logf("bufalloc: new hdl %d", handle_id); |
| 1123 | return handle_id; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1124 | } |
| 1125 | |
| 1126 | /* Close the handle. Return true for success and false for failure */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1127 | bool bufclose(int handle_id) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1128 | { |
| 1129 | logf("bufclose(%d)", handle_id); |
Michael Sevakis | 786fbbf | 2017-12-17 18:49:24 -0500 | [diff] [blame] | 1130 | |
Michael Sevakis | c1a01be | 2017-12-08 13:01:25 -0500 | [diff] [blame] | 1131 | if (handle_id <= 0) { |
| 1132 | return true; |
| 1133 | } |
| 1134 | |
Nicolas Pennequin | d08131a | 2007-10-27 01:25:47 +0000 | [diff] [blame] | 1135 | LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1136 | return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id); |
| 1137 | } |
| 1138 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1139 | /* Backend to bufseek and bufadvance. Call only in response to |
| 1140 | Q_REBUFFER_HANDLE! */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1141 | static void rebuffer_handle(int handle_id, off_t newpos) |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1142 | { |
| 1143 | struct memory_handle *h = find_handle(handle_id); |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1144 | if (!h) { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1145 | queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND); |
| 1146 | return; |
| 1147 | } |
| 1148 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1149 | /* Check that we still need to do this since the request could have |
| 1150 | possibly been met by this time */ |
| 1151 | if (newpos >= h->start && newpos <= h->end) { |
| 1152 | h->ridx = ringbuf_add(h->data, newpos - h->start); |
| 1153 | h->pos = newpos; |
| 1154 | queue_reply(&buffering_queue, 0); |
| 1155 | return; |
| 1156 | } |
| 1157 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1158 | /* When seeking foward off of the buffer, if it is a short seek attempt to |
| 1159 | avoid rebuffering the whole track, just read enough to satisfy */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1160 | off_t amount = newpos - h->pos; |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1161 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1162 | if (amount > 0 && amount <= BUFFERING_DEFAULT_FILECHUNK) { |
| 1163 | h->ridx = ringbuf_add(h->data, newpos - h->start); |
| 1164 | h->pos = newpos; |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1165 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1166 | if (buffer_handle(handle_id, amount + 1) && h->end >= h->pos) { |
| 1167 | /* It really did succeed */ |
| 1168 | queue_reply(&buffering_queue, 0); |
| 1169 | buffer_handle(handle_id, 0); /* Ok, try the rest */ |
| 1170 | return; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1171 | } |
Michael Sevakis | 05e180a | 2011-03-02 06:24:50 +0000 | [diff] [blame] | 1172 | /* Data collision or other file error - must reset */ |
| 1173 | |
| 1174 | if (newpos > h->filesize) |
| 1175 | newpos = h->filesize; /* file truncation happened above */ |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1176 | } |
| 1177 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 1178 | mutex_lock(&llist_mutex); |
| 1179 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 1180 | size_t next = ringbuf_offset(HLIST_NEXT(h) ?: HLIST_FIRST); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1181 | |
| 1182 | #ifdef STORAGE_WANTS_ALIGN |
| 1183 | /* Strip alignment padding then redo */ |
Michael Sevakis | d35a18f | 2017-12-19 13:48:14 -0500 | [diff] [blame] | 1184 | size_t new_index = ringbuf_add(ringbuf_offset(h), h->size); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1185 | |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1186 | /* Align to desired storage alignment if space permits - handle could |
| 1187 | have been shrunken too close to the following one after a previous |
| 1188 | rebuffer. */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1189 | size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)newpos - |
| 1190 | (uintptr_t)ringbuf_ptr(new_index)); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1191 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 1192 | if (ringbuf_add_cross_full(new_index, alignment_pad, next) > 0) |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1193 | alignment_pad = 0; /* Forego storage alignment this time */ |
| 1194 | |
| 1195 | new_index = ringbuf_add(new_index, alignment_pad); |
| 1196 | #else |
| 1197 | /* Just clear the data buffer */ |
| 1198 | size_t new_index = h->data; |
| 1199 | #endif /* STORAGE_WANTS_ALIGN */ |
| 1200 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1201 | /* Reset the handle to its new position */ |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1202 | h->ridx = h->widx = h->data = new_index; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1203 | h->start = h->pos = h->end = newpos; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1204 | |
| 1205 | if (h->fd >= 0) |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1206 | lseek(h->fd, newpos, SEEK_SET); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1207 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1208 | off_t filerem = h->filesize - newpos; |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 1209 | bool send = HLIST_NEXT(h) && |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 1210 | ringbuf_add_cross_full(new_index, filerem, next) > 0; |
| 1211 | |
| 1212 | mutex_unlock(&llist_mutex); |
| 1213 | |
| 1214 | if (send) { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1215 | /* There isn't enough space to rebuffer all of the track from its new |
| 1216 | offset, so we ask the user to free some */ |
| 1217 | DEBUGF("%s(): space is needed\n", __func__); |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1218 | send_event(BUFFER_EVENT_REBUFFER, &(int){ handle_id }); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1219 | } |
| 1220 | |
| 1221 | /* Now we do the rebuffer */ |
| 1222 | queue_reply(&buffering_queue, 0); |
| 1223 | buffer_handle(handle_id, 0); |
| 1224 | } |
| 1225 | |
| 1226 | /* Backend to bufseek and bufadvance */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1227 | static int seek_handle(struct memory_handle *h, off_t newpos) |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1228 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1229 | if ((newpos < h->start || newpos >= h->end) && |
| 1230 | (newpos < h->filesize || h->end < h->filesize)) { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1231 | /* access before or after buffered data and not to end of file or file |
| 1232 | is not buffered to the end-- a rebuffer is needed. */ |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1233 | return queue_send(&buffering_queue, Q_REBUFFER_HANDLE, |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1234 | (intptr_t)&(struct buf_message_data){ h->id, newpos }); |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1235 | } |
| 1236 | else { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1237 | h->ridx = ringbuf_add(h->data, newpos - h->start); |
| 1238 | h->pos = newpos; |
| 1239 | return 0; |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1240 | } |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1241 | } |
| 1242 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1243 | /* Set reading index in handle (relatively to the start of the file). |
| 1244 | Access before the available data will trigger a rebuffer. |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1245 | Return 0 for success and for failure: |
| 1246 | ERR_HANDLE_NOT_FOUND if the handle wasn't found |
| 1247 | ERR_INVALID_VALUE if the new requested position was beyond the end of |
| 1248 | the file |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1249 | */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1250 | int bufseek(int handle_id, size_t newpos) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1251 | { |
| 1252 | struct memory_handle *h = find_handle(handle_id); |
| 1253 | if (!h) |
Nicolas Pennequin | d400e23 | 2007-10-29 14:15:59 +0000 | [diff] [blame] | 1254 | return ERR_HANDLE_NOT_FOUND; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1255 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1256 | if (newpos > (size_t)h->filesize) |
| 1257 | return ERR_INVALID_VALUE; |
| 1258 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1259 | return seek_handle(h, newpos); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1260 | } |
| 1261 | |
| 1262 | /* Advance the reading index in a handle (relatively to its current position). |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1263 | Return 0 for success and for failure: |
| 1264 | ERR_HANDLE_NOT_FOUND if the handle wasn't found |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1265 | ERR_INVALID_VALUE if the new requested position was before the beginning |
| 1266 | or beyond the end of the file |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1267 | */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1268 | int bufadvance(int handle_id, off_t offset) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1269 | { |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1270 | struct memory_handle *h = find_handle(handle_id); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1271 | if (!h) |
Nicolas Pennequin | d400e23 | 2007-10-29 14:15:59 +0000 | [diff] [blame] | 1272 | return ERR_HANDLE_NOT_FOUND; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1273 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1274 | off_t pos = h->pos; |
| 1275 | |
| 1276 | if ((offset < 0 && offset < -pos) || |
| 1277 | (offset >= 0 && offset > h->filesize - pos)) |
| 1278 | return ERR_INVALID_VALUE; |
| 1279 | |
| 1280 | return seek_handle(h, pos + offset); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1281 | } |
| 1282 | |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1283 | /* Get the read position from the start of the file |
| 1284 | Returns the offset from byte 0 of the file and for failure: |
| 1285 | ERR_HANDLE_NOT_FOUND if the handle wasn't found |
| 1286 | */ |
| 1287 | off_t bufftell(int handle_id) |
| 1288 | { |
| 1289 | const struct memory_handle *h = find_handle(handle_id); |
| 1290 | if (!h) |
| 1291 | return ERR_HANDLE_NOT_FOUND; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1292 | |
| 1293 | return h->pos; |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1294 | } |
| 1295 | |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1296 | /* Used by bufread and bufgetdata to prepare the buffer and retrieve the |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1297 | * actual amount of data available for reading. It does range checks on |
| 1298 | * size and returns a valid (and explicit) amount of data for reading */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1299 | static struct memory_handle *prep_bufdata(int handle_id, size_t *size, |
| 1300 | bool guardbuf_limit) |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1301 | { |
Brandon Low | 6e8ee40 | 2007-11-08 15:34:23 +0000 | [diff] [blame] | 1302 | struct memory_handle *h = find_handle(handle_id); |
| 1303 | if (!h) |
| 1304 | return NULL; |
| 1305 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1306 | if (h->pos >= h->filesize) { |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1307 | /* File is finished reading */ |
Nicolas Pennequin | b50473f | 2007-11-08 18:27:19 +0000 | [diff] [blame] | 1308 | *size = 0; |
| 1309 | return h; |
| 1310 | } |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1311 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1312 | off_t realsize = *size; |
| 1313 | off_t filerem = h->filesize - h->pos; |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1314 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1315 | if (realsize <= 0 || realsize > filerem) |
| 1316 | realsize = filerem; /* clip to eof */ |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1317 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1318 | if (guardbuf_limit && realsize > GUARD_BUFSIZE) { |
Brandon Low | 1db4243 | 2007-11-08 15:52:10 +0000 | [diff] [blame] | 1319 | logf("data request > guardbuf"); |
| 1320 | /* If more than the size of the guardbuf is requested and this is a |
| 1321 | * bufgetdata, limit to guard_bufsize over the end of the buffer */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1322 | realsize = MIN((size_t)realsize, buffer_len - h->ridx + GUARD_BUFSIZE); |
Nicolas Pennequin | ca4771b | 2007-11-08 16:12:28 +0000 | [diff] [blame] | 1323 | /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */ |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1324 | } |
| 1325 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1326 | off_t end = h->end; |
| 1327 | off_t wait_end = h->pos + realsize; |
| 1328 | |
| 1329 | if (end < wait_end && end < h->filesize) { |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1330 | /* Wait for the data to be ready */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1331 | unsigned int request = 1; |
| 1332 | |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1333 | do |
| 1334 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1335 | if (--request == 0) { |
| 1336 | request = 100; |
| 1337 | /* Data (still) isn't ready; ping buffering thread */ |
| 1338 | LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id); |
| 1339 | queue_send(&buffering_queue, Q_START_FILL, handle_id); |
| 1340 | } |
| 1341 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1342 | sleep(0); |
Brandon Low | 551db40 | 2007-11-08 16:06:24 +0000 | [diff] [blame] | 1343 | /* it is not safe for a non-buffering thread to sleep while |
| 1344 | * holding a handle */ |
Brandon Low | 6e8ee40 | 2007-11-08 15:34:23 +0000 | [diff] [blame] | 1345 | h = find_handle(handle_id); |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1346 | if (!h) |
Brandon Low | 551db40 | 2007-11-08 16:06:24 +0000 | [diff] [blame] | 1347 | return NULL; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1348 | |
| 1349 | if (h->signaled != 0) |
| 1350 | return NULL; /* Wait must be abandoned */ |
| 1351 | |
| 1352 | end = h->end; |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1353 | } |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1354 | while (end < wait_end && end < h->filesize); |
| 1355 | |
| 1356 | filerem = h->filesize - h->pos; |
| 1357 | if (realsize > filerem) |
| 1358 | realsize = filerem; |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1359 | } |
| 1360 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1361 | *size = realsize; |
Brandon Low | 6e8ee40 | 2007-11-08 15:34:23 +0000 | [diff] [blame] | 1362 | return h; |
Brandon Low | 9821cce | 2007-11-06 16:49:30 +0000 | [diff] [blame] | 1363 | } |
| 1364 | |
Michael Sevakis | 6938255 | 2011-02-14 08:36:29 +0000 | [diff] [blame] | 1365 | |
/* Note: it is only safe for the thread that handles the rebuffer cleanup
 * request to call bufread or bufgetdata when the data is already available --
 * never when the call could block waiting for it in prep_bufdata. If that
 * thread were forced to wait for buffering before it has responded to the
 * cleanup request, the space could never be cleared to allow further reading
 * of the file, because the thread is no longer listening to callbacks. */
| 1373 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1374 | /* Copy data from the given handle to the dest buffer. |
Nicolas Pennequin | b838a62 | 2007-11-02 19:13:03 +0000 | [diff] [blame] | 1375 | Return the number of bytes copied or < 0 for failure (handle not found). |
| 1376 | The caller is blocked until the requested amount of data is available. |
| 1377 | */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1378 | ssize_t bufread(int handle_id, size_t size, void *dest) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1379 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1380 | const struct memory_handle *h = |
| 1381 | prep_bufdata(handle_id, &size, false); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1382 | if (!h) |
Nicolas Pennequin | d400e23 | 2007-10-29 14:15:59 +0000 | [diff] [blame] | 1383 | return ERR_HANDLE_NOT_FOUND; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1384 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1385 | if (h->ridx + size > buffer_len) { |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1386 | /* the data wraps around the end of the buffer */ |
| 1387 | size_t read = buffer_len - h->ridx; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1388 | memcpy(dest, ringbuf_ptr(h->ridx), read); |
| 1389 | memcpy(dest + read, ringbuf_ptr(0), size - read); |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1390 | } else { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1391 | memcpy(dest, ringbuf_ptr(h->ridx), size); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1392 | } |
| 1393 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1394 | return size; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1395 | } |
| 1396 | |
| 1397 | /* Update the "data" pointer to make the handle's data available to the caller. |
Nicolas Pennequin | b838a62 | 2007-11-02 19:13:03 +0000 | [diff] [blame] | 1398 | Return the length of the available linear data or < 0 for failure (handle |
| 1399 | not found). |
| 1400 | The caller is blocked until the requested amount of data is available. |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1401 | size is the amount of linear data requested. it can be 0 to get as |
| 1402 | much as possible. |
Nicolas Pennequin | b838a62 | 2007-11-02 19:13:03 +0000 | [diff] [blame] | 1403 | The guard buffer may be used to provide the requested size. This means it's |
| 1404 | unsafe to request more than the size of the guard buffer. |
| 1405 | */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1406 | ssize_t bufgetdata(int handle_id, size_t size, void **data) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1407 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1408 | struct memory_handle *h = |
| 1409 | prep_bufdata(handle_id, &size, true); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1410 | if (!h) |
Nicolas Pennequin | d400e23 | 2007-10-29 14:15:59 +0000 | [diff] [blame] | 1411 | return ERR_HANDLE_NOT_FOUND; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1412 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1413 | if (h->ridx + size > buffer_len) { |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1414 | /* the data wraps around the end of the buffer : |
| 1415 | use the guard buffer to provide the requested amount of data. */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1416 | size_t copy_n = h->ridx + size - buffer_len; |
Michael Sevakis | 8f14357 | 2011-02-14 09:18:58 +0000 | [diff] [blame] | 1417 | /* prep_bufdata ensures |
| 1418 | adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE, |
Nicolas Pennequin | ca4771b | 2007-11-08 16:12:28 +0000 | [diff] [blame] | 1419 | so copy_n <= GUARD_BUFSIZE */ |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1420 | memcpy(guard_buffer, ringbuf_ptr(0), copy_n); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1421 | } |
| 1422 | |
Nicolas Pennequin | ecec940 | 2007-12-16 01:38:56 +0000 | [diff] [blame] | 1423 | if (data) |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1424 | *data = ringbuf_ptr(h->ridx); |
Nicolas Pennequin | ecec940 | 2007-12-16 01:38:56 +0000 | [diff] [blame] | 1425 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1426 | return size; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1427 | } |
| 1428 | |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1429 | ssize_t bufgettail(int handle_id, size_t size, void **data) |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1430 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1431 | if (thread_self() != buffering_thread_id) |
| 1432 | return ERR_WRONG_THREAD; /* only from buffering thread */ |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1433 | |
| 1434 | /* We don't support tail requests of > guardbuf_size, for simplicity */ |
| 1435 | if (size > GUARD_BUFSIZE) |
| 1436 | return ERR_INVALID_VALUE; |
| 1437 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1438 | const struct memory_handle *h = find_handle(handle_id); |
| 1439 | if (!h) |
| 1440 | return ERR_HANDLE_NOT_FOUND; |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1441 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1442 | if (h->end >= h->filesize) { |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 1443 | size_t tidx = ringbuf_sub_empty(h->widx, size); |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1444 | |
| 1445 | if (tidx + size > buffer_len) { |
| 1446 | size_t copy_n = tidx + size - buffer_len; |
| 1447 | memcpy(guard_buffer, ringbuf_ptr(0), copy_n); |
| 1448 | } |
| 1449 | |
| 1450 | *data = ringbuf_ptr(tidx); |
| 1451 | } |
| 1452 | else { |
| 1453 | size = ERR_HANDLE_NOT_DONE; |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1454 | } |
| 1455 | |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1456 | return size; |
| 1457 | } |
| 1458 | |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1459 | ssize_t bufcuttail(int handle_id, size_t size) |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1460 | { |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1461 | if (thread_self() != buffering_thread_id) |
| 1462 | return ERR_WRONG_THREAD; /* only from buffering thread */ |
Nicolas Pennequin | ecec940 | 2007-12-16 01:38:56 +0000 | [diff] [blame] | 1463 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1464 | struct memory_handle *h = find_handle(handle_id); |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1465 | if (!h) |
| 1466 | return ERR_HANDLE_NOT_FOUND; |
| 1467 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1468 | if (h->end >= h->filesize) { |
| 1469 | /* Cannot trim to before read position */ |
| 1470 | size_t available = h->end - MAX(h->start, h->pos); |
| 1471 | if (available < size) |
| 1472 | size = available; |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1473 | |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 1474 | h->widx = ringbuf_sub_empty(h->widx, size); |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1475 | h->filesize -= size; |
| 1476 | h->end -= size; |
| 1477 | } else { |
| 1478 | size = ERR_HANDLE_NOT_DONE; |
| 1479 | } |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1480 | |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1481 | return size; |
Brandon Low | 3386dd7 | 2007-11-28 04:58:16 +0000 | [diff] [blame] | 1482 | } |
| 1483 | |
| 1484 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1485 | /* |
| 1486 | SECONDARY EXPORTED FUNCTIONS |
| 1487 | ============================ |
| 1488 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1489 | buf_handle_offset |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1490 | buf_set_base_handle |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1491 | buf_handle_data_type |
| 1492 | buf_is_handle |
| 1493 | buf_pin_handle |
| 1494 | buf_signal_handle |
| 1495 | buf_length |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1496 | buf_used |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1497 | buf_set_watermark |
| 1498 | buf_get_watermark |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1499 | |
| 1500 | These functions are exported, to allow interaction with the buffer. |
| 1501 | They take care of the content of the structs, and rely on the linked list |
| 1502 | management functions for all the actual handle management work. |
| 1503 | */ |
/* Return true if a handle with the given id currently exists. */
bool buf_is_handle(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    return h != NULL;
}
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1508 | |
Michael Sevakis | dfff938 | 2017-12-17 16:12:10 -0500 | [diff] [blame] | 1509 | int buf_handle_data_type(int handle_id) |
| 1510 | { |
| 1511 | const struct memory_handle *h = find_handle(handle_id); |
| 1512 | if (!h) |
| 1513 | return ERR_HANDLE_NOT_FOUND; |
| 1514 | return h->type; |
| 1515 | } |
| 1516 | |
| 1517 | off_t buf_filesize(int handle_id) |
| 1518 | { |
| 1519 | const struct memory_handle *h = find_handle(handle_id); |
| 1520 | if (!h) |
| 1521 | return ERR_HANDLE_NOT_FOUND; |
| 1522 | return h->filesize; |
| 1523 | } |
| 1524 | |
| 1525 | off_t buf_handle_offset(int handle_id) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1526 | { |
Brandon Low | 404c6fb | 2007-10-27 01:37:33 +0000 | [diff] [blame] | 1527 | const struct memory_handle *h = find_handle(handle_id); |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1528 | if (!h) |
Nicolas Pennequin | d400e23 | 2007-10-29 14:15:59 +0000 | [diff] [blame] | 1529 | return ERR_HANDLE_NOT_FOUND; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1530 | return h->start; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1531 | } |
| 1532 | |
Michael Sevakis | dfff938 | 2017-12-17 16:12:10 -0500 | [diff] [blame] | 1533 | off_t buf_handle_remaining(int handle_id) |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1534 | { |
| 1535 | const struct memory_handle *h = find_handle(handle_id); |
| 1536 | if (!h) |
| 1537 | return ERR_HANDLE_NOT_FOUND; |
Michael Sevakis | 3661581 | 2013-08-26 16:49:53 -0400 | [diff] [blame] | 1538 | return h->filesize - h->end; |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1539 | } |
| 1540 | |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1541 | bool buf_pin_handle(int handle_id, bool pin) |
| 1542 | { |
| 1543 | struct memory_handle *h = find_handle(handle_id); |
| 1544 | if (!h) |
| 1545 | return false; |
| 1546 | |
| 1547 | if (pin) { |
| 1548 | h->pinned++; |
| 1549 | } else if (h->pinned > 0) { |
| 1550 | h->pinned--; |
| 1551 | } |
| 1552 | |
Michael Sevakis | 89b05af | 2013-06-29 22:18:17 -0400 | [diff] [blame] | 1553 | return true; |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1554 | } |
| 1555 | |
| 1556 | bool buf_signal_handle(int handle_id, bool signal) |
| 1557 | { |
| 1558 | struct memory_handle *h = find_handle(handle_id); |
| 1559 | if (!h) |
| 1560 | return false; |
| 1561 | |
| 1562 | h->signaled = signal ? 1 : 0; |
Michael Sevakis | 89b05af | 2013-06-29 22:18:17 -0400 | [diff] [blame] | 1563 | return true; |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1564 | } |
| 1565 | |
| 1566 | /* Return the size of the ringbuffer */ |
| 1567 | size_t buf_length(void) |
| 1568 | { |
| 1569 | return buffer_len; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1570 | } |
| 1571 | |
Michael Sevakis | dfff938 | 2017-12-17 16:12:10 -0500 | [diff] [blame] | 1572 | /* Set the handle from which useful data is counted */ |
| 1573 | void buf_set_base_handle(int handle_id) |
| 1574 | { |
| 1575 | mutex_lock(&llist_mutex); |
| 1576 | base_handle_id = handle_id; |
| 1577 | mutex_unlock(&llist_mutex); |
| 1578 | } |
| 1579 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1580 | /* Return the amount of buffer space used */ |
| 1581 | size_t buf_used(void) |
| 1582 | { |
Michael Sevakis | eefc7c7 | 2017-04-08 18:11:25 -0400 | [diff] [blame] | 1583 | mutex_lock(&llist_mutex); |
| 1584 | size_t used = bytes_used(); |
| 1585 | mutex_unlock(&llist_mutex); |
| 1586 | return used; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1587 | } |
| 1588 | |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1589 | void buf_set_watermark(size_t bytes) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1590 | { |
Björn Stenberg | 6427d12 | 2009-01-10 21:10:56 +0000 | [diff] [blame] | 1591 | conf_watermark = bytes; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1592 | } |
| 1593 | |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1594 | size_t buf_get_watermark(void) |
| 1595 | { |
| 1596 | return BUF_WATERMARK; |
| 1597 | } |
| 1598 | |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1599 | /** -- buffer thread helpers -- **/ |
/* Compact every handle to reclaim unused buffer space.
 * Walks the handle list from last to first under the list mutex. The loop
 * variable is deliberately reassigned from shrink_handle()'s return value
 * before stepping to the previous handle — presumably shrink_handle() can
 * relocate/replace the handle it is given; verify against its definition. */
static void shrink_buffer(void)
{
    logf("shrink_buffer()");

    mutex_lock(&llist_mutex);

    for (struct memory_handle *h = HLIST_LAST; h; h = HLIST_PREV(h)) {
        h = shrink_handle(h);
    }

    mutex_unlock(&llist_mutex);
}
| 1612 | |
/* Main loop of the buffering worker thread.
 * Services fill/close/rebuffer requests from buffering_queue and, while
 * 'filling' is set, keeps topping up the buffer between messages. The CPU
 * boost is cancelled whenever there is no filling work to do. */
static void NORETURN_ATTR buffering_thread(void)
{
    bool filling = false;   /* true while actively streaming data in */
    struct queue_event ev;

    while (true)
    {
        if (num_handles > 0) {
            if (!filling) {
                cancel_cpu_boost();
            }
            /* Poll rapidly while filling; otherwise wake twice a second
               to watch buffer levels */
            queue_wait_w_tmo(&buffering_queue, &ev, filling ? 1 : HZ/2);
        } else {
            /* No handles: nothing to fill, block until a message arrives */
            filling = false;
            cancel_cpu_boost();
            queue_wait(&buffering_queue, &ev);
        }

        switch (ev.id)
        {
            case Q_START_FILL:
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                /* Reclaim space first, then reply before the (long) fill */
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                if (buffer_handle((int)ev.data, 0)) {
                    filling = true;
                }
                else if (num_handles > 0 && conf_watermark > 0) {
                    /* Fill didn't start; if useful data is already above the
                       watermark, notify listeners the buffer is serviceable */
                    update_data_counters(NULL);
                    if (data_counters.useful >= BUF_WATERMARK) {
                        send_event(BUFFER_EVENT_BUFFER_LOW, NULL);
                    }
                }
                break;

            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                /* Reply immediately so the sender isn't blocked on the fill */
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data, 0);
                break;

            case Q_REBUFFER_HANDLE:
            {
                struct buf_message_data *parm =
                    (struct buf_message_data *)ev.data;
                LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld",
                          parm->handle_id, parm->data);
                rebuffer_handle(parm->handle_id, parm->data);
                break;
            }

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        /* Skip level maintenance when idle or when more messages wait */
        if (num_handles == 0 || !queue_empty(&buffering_queue))
            continue;

        update_data_counters(NULL);

        if (filling) {
            /* Keep filling while data remains; stop once everything fits */
            filling = data_counters.remaining > 0 ? fill_buffer() : false;
        } else if (ev.id == SYS_TIMEOUT) {
            if (data_counters.useful < BUF_WATERMARK) {
                /* The buffer is low and we're idle, just watching the levels
                   - call the callbacks to get new data */
                send_event(BUFFER_EVENT_BUFFER_LOW, NULL);

                /* Continue anything else we haven't finished - it might
                   get booted off or stop early because the receiver hasn't
                   had a chance to clear anything yet */
                if (data_counters.remaining > 0) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}
| 1704 | |
/* One-time initialization of the buffering subsystem: sets up the handle
 * list mutex, the message queue, and spawns the (frozen) buffering thread
 * with queue-send support so callers can get synchronous replies. */
void INIT_ATTR buffering_init(void)
{
    mutex_init(&llist_mutex);

    /* Thread should absolutely not respond to USB because if it waits first,
       then it cannot properly service the handles and leaks will happen -
       this is a worker thread and shouldn't need to care about any system
       notifications.
       ***
       Whoever is using buffering should be responsible enough to clear all
       the handles at the right time. */
    queue_init(&buffering_queue, false);
    buffering_thread_id = create_thread( buffering_thread, buffering_stack,
            sizeof(buffering_stack), CREATE_THREAD_FROZEN,
            buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
            IF_COP(, CPU));

    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
                            buffering_thread_id);
}
| 1725 | |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1726 | /* Initialise the buffering subsystem */ |
Steve Bavin | 135cc75 | 2008-03-28 12:51:33 +0000 | [diff] [blame] | 1727 | bool buffering_reset(char *buf, size_t buflen) |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1728 | { |
Michael Sevakis | 0d902c8 | 2011-02-09 09:30:09 +0000 | [diff] [blame] | 1729 | /* Wraps of storage-aligned data must also be storage aligned, |
| 1730 | thus buf and buflen must be a aligned to an integer multiple of |
| 1731 | the storage alignment */ |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1732 | |
Michael Sevakis | 0ebfb93 | 2012-05-21 02:18:46 -0400 | [diff] [blame] | 1733 | if (buf) { |
| 1734 | buflen -= MIN(buflen, GUARD_BUFSIZE); |
Michael Sevakis | 89b05af | 2013-06-29 22:18:17 -0400 | [diff] [blame] | 1735 | |
Michael Sevakis | 0ebfb93 | 2012-05-21 02:18:46 -0400 | [diff] [blame] | 1736 | STORAGE_ALIGN_BUFFER(buf, buflen); |
Michael Sevakis | c537d59 | 2011-04-27 03:08:23 +0000 | [diff] [blame] | 1737 | |
Michael Sevakis | 0ebfb93 | 2012-05-21 02:18:46 -0400 | [diff] [blame] | 1738 | if (!buf || !buflen) |
| 1739 | return false; |
| 1740 | } else { |
| 1741 | buflen = 0; |
| 1742 | } |
Michael Sevakis | 0d902c8 | 2011-02-09 09:30:09 +0000 | [diff] [blame] | 1743 | |
Michael Sevakis | 0ebfb93 | 2012-05-21 02:18:46 -0400 | [diff] [blame] | 1744 | send_event(BUFFER_EVENT_BUFFER_RESET, NULL); |
| 1745 | |
| 1746 | /* If handles weren't closed above, just do it */ |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 1747 | struct memory_handle *h; |
| 1748 | while ((h = HLIST_FIRST)) { |
| 1749 | bufclose(h->id); |
| 1750 | } |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1751 | |
| 1752 | buffer = buf; |
Rafaël Carré | 2494afc | 2010-06-23 04:34:18 +0000 | [diff] [blame] | 1753 | buffer_len = buflen; |
Nicolas Pennequin | 3e3c43c | 2007-10-25 21:27:45 +0000 | [diff] [blame] | 1754 | guard_buffer = buf + buflen; |
| 1755 | |
Michael Sevakis | 65c6a14 | 2017-04-13 18:53:17 -0400 | [diff] [blame] | 1756 | |