/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include "memory.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"

static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

int set_irq_level (int level)
{
    static int _lv = 0;
    return (_lv = level);
}
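
/* Illustrative note: callers in this file follow a save/restore pattern,
   keeping the previous level and restoring it when leaving the critical
   section, e.g.

       int oldlevel = set_irq_level(15<<4);
       ...critical section...
       set_irq_level(oldlevel);

   In the simulator this is only a bookkeeping stub; on target builds the
   equivalent call is expected to mask interrupts. */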

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    int old_level = set_irq_level(15<<4);
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }

    set_irq_level(old_level);
}

/* Puts the specified return value in the waiting thread's return value
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    *sender = NULL;
}

/* Releases any waiting threads that are queued with queue_send -
   reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];
            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    q->send = send;
    memset(send, 0, sizeof(*send));
}
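
/* Minimal usage sketch (illustrative only; my_queue and my_queue_senders
   are hypothetical names). The receiving thread owns both structures and
   enables synchronous sends before entering its event loop:

       static struct event_queue my_queue;
       static struct queue_sender_list my_queue_senders;

       queue_init(&my_queue, true);
       queue_enable_queue_send(&my_queue, &my_queue_senders);
 */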
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

void queue_init(struct event_queue *q, bool register_queue)
{
    (void)register_queue;

    q->read = 0;
    q->write = 0;
    q->thread = NULL;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif
}

void queue_delete(struct event_queue *q)
{
    (void)q;
}

void queue_wait(struct event_queue *q, struct event *ev)
{
    unsigned int rd;

    while(q->read == q->write)
    {
        switch_thread(true, NULL);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if there is one */
        queue_fetch_sender(q->send, rd);
    }
#endif
}
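
/* Sketch of a typical receiver loop (illustrative only; my_queue and
   handle() are hypothetical):

       struct event ev;
       while(1)
       {
           queue_wait(&my_queue, &ev);
           handle(ev.id, ev.data);
       }

   In the simulator, waiting is implemented by yielding until the queue is
   non-empty. */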

void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
    unsigned int timeout = current_tick + ticks;

    while(q->read == q->write && TIME_BEFORE( current_tick, timeout ))
    {
        sim_sleep(1);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if there is one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }
}
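
/* Illustrative only: a caller that polls with a timeout checks for
   SYS_TIMEOUT to distinguish "nothing arrived" from a real event
   (my_queue and handle() are hypothetical; HZ is the kernel tick rate):

       struct event ev;
       queue_wait_w_tmo(&my_queue, &ev, HZ/10);
       if(ev.id != SYS_TIMEOUT)
           handle(ev.id, ev.data);
 */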

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(15<<4);
    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    set_irq_level(oldlevel);
}
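
/* Illustrative only: posting is fire-and-forget, e.g.

       queue_post(&my_queue, MY_EVENT_ID, (intptr_t)some_data);

   MY_EVENT_ID and some_data are hypothetical. If the queue wraps and a
   sender is still waiting at the overwritten slot, that sender is released
   with a reply of 0 (see the overflow protection above). */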

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(15<<4);
    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];
        static struct thread_entry sender;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        *spp = &sender;

        set_irq_level(oldlevel);
        while (*spp != NULL)
        {
            switch_thread(true, NULL);
        }

        return sender.retval;
    }

    /* Function as queue_post if sending is not enabled */
    set_irq_level(oldlevel);
    return 0;
}
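
/* Illustrative only: a synchronous round trip between a sender and the
   queue's owning thread (all names hypothetical).

   Sender thread:
       intptr_t result = queue_send(&my_queue, MY_REQUEST, (intptr_t)arg);

   Owning thread, after dequeuing MY_REQUEST with queue_wait():
       queue_reply(&my_queue, status);

   The sender yields until queue_reply() (or the queue being cleared)
   releases it; the value passed to queue_reply() becomes the return value
   of queue_send(). */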

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       dequeued sent messages will be handled by the owning thread */
    queue_release_all_senders(q);
#endif
    q->read = 0;
    q->write = 0;
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(15<<4);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
    (void)save_context;
    (void)blocked_list;

    yield();
}

void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}

int tick_add_task(void (*f)(void))
{
    int i;

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            return 0;
        }
    }
    DEBUGF("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            return 0;
        }
    }

    return -1;
}
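
/* Illustrative only: a periodic task registered once and then called from
   sim_tick_tasks() on every simulated tick. my_tick_task is hypothetical
   and should be short and non-blocking:

       static void my_tick_task(void)
       {
           ...do a small amount of per-tick work...
       }

       tick_add_task(my_tick_task);
       ...
       tick_remove_task(my_tick_task);

   Both calls return 0 on success and -1 if the task table is full or the
   task was not found. */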

/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->locked = false;
}

void mutex_lock(struct mutex *m)
{
    while(m->locked)
        switch_thread(true, NULL);
    m->locked = true;
}

void mutex_unlock(struct mutex *m)
{
    m->locked = false;
}
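
/* Illustrative only: the usual lock/unlock bracket around shared state
   (my_mutex and shared_counter are hypothetical):

       mutex_lock(&my_mutex);
       shared_counter++;
       mutex_unlock(&my_mutex);

   Because the simulator's threading is cooperative, mutex_lock() simply
   yields until the flag clears; as the comment above notes, there is no
   real atomicity guarantee under pre-emption. */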

void spinlock_lock(struct mutex *m)
{
    while(m->locked)
        switch_thread(true, NULL);
    m->locked = true;
}

void spinlock_unlock(struct mutex *m)
{
    m->locked = false;
}