/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdio.h>  /* fprintf */
#include <stdlib.h>
#include <string.h> /* memset */
#include <SDL.h>
#include <SDL_thread.h>
#include "memory.h"
#include "system-sdl.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"

/* Condition to signal that "interrupts" may proceed */
static SDL_cond *sim_thread_cond;
/* Mutex to serialize changing levels and exclude other threads while
 * inside a handler */
static SDL_mutex *sim_irq_mtx;
static int interrupt_level = HIGHEST_IRQ_LEVEL;
static int handlers_pending = 0;
static int status_reg = 0;

extern struct core_entry cores[NUM_CORES];

/* Necessary logic:
 * 1) All threads must pass unblocked
 * 2) The current handler must always pass unblocked
 * 3) Threads must be excluded while an irq routine is running
 * 4) No more than one handler routine may execute at a time
 */
int set_irq_level(int level)
{
    SDL_LockMutex(sim_irq_mtx);

    int oldlevel = interrupt_level;

    if (status_reg == 0 && level == 0 && oldlevel != 0)
    {
        /* Not in a handler and "interrupts" are being reenabled */
        if (handlers_pending > 0)
            SDL_CondSignal(sim_thread_cond);
    }

    interrupt_level = level; /* save new level */

    SDL_UnlockMutex(sim_irq_mtx);
    return oldlevel;
}
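
/* Usage sketch (illustrative only, not compiled): a driver-style critical
 * section built on set_irq_level(), as the queue functions below do. The
 * counter name is hypothetical. */
#if 0
static volatile int shared_counter;

static void bump_counter_atomically(void)
{
    /* Block simulated interrupts, do the work, then restore the old level */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    shared_counter++;
    set_irq_level(oldlevel);
}
#endif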

void sim_enter_irq_handler(void)
{
    SDL_LockMutex(sim_irq_mtx);
    handlers_pending++;

    if(interrupt_level != 0)
    {
        /* "Interrupts" are disabled. Wait for reenable */
        SDL_CondWait(sim_thread_cond, sim_irq_mtx);
    }

    status_reg = 1;
}

void sim_exit_irq_handler(void)
{
    if (--handlers_pending > 0)
        SDL_CondSignal(sim_thread_cond);

    status_reg = 0;
    SDL_UnlockMutex(sim_irq_mtx);
}
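
/* Usage sketch (illustrative, not compiled): how a host-side thread posing
 * as an interrupt source brackets its work. The function name is
 * hypothetical; the real callers live in the SDL timer/button code. */
#if 0
static void simulated_timer_interrupt(void)
{
    sim_enter_irq_handler(); /* waits here while "interrupts" are disabled */
    current_tick++;
    sim_tick_tasks();
    sim_exit_irq_handler();  /* lets the next pending handler run, if any */
}
#endif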

bool sim_kernel_init(void)
{
    sim_irq_mtx = SDL_CreateMutex();
    if (sim_irq_mtx == NULL)
    {
        fprintf(stderr, "Cannot create sim_irq_mtx\n");
        return false;
    }

    sim_thread_cond = SDL_CreateCond();
    if (sim_thread_cond == NULL)
    {
        fprintf(stderr, "Cannot create sim_thread_cond\n");
        return false;
    }

    return true;
}

void sim_kernel_shutdown(void)
{
    SDL_DestroyMutex(sim_irq_mtx);
    SDL_DestroyCond(sim_thread_cond);
}

volatile long current_tick = 0;
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all registered queues. It is used for broadcast. */
static struct event_queue *all_queues[MAX_NUM_QUEUES];
static int num_queues = 0;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves a waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    if(*sender != NULL)
    {
        fprintf(stderr, "queue->send slot ovf: %p\n", (void *)*sender);
        exit(-1);
    }
}

/* Releases any waiting threads that are queued with queue_send -
   reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];
            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    q->send = NULL;
    if(send)
    {
        q->send = send;
        memset(send, 0, sizeof(*send));
    }
    set_irq_level(oldlevel);
}
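
/* Usage sketch (illustrative, not compiled): the caller owns the sender
 * list, typically as static storage next to the queue itself. The names
 * here are hypothetical. */
#if 0
static struct event_queue my_queue;
static struct queue_sender_list my_queue_senders;

static void my_queue_setup(void)
{
    queue_init(&my_queue, true);
    /* After this, queue_send() on my_queue blocks until queue_reply() */
    queue_enable_queue_send(&my_queue, &my_queue_senders);
}
#endif
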
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    q->read = 0;
    q->write = 0;
    thread_queue_init(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(num_queues >= MAX_NUM_QUEUES)
        {
            fprintf(stderr, "queue_init->out of queues");
            exit(-1);
        }
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
    }

    set_irq_level(oldlevel);
}
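
/* Usage sketch (illustrative, not compiled): passing true registers the
 * queue in all_queues so queue_broadcast() reaches it; pass false for a
 * purely private queue. The queue name is hypothetical. */
#if 0
static struct event_queue ui_queue;

static void ui_queue_create(void)
{
    queue_init(&ui_queue, true); /* receives broadcast events too */
}
#endif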

void queue_delete(struct event_queue *q)
{
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    {
        if(all_queues[i] == q)
        {
            found = true;
            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}

void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
            cores[CURRENT_CORE].irq_level = oldlevel;
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        }
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if there is one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    set_irq_level(oldlevel);
}
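
/* Usage sketch (illustrative, not compiled): the classic thread event
 * loop. Event IDs, the queue, and the handler are hypothetical. */
#if 0
static void my_thread(void)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait(&my_queue, &ev); /* blocks until someone posts */
        switch(ev.id)
        {
        case MY_EVENT_REDRAW:
            /* posted elsewhere via queue_post(&my_queue, MY_EVENT_REDRAW, 0) */
            handle_redraw();
            break;
        default:
            break;
        }
    }
}
#endif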

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
        cores[CURRENT_CORE].irq_level = oldlevel;
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if there is one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    set_irq_level(oldlevel);
}
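
/* Usage sketch (illustrative, not compiled): poll with a timeout and fall
 * back to periodic work. HZ is the kernel tick rate; queue and handlers
 * are hypothetical. */
#if 0
static void my_poll_loop(void)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait_w_tmo(&my_queue, &ev, HZ/10);
        if(ev.id == SYS_TIMEOUT)
        {
            do_periodic_work(); /* nothing arrived within HZ/10 ticks */
            continue;
        }
        dispatch(&ev);
    }
}
#endif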

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread(&q->queue);

    set_irq_level(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->queue);

        cores[CURRENT_CORE].irq_level = oldlevel;
        block_thread_no_listlock(spp);
        return thread_get_current()->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);
    set_irq_level(oldlevel);
    return 0;
}
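
/* Usage sketch (illustrative, not compiled): a synchronous request/reply
 * round trip. Requires queue_enable_queue_send() on the receiving queue;
 * the event id and helpers are hypothetical. */
#if 0
/* Client side: blocks until the owning thread replies */
static intptr_t query_server(void)
{
    return queue_send(&my_queue, MY_EVENT_QUERY, 0);
}

/* Owner side, in its event loop after queue_wait(&my_queue, &ev): */
static void handle(struct queue_event *ev)
{
    if(ev->id == MY_EVENT_QUERY)
        queue_reply(&my_queue, compute_answer()); /* wakes the sender */
}
#endif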

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if (q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    set_irq_level(oldlevel);

    return have_msg;
}
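
/* Usage sketch (illustrative, not compiled): peek leaves the event in the
 * queue, so a later queue_wait() still sees it. Names are hypothetical. */
#if 0
static void check_for_cancel(void)
{
    struct queue_event ev;
    if(queue_peek(&my_queue, &ev) && ev.id == MY_EVENT_CANCEL)
    {
        /* react early without consuming other pending events */
        abort_current_operation();
    }
}
#endif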

void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       a dequeued sent message will be handled by the owning thread */
    queue_release_all_senders(q);
#endif
    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    for(i = 0;i < num_queues;i++)
    {
        queue_post(all_queues[i], id, data);
    }

    set_irq_level(oldlevel);
    return num_queues;
}

void yield(void)
{
    switch_thread(NULL);
}

void sleep(int ticks)
{
    sleep_thread(ticks);
}

void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}

int tick_add_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }
    fprintf(stderr, "Error! tick_add_task(): out of tasks");
    exit(-1);
    return -1;
}
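
/* Usage sketch (illustrative, not compiled): tick tasks run from the
 * simulated timer "interrupt" via sim_tick_tasks(), so they must be short
 * and non-blocking. The task name is hypothetical. */
#if 0
static void my_tick_task(void)
{
    /* runs once per tick; no blocking calls allowed here */
}

static void my_init(void)
{
    tick_add_task(my_tick_task);
}
#endif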

int tick_remove_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}

/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
}

void mutex_lock(struct mutex *m)
{
    struct thread_entry *const thread = thread_get_current();

    if(thread == m->thread)
    {
        m->count++;
        return;
    }

    if (!test_and_set(&m->locked, 1))
    {
        m->thread = thread;
        return;
    }

    block_thread_no_listlock(&m->queue);
}

void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    if(m->thread != thread_get_current())
    {
        fprintf(stderr, "mutex_unlock->wrong thread");
        exit(-1);
    }

    if (m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    m->thread = wakeup_thread_no_listlock(&m->queue);

    if (m->thread == NULL)
    {
        /* release lock */
        m->locked = 0;
    }
}
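
/* Usage sketch (illustrative, not compiled): the mutex is recursive, so
 * the owning thread may re-lock it as long as unlocks are balanced. Names
 * are hypothetical; mutex_init() must have been called first. */
#if 0
static struct mutex list_mutex;

static void list_modify(void)
{
    mutex_lock(&list_mutex);   /* blocks if another thread owns it */
    mutex_lock(&list_mutex);   /* same owner: just bumps m->count */
    /* ... touch the shared list ... */
    mutex_unlock(&list_mutex);
    mutex_unlock(&list_mutex); /* last unlock wakes a waiter or releases */
}
#endif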

void spinlock_init(struct spinlock *l)
{
    l->locked = 0;
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    struct thread_entry *const thread = thread_get_current();

    if (l->thread == thread)
    {
        l->count++;
        return;
    }

    while(test_and_set(&l->locked, 1))
    {
        switch_thread(NULL);
    }

    l->thread = thread;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    if(l->thread != thread_get_current())
    {
        fprintf(stderr, "spinlock_unlock->wrong thread");
        exit(-1);
    }

    if (l->count > 0)
    {
        /* this thread still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;
    l->locked = 0;
}
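
/* Usage sketch (illustrative, not compiled): unlike mutex_lock(), a
 * contended spinlock_lock() yields in a loop instead of blocking, so it
 * suits very short critical sections. Names are hypothetical. */
#if 0
static struct spinlock state_lock;
static volatile int shared_state;

static void update_state(int v)
{
    spinlock_lock(&state_lock);  /* spins via switch_thread() if taken */
    shared_state = v;
    spinlock_unlock(&state_lock);
}
#endif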

#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    if(max <= 0 || start < 0 || start > max)
    {
        fprintf(stderr, "semaphore_init->inv arg");
        exit(-1);
    }
    s->queue = NULL;
    s->max = max;
    s->count = start;
}

void semaphore_wait(struct semaphore *s)
{
    if(--s->count >= 0)
        return;
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
    if(s->count < s->max)
    {
        if(++s->count <= 0)
        {
            if(s->queue == NULL)
            {
                /* there should be threads in this queue */
                fprintf(stderr, "semaphore->wakeup");
                exit(-1);
            }
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }
}
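
/* Usage sketch (illustrative, not compiled): a semaphore initialized to
 * zero acts as a wake-up signal from a producer to a consumer. Names are
 * hypothetical. */
#if 0
static struct semaphore data_ready;

static void consumer(void)
{
    semaphore_init(&data_ready, 1, 0); /* max 1 pending signal, none yet */
    while(1)
    {
        semaphore_wait(&data_ready);   /* blocks until a release */
        process_data();
    }
}

static void producer(void)
{
    prepare_data();
    semaphore_release(&data_ready);    /* wakes the consumer */
}
#endif
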
#endif /* HAVE_SEMAPHORE_OBJECTS */

#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
}

void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state = e->state;

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
        return;
    }

    /* current state does not match wait-for state */
    block_thread_no_listlock(&e->queues[for_state]);
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state = e->state;

    if(last_state == state)
    {
        /* no change */
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;

            if(e->queues[STATE_NONSIGNALED] != NULL)
            {
                /* no thread should have ever blocked for nonsignaled */
                fprintf(stderr, "set_event_state->queue[NS]:S");
                exit(-1);
            }

            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */
        if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
        {
            /* no thread should have ever blocked */
            fprintf(stderr, "set_event_state->queue[NS]:NS");
            exit(-1);
        }

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }
}
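
/* Usage sketch (illustrative, not compiled): a manual-reset event gates
 * waiters until it is signaled; EVENT_AUTOMATIC instead wakes one waiter
 * and re-arms ("pulse"). Names are hypothetical. */
#if 0
static struct event init_done;

static void worker(void)
{
    event_wait(&init_done, STATE_SIGNALED); /* blocks until main signals */
    run();
}

static void main_thread(void)
{
    event_init(&init_done, STATE_NONSIGNALED);
    do_init();
    event_set_state(&init_done, STATE_SIGNALED); /* releases all waiters */
}
#endif
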
#endif /* HAVE_EVENT_OBJECTS */