/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include <SDL.h>
#include <SDL_thread.h>
#include "memory.h"
#include "system-sdl.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"

/* Condition to signal that "interrupts" may proceed */
static SDL_cond *sim_thread_cond;
/* Mutex to serialize changing levels and exclude other threads while
 * inside a handler */
static SDL_mutex *sim_irq_mtx;
static int interrupt_level = HIGHEST_IRQ_LEVEL;
static int handlers_pending = 0;
static int status_reg = 0;

extern struct core_entry cores[NUM_CORES];

/* Necessary logic:
 * 1) All threads must pass unblocked
 * 2) The current handler must always pass unblocked
 * 3) Threads must be excluded while an irq routine is running
 * 4) No more than one handler routine may execute at a time
 */
int set_irq_level(int level)
{
    SDL_LockMutex(sim_irq_mtx);

    int oldlevel = interrupt_level;

    if (status_reg == 0 && level == 0 && oldlevel != 0)
    {
        /* Not in a handler and "interrupts" are being reenabled */
        if (handlers_pending > 0)
            SDL_CondSignal(sim_thread_cond);
    }

    interrupt_level = level; /* save new level */

    SDL_UnlockMutex(sim_irq_mtx);
    return oldlevel;
}

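/* Usage sketch: callers bracket a critical section with set_irq_level(),
 * saving and restoring the previous level, exactly as the queue functions
 * below do. The function name here is illustrative only. */
#if 0
static void example_critical_section(void)
{
    /* Block "interrupts" and remember the previous level */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* ... touch state shared with simulated interrupt handlers ... */

    /* Restore the previous level; a pending handler may now run */
    set_irq_level(oldlevel);
}
#endif
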
void sim_enter_irq_handler(void)
{
    SDL_LockMutex(sim_irq_mtx);
    handlers_pending++;

    if(interrupt_level != 0)
    {
        /* "Interrupts" are disabled. Wait for reenable */
        SDL_CondWait(sim_thread_cond, sim_irq_mtx);
    }

    status_reg = 1;
}

void sim_exit_irq_handler(void)
{
    if (--handlers_pending > 0)
        SDL_CondSignal(sim_thread_cond);

    status_reg = 0;
    SDL_UnlockMutex(sim_irq_mtx);
}

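/* Usage sketch: a simulated interrupt source wraps its handler in the
 * enter/exit pair so that it serializes against threads and against other
 * handlers. The handler name here is illustrative only. */
#if 0
static void example_timer_interrupt(void)
{
    sim_enter_irq_handler();
    /* ... perform the "interrupt" work, e.g. call sim_tick_tasks() ... */
    sim_exit_irq_handler();
}
#endif
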
bool sim_kernel_init(void)
{
    sim_irq_mtx = SDL_CreateMutex();
    if (sim_irq_mtx == NULL)
    {
        fprintf(stderr, "Cannot create sim_irq_mtx\n");
        return false;
    }

    sim_thread_cond = SDL_CreateCond();
    if (sim_thread_cond == NULL)
    {
        fprintf(stderr, "Cannot create sim_thread_cond\n");
        return false;
    }

    return true;
}

void sim_kernel_shutdown(void)
{
    SDL_DestroyMutex(sim_irq_mtx);
    SDL_DestroyCond(sim_thread_cond);
}

volatile long current_tick = 0;
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all registered queues; it is used for broadcast. */
static struct event_queue *all_queues[MAX_NUM_QUEUES];
static int num_queues = 0;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves a waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    if(*sender != NULL)
    {
        fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
        exit(-1);
    }
}

/* Releases any waiting threads that are queued with queue_send -
   reply with NULL */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];
            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    q->send = NULL;
    if(send)
    {
        q->send = send;
        memset(send, 0, sizeof(*send));
    }
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    q->read = 0;
    q->write = 0;
    thread_queue_init(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(num_queues >= MAX_NUM_QUEUES)
        {
            fprintf(stderr, "queue_init->out of queues");
            exit(-1);
        }
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
    }

    set_irq_level(oldlevel);
}

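/* Usage sketch: a thread creates and registers its queue once, and any
 * thread may then post events to it. The names here (example_queue,
 * MY_EVENT) are illustrative only. */
#if 0
static struct event_queue example_queue;

static void example_setup(void)
{
    /* Register so the queue also receives broadcast events */
    queue_init(&example_queue, true);
    queue_post(&example_queue, MY_EVENT, 0);
}
#endif
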
void queue_delete(struct event_queue *q)
{
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    {
        if(all_queues[i] == q)
        {
            found = true;
            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    /* Release threads waiting on the queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting senders and reply to any dequeued sent message
       still awaiting a reply */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}

void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
            cores[CURRENT_CORE].irq_level = oldlevel;
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        }
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread, if there is one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    set_irq_level(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
        cores[CURRENT_CORE].irq_level = oldlevel;
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread, if there is one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    set_irq_level(oldlevel);
}

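/* Usage sketch: the typical receiver loop blocks on its queue and treats
 * SYS_TIMEOUT as "no event arrived within the timeout". The function name
 * is illustrative only. */
#if 0
static void example_thread_loop(struct event_queue *q)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait_w_tmo(q, &ev, HZ);

        if(ev.id == SYS_TIMEOUT)
        {
            /* nothing arrived for a full second - do background work */
            continue;
        }

        /* ... dispatch on ev.id, using ev.data as the payload ... */
    }
}
#endif
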
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread(&q->queue);

    set_irq_level(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->queue);

        cores[CURRENT_CORE].irq_level = oldlevel;
        block_thread_no_listlock(spp);
        return thread_get_current()->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);
    set_irq_level(oldlevel);
    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

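/* Usage sketch of the synchronous send/reply handshake: the receiver must
 * have called queue_enable_queue_send() on its queue; the sender then
 * blocks in queue_send() until the receiver replies. The names (MY_QUERY,
 * example_*) are illustrative only. */
#if 0
/* Sender side: blocks until the receiver calls queue_reply() */
static intptr_t example_ask(struct event_queue *q)
{
    return queue_send(q, MY_QUERY, 0);
}

/* Receiver side: dequeue, then answer the blocked sender */
static void example_serve(struct event_queue *q)
{
    struct queue_event ev;
    queue_wait(q, &ev);
    if(ev.id == MY_QUERY)
        queue_reply(q, 42);
}
#endif
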
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if (q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    set_irq_level(oldlevel);

    return have_msg;
}

void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       the dequeued sent message will be handled by the owning thread */
    queue_release_all_senders(q);
#endif
    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    for(i = 0;i < num_queues;i++)
    {
        queue_post(all_queues[i], id, data);
    }

    set_irq_level(oldlevel);
    return num_queues;
}

void yield(void)
{
    switch_thread(NULL);
}

void sleep(int ticks)
{
    sleep_thread(ticks);
}

void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}

int tick_add_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }
    fprintf(stderr, "Error! tick_add_task(): out of tasks");
    exit(-1);
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}

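/* Usage sketch: a tick task runs from sim_tick_tasks() on every simulated
 * timer tick until it is removed. The names here are illustrative only. */
#if 0
static void example_tick_task(void)
{
    /* runs once per tick; keep it short - it delays every other task */
}

static void example_register(void)
{
    tick_add_task(example_tick_task);
    /* ... later ... */
    tick_remove_task(example_tick_task);
}
#endif
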
/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
}

void mutex_lock(struct mutex *m)
{
    struct thread_entry *const thread = thread_get_current();

    if(thread == m->thread)
    {
        m->count++;
        return;
    }

    if (!test_and_set(&m->locked, 1))
    {
        m->thread = thread;
        return;
    }

    block_thread_no_listlock(&m->queue);
}

void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    if(m->thread != thread_get_current())
    {
        fprintf(stderr, "mutex_unlock->wrong thread");
        exit(-1);
    }

    if (m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    m->thread = wakeup_thread_no_listlock(&m->queue);

    if (m->thread == NULL)
    {
        /* release lock */
        m->locked = 0;
    }
}

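/* Usage sketch: the mutex is recursive for its owner, so a thread may
 * re-take a lock it already holds as long as locks and unlocks pair up.
 * The names here are illustrative only. */
#if 0
static struct mutex example_mtx;

static void example_locked_update(void)
{
    mutex_lock(&example_mtx);   /* blocks if another thread owns it */
    /* ... modify the protected state ... */
    mutex_unlock(&example_mtx); /* wakes one waiter, if any */
}
#endif
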
void spinlock_init(struct spinlock *l)
{
    l->locked = 0;
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    struct thread_entry *const thread = thread_get_current();

    if (l->thread == thread)
    {
        l->count++;
        return;
    }

    while(test_and_set(&l->locked, 1))
    {
        switch_thread(NULL);
    }

    l->thread = thread;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    if(l->thread != thread_get_current())
    {
        fprintf(stderr, "spinlock_unlock->wrong thread");
        exit(-1);
    }

    if (l->count > 0)
    {
        /* this thread still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;
    l->locked = 0;
}

#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    if(max <= 0 || start < 0 || start > max)
    {
        fprintf(stderr, "semaphore_init->inv arg");
        exit(-1);
    }
    s->queue = NULL;
    s->max = max;
    s->count = start;
}

void semaphore_wait(struct semaphore *s)
{
    if(--s->count >= 0)
        return;
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
    if(s->count < s->max)
    {
        if(++s->count <= 0)
        {
            if(s->queue == NULL)
            {
                /* there should be threads in this queue */
                fprintf(stderr, "semaphore->wakeup");
                exit(-1);
            }
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }
}
#endif /* HAVE_SEMAPHORE_OBJECTS */

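/* Usage sketch: a counting semaphore initialized to the number of free
 * slots gives a bounded producer/consumer handoff. The names here are
 * illustrative only. */
#if 0
static struct semaphore slots_free;

static void example_producer_consumer(void)
{
    semaphore_init(&slots_free, 4, 4); /* 4 slots, all free initially */

    /* producer: claim a slot, blocking while all 4 are in use */
    semaphore_wait(&slots_free);
    /* ... fill the slot ... */

    /* consumer: empty the slot, then hand it back */
    semaphore_release(&slots_free);
}
#endif
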
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
}

void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state = e->state;

    if(e->automatic != 0)
    {
        /* a wait for nonsignaled is always satisfied by definition, and a
           wait for signaled is satisfied if the event is signaled now - in
           either case the automatic event resets to nonsignaled */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
        return;
    }

    /* current state does not match wait-for state */
    block_thread_no_listlock(&e->queues[for_state]);
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state = e->state;

    if(last_state == state)
    {
        /* no change */
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;

            if(e->queues[STATE_NONSIGNALED] != NULL)
            {
                /* no thread should have ever blocked for nonsignaled */
                fprintf(stderr, "set_event_state->queue[NS]:S");
                exit(-1);
            }

            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */
        if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
        {
            /* no thread should have ever blocked */
            fprintf(stderr, "set_event_state->queue[NS]:NS");
            exit(-1);
        }

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }
}
#endif /* HAVE_EVENT_OBJECTS */
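
/* Usage sketch: an automatic event "pulses" - each set to signaled releases
 * at most one waiter and resets itself. The flag combination and names here
 * are illustrative only. */
#if 0
static struct event example_evt;

static void example_event_use(void)
{
    event_init(&example_evt, EVENT_AUTOMATIC | STATE_NONSIGNALED);

    /* waiter: blocks until another thread signals the event */
    event_wait(&example_evt, STATE_SIGNALED);
}

static void example_event_signal(void)
{
    /* wakes one waiter (if any) and immediately resets to nonsignaled */
    event_set_state(&example_evt, STATE_SIGNALED);
}
#endif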