/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include "memory.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"

static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* The simulator has no real interrupt levels; this stub just records the
   requested level and returns the previous one, which is what callers
   saving and restoring the level expect */
int set_irq_level(int level)
{
    static int _lv = 0;
    int old_level = _lv;
    _lv = level;
    return old_level;
}
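
/* Illustrative sketch (not part of the original file): callers bracket
   interrupt-sensitive sections by saving and restoring the level, as the
   queue functions below do. The function name is hypothetical; the block
   is compiled out with #if 0. */
#if 0
void example_critical_section(void)
{
    int oldlevel = set_irq_level(15<<4); /* "mask interrupts" */
    /* ... touch data shared with tick tasks here ... */
    set_irq_level(oldlevel);             /* restore the previous level */
}
#endif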

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves a waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    int old_level = set_irq_level(15<<4);
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }

    set_irq_level(old_level);
}

/* Puts the specified return value in the waiting thread's return slot
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    *sender = NULL;
}

/* Releases any waiting threads that are queued with queue_send -
   replies with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - the caller allocates the
   extra data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    q->send = send;
    memset(send, 0, sizeof(*send));
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
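
/* Illustrative sketch (not part of the original file): the queue's owner
   allocates the sender list itself and enables synchronous sends before
   entering its event loop. All "example_*" names are hypothetical. */
#if 0
static struct event_queue example_queue;
static struct queue_sender_list example_queue_senders;

void example_queue_setup(void)
{
    queue_init(&example_queue, true);
    queue_enable_queue_send(&example_queue, &example_queue_senders);
}
#endif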

void queue_init(struct event_queue *q, bool register_queue)
{
    (void)register_queue;

    q->read = 0;
    q->write = 0;
    q->thread = NULL;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif
}

void queue_delete(struct event_queue *q)
{
    (void)q;
}

void queue_wait(struct event_queue *q, struct event *ev)
{
    unsigned int rd;

    while(q->read == q->write)
    {
        switch_thread(true, NULL);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread, if there is one */
        queue_fetch_sender(q->send, rd);
    }
#endif
}

void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
    unsigned int timeout = current_tick + ticks;

    while(q->read == q->write && TIME_BEFORE(current_tick, timeout))
    {
        sim_sleep(1);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread, if there is one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(15<<4);
    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    set_irq_level(oldlevel);
}
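
/* Illustrative sketch (not part of the original file): a consumer loop
   pairing queue_wait_w_tmo() with a SYS_TIMEOUT check, and a producer
   posting asynchronously. Reuses the hypothetical example_queue from the
   sketch above; EXAMPLE_EV_PING is likewise made up. */
#if 0
#define EXAMPLE_EV_PING 1

void example_consumer_loop(void)
{
    struct event ev;

    while(1)
    {
        queue_wait_w_tmo(&example_queue, &ev, HZ);

        if(ev.id == SYS_TIMEOUT)
            continue; /* nothing arrived within HZ ticks */

        /* dispatch on ev.id / ev.data here */
    }
}

void example_producer(void)
{
    queue_post(&example_queue, EXAMPLE_EV_PING, 0);
}
#endif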

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(15<<4);
    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];
        static struct thread_entry sender;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        *spp = &sender;

        set_irq_level(oldlevel);
        while(*spp != NULL)
        {
            switch_thread(true, NULL);
        }

        return sender.retval;
    }

    /* Behave like queue_post if sending is not enabled */
    set_irq_level(oldlevel);
    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
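
/* Illustrative sketch (not part of the original file): a client blocks in
   queue_send() until the queue's owner dequeues the message and answers
   with queue_reply(). Reuses the hypothetical names from the sketches
   above. */
#if 0
/* client side - returns the owner's reply value */
intptr_t example_ask(void)
{
    return queue_send(&example_queue, EXAMPLE_EV_PING, 42);
}

/* owner side - called from its event loop after dequeuing */
void example_handle(struct event *ev)
{
    if(ev->id == EXAMPLE_EV_PING)
    {
        queue_reply(&example_queue, ev->data + 1); /* wakes the sender */
    }
}
#endif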

bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       any dequeued sent message will be handled by the owning thread */
    queue_release_all_senders(q);
#endif
    q->read = 0;
    q->write = 0;
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(15<<4);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
    /* The simulator ignores the scheduling hints and simply yields */
    (void)save_context;
    (void)blocked_list;

    yield();
}

void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}

int tick_add_task(void (*f)(void))
{
    int i;

    /* Add a task if there is room */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            return 0;
        }
    }
    DEBUGF("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;

    /* Remove a task if it is there */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            return 0;
        }
    }

    return -1;
}
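
/* Illustrative sketch (not part of the original file): registering a
   periodic callback that sim_tick_tasks() then runs on every simulated
   tick. The callback name is hypothetical. */
#if 0
static void example_tick_callback(void)
{
    /* keep this short - it runs in tick (interrupt-like) context */
}

void example_register_tick(void)
{
    if(tick_add_task(example_tick_callback) < 0)
    {
        DEBUGF("no free tick task slots");
    }

    /* ... and when no longer needed ... */
    tick_remove_task(example_tick_callback);
}
#endif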

/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->locked = false;
}

void mutex_lock(struct mutex *m)
{
    while(m->locked)
        switch_thread(true, NULL);
    m->locked = true;
}

void mutex_unlock(struct mutex *m)
{
    m->locked = false;
}

/* In the simulator, spinlocks behave exactly like the simple mutex above */
void spinlock_lock(struct mutex *m)
{
    while(m->locked)
        switch_thread(true, NULL);
    m->locked = true;
}

void spinlock_unlock(struct mutex *m)
{
    m->locked = false;
}
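
/* Illustrative sketch (not part of the original file): guarding shared
   state with the simulated mutex. Variable names are hypothetical, and
   example_lock must be set up once with mutex_init() before first use. */
#if 0
static struct mutex example_lock;
static int example_shared_counter;

void example_increment(void)
{
    mutex_lock(&example_lock);   /* yields until the lock is free */
    example_shared_counter++;    /* critical section */
    mutex_unlock(&example_lock);
}
#endif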