/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#if CONFIG_CPU == IMX31L
#include "avic-imx31.h"
#endif

/* Make this nonzero to enable more elaborate checks on objects */
#ifdef DEBUG
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initialized. It is used for
   broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
#if NUM_CORES > 1
    struct corelock cl;
#endif
} all_queues NOCACHEBSS_ATTR;

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}

void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21;    // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20);  // start timer 4
    do {
        counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread(NULL);
#else
    sleep_thread(ticks);
#endif
}

void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread(NULL);
#endif
}
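
/* Illustrative sketch (not compiled): a typical worker thread body using
   sleep() and yield(). The thread function and the work predicate below
   are hypothetical, not part of the kernel. */
#if 0
static void example_thread(void)
{
    while (1)
    {
        if (have_work_pending())  /* hypothetical predicate */
            yield();              /* let other threads run between steps */
        else
            sleep(HZ/10);         /* block this thread for 100 ms */
    }
}
#endif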

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 * 1) A sender should be confirmed to exist before calling, which makes it
 *    more efficient to reject the majority of cases that don't need this
 *    call.
 * 2) Requires interrupts disabled since queue overflows can cause posts
 *    from interrupt handlers to wake threads. Not doing so could cause
 *    an attempt at multiple wakes or other problems.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    /* This should _never_ happen - there must never be multiple
       threads in this list and it is a corrupt state */
    KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 * Disable IRQs and lock before calling since it uses
 * queue_release_sender.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure. Only queues which are taken to be owned by a thread should
   enable this. Public waiting is not permitted. */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    q->send = NULL;
    if(send != NULL)
    {
        memset(send, 0, sizeof(*send));
        q->send = send;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
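
/* Illustrative sketch (not compiled): the owning thread allocates the
   sender list itself and enables synchronous sends on its own queue.
   The queue and list names are hypothetical; this only applies when
   HAVE_EXTENDED_MESSAGING_AND_NAME is defined. */
#if 0
static struct event_queue example_queue;
static struct queue_sender_list example_queue_senders;

static void example_owner_init(void)
{
    queue_init(&example_queue, true);
    queue_enable_queue_send(&example_queue, &example_queue_senders);
}
#endif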

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    thread_queue_init(&q->queue);
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    set_irq_level(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads for reply and reply to any dequeued
       message waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
#if CONFIG_CORELOCK == CORELOCK_NONE
            cores[CURRENT_CORE].irq_level = oldlevel;
#elif CONFIG_CORELOCK == SW_CORELOCK
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.irq_level = oldlevel;
            cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
            cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.irq_level = oldlevel;
            cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
            cores[core].blk_ops.var_u8p = &q->cl.locked;
            cores[core].blk_ops.var_u8v = 0;
#endif /* CONFIG_CORELOCK */
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
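
/* Illustrative sketch (not compiled): the classic owner-thread event loop
   built on queue_wait(). The queue object and handler are hypothetical;
   SYS_USB_CONNECTED stands in for any broadcast/event ID. */
#if 0
static struct event_queue example_queue;

static void example_thread(void)
{
    struct queue_event ev;

    while (1)
    {
        queue_wait(&example_queue, &ev); /* blocks until a message arrives */

        switch (ev.id)
        {
        case SYS_USB_CONNECTED:
            /* handle the event; ev.data carries the posted payload */
            break;
        }
    }
}
#endif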

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
#if CONFIG_CORELOCK == CORELOCK_NONE
        cores[CURRENT_CORE].irq_level = oldlevel;
#elif CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.irq_level = oldlevel;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.irq_level = oldlevel;
        cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
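
/* Illustrative sketch (not compiled): waiting with a timeout so the thread
   can do periodic housekeeping when no message arrives within one second.
   The wrapper function is hypothetical. */
#if 0
static void example_poll(struct event_queue *q)
{
    struct queue_event ev;

    queue_wait_w_tmo(q, &ev, HZ); /* wait at most HZ ticks (one second) */

    if (ev.id == SYS_TIMEOUT)
        ; /* no message arrived - do periodic work here */
    else
        ; /* handle ev.id / ev.data as usual */
}
#endif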
| 396 | |
Michael Sevakis | 4b90267 | 2006-12-19 16:50:07 +0000 | [diff] [blame] | 397 | void queue_post(struct event_queue *q, long id, intptr_t data) |
Linus Nielsen Feltzing | bd2561d | 2002-04-29 14:25:44 +0000 | [diff] [blame] | 398 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 399 | int oldlevel; |
Miika Pekkarinen | 66258a3 | 2007-03-26 16:55:17 +0000 | [diff] [blame] | 400 | unsigned int wr; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 401 | |
| 402 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
| 403 | corelock_lock(&q->cl); |
| 404 | |
Miika Pekkarinen | 66258a3 | 2007-03-26 16:55:17 +0000 | [diff] [blame] | 405 | wr = q->write++ & QUEUE_LENGTH_MASK; |
Linus Nielsen Feltzing | b7104fc | 2002-05-08 22:07:41 +0000 | [diff] [blame] | 406 | |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 407 | q->events[wr].id = id; |
Linus Nielsen Feltzing | bd2561d | 2002-04-29 14:25:44 +0000 | [diff] [blame] | 408 | q->events[wr].data = data; |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 409 | |
| 410 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
| 411 | if(q->send) |
| 412 | { |
Michael Sevakis | 0caf3b8 | 2007-03-21 22:58:53 +0000 | [diff] [blame] | 413 | struct thread_entry **spp = &q->send->senders[wr]; |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 414 | |
Michael Sevakis | 0caf3b8 | 2007-03-21 22:58:53 +0000 | [diff] [blame] | 415 | if (*spp) |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 416 | { |
| 417 | /* overflow protect - unblock any thread waiting at this index */ |
Michael Sevakis | 4b90267 | 2006-12-19 16:50:07 +0000 | [diff] [blame] | 418 | queue_release_sender(spp, 0); |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 419 | } |
| 420 | } |
| 421 | #endif |
| 422 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 423 | /* Wakeup a waiting thread if any */ |
| 424 | wakeup_thread(&q->queue); |
| 425 | |
| 426 | corelock_unlock(&q->cl); |
Linus Nielsen Feltzing | b7104fc | 2002-05-08 22:07:41 +0000 | [diff] [blame] | 427 | set_irq_level(oldlevel); |
Linus Nielsen Feltzing | bd2561d | 2002-04-29 14:25:44 +0000 | [diff] [blame] | 428 | } |
Linus Nielsen Feltzing | 9e142da | 2002-05-05 18:34:06 +0000 | [diff] [blame] | 429 | |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 430 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 431 | /* IRQ handlers are not allowed use of this function - we only aim to |
| 432 | protect the queue integrity by turning them off. */ |
Michael Sevakis | 4b90267 | 2006-12-19 16:50:07 +0000 | [diff] [blame] | 433 | intptr_t queue_send(struct event_queue *q, long id, intptr_t data) |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 434 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 435 | int oldlevel; |
Miika Pekkarinen | 66258a3 | 2007-03-26 16:55:17 +0000 | [diff] [blame] | 436 | unsigned int wr; |
Jens Arnold | 0b7bb31 | 2007-04-14 09:47:47 +0000 | [diff] [blame] | 437 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 438 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
| 439 | corelock_lock(&q->cl); |
| 440 | |
Miika Pekkarinen | 66258a3 | 2007-03-26 16:55:17 +0000 | [diff] [blame] | 441 | wr = q->write++ & QUEUE_LENGTH_MASK; |
| 442 | |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 443 | q->events[wr].id = id; |
| 444 | q->events[wr].data = data; |
Michael Sevakis | 0caf3b8 | 2007-03-21 22:58:53 +0000 | [diff] [blame] | 445 | |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 446 | if(q->send) |
| 447 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 448 | const unsigned int core = CURRENT_CORE; |
Michael Sevakis | 0caf3b8 | 2007-03-21 22:58:53 +0000 | [diff] [blame] | 449 | struct thread_entry **spp = &q->send->senders[wr]; |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 450 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 451 | if(*spp) |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 452 | { |
| 453 | /* overflow protect - unblock any thread waiting at this index */ |
Michael Sevakis | 4b90267 | 2006-12-19 16:50:07 +0000 | [diff] [blame] | 454 | queue_release_sender(spp, 0); |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 455 | } |
| 456 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 457 | /* Wakeup a waiting thread if any */ |
| 458 | wakeup_thread(&q->queue); |
| 459 | |
| 460 | #if CONFIG_CORELOCK == CORELOCK_NONE |
| 461 | cores[core].irq_level = oldlevel; |
| 462 | #elif CONFIG_CORELOCK == SW_CORELOCK |
| 463 | cores[core].blk_ops.irq_level = oldlevel; |
| 464 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL; |
| 465 | cores[core].blk_ops.cl_p = &q->cl; |
| 466 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 467 | cores[core].blk_ops.irq_level = oldlevel; |
| 468 | cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL; |
| 469 | cores[core].blk_ops.var_u8p = &q->cl.locked; |
| 470 | cores[core].blk_ops.var_u8v = 0; |
| 471 | #endif |
| 472 | block_thread_no_listlock(spp); |
| 473 | return cores[core].running->retval; |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 474 | } |
| 475 | |
| 476 | /* Function as queue_post if sending is not enabled */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 477 | wakeup_thread(&q->queue); |
| 478 | |
| 479 | corelock_unlock(&q->cl); |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 480 | set_irq_level(oldlevel); |
Michael Sevakis | 0caf3b8 | 2007-03-21 22:58:53 +0000 | [diff] [blame] | 481 | |
Michael Sevakis | 4b90267 | 2006-12-19 16:50:07 +0000 | [diff] [blame] | 482 | return 0; |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 483 | } |
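
/* Illustrative sketch (not compiled): the client side of a synchronous
   exchange. queue_send() blocks until the owner replies (or the owner's
   next queue_wait() auto-replies with 0), and the reply value becomes the
   return value. EXAMPLE_MSG_REQUEST is a hypothetical message ID. */
#if 0
static intptr_t example_request(struct event_queue *owner_q)
{
    /* Blocks here until the owning thread calls queue_reply() */
    return queue_send(owner_q, EXAMPLE_MSG_REQUEST, 0);
}
#endif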

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
#if NUM_CORES > 1
        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
        /* Double-check locking */
        if(q->send && q->send->curr_sender)
        {
#endif

        queue_release_sender(&q->send->curr_sender, retval);

#if NUM_CORES > 1
        }
        corelock_unlock(&q->cl);
        set_irq_level(oldlevel);
#endif
    }
}
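
/* Illustrative sketch (not compiled): the owner side of a synchronous
   exchange. After dequeuing a sent message, the owner replies explicitly;
   otherwise its next queue_wait() auto-replies with 0. EXAMPLE_MSG_REQUEST
   is a hypothetical message ID. */
#if 0
static void example_owner_loop(struct event_queue *q)
{
    struct queue_event ev;

    while (1)
    {
        queue_wait(q, &ev);

        if (ev.id == EXAMPLE_MSG_REQUEST)
            queue_reply(q, 1); /* the sender's queue_send() returns 1 */
    }
}
#endif
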
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if (*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q The event_queue to check
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    set_irq_level(oldlevel);
#endif

    return i;
}
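
/* Illustrative sketch (not compiled): broadcasting posts the same message
   to every queue registered with queue_init(q, true); the return value is
   how many queues received it. The wrapper is hypothetical. */
#if 0
static void example_notify_all(void)
{
    int n = queue_broadcast(SYS_USB_CONNECTED, 0);
    (void)n; /* n == number of registered queues posted to */
}
#endif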

/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;   /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TSR0 &= ~0x01;
    TIER0 = 0xf9; /* Enable GRA match interrupt */

    TSTR |= 0x01; /* Start timer 0 */
}

void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TSR0 &= ~0x01;
}
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0; /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
          /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff; /* Clear all events */

    ICR1 = 0x8c; /* Interrupt on level 3.0 */
    IMR &= ~0x200;
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TER0 = 0xff; /* Clear all events */
}

#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using the main CPU core -
       wake up the COP through its control interface to provide the pulse */
    for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if (tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

#if NUM_CORES > 1
    /* Pulse the COP */
    core_wake(COP);
#endif /* NUM_CORES */

    current_tick++;
}
#endif

/* Must be last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    TIMER1_CFG = 0x0;
    TIMER1_VAL;
    /* enable timer */
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}

#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TIMER0.clr = 0;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}
#elif CONFIG_CPU == IMX31L
void tick_start(unsigned int interval_in_ms)
{
    EPITCR1 &= ~0x1;    /* Disable the counter */

    EPITCR1 &= ~0xE;    /* Disable interrupt, count down from 0xFFFFFFFF */
    EPITCR1 &= ~0xFFF0; /* Clear prescaler */
#ifdef BOOTLOADER
    EPITCR1 |= (2700 << 2); /* Prescaler = 2700 */
#endif
    EPITCR1 &= ~(0x3 << 24);
    EPITCR1 |= (0x2 << 24); /* Set clock source to external clock (27 MHz) */
    EPITSR1 = 1;            /* Clear the interrupt request */
#ifndef BOOTLOADER
    EPITLR1 = 27000000 * interval_in_ms / 1000;
    EPITCMPR1 = 27000000 * interval_in_ms / 1000;
#else
    (void)interval_in_ms;
#endif

    //avic_enable_int(EPIT1, IRQ, EPIT_HANDLER);

    EPITCR1 |= 0x1; /* Enable the counter */
}

#ifndef BOOTLOADER
void EPIT_HANDLER(void) __attribute__((interrupt("IRQ")));
void EPIT_HANDLER(void) {
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    EPITSR1 = 1; /* Clear the interrupt request */
}
#endif
#endif

int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }
    set_irq_level(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}
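
/* Illustrative sketch (not compiled): a tick task runs in interrupt context
   on every system tick, so it must be short and must never block. The task
   and helpers below are hypothetical. */
#if 0
static volatile long example_elapsed_ticks = 0;

static void example_tick_task(void)
{
    example_elapsed_ticks++; /* keep it brief - this runs every tick */
}

static void example_start_counting(void)
{
    tick_add_task(example_tick_task); /* 0 on success; panics if table full */
}

static void example_stop_counting(void)
{
    tick_remove_task(example_tick_task);
}
#endif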

/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    set_irq_level(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    set_irq_level(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
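
/* Illustrative sketch (not compiled): a one-shot that re-arms itself twice.
   Returning nonzero from the callback reloads the timeout for another
   tmo->ticks interval; returning 0 cancels it. The names and the choice of
   data as a countdown are hypothetical. */
#if 0
static struct timeout example_tmo;

static int example_timeout_cb(struct timeout *tmo)
{
    /* tmo->data was supplied at registration time; handlers may alter it */
    return tmo->data-- > 0; /* re-arm while positive, then stop */
}

static void example_arm(void)
{
    /* fire after half a second, then re-fire twice more */
    timeout_register(&example_tmo, example_timeout_cb, HZ/2, 2);
}
#endif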
| 994 | |
Linus Nielsen Feltzing | 7361340 | 2002-05-16 20:57:32 +0000 | [diff] [blame] | 995 | /**************************************************************************** |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 996 | * Simple mutex functions ;) |
Linus Nielsen Feltzing | 7361340 | 2002-05-16 20:57:32 +0000 | [diff] [blame] | 997 | ****************************************************************************/ |
| 998 | void mutex_init(struct mutex *m) |
| 999 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1000 | m->queue = NULL; |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1001 | m->thread = NULL; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1002 | m->count = 0; |
| 1003 | m->locked = 0; |
| 1004 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1005 | corelock_init(&m->cl); |
| 1006 | #endif |
Linus Nielsen Feltzing | 7361340 | 2002-05-16 20:57:32 +0000 | [diff] [blame] | 1007 | } |
| 1008 | |
| 1009 | void mutex_lock(struct mutex *m) |
| 1010 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1011 | const unsigned int core = CURRENT_CORE; |
| 1012 | struct thread_entry *const thread = cores[core].running; |
| 1013 | |
| 1014 | if(thread == m->thread) |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1015 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1016 | m->count++; |
| 1017 | return; |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1018 | } |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1019 | |
| 1020 |     /* Some code is duplicated between the corelock variants here;
| 1021 |        factoring it out would make the variations too difficult to read */
| 1022 | #if CONFIG_CORELOCK == CORELOCK_SWAP |
| 1023 | /* peek at lock until it's no longer busy */ |
| 1024 | unsigned int locked; |
| 1025 | while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8); |
| 1026 | if(locked == 0) |
| 1027 | { |
| 1028 | m->thread = thread; |
| 1029 | m->locked = 1; |
| 1030 | return; |
| 1031 | } |
| 1032 | |
| 1033 | /* Block until the lock is open... */ |
| 1034 | cores[core].blk_ops.flags = TBOP_SET_VARu8; |
| 1035 | cores[core].blk_ops.var_u8p = &m->locked; |
| 1036 | cores[core].blk_ops.var_u8v = 1; |
| 1037 | #else |
| 1038 | corelock_lock(&m->cl); |
| 1039 | if (m->locked == 0) |
| 1040 | { |
| 1041 | m->locked = 1; |
| 1042 | m->thread = thread; |
| 1043 | corelock_unlock(&m->cl); |
| 1044 | return; |
| 1045 | } |
| 1046 | |
| 1047 | /* Block until the lock is open... */ |
| 1048 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1049 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; |
| 1050 | cores[core].blk_ops.cl_p = &m->cl; |
| 1051 | #endif |
| 1052 | #endif /* CONFIG_CORELOCK */ |
| 1053 | |
| 1054 | block_thread_no_listlock(&m->queue); |
Linus Nielsen Feltzing | 7361340 | 2002-05-16 20:57:32 +0000 | [diff] [blame] | 1055 | } |
| 1056 | |
| 1057 | void mutex_unlock(struct mutex *m) |
| 1058 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1059 | /* unlocker not being the owner is an unlocking violation */ |
| 1060 | KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running, |
| 1061 | "mutex_unlock->wrong thread (recurse)"); |
Daniel Stenberg | 22b7701 | 2005-02-22 12:19:12 +0000 | [diff] [blame] | 1062 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1063 | if(m->count > 0) |
Michael Sevakis | dee43ec | 2007-03-09 08:03:18 +0000 | [diff] [blame] | 1064 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1065 | /* this thread still owns lock */ |
| 1066 | m->count--; |
| 1067 | return; |
| 1068 | } |
| 1069 | |
| 1070 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1071 | /* lock out other cores */ |
| 1072 | corelock_lock(&m->cl); |
| 1073 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1074 | /* wait for peeker to move on */ |
| 1075 | while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8); |
| 1076 | #endif |
| 1077 | |
| 1078 | /* transfer to next queued thread if any */ |
Michael Sevakis | 6fac8fc | 2007-10-21 19:10:03 +0000 | [diff] [blame^] | 1079 | |
| 1080 |     /* This can become busy using SWP but is safe since only one thread
| 1081 |        will be changing things at a time. Allowing timeout waits would
| 1082 |        change that, but they are not supported yet. There is also a
| 1083 |        hazard that the thread could be killed before performing the
| 1084 |        wakeup, but doing that would simply be irresponsible. :-) */
| 1085 | m->thread = m->queue; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1086 | |
| 1087 | if(m->thread == NULL) |
| 1088 | { |
| 1089 | m->locked = 0; /* release lock */ |
| 1090 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1091 | corelock_unlock(&m->cl); |
| 1092 | #endif |
| 1093 | } |
| 1094 | else /* another thread is waiting - remain locked */ |
| 1095 | { |
Michael Sevakis | 6fac8fc | 2007-10-21 19:10:03 +0000 | [diff] [blame^] | 1096 | wakeup_thread_no_listlock(&m->queue); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1097 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1098 | corelock_unlock(&m->cl); |
| 1099 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1100 | m->locked = 1; |
| 1101 | #endif |
Michael Sevakis | dee43ec | 2007-03-09 08:03:18 +0000 | [diff] [blame] | 1102 | } |
| 1103 | } |
| 1104 | |
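| | /* Usage sketch (illustrative only, hence #if 0; all "example_*" names
| |    are made up).  The mutex is recursive for its owner: every extra
| |    mutex_lock() just bumps m->count and must be paired with an unlock. */
| | #if 0
| | static struct mutex example_mutex;
| | static int example_shared_count;
| | 
| | static void example_init(void)
| | {
| |     mutex_init(&example_mutex); /* once, before any lock/unlock */
| | }
| | 
| | static void example_update(void)
| | {
| |     mutex_lock(&example_mutex);   /* blocks this thread until free */
| |     example_shared_count++;       /* critical section */
| |     mutex_unlock(&example_mutex); /* hands off to one queued waiter */
| | }
| | #endif /* 0: usage sketch */
| | 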
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1105 | /**************************************************************************** |
| 1106 | * Simpl-er mutex functions ;) |
| 1107 | ****************************************************************************/ |
| 1108 | void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags)) |
Michael Sevakis | dee43ec | 2007-03-09 08:03:18 +0000 | [diff] [blame] | 1109 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1110 | l->locked = 0; |
| 1111 | l->thread = NULL; |
| 1112 | l->count = 0; |
| 1113 | #if NUM_CORES > 1 |
| 1114 | l->task_switch = flags & SPINLOCK_TASK_SWITCH; |
| 1115 | corelock_init(&l->cl); |
| 1116 | #endif |
Michael Sevakis | dee43ec | 2007-03-09 08:03:18 +0000 | [diff] [blame] | 1117 | } |
| 1118 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1119 | void spinlock_lock(struct spinlock *l) |
| 1120 | { |
| 1121 | struct thread_entry *const thread = cores[CURRENT_CORE].running; |
| 1122 | |
| 1123 | if (l->thread == thread) |
| 1124 | { |
| 1125 | l->count++; |
| 1126 | return; |
| 1127 | } |
| 1128 | |
| 1129 | #if NUM_CORES > 1 |
| 1130 | if (l->task_switch != 0) |
| 1131 | #endif |
| 1132 | { |
| 1133 | /* Let other threads run until the lock is free */ |
| 1134 | while(test_and_set(&l->locked, 1, &l->cl) != 0) |
| 1135 | { |
| 1136 | /* spin and switch until the lock is open... */ |
| 1137 | switch_thread(NULL); |
| 1138 | } |
| 1139 | } |
| 1140 | #if NUM_CORES > 1 |
| 1141 | else |
| 1142 | { |
| 1143 |         /* Use only the corelock; don't yield while spinning */
| 1144 | corelock_lock(&l->cl); |
| 1145 | } |
| 1146 | #endif |
| 1147 | |
| 1148 | l->thread = thread; |
| 1149 | } |
| 1150 | |
| 1151 | void spinlock_unlock(struct spinlock *l) |
| 1152 | { |
| 1153 | /* unlocker not being the owner is an unlocking violation */ |
| 1154 | KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running, |
| 1155 | "spinlock_unlock->wrong thread"); |
| 1156 | |
| 1157 | if (l->count > 0) |
| 1158 | { |
| 1159 | /* this thread still owns lock */ |
| 1160 | l->count--; |
| 1161 | return; |
| 1162 | } |
| 1163 | |
| 1164 | /* clear owner */ |
| 1165 | l->thread = NULL; |
| 1166 | |
| 1167 | #if NUM_CORES > 1 |
| 1168 | if (l->task_switch != 0) |
| 1169 | #endif |
| 1170 | { |
| 1171 | /* release lock */ |
| 1172 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1173 |         /* Take the corelock so that a concurrent test_and_set cannot
| 1174 |            miss our unlock and leave the object locked permanently */
| 1175 | corelock_lock(&l->cl); |
| 1176 | #endif |
| 1177 | l->locked = 0; |
| 1178 | } |
| 1179 | |
| 1180 | #if NUM_CORES > 1 |
| 1181 | corelock_unlock(&l->cl); |
| 1182 | #endif |
| 1183 | } |
| 1184 | |
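| | /* Usage sketch (illustrative only, hence #if 0; all "example_*" names
| |    are made up).  IF_COP() passes its arguments only on multi-core
| |    builds; SPINLOCK_TASK_SWITCH selects the spin-and-yield behaviour
| |    over blocking purely on the corelock. */
| | #if 0
| | static struct spinlock example_lock;
| | static int example_shared_state;
| | 
| | static void example_spinlock_usage(void)
| | {
| |     spinlock_init(&example_lock IF_COP(, SPINLOCK_TASK_SWITCH));
| | 
| |     spinlock_lock(&example_lock);   /* spins/yields until acquired */
| |     example_shared_state++;         /* keep the critical section short */
| |     spinlock_unlock(&example_lock);
| | }
| | #endif /* 0: usage sketch */
| | 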
| 1185 | /**************************************************************************** |
| 1186 | * Simple semaphore functions ;) |
| 1187 | ****************************************************************************/ |
| 1188 | #ifdef HAVE_SEMAPHORE_OBJECTS |
| 1189 | void semaphore_init(struct semaphore *s, int max, int start) |
| 1190 | { |
| 1191 | KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, |
| 1192 | "semaphore_init->inv arg"); |
| 1193 | s->queue = NULL; |
| 1194 | s->max = max; |
| 1195 | s->count = start; |
| 1196 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1197 | corelock_init(&s->cl); |
| 1198 | #endif |
| 1199 | } |
| 1200 | |
| 1201 | void semaphore_wait(struct semaphore *s) |
| 1202 | { |
| 1203 | #if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK |
| 1204 | corelock_lock(&s->cl); |
| 1205 | if(--s->count >= 0) |
| 1206 | { |
| 1207 | corelock_unlock(&s->cl); |
| 1208 | return; |
| 1209 | } |
| 1210 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1211 | int count; |
| 1212 | while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi); |
| 1213 | if(--count >= 0) |
| 1214 | { |
| 1215 | s->count = count; |
| 1216 | return; |
| 1217 | } |
| 1218 | #endif |
| 1219 | |
| 1220 |     /* count exhausted - block until a release dequeues us */
| 1221 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1222 | const unsigned int core = CURRENT_CORE; |
| 1223 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; |
| 1224 | cores[core].blk_ops.cl_p = &s->cl; |
| 1225 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1226 | const unsigned int core = CURRENT_CORE; |
| 1227 | cores[core].blk_ops.flags = TBOP_SET_VARi; |
| 1228 | cores[core].blk_ops.var_ip = &s->count; |
| 1229 | cores[core].blk_ops.var_iv = count; |
| 1230 | #endif |
| 1231 | block_thread_no_listlock(&s->queue); |
| 1232 | } |
| 1233 | |
| 1234 | void semaphore_release(struct semaphore *s) |
| 1235 | { |
| 1236 | #if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK |
| 1237 | corelock_lock(&s->cl); |
| 1238 | if (s->count < s->max) |
| 1239 | { |
| 1240 | if (++s->count <= 0) |
| 1241 | { |
| 1242 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1243 | int count; |
| 1244 | while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi); |
| 1245 | if(count < s->max) |
| 1246 | { |
| 1247 | if(++count <= 0) |
| 1248 | { |
| 1249 | #endif /* CONFIG_CORELOCK */ |
| 1250 | |
| 1251 | /* there should be threads in this queue */ |
| 1252 |             KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup");
| 1253 | /* a thread was queued - wake it up */ |
| 1254 | wakeup_thread_no_listlock(&s->queue); |
| 1255 | } |
| 1256 | } |
| 1257 | |
| 1258 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1259 | corelock_unlock(&s->cl); |
| 1260 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1261 | s->count = count; |
| 1262 | #endif |
| 1263 | } |
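| | 
| | /* Usage sketch (illustrative only, hence #if 0; all "example_*" names
| |    are made up).  With start == 0 the semaphore acts as a signalling
| |    primitive: the count goes negative while threads wait, and each
| |    release wakes exactly one of them. */
| | #if 0
| | static struct semaphore example_done;
| | 
| | static void example_setup(void)
| | {
| |     semaphore_init(&example_done, 1, 0); /* max 1, initially taken */
| | }
| | 
| | static void example_wait_side(void)
| | {
| |     semaphore_wait(&example_done);    /* blocks until released */
| | }
| | 
| | static void example_release_side(void)
| | {
| |     semaphore_release(&example_done); /* wakes a waiter or sets count=1 */
| | }
| | #endif /* 0: usage sketch */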
| 1264 | #endif /* HAVE_SEMAPHORE_OBJECTS */ |
| 1265 | |
| 1266 | /**************************************************************************** |
| 1267 | * Simple event functions ;) |
| 1268 | ****************************************************************************/ |
| 1269 | #ifdef HAVE_EVENT_OBJECTS |
| 1270 | void event_init(struct event *e, unsigned int flags) |
| 1271 | { |
| 1272 | e->queues[STATE_NONSIGNALED] = NULL; |
| 1273 | e->queues[STATE_SIGNALED] = NULL; |
| 1274 | e->state = flags & STATE_SIGNALED; |
| 1275 | e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0; |
| 1276 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1277 | corelock_init(&e->cl); |
| 1278 | #endif |
| 1279 | } |
| 1280 | |
| 1281 | void event_wait(struct event *e, unsigned int for_state) |
| 1282 | { |
| 1283 | unsigned int last_state; |
| 1284 | #if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK |
| 1285 | corelock_lock(&e->cl); |
| 1286 | last_state = e->state; |
| 1287 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1288 | while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8); |
| 1289 | #endif |
| 1290 | |
| 1291 | if(e->automatic != 0) |
| 1292 | { |
| 1293 |         /* a wait for nonsignaled is satisfied by definition, and a
| 1294 |            wait for signaled consumes a currently signaled state */
| 1295 | if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED) |
| 1296 | { |
| 1297 | /* automatic - unsignal */ |
| 1298 | e->state = STATE_NONSIGNALED; |
| 1299 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1300 | corelock_unlock(&e->cl); |
| 1301 | #endif |
| 1302 | return; |
| 1303 | } |
| 1304 | /* block until state matches */ |
| 1305 | } |
| 1306 | else if(for_state == last_state) |
| 1307 | { |
| 1308 | /* the state being waited for is the current state */ |
| 1309 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1310 | corelock_unlock(&e->cl); |
| 1311 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1312 | e->state = last_state; |
| 1313 | #endif |
| 1314 | return; |
| 1315 | } |
| 1316 | |
| 1317 | { |
| 1318 | /* current state does not match wait-for state */ |
| 1319 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1320 | const unsigned int core = CURRENT_CORE; |
| 1321 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; |
| 1322 | cores[core].blk_ops.cl_p = &e->cl; |
| 1323 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1324 | const unsigned int core = CURRENT_CORE; |
| 1325 | cores[core].blk_ops.flags = TBOP_SET_VARu8; |
| 1326 | cores[core].blk_ops.var_u8p = &e->state; |
| 1327 | cores[core].blk_ops.var_u8v = last_state; |
| 1328 | #endif |
| 1329 | block_thread_no_listlock(&e->queues[for_state]); |
| 1330 | } |
| 1331 | } |
| 1332 | |
| 1333 | void event_set_state(struct event *e, unsigned int state) |
| 1334 | { |
| 1335 | unsigned int last_state; |
| 1336 | #if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK |
| 1337 | corelock_lock(&e->cl); |
| 1338 | last_state = e->state; |
| 1339 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1340 | while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8); |
| 1341 | #endif |
| 1342 | |
| 1343 | if(last_state == state) |
| 1344 | { |
| 1345 | /* no change */ |
| 1346 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1347 | corelock_unlock(&e->cl); |
| 1348 | #elif CONFIG_CORELOCK == CORELOCK_SWAP |
| 1349 | e->state = last_state; |
| 1350 | #endif |
| 1351 | return; |
| 1352 | } |
| 1353 | |
| 1354 | if(state == STATE_SIGNALED) |
| 1355 | { |
| 1356 | if(e->automatic != 0) |
| 1357 | { |
| 1358 | struct thread_entry *thread; |
| 1359 | /* no thread should have ever blocked for unsignaled */ |
| 1360 |             KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
| 1361 | "set_event_state->queue[NS]:S"); |
| 1362 | /* pass to next thread and keep unsignaled - "pulse" */ |
| 1363 | thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]); |
| 1364 | e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED; |
| 1365 | } |
| 1366 | else |
| 1367 | { |
| 1368 | /* release all threads waiting for signaled */ |
| 1369 | thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]); |
| 1370 | e->state = STATE_SIGNALED; |
| 1371 | } |
| 1372 | } |
| 1373 | else |
| 1374 | { |
| 1375 | /* release all threads waiting for unsignaled */ |
| 1376 | |
| 1377 | /* no thread should have ever blocked if automatic */ |
| 1378 |         KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
| 1379 | e->automatic == 0, "set_event_state->queue[NS]:NS"); |
| 1380 | |
| 1381 | thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]); |
| 1382 | e->state = STATE_NONSIGNALED; |
| 1383 | } |
| 1384 | |
| 1385 | #if CONFIG_CORELOCK == SW_CORELOCK |
| 1386 | corelock_unlock(&e->cl); |
| 1387 | #endif |
| 1388 | } |
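| | 
| | /* Usage sketch (illustrative only, hence #if 0; all "example_*" names
| |    are made up).  An automatic event "pulses": waking one waiter puts it
| |    back to nonsignaled, whereas a manual event stays signaled and
| |    releases every waiter. */
| | #if 0
| | static struct event example_ready;
| | 
| | static void example_event_setup(void)
| | {
| |     /* start nonsignaled; auto-reset after each wakeup */
| |     event_init(&example_ready, EVENT_AUTOMATIC);
| | }
| | 
| | static void example_consumer(void)
| | {
| |     event_wait(&example_ready, STATE_SIGNALED); /* blocks until pulsed */
| | }
| | 
| | static void example_producer(void)
| | {
| |     event_set_state(&example_ready, STATE_SIGNALED); /* wakes one waiter */
| | }
| | #endif /* 0: usage sketch */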
| 1389 | #endif /* HAVE_EVENT_OBJECTS */ |