/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "sprintf.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
/****************************************************************************
 *                              ATTENTION!!                                 *
 *    See notes below on implementing processor-specific portions!         *
 ***************************************************************************/

/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif
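
/* Illustrative sketch of how such a switch is typically wired in (the helper
 * and state names below are hypothetical, shown only to clarify what the
 * extra checks would look like - they are not the actual kernel code):
 *
 *     #if THREAD_EXTRA_CHECKS
 *         if (thread->state != STATE_RUNNING)
 *             thread_panicf("illegal state", thread);
 *     #endif
 */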

/**
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Core Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 */
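
/* A sketch of code observing the order above when moving a thread onto an
 * object's blocked list (illustrative only, not actual kernel code - the
 * object "q" and the thread-slot lock/unlock names are placeholders for
 * whatever the object and the slot really provide):
 *
 *     int oldlevel = disable_irq_save();  // 1) IRQ
 *     corelock_lock(&q->cl);              // 2) kernel object
 *     lock_thread_slot(thread);           // 3) thread slot
 *     // ...queue the thread and change its state...
 *     unlock_thread_slot(thread);
 *     corelock_unlock(&q->cl);
 *     restore_irq(oldlevel);
 */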

/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * ARM notes:
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others,
 * so usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
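
/* A minimal single-core sketch of the procedure above (illustrative only;
 * the real target-specific versions appear later in this file). With one
 * core, step 1) and the wake handshake drop out and it reduces to:
 *
 *     static inline void core_sleep(void)
 *     {
 *         sleep_core(CURRENT_CORE);   // 3) sleep the CPU core
 *         enable_irq();               // 4) enable interrupts after waking
 *     }
 */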

/* Cast to the machine pointer size, whose size could be < 4 or > 32
 * (someday :). */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];

static inline void core_sleep(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));

void check_tmo_threads(void)
        __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
        __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
        __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
        __attribute__((noinline));

#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
        __attribute__((always_inline));
#endif

static void thread_stkov(struct thread_entry *thread)
        __attribute__((noinline));

static inline void store_context(void* addr)
        __attribute__((always_inline));

static inline void load_context(const void* addr)
        __attribute__((always_inline));

void switch_thread(void)
        __attribute__((noinline));

/****************************************************************************
 * Processor-specific section
 */

#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
/* Support a special workaround object for large-sector disks */
#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
#else
#define IF_NO_SKIP_YIELD(...)
#endif

#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked,used)) start_thread(void)
{
    /* r0 = context */
    asm volatile (
        "ldr    sp, [r0, #32]          \n" /* Load initial sp */
        "ldr    r4, [r0, #40]          \n" /* start in r4 since it's non-volatile */
        "mov    r1, #0                 \n" /* Mark thread as running */
        "str    r1, [r0, #40]          \n"
#if NUM_CORES > 1
        "ldr    r0, =invalidate_icache \n" /* Invalidate this core's cache. */
        "mov    lr, pc                 \n" /* This could be the first entry into */
        "bx     r0                     \n" /* plugin or codec code for this core. */
#endif
        "mov    lr, pc                 \n" /* Call thread function */
        "bx     r4                     \n"
    ); /* No clobber list - new thread doesn't care */
    thread_exit();
    //asm volatile (".ltorg"); /* Dump constant pool */
}

/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)start_thread,       \
       (thread)->context.start = (uint32_t)function; })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
        "cmp     r0, #0                 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}

#if defined (CPU_PP)

#if NUM_CORES > 1
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */

#endif /* NUM_CORES */

#if CONFIG_CORELOCK == SW_CORELOCK
/* Software core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}

#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl) __attribute__((naked));
void corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "1:                          \n"
        "ldrb   r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "ldrb   r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp    r3, r1               \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl) __attribute__((naked));
int corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r3, r0               \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors   r0, r0, r2           \n" /* yes? lock acquired */
        "bxne   lr                   \n"
        "ldrb   r0, [r3, #2]         \n" /* || cl->turn == core? */
        "ands   r0, r0, r1           \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx     lr                   \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );

    return 0;
    (void)cl;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl) __attribute__((naked));
void corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
#else /* C versions for reference */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    for (;;)
    {
        if (cl->myl[othercore] == 0 || cl->turn == core)
            break;
    }
}

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    if (cl->myl[othercore] == 0 || cl->turn == core)
    {
        return 1;
    }

    cl->myl[core] = 0;
    return 0;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */

#endif /* CONFIG_CORELOCK == SW_CORELOCK */
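
/* Illustrative sketch of typical corelock usage (assuming a shared structure
 * "s" that embeds a struct corelock member named "cl"):
 *
 *     corelock_lock(&s->cl);      // spin until the other core releases it
 *     // ...touch the shared structure...
 *     corelock_unlock(&s->cl);
 *
 * corelock_try_lock() is the non-spinning variant: it returns non-zero when
 * the lock was obtained and 0 when the other core holds it. */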

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 *---------------------------------------------------------------------------
 */
#ifdef CPU_PP502x
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #4                     \n" /* r0 = 0x4 << core */
        "mov    r0, r0, lsl %[c]           \n"
        "str    r0, [%[mbx], #4]           \n" /* signal intent to sleep */
        "ldr    r1, [%[mbx], #0]           \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst    r1, r0, lsl #2             \n"
        "moveq  r1, #0x80000000            \n" /* Then sleep */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "moveq  r1, #0                     \n" /* Clear control reg */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "orr    r1, r0, r0, lsl #2         \n" /* Signal intent to wake - clear wake flag */
        "str    r1, [%[mbx], #8]           \n"
        "1:                                \n" /* Wait for wake procedure to finish */
        "ldr    r1, [%[mbx], #0]           \n"
        "tst    r1, r0, lsr #2             \n"
        "bne    1b                         \n"
        :
        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
#else /* C version for reference */
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        sleep_core(core);
        wake_core(core);
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == PP5002
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
/* PP5002 has no mailboxes - emulate using bytes */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #1                     \n" /* Signal intent to sleep */
        "strb   r0, [%[sem], #2]           \n"
        "ldrb   r0, [%[sem], #1]           \n" /* && stay_awake == 0? */
        "cmp    r0, #0                     \n"
        "bne    2f                         \n"
        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
         * that the correct alternative is executed. Don't change the order
         * of the next 4 instructions! */
        "tst    pc, #0x0c                  \n"
        "mov    r0, #0xca                  \n"
        "strne  r0, [%[ctl], %[c], lsl #2] \n"
        "streq  r0, [%[ctl], %[c], lsl #2] \n"
        "nop                               \n" /* nop's needed because of pipeline */
        "nop                               \n"
        "nop                               \n"
        "2:                                \n"
        "mov    r0, #0                     \n" /* Clear stay_awake and sleep intent */
        "strb   r0, [%[sem], #1]           \n"
        "strb   r0, [%[sem], #2]           \n"
        "1:                                \n" /* Wait for wake procedure to finish */
        "ldrb   r0, [%[sem], #0]           \n"
        "cmp    r0, #0                     \n"
        "bne    1b                         \n"
        :
        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
          [ctl]"r"(&CPU_CTL)
        : "r0"
    );
#else /* C version for reference */
    /* Signal intent to sleep */
    core_semaphores[core].intend_sleep = 1;

    /* Something waking or other processor intends to wake us? */
    if (core_semaphores[core].stay_awake == 0)
    {
        sleep_core(core);
    }

    /* Signal wake - clear wake flag */
    core_semaphores[core].stay_awake = 0;
    core_semaphores[core].intend_sleep = 0;

    /* Wait for other processor to finish wake procedure */
    while (core_semaphores[core].intend_wake != 0);

    /* Enable IRQ */
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#endif /* PP CPU type */

/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already about to sleep. FIQ and IRQ should be disabled before
 * calling.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey */
}
#elif defined (CPU_PP502x)
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                    \n" /* Disable IRQ */
        "orr    r1, r3, #0x80               \n"
        "msr    cpsr_c, r1                  \n"
        "mov    r2, #0x11                   \n" /* r2 = (0x11 << othercore) */
        "mov    r2, r2, lsl %[oc]           \n" /* Signal intent to wake othercore */
        "str    r2, [%[mbx], #4]            \n"
        "1:                                 \n" /* If it intends to sleep, let it first */
        "ldr    r1, [%[mbx], #0]            \n" /* (MBX_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor    r1, r1, #0xc                \n"
        "tst    r1, r2, lsr #2              \n"
        "ldr    r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq  r1, #0x80000000             \n"
        "beq    1b                          \n" /* Wait for sleep or wake */
        "tst    r1, #0x80000000             \n" /* If sleeping, wake it */
        "movne  r1, #0x0                    \n"
        "strne  r1, [%[ctl], %[oc], lsl #2] \n"
        "mov    r1, r2, lsr #4              \n"
        "str    r1, [%[mbx], #8]            \n" /* Done with wake procedure */
        "msr    cpsr_c, r3                  \n" /* Restore IRQ */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#elif CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - emulate using bytes */
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                \n" /* Disable IRQ */
        "orr    r1, r3, #0x80           \n"
        "msr    cpsr_c, r1              \n"
        "mov    r1, #1                  \n" /* Signal intent to wake other core */
        "orr    r1, r1, r1, lsl #8      \n" /* and set stay_awake */
        "strh   r1, [%[sem], #0]        \n"
        "mov    r2, #0x8000             \n"
        "1:                             \n" /* If it intends to sleep, let it first */
        "ldrb   r1, [%[sem], #2]        \n" /* intend_sleep != 0 ? */
        "cmp    r1, #1                  \n"
        "ldr    r1, [%[st]]             \n" /* && not sleeping ? */
        "tsteq  r1, r2, lsr %[oc]       \n"
        "beq    1b                      \n" /* Wait for sleep or wake */
        "tst    r1, r2, lsr %[oc]       \n"
        "ldrne  r2, =0xcf004054         \n" /* If sleeping, wake it */
        "movne  r1, #0xce               \n"
        "strne  r1, [r2, %[oc], lsl #2] \n"
        "mov    r1, #0                  \n" /* Done with wake procedure */
        "strb   r1, [%[sem], #0]        \n"
        "msr    cpsr_c, r3              \n" /* Restore IRQ */
        :
        : [sem]"r"(&core_semaphores[othercore]),
          [st]"r"(&PROC_STAT),
          [oc]"r"(othercore)
        : "r1", "r2", "r3"
    );
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    core_semaphores[othercore].intend_wake = 1;
    core_semaphores[othercore].stay_awake = 1;

    /* If it intends to sleep, wait until it does or aborts */
    while (core_semaphores[othercore].intend_sleep != 0 &&
           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);

    /* If sleeping, wake it up */
    if (PROC_STAT & PROC_SLEEPING(othercore))
        wake_core(othercore);

    /* Done with wake procedure */
    core_semaphores[othercore].intend_wake = 0;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#endif /* CPU type */

#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}

/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place before changing the processor and after having
 * entered switch_thread since switch_thread may not do a normal return
 * because the stack being used for anything the compiler saved will not belong
 * to the thread's destination core and it may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in the
 * nonvolatile registers if it is saved there before the call since the
 * compiler's order of operations cannot be known for certain.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    flush_icache();
    /* Stash thread in r4 slot */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}

/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill-behavior regarding
 * naked functions written in C where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 */
static void __attribute__((naked))
    switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
     * Stack access also isn't permitted until restoring the original stack and
     * context. */
    asm volatile (
        "stmfd  sp!, { r4-r12, lr }    \n" /* Stack all non-volatile context on current core */
        "ldr    r2, =idle_stacks       \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr    r2, [r2, r0, lsl #2]   \n"
        "add    r2, r2, %0*4           \n"
        "stmfd  r2!, { sp }            \n" /* save original stack pointer on idle stack */
        "mov    sp, r2                 \n" /* switch stacks */
        "adr    r2, 1f                 \n" /* r2 = new core restart address */
        "str    r2, [r1, #40]          \n" /* thread->context.start = r2 */
        "ldr    pc, =switch_thread     \n" /* r0 = thread after call - see load_context */
        "1:                            \n"
        "ldr    sp, [r0, #32]          \n" /* Reload original sp from context structure */
        "mov    r1, #0                 \n" /* Clear start address */
        "str    r1, [r0, #40]          \n"
        "ldr    r0, =invalidate_icache \n" /* Invalidate new core's cache */
        "mov    lr, pc                 \n"
        "bx     r0                     \n"
        "ldmfd  sp!, { r4-r12, pc }    \n" /* Restore non-volatile context to new core and return */
        ".ltorg                        \n" /* Dump constant pool */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}

/*---------------------------------------------------------------------------
 * Do any device-specific inits for the threads and synchronize the kernel
 * initializations.
 *---------------------------------------------------------------------------
 */
static void core_thread_init(unsigned int core)
{
    if (core == CPU)
    {
        /* Wake up coprocessor and let it initialize kernel and threads */
#ifdef CPU_PP502x
        MBX_MSG_CLR = 0x3f;
#endif
        wake_core(COP);
        /* Sleep until COP has finished */
        sleep_core(CPU);
    }
    else
    {
        /* Wake the CPU and return */
        wake_core(CPU);
    }
}
#endif /* NUM_CORES */

#elif CONFIG_CPU == S3C2440

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* FIQ also changes the CLKCON register so FIQ must be disabled
       when changing it here */
    asm volatile (
        "mrs    r0, cpsr        \n"
        "orr    r2, r0, #0x40   \n" /* Disable FIQ */
        "bic    r0, r0, #0x80   \n" /* Prepare IRQ enable */
        "msr    cpsr_c, r2      \n"
        "mov    r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
        "ldr    r2, [r1, #0xc]  \n" /* Set IDLE bit */
        "orr    r2, r2, #4      \n"
        "str    r2, [r1, #0xc]  \n"
        "msr    cpsr_c, r0      \n" /* Enable IRQ, restore FIQ */
        "mov    r2, #0          \n" /* wait for IDLE */
        "1:                     \n"
        "add    r2, r2, #1      \n"
        "cmp    r2, #10         \n"
        "bne    1b              \n"
        "orr    r2, r0, #0xc0   \n" /* Disable IRQ, FIQ */
        "msr    cpsr_c, r2      \n"
        "ldr    r2, [r1, #0xc]  \n" /* Reset IDLE bit */
        "bic    r2, r2, #4      \n"
        "str    r2, [r1, #0xc]  \n"
        "msr    cpsr_c, r0      \n" /* Enable IRQ, restore FIQ */
        : : : "r0", "r1", "r2");
}
#elif defined(CPU_TCC77X)
static inline void core_sleep(void)
{
#warning TODO: Implement core_sleep
    enable_irq();
}
#elif defined(CPU_TCC780X)
static inline void core_sleep(void)
{
    /* Single core only for now. Use the generic ARMv5 wait for IRQ */
    asm volatile (
        "mov r0, #0                \n"
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#elif CONFIG_CPU == IMX31L
static inline void core_sleep(void)
{
    asm volatile (
        "mov r0, #0                \n"
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#elif CONFIG_CPU == DM320
static inline void core_sleep(void)
{
    asm volatile (
        "mov r0, #0                \n"
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#else
static inline void core_sleep(void)
{
#warning core_sleep not implemented, battery life will be decreased
    enable_irq();
}
#endif /* CONFIG_CPU == */

#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
    "start_thread:            \n" /* Start here - no naked attribute */
        "move.l  %a0, %macsr  \n" /* Set initial mac status reg */
        "lea.l   48(%a1), %a1 \n"
        "move.l  (%a1)+, %sp  \n" /* Set initial stack */
        "move.l  (%a1), %a2   \n" /* Fetch thread function pointer */
        "clr.l   (%a1)        \n" /* Mark thread running */
        "jsr     (%a2)        \n" /* Call thread function */
    );
    thread_exit();
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run.
 */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0]  = (uint32_t)&(thread)->context,    \
       (thread)->context.d[1]  = (uint32_t)start_thread,          \
       (thread)->context.start = (uint32_t)(function); })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l  52(%0), %%d0                   \n" /* Get start address */
        "beq.b   1f                             \n" /* NULL -> already running */
        "movem.l (%0), %%a0-%%a2                \n" /* a0=macsr, a1=context, a2=start_thread */
        "jmp     (%%a2)                         \n" /* Start the thread */
        "1:                                     \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0, %%macsr                  \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 891 | /*--------------------------------------------------------------------------- |
| 892 | * Put core in a power-saving state if waking list wasn't repopulated. |
| 893 | *--------------------------------------------------------------------------- |
| 894 | */ |
Michael Sevakis | 32a531b | 2008-01-19 13:27:47 +0000 | [diff] [blame] | 895 | static inline void core_sleep(void) |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 896 | { |
Michael Sevakis | 32a531b | 2008-01-19 13:27:47 +0000 | [diff] [blame] | 897 | /* Supervisor mode, interrupts enabled upon wakeup */ |
| 898 | asm volatile ("stop #0x2000"); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 899 | }
Michael Sevakis | 2c9cbc1 | 2007-02-25 21:43:10 +0000 | [diff] [blame] | 900 | |
Linus Nielsen Feltzing | 7b91ec6 | 2004-10-15 02:13:43 +0000 | [diff] [blame] | 901 | #elif CONFIG_CPU == SH7034 |
Jens Arnold | 904f7fd | 2005-09-01 20:06:38 +0000 | [diff] [blame] | 902 | /*--------------------------------------------------------------------------- |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 903 | * Start the thread running and terminate it if it returns |
| 904 | *--------------------------------------------------------------------------- |
| 905 | */ |
| 906 | void start_thread(void); /* Provide C access to ASM label */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 907 | static void __attribute__((used)) __start_thread(void) |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 908 | { |
| 909 | /* r8 = context */ |
| 910 | asm volatile ( |
| 911 | "_start_thread: \n" /* Start here - no naked attribute */ |
| 912 | "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */ |
| 913 | "mov.l @(28, r8), r15 \n" /* Set initial sp */ |
| 914 | "mov #0, r1 \n" /* Start the thread */ |
| 915 | "jsr @r0 \n" |
| 916 | "mov.l r1, @(36, r8) \n" /* Clear start address */ |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 917 | ); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 918 | thread_exit(); |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 919 | } |
| 920 | |
| 921 | /* Place context pointer in r8 slot, function pointer in r9 slot, and |
| 922 | * start_thread pointer in context_start */ |
| 923 | #define THREAD_STARTUP_INIT(core, thread, function) \ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 924 | ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \ |
| 925 | (thread)->context.r[1] = (uint32_t)(function), \ |
| 926 | (thread)->context.start = (uint32_t)start_thread; }) |
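/* A minimal illustrative sketch (not built): the context offsets the asm
 * above relies on, assuming the SH7034 register block layout from
 * thread.h. The struct below is a hypothetical mirror for illustration
 * only, not a second definition. */
#if 0
struct sh7034_context_sketch
{
    uint32_t r[7];  /*  0..24: r8-r14; r[0] carries the context pointer and
                        r[1] the thread function until the thread first runs */
    uint32_t sp;    /* 28: initial stack pointer, loaded into r15 */
    uint32_t pr;    /* 32: return address of a previously running thread */
    uint32_t start; /* 36: start_thread address; cleared once running */
};
#endif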
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 927 | |
| 928 | /*--------------------------------------------------------------------------- |
Daniel Stenberg | 3c031c5 | 2002-04-22 12:07:34 +0000 | [diff] [blame] | 929 | * Store non-volatile context. |
| 930 | *--------------------------------------------------------------------------- |
| 931 | */ |
Björn Stenberg | 3f9c7c2 | 2002-06-25 12:04:23 +0000 | [diff] [blame] | 932 | static inline void store_context(void* addr) |
Daniel Stenberg | 3c031c5 | 2002-04-22 12:07:34 +0000 | [diff] [blame] | 933 | { |
Jens Arnold | a4aa508 | 2005-06-10 23:05:15 +0000 | [diff] [blame] | 934 | asm volatile ( |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 935 | "add #36, %0 \n" /* Start at last reg. By the time routine */ |
| 936 | "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */ |
Jens Arnold | a4aa508 | 2005-06-10 23:05:15 +0000 | [diff] [blame] | 937 | "mov.l r15,@-%0 \n" |
| 938 | "mov.l r14,@-%0 \n" |
| 939 | "mov.l r13,@-%0 \n" |
| 940 | "mov.l r12,@-%0 \n" |
| 941 | "mov.l r11,@-%0 \n" |
| 942 | "mov.l r10,@-%0 \n" |
| 943 | "mov.l r9, @-%0 \n" |
| 944 | "mov.l r8, @-%0 \n" |
| 945 | : : "r" (addr) |
| 946 | ); |
Daniel Stenberg | 3c031c5 | 2002-04-22 12:07:34 +0000 | [diff] [blame] | 947 | } |
| 948 | |
Jens Arnold | a4aa508 | 2005-06-10 23:05:15 +0000 | [diff] [blame] | 949 | /*--------------------------------------------------------------------------- |
Daniel Stenberg | 3c031c5 | 2002-04-22 12:07:34 +0000 | [diff] [blame] | 950 | * Load non-volatile context. |
| 951 | *--------------------------------------------------------------------------- |
| 952 | */ |
Jens Arnold | c76c568 | 2004-08-16 23:37:23 +0000 | [diff] [blame] | 953 | static inline void load_context(const void* addr) |
Daniel Stenberg | 3c031c5 | 2002-04-22 12:07:34 +0000 | [diff] [blame] | 954 | { |
Jens Arnold | a4aa508 | 2005-06-10 23:05:15 +0000 | [diff] [blame] | 955 | asm volatile ( |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 956 | "mov.l @(36, %0), r0 \n" /* Get start address */ |
| 957 | "tst r0, r0 \n" |
| 958 | "bt .running \n" /* NULL -> already running */ |
| 959 | "jmp @r0 \n" /* r8 = context */ |
| 960 | ".running: \n" |
| 961 | "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */ |
| 962 | "mov.l @%0+, r9 \n" |
| 963 | "mov.l @%0+, r10 \n" |
| 964 | "mov.l @%0+, r11 \n" |
| 965 | "mov.l @%0+, r12 \n" |
| 966 | "mov.l @%0+, r13 \n" |
| 967 | "mov.l @%0+, r14 \n" |
| 968 | "mov.l @%0+, r15 \n" |
| 969 | "lds.l @%0+, pr \n" |
Jens Arnold | a4aa508 | 2005-06-10 23:05:15 +0000 | [diff] [blame] | 970 | : : "r" (addr) : "r0" /* only! */ |
| 971 | ); |
Daniel Stenberg | 3c031c5 | 2002-04-22 12:07:34 +0000 | [diff] [blame] | 972 | } |
Jens Arnold | a4aa508 | 2005-06-10 23:05:15 +0000 | [diff] [blame] | 973 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 974 | /*--------------------------------------------------------------------------- |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 975 | * Put core in a power-saving state. |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 976 | *--------------------------------------------------------------------------- |
| 977 | */ |
Michael Sevakis | 32a531b | 2008-01-19 13:27:47 +0000 | [diff] [blame] | 978 | static inline void core_sleep(void) |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 979 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 980 | asm volatile ( |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 981 | "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */ |
| 982 | "mov #0, r1 \n" /* Enable interrupts */ |
| 983 | "ldc r1, sr \n" /* Following instruction cannot be interrupted */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 984 | "sleep \n" /* Execute standby */ |
Michael Sevakis | 32a531b | 2008-01-19 13:27:47 +0000 | [diff] [blame] | 985 | : : "z"(&SBYCR-GBR) : "r1"); |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 986 | } |
| 987 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 988 | #endif /* CONFIG_CPU == */ |
Daniel Stenberg | 3c031c5 | 2002-04-22 12:07:34 +0000 | [diff] [blame] | 989 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 990 | /* |
| 991 | * End Processor-specific section |
| 992 | ***************************************************************************/ |
Michael Sevakis | 2c9cbc1 | 2007-02-25 21:43:10 +0000 | [diff] [blame] | 993 | |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 994 | #if THREAD_EXTRA_CHECKS |
| 995 | static void thread_panicf(const char *msg, struct thread_entry *thread) |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 996 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 997 | IF_COP( const unsigned int core = thread->core; ) |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 998 | static char name[32]; |
| 999 | thread_get_name(name, 32, thread); |
| 1000 | panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core)); |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 1001 | } |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 1002 | static void thread_stkov(struct thread_entry *thread) |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 1003 | { |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 1004 | thread_panicf("Stkov", thread); |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 1005 | } |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 1006 | #define THREAD_PANICF(msg, thread) \ |
| 1007 | thread_panicf(msg, thread) |
| 1008 | #define THREAD_ASSERT(exp, msg, thread) \ |
| 1009 | ({ if (!({ exp; })) thread_panicf((msg), (thread)); }) |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 1010 | #else |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 1011 | static void thread_stkov(struct thread_entry *thread) |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 1012 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1013 | IF_COP( const unsigned int core = thread->core; ) |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 1014 | static char name[32]; |
| 1015 | thread_get_name(name, 32, thread); |
| 1016 | panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core)); |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 1017 | } |
Michael Sevakis | 7914e90 | 2007-09-28 10:20:02 +0000 | [diff] [blame] | 1018 | #define THREAD_PANICF(msg, thread) |
| 1019 | #define THREAD_ASSERT(exp, msg, thread) |
Michael Sevakis | bfb281f | 2007-05-12 05:20:04 +0000 | [diff] [blame] | 1020 | #endif /* THREAD_EXTRA_CHECKS */ |
| 1021 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1022 | /* Thread locking */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1023 | #if NUM_CORES > 1 |
| 1024 | #define LOCK_THREAD(thread) \ |
| 1025 | ({ corelock_lock(&(thread)->slot_cl); }) |
| 1026 | #define TRY_LOCK_THREAD(thread) \ |
| 1027 | ({ corelock_try_lock(&thread->slot_cl); }) |
| 1028 | #define UNLOCK_THREAD(thread) \ |
| 1029 | ({ corelock_unlock(&(thread)->slot_cl); }) |
| 1030 | #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ |
| 1031 | ({ unsigned int _core = (thread)->core; \ |
| 1032 | cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \ |
| 1033 | cores[_core].blk_ops.cl_p = &(thread)->slot_cl; }) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1034 | #else |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1035 | #define LOCK_THREAD(thread) \ |
| 1036 | ({ }) |
| 1037 | #define TRY_LOCK_THREAD(thread) \ |
| 1038 | ({ }) |
| 1039 | #define UNLOCK_THREAD(thread) \ |
| 1040 | ({ }) |
| 1041 | #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ |
| 1042 | ({ }) |
| 1043 | #endif |
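/* A minimal illustrative sketch (not built) of the deferred unlock above:
 * a thread about to block cannot drop its slot corelock before its context
 * has been stored, so UNLOCK_THREAD_AT_TASK_SWITCH only queues the unlock
 * in blk_ops; run_blocking_ops() performs it on the way through
 * switch_thread(). The wrapper below is hypothetical. */
#if 0
static void example_block_with_deferred_unlock(struct thread_entry *self)
{
    LOCK_THREAD(self);                  /* protect the thread slot */
    /* ... place "self" on some object's wait queue ... */
    UNLOCK_THREAD_AT_TASK_SWITCH(self); /* unlock queued, not done here */
    switch_thread();                    /* blk_ops consumed after the
                                           context has been saved */
}
#endif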
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1044 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1045 | /* RTR list */ |
| 1046 | #define RTR_LOCK(core) \ |
| 1047 | ({ corelock_lock(&cores[core].rtr_cl); }) |
| 1048 | #define RTR_UNLOCK(core) \ |
| 1049 | ({ corelock_unlock(&cores[core].rtr_cl); }) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1050 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1051 | #ifdef HAVE_PRIORITY_SCHEDULING |
| 1052 | #define rtr_add_entry(core, priority) \ |
| 1053 | prio_add_entry(&cores[core].rtr, (priority)) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1054 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1055 | #define rtr_subtract_entry(core, priority) \ |
| 1056 | prio_subtract_entry(&cores[core].rtr, (priority)) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1057 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1058 | #define rtr_move_entry(core, from, to) \ |
| 1059 | prio_move_entry(&cores[core].rtr, (from), (to)) |
| 1060 | #else |
| 1061 | #define rtr_add_entry(core, priority) |
| 1062 | #define rtr_add_entry_inl(core, priority) |
| 1063 | #define rtr_subtract_entry(core, priority) |
| 1064 | #define rtr_subtract_entry_inl(core, priority)
| 1065 | #define rtr_move_entry(core, from, to) |
| 1066 | #define rtr_move_entry_inl(core, from, to) |
| 1067 | #endif |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1068 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1069 | /*--------------------------------------------------------------------------- |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1070 | * Thread list structure - circular: |
| 1071 | * +------------------------------+ |
| 1072 | * | | |
| 1073 | * +--+---+<-+---+<-+---+<-+---+<-+ |
| 1074 | * Head->| T | | T | | T | | T | |
| 1075 | * +->+---+->+---+->+---+->+---+--+ |
| 1076 | * | | |
| 1077 | * +------------------------------+ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1078 | *--------------------------------------------------------------------------- |
| 1079 | */ |
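/* A minimal illustrative sketch (not built) of walking the circular list
 * pictured above: start at the head and follow the "l.next" links until
 * the head comes around again. find_highest_priority_in_list_l() further
 * down uses exactly this pattern; the visitor callback is hypothetical. */
#if 0
static void example_for_each_l(struct thread_entry *head,
                               void (*visit)(struct thread_entry *))
{
    struct thread_entry *curr = head;

    if (curr == NULL)
        return;                  /* an empty list is just a NULL head */

    do
    {
        visit(curr);
        curr = curr->l.next;     /* circular - never NULL */
    }
    while (curr != head);
}
#endif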
Michael Sevakis | 165f62d | 2007-03-26 03:24:36 +0000 | [diff] [blame] | 1080 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1081 | /*--------------------------------------------------------------------------- |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1082 | * Adds a thread to a list of threads using "insert last". Uses the "l" |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1083 | * links. |
| 1084 | *--------------------------------------------------------------------------- |
| 1085 | */ |
| 1086 | static void add_to_list_l(struct thread_entry **list, |
| 1087 | struct thread_entry *thread) |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1088 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1089 | struct thread_entry *l = *list; |
Michael Sevakis | a690ebb | 2007-07-30 16:44:36 +0000 | [diff] [blame] | 1090 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1091 | if (l == NULL) |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1092 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1093 | /* Insert into unoccupied list */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1094 | thread->l.prev = thread; |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1095 | thread->l.next = thread; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1096 | *list = thread; |
| 1097 | return; |
Björn Stenberg | c4d8d97 | 2003-02-14 09:44:34 +0000 | [diff] [blame] | 1098 | } |
Daniel Ankers | 0aec12f | 2006-08-21 17:35:35 +0000 | [diff] [blame] | 1099 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1100 | /* Insert last */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1101 | thread->l.prev = l->l.prev; |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1102 | thread->l.next = l; |
| 1103 | l->l.prev->l.next = thread; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1104 | l->l.prev = thread; |
Daniel Stenberg | 3c031c5 | 2002-04-22 12:07:34 +0000 | [diff] [blame] | 1105 | } |
| 1106 | |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1107 | /*--------------------------------------------------------------------------- |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1108 | * Removes a thread from a list of threads. Uses the "l" links. |
| 1109 | *--------------------------------------------------------------------------- |
| 1110 | */ |
| 1111 | static void remove_from_list_l(struct thread_entry **list, |
| 1112 | struct thread_entry *thread) |
| 1113 | { |
| 1114 | struct thread_entry *prev, *next; |
| 1115 | |
| 1116 | next = thread->l.next; |
| 1117 | |
| 1118 | if (thread == next) |
| 1119 | { |
| 1120 | /* The only item */ |
| 1121 | *list = NULL; |
| 1122 | return; |
| 1123 | } |
| 1124 | |
| 1125 | if (thread == *list) |
| 1126 | { |
| 1127 | /* List becomes next item */ |
| 1128 | *list = next; |
| 1129 | } |
| 1130 | |
| 1131 | prev = thread->l.prev; |
| 1132 | |
| 1133 | /* Fix links to jump over the removed entry. */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1134 | next->l.prev = prev; |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1135 | prev->l.next = next; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1136 | } |
| 1137 | |
| 1138 | /*--------------------------------------------------------------------------- |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1139 | * Timeout list structure - circular reverse (to make "remove item" O(1)), |
| 1140 | * NULL-terminated forward (to ease the far more common forward traversal): |
| 1141 | * +------------------------------+ |
| 1142 | * | | |
| 1143 | * +--+---+<-+---+<-+---+<-+---+<-+ |
| 1144 | * Head->| T | | T | | T | | T | |
| 1145 | * +---+->+---+->+---+->+---+-X |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1146 | *--------------------------------------------------------------------------- |
| 1147 | */ |
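/* A minimal illustrative sketch (not built) matching the picture above:
 * the forward "tmo.next" chain is NULL-terminated, which is what
 * check_tmo_threads() walks; the reverse "tmo.prev" chain is circular so
 * removal needs no search. The counting helper is hypothetical. */
#if 0
static int example_count_tmo_threads(struct thread_entry *head)
{
    int count = 0;
    struct thread_entry *curr;

    for (curr = head; curr != NULL; curr = curr->tmo.next)
        count++;                 /* forward traversal ends at NULL */

    return count;
}
#endif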
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1148 | |
| 1149 | /*--------------------------------------------------------------------------- |
| 1150 |  * Add a thread to the core's timeout list by linking the pointers in its
| 1151 | * tmo structure. |
| 1152 | *--------------------------------------------------------------------------- |
| 1153 | */ |
| 1154 | static void add_to_list_tmo(struct thread_entry *thread) |
| 1155 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1156 | struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout; |
| 1157 | THREAD_ASSERT(thread->tmo.prev == NULL, |
| 1158 | "add_to_list_tmo->already listed", thread); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1159 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1160 | thread->tmo.next = NULL; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1161 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1162 | if (tmo == NULL) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1163 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1164 | /* Insert into unoccupied list */ |
| 1165 | thread->tmo.prev = thread; |
| 1166 | cores[IF_COP_CORE(thread->core)].timeout = thread; |
| 1167 | return; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1168 | } |
| 1169 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1170 | /* Insert Last */ |
| 1171 | thread->tmo.prev = tmo->tmo.prev; |
| 1172 | tmo->tmo.prev->tmo.next = thread; |
| 1173 | tmo->tmo.prev = thread; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1174 | } |
| 1175 | |
| 1176 | /*--------------------------------------------------------------------------- |
| 1177 |  * Remove a thread from the core's timeout list by unlinking the pointers in
| 1178 | * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout |
| 1179 | * is cancelled. |
| 1180 | *--------------------------------------------------------------------------- |
| 1181 | */ |
| 1182 | static void remove_from_list_tmo(struct thread_entry *thread) |
| 1183 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1184 | struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout; |
| 1185 | struct thread_entry *prev = thread->tmo.prev; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1186 | struct thread_entry *next = thread->tmo.next; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1187 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1188 | THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1189 | |
| 1190 | if (next != NULL) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1191 | next->tmo.prev = prev; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1192 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1193 | if (thread == *list) |
| 1194 | { |
| 1195 | /* List becomes next item and empty if next == NULL */ |
| 1196 | *list = next; |
| 1197 | /* Mark as unlisted */ |
| 1198 | thread->tmo.prev = NULL; |
| 1199 | } |
| 1200 | else |
| 1201 | { |
| 1202 | if (next == NULL) |
| 1203 | (*list)->tmo.prev = prev; |
| 1204 | prev->tmo.next = next; |
| 1205 | /* Mark as unlisted */ |
| 1206 | thread->tmo.prev = NULL; |
| 1207 | } |
| 1208 | } |
| 1209 | |
| 1210 | |
| 1211 | #ifdef HAVE_PRIORITY_SCHEDULING |
| 1212 | /*--------------------------------------------------------------------------- |
| 1213 | * Priority distribution structure (one category for each possible priority): |
| 1214 | * |
| 1215 | * +----+----+----+ ... +-----+ |
| 1216 | * hist: | F0 | F1 | F2 | | F31 | |
| 1217 | * +----+----+----+ ... +-----+ |
| 1218 | * mask: | b0 | b1 | b2 | | b31 | |
| 1219 | * +----+----+----+ ... +-----+ |
| 1220 | * |
| 1221 | * F = count of threads at priority category n (frequency) |
| 1222 | * b = bitmask of non-zero priority categories (occupancy) |
| 1223 | * |
| 1224 |  *       / if F[n] != 0 : 1
| 1225 | * b[n] = | |
| 1226 | * \ else : 0 |
| 1227 | * |
| 1228 | *--------------------------------------------------------------------------- |
| 1229 |  * Basic priority inheritance protocol (PIP):
| 1230 | * |
| 1231 | * Mn = mutex n, Tn = thread n |
| 1232 | * |
| 1233 | * A lower priority thread inherits the priority of the highest priority |
| 1234 | * thread blocked waiting for it to complete an action (such as release a |
| 1235 | * mutex or respond to a message via queue_send): |
| 1236 | * |
| 1237 | * 1) T2->M1->T1 |
| 1238 | * |
| 1239 |  * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
| 1240 | * priority than T1 then T1 inherits the priority of T2. |
| 1241 | * |
| 1242 | * 2) T3 |
| 1243 | * \/ |
| 1244 | * T2->M1->T1 |
| 1245 | * |
| 1246 | * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so |
| 1247 |  * T1 inherits the higher of T2's and T3's priorities.
| 1248 | * |
| 1249 | * 3) T3->M2->T2->M1->T1 |
| 1250 | * |
| 1251 | * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2, |
| 1252 | * then T1 inherits the priority of T3 through T2. |
| 1253 | * |
| 1254 | * Blocking chains can grow arbitrarily complex (though it's best that they |
| 1255 | * not form at all very often :) and build-up from these units. |
| 1256 | *--------------------------------------------------------------------------- |
| 1257 | */ |
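/* A minimal illustrative sketch (not built) of how the distribution is
 * queried: the lowest set bit of the mask is the highest-priority
 * non-empty category, i.e. the thread's effective priority (see the
 * find_first_set_bit() uses below). The helper and the numbers in the
 * comment are made up for illustration. */
#if 0
static int example_effective_priority(struct priority_distribution *pd)
{
    /* e.g. base priority 16 (hist[16] == 1) plus one waiter boosting to 8
     * (hist[8] == 1) sets mask bits 8 and 16; the result is then 8. */
    return find_first_set_bit(pd->mask);
}
#endif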
| 1258 | |
| 1259 | /*--------------------------------------------------------------------------- |
| 1260 | * Increment frequency at category "priority" |
| 1261 | *--------------------------------------------------------------------------- |
| 1262 | */ |
| 1263 | static inline unsigned int prio_add_entry( |
| 1264 | struct priority_distribution *pd, int priority) |
| 1265 | { |
| 1266 | unsigned int count; |
| 1267 | /* The size/instruction count difference on ARM makes it worth using
| 1268 |  * different code here (192 bytes for ARM). Only ASM would be better. */
| 1269 | #ifdef CPU_ARM |
| 1270 | count = pd->hist[priority]; |
| 1271 | if (++count == 1) |
| 1272 | pd->mask |= 1 << priority; |
| 1273 | pd->hist[priority] = count; |
| 1274 | #else /* This one's better for Coldfire */ |
| 1275 | if ((count = ++pd->hist[priority]) == 1) |
| 1276 | pd->mask |= 1 << priority; |
| 1277 | #endif |
| 1278 | |
| 1279 | return count; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1280 | } |
| 1281 | |
| 1282 | /*--------------------------------------------------------------------------- |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1283 | * Decrement frequency at category "priority" |
| 1284 | *--------------------------------------------------------------------------- |
| 1285 | */ |
| 1286 | static inline unsigned int prio_subtract_entry( |
| 1287 | struct priority_distribution *pd, int priority) |
| 1288 | { |
| 1289 | unsigned int count; |
| 1290 | |
| 1291 | #ifdef CPU_ARM |
| 1292 | count = pd->hist[priority]; |
| 1293 | if (--count == 0) |
| 1294 | pd->mask &= ~(1 << priority); |
| 1295 | pd->hist[priority] = count; |
| 1296 | #else |
| 1297 | if ((count = --pd->hist[priority]) == 0) |
| 1298 | pd->mask &= ~(1 << priority); |
| 1299 | #endif |
| 1300 | |
| 1301 | return count; |
| 1302 | } |
| 1303 | |
| 1304 | /*--------------------------------------------------------------------------- |
| 1305 | * Remove from one category and add to another |
| 1306 | *--------------------------------------------------------------------------- |
| 1307 | */ |
| 1308 | static inline void prio_move_entry( |
| 1309 | struct priority_distribution *pd, int from, int to) |
| 1310 | { |
| 1311 | uint32_t mask = pd->mask; |
| 1312 | |
| 1313 | #ifdef CPU_ARM |
| 1314 | unsigned int count; |
| 1315 | |
| 1316 | count = pd->hist[from]; |
| 1317 | if (--count == 0) |
| 1318 | mask &= ~(1 << from); |
| 1319 | pd->hist[from] = count; |
| 1320 | |
| 1321 | count = pd->hist[to]; |
| 1322 | if (++count == 1) |
| 1323 | mask |= 1 << to; |
| 1324 | pd->hist[to] = count; |
| 1325 | #else |
| 1326 | if (--pd->hist[from] == 0) |
| 1327 | mask &= ~(1 << from); |
| 1328 | |
| 1329 | if (++pd->hist[to] == 1) |
| 1330 | mask |= 1 << to; |
| 1331 | #endif |
| 1332 | |
| 1333 | pd->mask = mask; |
| 1334 | } |
| 1335 | |
| 1336 | /*--------------------------------------------------------------------------- |
| 1337 | * Change the priority and rtr entry for a running thread |
| 1338 | *--------------------------------------------------------------------------- |
| 1339 | */ |
| 1340 | static inline void set_running_thread_priority( |
| 1341 | struct thread_entry *thread, int priority) |
| 1342 | { |
| 1343 | const unsigned int core = IF_COP_CORE(thread->core); |
| 1344 | RTR_LOCK(core); |
| 1345 | rtr_move_entry(core, thread->priority, priority); |
| 1346 | thread->priority = priority; |
| 1347 | RTR_UNLOCK(core); |
| 1348 | } |
| 1349 | |
| 1350 | /*--------------------------------------------------------------------------- |
| 1351 | * Finds the highest priority thread in a list of threads. If the list is |
| 1352 |  * empty, PRIORITY_IDLE is returned.
| 1353 | * |
| 1354 |  * A struct priority_distribution could be kept within each object instead
| 1355 |  * of scanning the remaining threads in the list; as a compromise, the
| 1356 |  * per-object memory overhead is saved at the cost of a slight speed
| 1357 |  * penalty under high contention.
| 1358 | *--------------------------------------------------------------------------- |
| 1359 | */ |
| 1360 | static int find_highest_priority_in_list_l( |
| 1361 | struct thread_entry * const thread) |
| 1362 | { |
| 1363 | if (thread != NULL) |
| 1364 | { |
| 1365 | /* Go through the list until ending up back at the initial thread */
| 1366 | int highest_priority = thread->priority; |
| 1367 | struct thread_entry *curr = thread; |
| 1368 | |
| 1369 | do |
| 1370 | { |
| 1371 | int priority = curr->priority; |
| 1372 | |
| 1373 | if (priority < highest_priority) |
| 1374 | highest_priority = priority; |
| 1375 | |
| 1376 | curr = curr->l.next; |
| 1377 | } |
| 1378 | while (curr != thread); |
| 1379 | |
| 1380 | return highest_priority; |
| 1381 | } |
| 1382 | |
| 1383 | return PRIORITY_IDLE; |
| 1384 | } |
| 1385 | |
| 1386 | /*--------------------------------------------------------------------------- |
| 1387 | * Register priority with blocking system and bubble it down the chain if |
| 1388 | * any until we reach the end or something is already equal or higher. |
| 1389 | * |
| 1390 | * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor |
| 1391 | * targets but that same action also guarantees a circular block anyway and |
| 1392 | * those are prevented, right? :-) |
| 1393 | *--------------------------------------------------------------------------- |
| 1394 | */ |
| 1395 | static struct thread_entry * |
| 1396 | blocker_inherit_priority(struct thread_entry *current) |
| 1397 | { |
| 1398 | const int priority = current->priority; |
| 1399 | struct blocker *bl = current->blocker; |
| 1400 | struct thread_entry * const tstart = current; |
| 1401 | struct thread_entry *bl_t = bl->thread; |
| 1402 | |
| 1403 | /* Blocker cannot change since the object protection is held */ |
| 1404 | LOCK_THREAD(bl_t); |
| 1405 | |
| 1406 | for (;;) |
| 1407 | { |
| 1408 | struct thread_entry *next; |
| 1409 | int bl_pr = bl->priority; |
| 1410 | |
| 1411 | if (priority >= bl_pr) |
| 1412 | break; /* Object priority already high enough */ |
| 1413 | |
| 1414 | bl->priority = priority; |
| 1415 | |
| 1416 | /* Add this one */ |
| 1417 | prio_add_entry(&bl_t->pdist, priority); |
| 1418 | |
| 1419 | if (bl_pr < PRIORITY_IDLE) |
| 1420 | { |
| 1421 | /* Not first waiter - subtract old one */ |
| 1422 | prio_subtract_entry(&bl_t->pdist, bl_pr); |
| 1423 | } |
| 1424 | |
| 1425 | if (priority >= bl_t->priority) |
| 1426 | break; /* Thread priority high enough */ |
| 1427 | |
| 1428 | if (bl_t->state == STATE_RUNNING) |
| 1429 | { |
| 1430 | /* Blocking thread is a running thread therefore there are no |
| 1431 | * further blockers. Change the "run queue" on which it |
| 1432 | * resides. */ |
| 1433 | set_running_thread_priority(bl_t, priority); |
| 1434 | break; |
| 1435 | } |
| 1436 | |
| 1437 | bl_t->priority = priority; |
| 1438 | |
| 1439 | /* If blocking thread has a blocker, apply transitive inheritance */ |
| 1440 | bl = bl_t->blocker; |
| 1441 | |
| 1442 | if (bl == NULL) |
| 1443 | break; /* End of chain or object doesn't support inheritance */ |
| 1444 | |
| 1445 | next = bl->thread; |
| 1446 | |
| 1447 | if (next == tstart) |
| 1448 | break; /* Full-circle - deadlock! */ |
| 1449 | |
| 1450 | UNLOCK_THREAD(current); |
| 1451 | |
| 1452 | #if NUM_CORES > 1 |
| 1453 | for (;;) |
| 1454 | { |
| 1455 | LOCK_THREAD(next); |
| 1456 | |
| 1457 | /* Blocker could change - retest condition */ |
| 1458 | if (bl->thread == next) |
| 1459 | break; |
| 1460 | |
| 1461 | UNLOCK_THREAD(next); |
| 1462 | next = bl->thread; |
| 1463 | } |
| 1464 | #endif |
| 1465 | current = bl_t; |
| 1466 | bl_t = next; |
| 1467 | } |
| 1468 | |
| 1469 | UNLOCK_THREAD(bl_t); |
| 1470 | |
| 1471 | return current; |
| 1472 | } |
| 1473 | |
| 1474 | /*--------------------------------------------------------------------------- |
| 1475 | * Readjust priorities when waking a thread blocked waiting for another |
| 1476 | * in essence "releasing" the thread's effect on the object owner. Can be |
| 1477 | * performed from any context. |
| 1478 | *--------------------------------------------------------------------------- |
| 1479 | */ |
| 1480 | struct thread_entry * |
| 1481 | wakeup_priority_protocol_release(struct thread_entry *thread) |
| 1482 | { |
| 1483 | const int priority = thread->priority; |
| 1484 | struct blocker *bl = thread->blocker; |
| 1485 | struct thread_entry * const tstart = thread; |
| 1486 | struct thread_entry *bl_t = bl->thread; |
| 1487 | |
| 1488 | /* Blocker cannot change since object will be locked */ |
| 1489 | LOCK_THREAD(bl_t); |
| 1490 | |
| 1491 | thread->blocker = NULL; /* Thread not blocked */ |
| 1492 | |
| 1493 | for (;;) |
| 1494 | { |
| 1495 | struct thread_entry *next; |
| 1496 | int bl_pr = bl->priority; |
| 1497 | |
| 1498 | if (priority > bl_pr) |
| 1499 | break; /* Object priority higher */ |
| 1500 | |
| 1501 | next = *thread->bqp; |
| 1502 | |
| 1503 | if (next == NULL) |
| 1504 | { |
| 1505 | /* No more threads in queue */ |
| 1506 | prio_subtract_entry(&bl_t->pdist, bl_pr); |
| 1507 | bl->priority = PRIORITY_IDLE; |
| 1508 | } |
| 1509 | else |
| 1510 | { |
| 1511 | /* Check list for highest remaining priority */ |
| 1512 | int queue_pr = find_highest_priority_in_list_l(next); |
| 1513 | |
| 1514 | if (queue_pr == bl_pr) |
| 1515 | break; /* Object priority not changing */ |
| 1516 | |
| 1517 | /* Change queue priority */ |
| 1518 | prio_move_entry(&bl_t->pdist, bl_pr, queue_pr); |
| 1519 | bl->priority = queue_pr; |
| 1520 | } |
| 1521 | |
| 1522 | if (bl_pr > bl_t->priority) |
| 1523 | break; /* thread priority is higher */ |
| 1524 | |
| 1525 | bl_pr = find_first_set_bit(bl_t->pdist.mask); |
| 1526 | |
| 1527 | if (bl_pr == bl_t->priority) |
| 1528 | break; /* Thread priority not changing */ |
| 1529 | |
| 1530 | if (bl_t->state == STATE_RUNNING) |
| 1531 | { |
| 1532 | /* No further blockers */ |
| 1533 | set_running_thread_priority(bl_t, bl_pr); |
| 1534 | break; |
| 1535 | } |
| 1536 | |
| 1537 | bl_t->priority = bl_pr; |
| 1538 | |
| 1539 | /* If blocking thread has a blocker, apply transitive inheritance */ |
| 1540 | bl = bl_t->blocker; |
| 1541 | |
| 1542 | if (bl == NULL) |
| 1543 | break; /* End of chain or object doesn't support inheritance */ |
| 1544 | |
| 1545 | next = bl->thread; |
| 1546 | |
| 1547 | if (next == tstart) |
| 1548 | break; /* Full-circle - deadlock! */ |
| 1549 | |
| 1550 | UNLOCK_THREAD(thread); |
| 1551 | |
| 1552 | #if NUM_CORES > 1 |
| 1553 | for (;;) |
| 1554 | { |
| 1555 | LOCK_THREAD(next); |
| 1556 | |
| 1557 | /* Blocker could change - retest condition */ |
| 1558 | if (bl->thread == next) |
| 1559 | break; |
| 1560 | |
| 1561 | UNLOCK_THREAD(next); |
| 1562 | next = bl->thread; |
| 1563 | } |
| 1564 | #endif |
| 1565 | thread = bl_t; |
| 1566 | bl_t = next; |
| 1567 | } |
| 1568 | |
| 1569 | UNLOCK_THREAD(bl_t); |
| 1570 | |
| 1571 | #if NUM_CORES > 1 |
| 1572 | if (thread != tstart) |
| 1573 | { |
| 1574 | /* Relock original if it changed */ |
| 1575 | LOCK_THREAD(tstart); |
| 1576 | } |
| 1577 | #endif |
| 1578 | |
| 1579 | return cores[CURRENT_CORE].running; |
| 1580 | } |
| 1581 | |
| 1582 | /*--------------------------------------------------------------------------- |
| 1583 |  * Transfer ownership to a thread waiting for an object and transfer
| 1584 | * inherited priority boost from other waiters. This algorithm knows that |
| 1585 | * blocking chains may only unblock from the very end. |
| 1586 | * |
| 1587 | * Only the owning thread itself may call this and so the assumption that |
| 1588 | * it is the running thread is made. |
| 1589 | *--------------------------------------------------------------------------- |
| 1590 | */ |
| 1591 | struct thread_entry * |
| 1592 | wakeup_priority_protocol_transfer(struct thread_entry *thread) |
| 1593 | { |
| 1594 | /* Waking thread inherits priority boost from object owner */ |
| 1595 | struct blocker *bl = thread->blocker; |
| 1596 | struct thread_entry *bl_t = bl->thread; |
| 1597 | struct thread_entry *next; |
| 1598 | int bl_pr; |
| 1599 | |
| 1600 | THREAD_ASSERT(thread_get_current() == bl_t, |
| 1601 | "UPPT->wrong thread", thread_get_current()); |
| 1602 | |
| 1603 | LOCK_THREAD(bl_t); |
| 1604 | |
| 1605 | bl_pr = bl->priority; |
| 1606 | |
| 1607 | /* Remove the object's boost from the owning thread */ |
| 1608 | if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 && |
| 1609 | bl_pr <= bl_t->priority) |
| 1610 | { |
| 1611 | /* No more threads at this priority are waiting and the old level is |
| 1612 | * at least the thread level */ |
| 1613 | int priority = find_first_set_bit(bl_t->pdist.mask); |
| 1614 | |
| 1615 | if (priority != bl_t->priority) |
| 1616 | { |
| 1617 | /* Adjust this thread's priority */ |
| 1618 | set_running_thread_priority(bl_t, priority); |
| 1619 | } |
| 1620 | } |
| 1621 | |
| 1622 | next = *thread->bqp; |
| 1623 | |
| 1624 | if (next == NULL) |
| 1625 | { |
| 1626 | /* Expected shortcut - no more waiters */ |
| 1627 | bl_pr = PRIORITY_IDLE; |
| 1628 | } |
| 1629 | else |
| 1630 | { |
| 1631 | if (thread->priority <= bl_pr) |
| 1632 | { |
| 1633 | /* Need to scan threads remaining in queue */ |
| 1634 | bl_pr = find_highest_priority_in_list_l(next); |
| 1635 | } |
| 1636 | |
| 1637 | if (prio_add_entry(&thread->pdist, bl_pr) == 1 && |
| 1638 | bl_pr < thread->priority) |
| 1639 | { |
| 1640 | /* Thread priority must be raised */ |
| 1641 | thread->priority = bl_pr; |
| 1642 | } |
| 1643 | } |
| 1644 | |
| 1645 | bl->thread = thread; /* This thread pwns */ |
| 1646 | bl->priority = bl_pr; /* Save highest blocked priority */ |
| 1647 | thread->blocker = NULL; /* Thread not blocked */ |
| 1648 | |
| 1649 | UNLOCK_THREAD(bl_t); |
| 1650 | |
| 1651 | return bl_t; |
| 1652 | } |
| 1653 | |
| 1654 | /*--------------------------------------------------------------------------- |
| 1655 | * No threads must be blocked waiting for this thread except for it to exit. |
| 1656 | * The alternative is more elaborate cleanup and object registration code. |
| 1657 | * Check this for risk of silent data corruption when objects with |
| 1658 | * inheritable blocking are abandoned by the owner - not precise but may |
| 1659 | * catch something. |
| 1660 | *--------------------------------------------------------------------------- |
| 1661 | */ |
Bertrik Sikken | e15f8a2 | 2008-05-03 08:35:14 +0000 | [diff] [blame] | 1662 | static void check_for_obj_waiters(const char *function, struct thread_entry *thread) |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1663 | { |
| 1664 | /* Only one bit in the mask should be set, with a frequency of 1, which
| 1665 | * represents the thread's own base priority */ |
| 1666 | uint32_t mask = thread->pdist.mask; |
| 1667 | if ((mask & (mask - 1)) != 0 || |
| 1668 | thread->pdist.hist[find_first_set_bit(mask)] > 1) |
| 1669 | { |
| 1670 | char name[32];
| 1671 | thread_get_name(name, 32, thread); |
| 1672 | panicf("%s->%s with obj. waiters", function, name); |
| 1673 | } |
| 1674 | } |
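/* A minimal illustrative sketch (not built) of the bit trick used above:
 * "mask & (mask - 1)" clears the lowest set bit, so it is zero exactly
 * when at most one bit was set. The helper name and the example values in
 * the comment are hypothetical. */
#if 0
static bool example_single_priority_only(uint32_t mask)
{
    /* 0x00010000 -> true (just the base priority category is occupied),
     * 0x00010100 -> false (another category still has waiters) */
    return (mask & (mask - 1)) == 0;
}
#endif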
| 1675 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
| 1676 | |
| 1677 | /*--------------------------------------------------------------------------- |
| 1678 | * Move a thread back to a running state on its core. |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1679 | *--------------------------------------------------------------------------- |
| 1680 | */ |
| 1681 | static void core_schedule_wakeup(struct thread_entry *thread) |
| 1682 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1683 | const unsigned int core = IF_COP_CORE(thread->core); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1684 | |
| 1685 | RTR_LOCK(core); |
| 1686 | |
| 1687 | thread->state = STATE_RUNNING; |
| 1688 | |
| 1689 | add_to_list_l(&cores[core].running, thread); |
| 1690 | rtr_add_entry(core, thread->priority); |
| 1691 | |
| 1692 | RTR_UNLOCK(core); |
| 1693 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1694 | #if NUM_CORES > 1 |
| 1695 | if (core != CURRENT_CORE) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1696 | core_wake(core); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1697 | #endif |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1698 | } |
| 1699 | |
| 1700 | /*--------------------------------------------------------------------------- |
| 1701 | * Check the core's timeout list when at least one thread is due to wake. |
| 1702 |  * Filtering for the condition is done before making the call. Resets the
| 1703 |  * tick at which the next check will occur.
| 1704 | *--------------------------------------------------------------------------- |
| 1705 | */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1706 | void check_tmo_threads(void) |
Björn Stenberg | c4d8d97 | 2003-02-14 09:44:34 +0000 | [diff] [blame] | 1707 | { |
Michael Sevakis | a690ebb | 2007-07-30 16:44:36 +0000 | [diff] [blame] | 1708 | const unsigned int core = CURRENT_CORE; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1709 | const long tick = current_tick; /* snapshot the current tick */ |
| 1710 | long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */ |
| 1711 | struct thread_entry *next = cores[core].timeout; |
| 1712 | |
| 1713 | /* If there are no processes waiting for a timeout, just keep the check |
| 1714 | tick from falling into the past. */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1715 | |
| 1716 | /* Break the loop once we have walked through the list of all |
| 1717 | * sleeping processes or have removed them all. */ |
| 1718 | while (next != NULL) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1719 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1720 | /* Check sleeping threads. Allow interrupts between checks. */ |
Michael Sevakis | af395f4 | 2008-03-26 01:50:41 +0000 | [diff] [blame] | 1721 | enable_irq(); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1722 | |
| 1723 | struct thread_entry *curr = next; |
| 1724 | |
| 1725 | next = curr->tmo.next; |
| 1726 | |
| 1727 | /* Lock thread slot against explicit wakeup */ |
Michael Sevakis | af395f4 | 2008-03-26 01:50:41 +0000 | [diff] [blame] | 1728 | disable_irq(); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1729 | LOCK_THREAD(curr); |
| 1730 | |
| 1731 | unsigned state = curr->state; |
| 1732 | |
| 1733 | if (state < TIMEOUT_STATE_FIRST) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1734 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1735 | /* Cleanup threads no longer on a timeout but still on the |
| 1736 | * list. */ |
| 1737 | remove_from_list_tmo(curr); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1738 | } |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1739 | else if (TIME_BEFORE(tick, curr->tmo_tick)) |
| 1740 | { |
| 1741 | /* Timeout still pending - this will be the usual case */ |
| 1742 | if (TIME_BEFORE(curr->tmo_tick, next_tmo_check)) |
| 1743 | { |
| 1744 | /* Earliest timeout found so far - move the next check up |
| 1745 | to its time */ |
| 1746 | next_tmo_check = curr->tmo_tick; |
| 1747 | } |
| 1748 | } |
| 1749 | else |
| 1750 | { |
| 1751 | /* Sleep timeout has been reached so bring the thread back to |
| 1752 | * life again. */ |
| 1753 | if (state == STATE_BLOCKED_W_TMO) |
| 1754 | { |
| 1755 | #if NUM_CORES > 1 |
| 1756 | /* Lock the waiting thread's kernel object */ |
| 1757 | struct corelock *ocl = curr->obj_cl; |
| 1758 | |
| 1759 | if (corelock_try_lock(ocl) == 0) |
| 1760 | { |
| 1761 | /* Need to retry in the correct order though the need is |
| 1762 | * unlikely */ |
| 1763 | UNLOCK_THREAD(curr); |
| 1764 | corelock_lock(ocl); |
| 1765 | LOCK_THREAD(curr); |
| 1766 | |
| 1767 | if (curr->state != STATE_BLOCKED_W_TMO) |
| 1768 | { |
| 1769 | /* Thread was woken or removed explicitly while slot
| 1770 | * was unlocked */ |
| 1771 | corelock_unlock(ocl); |
| 1772 | remove_from_list_tmo(curr); |
| 1773 | UNLOCK_THREAD(curr); |
| 1774 | continue; |
| 1775 | } |
| 1776 | } |
| 1777 | #endif /* NUM_CORES */ |
| 1778 | |
| 1779 | remove_from_list_l(curr->bqp, curr); |
| 1780 | |
| 1781 | #ifdef HAVE_WAKEUP_EXT_CB |
| 1782 | if (curr->wakeup_ext_cb != NULL) |
| 1783 | curr->wakeup_ext_cb(curr); |
| 1784 | #endif |
| 1785 | |
| 1786 | #ifdef HAVE_PRIORITY_SCHEDULING |
| 1787 | if (curr->blocker != NULL) |
| 1788 | wakeup_priority_protocol_release(curr); |
| 1789 | #endif |
| 1790 | corelock_unlock(ocl); |
| 1791 | } |
| 1792 | /* else state == STATE_SLEEPING */ |
| 1793 | |
| 1794 | remove_from_list_tmo(curr); |
| 1795 | |
| 1796 | RTR_LOCK(core); |
| 1797 | |
| 1798 | curr->state = STATE_RUNNING; |
| 1799 | |
| 1800 | add_to_list_l(&cores[core].running, curr); |
| 1801 | rtr_add_entry(core, curr->priority); |
| 1802 | |
| 1803 | RTR_UNLOCK(core); |
| 1804 | } |
| 1805 | |
| 1806 | UNLOCK_THREAD(curr); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1807 | } |
| 1808 | |
| 1809 | cores[core].next_tmo_check = next_tmo_check; |
| 1810 | } |
| 1811 | |
| 1812 | /*--------------------------------------------------------------------------- |
| 1813 | * Performs operations that must be done before blocking a thread but after |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1814 | * the state is saved. |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1815 | *--------------------------------------------------------------------------- |
| 1816 | */ |
Michael Sevakis | 32a531b | 2008-01-19 13:27:47 +0000 | [diff] [blame] | 1817 | #if NUM_CORES > 1 |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1818 | static inline void run_blocking_ops( |
Michael Sevakis | 608c547 | 2008-01-19 13:47:26 +0000 | [diff] [blame] | 1819 | unsigned int core, struct thread_entry *thread) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1820 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1821 | struct thread_blk_ops *ops = &cores[core].blk_ops; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1822 | const unsigned flags = ops->flags; |
| 1823 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1824 | if (flags == TBOP_CLEAR) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1825 | return; |
| 1826 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1827 | switch (flags) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1828 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1829 | case TBOP_SWITCH_CORE: |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1830 | core_switch_blk_op(core, thread); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1831 | /* Fall-through */ |
| 1832 | case TBOP_UNLOCK_CORELOCK: |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1833 | corelock_unlock(ops->cl_p); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1834 | break; |
| 1835 | } |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1836 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1837 | ops->flags = TBOP_CLEAR; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1838 | } |
Michael Sevakis | 32a531b | 2008-01-19 13:27:47 +0000 | [diff] [blame] | 1839 | #endif /* NUM_CORES > 1 */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1840 | |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1841 | #ifdef RB_PROFILE |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1842 | void profile_thread(void) |
| 1843 | { |
| 1844 | profstart(cores[CURRENT_CORE].running - threads); |
| 1845 | } |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1846 | #endif |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1847 | |
| 1848 | /*--------------------------------------------------------------------------- |
| 1849 | * Prepares a thread to block on an object's list and/or for a specified |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1850 | * duration - expects object and slot to be appropriately locked if needed |
| 1851 | * and interrupts to be masked. |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1852 | *--------------------------------------------------------------------------- |
| 1853 | */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1854 | static inline void block_thread_on_l(struct thread_entry *thread, |
| 1855 | unsigned state) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1856 | { |
| 1857 | /* If inlined, unreachable branches will be pruned with no size penalty |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1858 | because state is passed as a constant parameter. */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1859 | const unsigned int core = IF_COP_CORE(thread->core); |
| 1860 | |
| 1861 | /* Remove the thread from the list of running threads. */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1862 | RTR_LOCK(core); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1863 | remove_from_list_l(&cores[core].running, thread); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1864 | rtr_subtract_entry(core, thread->priority); |
| 1865 | RTR_UNLOCK(core); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1866 | |
| 1867 | /* Add a timeout to the block if not infinite */ |
| 1868 | switch (state) |
| 1869 | { |
| 1870 | case STATE_BLOCKED: |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1871 | case STATE_BLOCKED_W_TMO: |
| 1872 | /* Put the thread into a new list of inactive threads. */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1873 | add_to_list_l(thread->bqp, thread); |
| 1874 | |
| 1875 | if (state == STATE_BLOCKED) |
| 1876 | break; |
| 1877 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1878 | /* Fall-through */ |
| 1879 | case STATE_SLEEPING: |
| 1880 | /* If this thread times out sooner than any other thread, update |
| 1881 | next_tmo_check to its timeout */ |
| 1882 | if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check)) |
| 1883 | { |
| 1884 | cores[core].next_tmo_check = thread->tmo_tick; |
| 1885 | } |
| 1886 | |
| 1887 | if (thread->tmo.prev == NULL) |
| 1888 | { |
| 1889 | add_to_list_tmo(thread); |
| 1890 | } |
| 1891 | /* else thread was never removed from list - just keep it there */ |
| 1892 | break; |
| 1893 | } |
| 1894 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1895 | /* Remember the next thread about to block. */
| 1896 | cores[core].block_task = thread; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1897 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1898 | /* Report new state. */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1899 | thread->state = state; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1900 | } |
| 1901 | |
| 1902 | /*--------------------------------------------------------------------------- |
| 1903 |  * Switch thread in round robin fashion for any given priority. Any thread
| 1904 |  * that removed itself from the running list first must have set itself as
| 1905 |  * the core's block_task (see block_thread_on_l) before calling this.
| 1906 | * |
| 1907 | * INTERNAL: Intended for use by kernel and not for programs. |
| 1908 | *--------------------------------------------------------------------------- |
| 1909 | */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1910 | void switch_thread(void) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1911 | { |
| 1912 | const unsigned int core = CURRENT_CORE; |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1913 | struct thread_entry *block = cores[core].block_task; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1914 | struct thread_entry *thread = cores[core].running; |
| 1915 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1916 | /* Get context to save - next thread to run is unknown until all wakeups |
| 1917 | * are evaluated */ |
| 1918 | if (block != NULL) |
| 1919 | { |
| 1920 | cores[core].block_task = NULL; |
| 1921 | |
| 1922 | #if NUM_CORES > 1 |
| 1923 | if (thread == block) |
| 1924 | { |
| 1925 | /* This was the last thread running and another core woke us before |
| 1926 | * reaching here. Force next thread selection to give tmo threads or |
| 1927 | * other threads woken before this block a first chance. */ |
| 1928 | block = NULL; |
| 1929 | } |
| 1930 | else |
| 1931 | #endif |
| 1932 | { |
| 1933 | /* Blocking task is the old one */ |
| 1934 | thread = block; |
| 1935 | } |
| 1936 | } |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1937 | |
| 1938 | #ifdef RB_PROFILE |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1939 | profile_thread_stopped(thread - threads); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1940 | #endif |
Michael Sevakis | 165f62d | 2007-03-26 03:24:36 +0000 | [diff] [blame] | 1941 | |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1942 | /* Begin task switching by saving our current context so that we can |
| 1943 | * restore the state of the current thread later to the point prior |
| 1944 | * to this call. */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1945 | store_context(&thread->context); |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1946 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1947 | /* Check if the current thread stack is overflown */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1948 | if (thread->stack[0] != DEADBEEF) |
| 1949 | thread_stkov(thread); |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 1950 | |
Michael Sevakis | 32a531b | 2008-01-19 13:27:47 +0000 | [diff] [blame] | 1951 | #if NUM_CORES > 1 |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1952 | /* Run any blocking operations requested before switching/sleeping */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1953 | run_blocking_ops(core, thread); |
Michael Sevakis | 32a531b | 2008-01-19 13:27:47 +0000 | [diff] [blame] | 1954 | #endif |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 1955 | |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1956 | #ifdef HAVE_PRIORITY_SCHEDULING |
Michael Sevakis | 606d9d0 | 2008-06-03 04:23:09 +0000 | [diff] [blame] | 1957 | IF_NO_SKIP_YIELD( if (thread->skip_count != -1) ) |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1958 | /* Reset the value of thread's skip count */ |
Michael Sevakis | 606d9d0 | 2008-06-03 04:23:09 +0000 | [diff] [blame] | 1959 | thread->skip_count = 0; |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1960 | #endif |
Michael Sevakis | bbe3f1f | 2008-02-28 17:40:18 +0000 | [diff] [blame] | 1961 | |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 1962 | for (;;) |
| 1963 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1964 | /* If there are threads on a timeout and the earliest wakeup is due, |
| 1965 | * check the list and wake any threads that need to start running |
| 1966 | * again. */ |
| 1967 | if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check)) |
Miika Pekkarinen | 66258a3 | 2007-03-26 16:55:17 +0000 | [diff] [blame] | 1968 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1969 | check_tmo_threads(); |
Miika Pekkarinen | 66258a3 | 2007-03-26 16:55:17 +0000 | [diff] [blame] | 1970 | } |
Brandon Low | 8a82892 | 2006-11-11 05:33:24 +0000 | [diff] [blame] | 1971 | |
Michael Sevakis | af395f4 | 2008-03-26 01:50:41 +0000 | [diff] [blame] | 1972 | disable_irq(); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 1973 | RTR_LOCK(core); |
| 1974 | |
| 1975 | thread = cores[core].running; |
| 1976 | |
| 1977 | if (thread == NULL) |
| 1978 | { |
| 1979 | /* Enter sleep mode to reduce power usage - we are woken by an interrupt |
| 1980 | * or a wakeup request from another core. core_sleep() is expected to |
| 1981 | * re-enable interrupts. */ |
| 1982 | RTR_UNLOCK(core); |
| 1983 | core_sleep(IF_COP(core)); |
| 1984 | } |
| 1985 | else |
| 1986 | { |
| 1987 | #ifdef HAVE_PRIORITY_SCHEDULING |
| 1988 | /* Select the new task based on priorities and the last time a |
| 1989 | * process got CPU time relative to the highest priority runnable |
| 1990 | * task. */ |
| 1991 | struct priority_distribution *pd = &cores[core].rtr; |
| 1992 | int max = find_first_set_bit(pd->mask); |
| 1993 | |
| 1994 | if (block == NULL) |
| 1995 | { |
| 1996 | /* Not switching on a block, tentatively select next thread */ |
| 1997 | thread = thread->l.next; |
| 1998 | } |
| 1999 | |
| 2000 | for (;;) |
| 2001 | { |
| 2002 | int priority = thread->priority; |
| 2003 | int diff; |
| 2004 | |
| 2005 | /* This ridiculously simple method of aging seems to work |
| 2006 | * suspiciously well. It does tend to reward CPU hogs (threads that |
| 2007 | * under-yield), which is generally not desirable. On the plus |
| 2008 | * side, relative to other threads it penalizes excess yielding, |
| 2009 | * which is good when some high priority thread is performing no |
| 2010 | * useful work, such as polling for a device to be ready. Of course, |
| 2011 | * aging is only employed when both higher and lower priority |
| 2012 | * threads are runnable. The highest priority runnable |
| 2013 | * thread(s) are never skipped. */ |
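                /* Worked example (illustrative numbers added for clarity, not
                 * part of the original comment): suppose the highest priority
                 * runnable level is max = 2 and this thread's priority is 5.
                 * Then diff = 5 - 2 = 3, so the thread is passed over until
                 * ++skip_count exceeds diff*diff = 9, i.e. it gets the CPU
                 * roughly once per nine passes of its slot, while threads at
                 * priority <= max are selected immediately and never skipped. */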
| 2014 | if (priority <= max || |
Michael Sevakis | 606d9d0 | 2008-06-03 04:23:09 +0000 | [diff] [blame] | 2015 | IF_NO_SKIP_YIELD( thread->skip_count == -1 || ) |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2016 | (diff = priority - max, ++thread->skip_count > diff*diff)) |
| 2017 | { |
| 2018 | cores[core].running = thread; |
| 2019 | break; |
| 2020 | } |
| 2021 | |
| 2022 | thread = thread->l.next; |
| 2023 | } |
Michael Sevakis | bbe3f1f | 2008-02-28 17:40:18 +0000 | [diff] [blame] | 2024 | #else |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2025 | /* Without priority use a simple FCFS algorithm */ |
| 2026 | if (block == NULL) |
| 2027 | { |
| 2028 | /* Not switching on a block, select next thread */ |
| 2029 | thread = thread->l.next; |
| 2030 | cores[core].running = thread; |
| 2031 | } |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2032 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
| 2033 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2034 | RTR_UNLOCK(core); |
Michael Sevakis | af395f4 | 2008-03-26 01:50:41 +0000 | [diff] [blame] | 2035 | enable_irq(); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2036 | break; |
| 2037 | } |
| 2038 | } |
| 2039 | |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 2040 | /* And finally give control to the next thread. */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2041 | load_context(&thread->context); |
| 2042 | |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 2043 | #ifdef RB_PROFILE |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2044 | profile_thread_started(thread - threads); |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 2045 | #endif |
Björn Stenberg | c4d8d97 | 2003-02-14 09:44:34 +0000 | [diff] [blame] | 2046 | } |
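/* Illustrative note (added for clarity, not part of the original source):
 * calling switch_thread() without blocking first is a plain voluntary yield -
 * the thread stays on the run list and merely lets the rest of its priority
 * round run before it is selected again. The block_task path at the top is
 * only taken when a preceding sleep_thread() or block_thread*() call has
 * already removed the current thread from the run list. */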
| 2047 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2048 | /*--------------------------------------------------------------------------- |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2049 | * Sleeps a thread for at least a specified number of ticks with zero being |
| 2050 | * a wait until the next tick. |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2051 | * |
| 2052 | * INTERNAL: Intended for use by kernel and not for programs. |
| 2053 | *--------------------------------------------------------------------------- |
| 2054 | */ |
| 2055 | void sleep_thread(int ticks) |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 2056 | { |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2057 | struct thread_entry *current = cores[CURRENT_CORE].running; |
Brandon Low | 8a82892 | 2006-11-11 05:33:24 +0000 | [diff] [blame] | 2058 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2059 | LOCK_THREAD(current); |
Brandon Low | 8a82892 | 2006-11-11 05:33:24 +0000 | [diff] [blame] | 2060 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2061 | /* Set our timeout, remove from run list and join timeout list. */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2062 | current->tmo_tick = current_tick + ticks + 1; |
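    /* Added note (illustrative, not in the original source): the "+ 1" rounds
     * up past the partially elapsed current tick, which is what makes the
     * sleep last "at least" the requested number of ticks rather than up to
     * one tick less. */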
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2063 | block_thread_on_l(current, STATE_SLEEPING); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2064 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2065 | UNLOCK_THREAD(current); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2066 | } |
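/* Illustrative caller sketch (added for clarity; a guess at the kernel-side
 * wrapper, not verbatim code - HZ as the ticks-per-second constant is an
 * assumption taken from the rest of Rockbox):
 *
 *     disable_irq();        // timeout list manipulation wants IRQs off
 *     sleep_thread(HZ/2);   // queue for wakeup in roughly half a second;
 *                           // 0 would mean "just until the next tick"
 *     switch_thread();      // actually give up the CPU until the timeout fires
 */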
| 2067 | |
| 2068 | /*--------------------------------------------------------------------------- |
| 2069 | * Indefinitely block a thread on a blocking queue for explicit wakeup. |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2070 | * |
| 2071 | * INTERNAL: Intended for use by kernel objects and not for programs. |
| 2072 | *--------------------------------------------------------------------------- |
| 2073 | */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2074 | void block_thread(struct thread_entry *current) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2075 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2076 | /* Set the state to blocked and take us off the run queue until we |
| 2077 | * are explicitly woken */ |
| 2078 | LOCK_THREAD(current); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2079 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2080 | /* Set the list for explicit wakeup */ |
| 2081 | block_thread_on_l(current, STATE_BLOCKED); |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 2082 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2083 | #ifdef HAVE_PRIORITY_SCHEDULING |
| 2084 | if (current->blocker != NULL) |
| 2085 | { |
| 2086 | /* Object supports PIP */ |
| 2087 | current = blocker_inherit_priority(current); |
| 2088 | } |
Brandon Low | 8a82892 | 2006-11-11 05:33:24 +0000 | [diff] [blame] | 2089 | #endif |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2090 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2091 | UNLOCK_THREAD(current); |
Brandon Low | 8a82892 | 2006-11-11 05:33:24 +0000 | [diff] [blame] | 2092 | } |
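/* Illustrative blocking/wakeup pattern (added for clarity; setup of the
 * thread's blocking list and the kernel object's locking are simplified away,
 * so this is a sketch of the expected shape, not verbatim kernel code):
 *
 *     // Waiter, with the object's corelock held and its wait list chosen:
 *     block_thread(current);              // off the run list, STATE_BLOCKED
 *     // ...release the object's corelock...
 *     switch_thread();                    // returns only after being woken
 *
 *     // Waker, also holding the object's corelock over its wait list:
 *     if (wakeup_thread(&list) & THREAD_SWITCH)
 *         switch_thread();                // a higher priority thread was readied
 */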
| 2093 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2094 | /*--------------------------------------------------------------------------- |
| 2095 | * Block a thread on a blocking queue for a specified time interval or until |
| 2096 | * explicitly woken - whichever happens first. |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2097 | * |
| 2098 | * INTERNAL: Intended for use by kernel objects and not for programs. |
| 2099 | *--------------------------------------------------------------------------- |
| 2100 | */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2101 | void block_thread_w_tmo(struct thread_entry *current, int timeout) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2102 | { |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 2103 | /* Lock the entry for the current running thread. */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2104 | LOCK_THREAD(current); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2105 | |
Brandon Low | 8a82892 | 2006-11-11 05:33:24 +0000 | [diff] [blame] | 2106 | /* Set the state to blocked with the specified timeout */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2107 | current->tmo_tick = current_tick + timeout; |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2108 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2109 | /* Set the list for explicit wakeup */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2110 | block_thread_on_l(current, STATE_BLOCKED_W_TMO); |
Brandon Low | 8a82892 | 2006-11-11 05:33:24 +0000 | [diff] [blame] | 2111 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2112 | #ifdef HAVE_PRIORITY_SCHEDULING |
| 2113 | if (current->blocker != NULL) |
| 2114 | { |
| 2115 | /* Object supports PIP */ |
| 2116 | current = blocker_inherit_priority(current); |
| 2117 | } |
| 2118 | #endif |
Brandon Low | 8a82892 | 2006-11-11 05:33:24 +0000 | [diff] [blame] | 2119 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2120 | UNLOCK_THREAD(current); |
Miika Pekkarinen | a85044b | 2006-09-16 16:18:11 +0000 | [diff] [blame] | 2121 | } |
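/* Illustrative timed-wait sketch (added for clarity, not verbatim kernel
 * code): same shape as the block_thread() pattern above, except the thread
 * also joins the timeout list, so it becomes runnable again after 'timeout'
 * ticks even if nobody ever calls wakeup_thread() on it:
 *
 *     block_thread_w_tmo(current, HZ);    // woken explicitly or after ~1 second
 *     // ...release the object's corelock...
 *     switch_thread();
 */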
| 2122 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2123 | /*--------------------------------------------------------------------------- |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2124 | * Explicitly wake up a thread on a blocking queue. Only affects threads in |
| 2125 | * STATE_BLOCKED and STATE_BLOCKED_W_TMO. |
| 2126 | * |
| 2127 | * The caller must treat this code as a critical section, which means the |
| 2128 | * object's corelock must be held. |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2129 | * |
| 2130 | * INTERNAL: Intended for use by kernel objects and not for programs. |
| 2131 | *--------------------------------------------------------------------------- |
| 2132 | */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2133 | unsigned int wakeup_thread(struct thread_entry **list) |
Michael Sevakis | 43c1592 | 2006-12-16 18:35:12 +0000 | [diff] [blame] | 2134 | { |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2135 | struct thread_entry *thread = *list; |
| 2136 | unsigned int result = THREAD_NONE; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2137 | |
| 2138 | /* Check if there is a blocked thread at all. */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2139 | if (thread == NULL) |
| 2140 | return result; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2141 | |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2142 | LOCK_THREAD(thread); |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2143 | |
| 2144 | /* Determine thread's current state. */ |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2145 | switch (thread->state) |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2146 | { |
| 2147 | case STATE_BLOCKED: |
| 2148 | case STATE_BLOCKED_W_TMO: |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2149 | remove_from_list_l(list, thread); |
| 2150 | |
| 2151 | result = THREAD_OK; |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2152 | |
| 2153 | #ifdef HAVE_PRIORITY_SCHEDULING |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2154 | struct thread_entry *current; |
| 2155 | struct blocker *bl = thread->blocker; |
| 2156 | |
| 2157 | if (bl == NULL) |
| 2158 | { |
| 2159 | /* No inheritance - just boost the thread by aging */ |
Michael Sevakis | 606d9d0 | 2008-06-03 04:23:09 +0000 | [diff] [blame] | 2160 | IF_NO_SKIP_YIELD( if (thread->skip_count != -1) ) |
| 2161 | thread->skip_count = thread->priority; |
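            /* Added note (illustrative, not in the original source): preloading
             * skip_count with the thread's priority gives the woken thread a
             * head start in the skip_count > diff*diff aging test in
             * switch_thread(), so it tends to be scheduled sooner than a
             * thread whose skip_count has been aging up from zero. */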
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2162 | current = cores[CURRENT_CORE].running; |
| 2163 | } |
| 2164 | else |
| 2165 | { |
| 2166 | /* Call the specified unblocking PIP */ |
| 2167 | current = bl->wakeup_protocol(thread); |
| 2168 | } |
| 2169 | |
| 2170 | if (current != NULL && thread->priority < current->priority |
| 2171 | IF_COP( && thread->core == current->core )) |
| 2172 | { |
| 2173 | /* Woken thread is higher priority and exists on the same CPU core; |
| 2174 | * recommend a task switch. Knowing if this is an interrupt call |
| 2175 | * would be helpful here. */ |
| 2176 | result |= THREAD_SWITCH; |
| 2177 | } |
| 2178 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
| 2179 | |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2180 | core_schedule_wakeup(thread); |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2181 | break; |
| 2182 | |
| 2183 | /* Nothing to do. State is not blocked. */ |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2184 | #if THREAD_EXTRA_CHECKS |
Michael Sevakis | 27cf677 | 2008-03-25 02:34:12 +0000 | [diff] [blame] | 2185 | default: |
Michael Sevakis | a9b2fb5 | 2007-10-16 01:25:17 +0000 | [diff] [blame] | 2186 | THREAD_PANICF("wakeup_thread->block invalid", thread); |