/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "sprintf.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
/****************************************************************************
 *                              ATTENTION!!                                 *
 *    See notes below on implementing processor-specific portions!          *
 ***************************************************************************/

/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif

/**
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Core Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 */
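
/* Illustrative sketch (not part of the scheduler): a hypothetical wakeup
 * path that observes the locking order above. The queue variable `q` and
 * its corelock field are made up for illustration; only the ordering -
 * IRQ, then kernel object, then thread slot - is the point.
 *
 *   int oldlevel = disable_irq_save();  // 1) IRQ
 *   corelock_lock(&q->cl);              // 2) Kernel Object
 *   LOCK_THREAD(thread);                // 3) Thread Slot
 *   ...change the thread's state...
 *   UNLOCK_THREAD(thread);
 *   corelock_unlock(&q->cl);
 *   restore_irq(oldlevel);              // release in reverse order
 */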

/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * ARM notes:
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others.
 * Usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
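
/* A minimal sketch of the five steps above for a hypothetical single-core
 * target (illustrative only - real ports below use inline assembly where
 * instruction ordering matters; wait_for_interrupt() stands in for the
 * CPU's sleep instruction and is not a real function in this file):
 *
 *   static inline void core_sleep(void)
 *   {
 *       // steps 1-2 drop out on a single-core target like this one
 *       wait_for_interrupt();  // step 3: sleep; any IRQ wakes the core
 *       enable_irq();          // step 4: let the wakeup source run now
 *   }                          // step 5: exit
 */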

/* Cast to the machine pointer size, which could be < 4 or > 32
 * (someday :). */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];

static inline void core_sleep(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));

void check_tmo_threads(void)
        __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
        __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
        __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
        __attribute__((noinline));

#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
        __attribute__((always_inline));
#endif

static void thread_stkov(struct thread_entry *thread)
        __attribute__((noinline));

static inline void store_context(void* addr)
        __attribute__((always_inline));

static inline void load_context(const void* addr)
        __attribute__((always_inline));

void switch_thread(void)
        __attribute__((noinline));

/****************************************************************************
 * Processor-specific section
 */

#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
/* Support a special workaround object for large-sector disks */
#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
#else
#define IF_NO_SKIP_YIELD(...)
#endif

#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked,used)) start_thread(void)
{
    /* r0 = context */
    asm volatile (
        "ldr    sp, [r0, #32]          \n" /* Load initial sp */
        "ldr    r4, [r0, #40]          \n" /* start in r4 since it's non-volatile */
        "mov    r1, #0                 \n" /* Mark thread as running */
        "str    r1, [r0, #40]          \n"
#if NUM_CORES > 1
        "ldr    r0, =invalidate_icache \n" /* Invalidate this core's cache. */
        "mov    lr, pc                 \n" /* This could be the first entry into */
        "bx     r0                     \n" /* plugin or codec code for this core. */
#endif
        "mov    lr, pc                 \n" /* Call thread function */
        "bx     r4                     \n"
    ); /* No clobber list - new thread doesn't care */
    thread_exit();
    //asm volatile (".ltorg"); /* Dump constant pool */
}

/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)start_thread,       \
       (thread)->context.start = (uint32_t)function; })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
        "cmp     r0, #0                 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}

#if defined (CPU_PP)

#if NUM_CORES > 1
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */

#endif /* NUM_CORES */

#if CONFIG_CORELOCK == SW_CORELOCK
/* Software core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}

#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl) __attribute__((naked));
void corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
    "1:                              \n"
        "ldrb   r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "ldrb   r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp    r3, r1               \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl) __attribute__((naked));
int corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r3, r0               \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors   r0, r0, r2           \n" /* yes? lock acquired */
        "bxne   lr                   \n"
        "ldrb   r0, [r3, #2]         \n" /* || cl->turn == core? */
        "ands   r0, r0, r1           \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx     lr                   \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );

    return 0;
    (void)cl;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl) __attribute__((naked));
void corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
347#else /* C versions for reference */
348/*---------------------------------------------------------------------------
349 * Wait for the corelock to become free and aquire it when it does.
350 *---------------------------------------------------------------------------
351 */
352void corelock_lock(struct corelock *cl)
353{
354 const unsigned int core = CURRENT_CORE;
355 const unsigned int othercore = 1 - core;
356
Michael Sevakis26b3a742007-10-19 06:19:06 +0000357 cl->myl[core] = core;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +0000358 cl->turn = othercore;
359
Michael Sevakis26b3a742007-10-19 06:19:06 +0000360 for (;;)
361 {
362 if (cl->myl[othercore] == 0 || cl->turn == core)
363 break;
364 }
Michael Sevakisa9b2fb52007-10-16 01:25:17 +0000365}
366
367/*---------------------------------------------------------------------------
368 * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
369 *---------------------------------------------------------------------------
370 */
371int corelock_try_lock(struct corelock *cl)
372{
373 const unsigned int core = CURRENT_CORE;
374 const unsigned int othercore = 1 - core;
375
Michael Sevakis26b3a742007-10-19 06:19:06 +0000376 cl->myl[core] = core;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +0000377 cl->turn = othercore;
378
Michael Sevakis26b3a742007-10-19 06:19:06 +0000379 if (cl->myl[othercore] == 0 || cl->turn == core)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +0000380 {
Michael Sevakis26b3a742007-10-19 06:19:06 +0000381 return 1;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +0000382 }
383
Michael Sevakis26b3a742007-10-19 06:19:06 +0000384 cl->myl[core] = 0;
385 return 0;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +0000386}
387
388/*---------------------------------------------------------------------------
389 * Release ownership of the corelock
390 *---------------------------------------------------------------------------
391 */
392void corelock_unlock(struct corelock *cl)
393{
394 cl->myl[CURRENT_CORE] = 0;
395}
396#endif /* ASM / C selection */
397
398#endif /* CONFIG_CORELOCK == SW_CORELOCK */
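
/* Usage sketch (illustrative, not part of this file): a corelock guarding a
 * small critical section shared by CPU and COP. `my_cl` and `shared_count`
 * are hypothetical names.
 *
 *   static struct corelock my_cl IBSS_ATTR;
 *   static volatile int shared_count IBSS_ATTR;
 *
 *   void shared_increment(void)
 *   {
 *       corelock_lock(&my_cl);   // spins via Peterson's algorithm
 *       shared_count++;
 *       corelock_unlock(&my_cl);
 *   }
 *
 * corelock_init(&my_cl) must be called once before first use. */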

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 *---------------------------------------------------------------------------
 */
#ifdef CPU_PP502x
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #4                     \n" /* r0 = 0x4 << core */
        "mov    r0, r0, lsl %[c]           \n"
        "str    r0, [%[mbx], #4]           \n" /* signal intent to sleep */
        "ldr    r1, [%[mbx], #0]           \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst    r1, r0, lsl #2             \n"
        "moveq  r1, #0x80000000            \n" /* Then sleep */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "moveq  r1, #0                     \n" /* Clear control reg */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "orr    r1, r0, r0, lsl #2         \n" /* Signal intent to wake - clear wake flag */
        "str    r1, [%[mbx], #8]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldr    r1, [%[mbx], #0]           \n"
        "tst    r1, r0, lsr #2             \n"
        "bne    1b                         \n"
        :
        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
#else /* C version for reference */
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        sleep_core(core);
        wake_core(core);
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == PP5002
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
/* PP5002 has no mailboxes - emulate using bytes */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #1                     \n" /* Signal intent to sleep */
        "strb   r0, [%[sem], #2]           \n"
        "ldrb   r0, [%[sem], #1]           \n" /* && stay_awake == 0? */
        "cmp    r0, #0                     \n"
        "bne    2f                         \n"
        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
         * that the correct alternative is executed. Don't change the order
         * of the next 4 instructions! */
        "tst    pc, #0x0c                  \n"
        "mov    r0, #0xca                  \n"
        "strne  r0, [%[ctl], %[c], lsl #2] \n"
        "streq  r0, [%[ctl], %[c], lsl #2] \n"
        "nop                               \n" /* nop's needed because of pipeline */
        "nop                               \n"
        "nop                               \n"
    "2:                                    \n"
        "mov    r0, #0                     \n" /* Clear stay_awake and sleep intent */
        "strb   r0, [%[sem], #1]           \n"
        "strb   r0, [%[sem], #2]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldrb   r0, [%[sem], #0]           \n"
        "cmp    r0, #0                     \n"
        "bne    1b                         \n"
        :
        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
          [ctl]"r"(&CPU_CTL)
        : "r0"
    );
#else /* C version for reference */
    /* Signal intent to sleep */
    core_semaphores[core].intend_sleep = 1;

    /* Something waking or other processor intends to wake us? */
    if (core_semaphores[core].stay_awake == 0)
    {
        sleep_core(core);
    }

    /* Signal wake - clear wake flag */
    core_semaphores[core].stay_awake = 0;
    core_semaphores[core].intend_sleep = 0;

    /* Wait for other processor to finish wake procedure */
    while (core_semaphores[core].intend_wake != 0);

    /* Enable IRQ */
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#endif /* PP CPU type */

/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping, or prevent it from sleeping
 * if it was about to. FIQ and IRQ should be disabled before calling.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey */
}
#elif defined (CPU_PP502x)
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                    \n" /* Disable IRQ */
        "orr    r1, r3, #0x80               \n"
        "msr    cpsr_c, r1                  \n"
        "mov    r2, #0x11                   \n" /* r2 = (0x11 << othercore) */
        "mov    r2, r2, lsl %[oc]           \n" /* Signal intent to wake othercore */
        "str    r2, [%[mbx], #4]            \n"
    "1:                                     \n" /* If it intends to sleep, let it first */
        "ldr    r1, [%[mbx], #0]            \n" /* (MBX_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor    r1, r1, #0xc                \n"
        "tst    r1, r2, lsr #2              \n"
        "ldr    r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq  r1, #0x80000000             \n"
        "beq    1b                          \n" /* Wait for sleep or wake */
        "tst    r1, #0x80000000             \n" /* If sleeping, wake it */
        "movne  r1, #0x0                    \n"
        "strne  r1, [%[ctl], %[oc], lsl #2] \n"
        "mov    r1, r2, lsr #4              \n"
        "str    r1, [%[mbx], #8]            \n" /* Done with wake procedure */
        "msr    cpsr_c, r3                  \n" /* Restore IRQ */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#elif CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - emulate using bytes */
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                \n" /* Disable IRQ */
        "orr    r1, r3, #0x80           \n"
        "msr    cpsr_c, r1              \n"
        "mov    r1, #1                  \n" /* Signal intent to wake other core */
        "orr    r1, r1, r1, lsl #8      \n" /* and set stay_awake */
        "strh   r1, [%[sem], #0]        \n"
        "mov    r2, #0x8000             \n"
    "1:                                 \n" /* If it intends to sleep, let it first */
        "ldrb   r1, [%[sem], #2]        \n" /* intend_sleep != 0 ? */
        "cmp    r1, #1                  \n"
        "ldr    r1, [%[st]]             \n" /* && not sleeping ? */
        "tsteq  r1, r2, lsr %[oc]       \n"
        "beq    1b                      \n" /* Wait for sleep or wake */
        "tst    r1, r2, lsr %[oc]       \n"
        "ldrne  r2, =0xcf004054         \n" /* If sleeping, wake it */
        "movne  r1, #0xce               \n"
        "strne  r1, [r2, %[oc], lsl #2] \n"
        "mov    r1, #0                  \n" /* Done with wake procedure */
        "strb   r1, [%[sem], #0]        \n"
        "msr    cpsr_c, r3              \n" /* Restore IRQ */
        :
        : [sem]"r"(&core_semaphores[othercore]),
          [st]"r"(&PROC_STAT),
          [oc]"r"(othercore)
        : "r1", "r2", "r3"
    );
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    core_semaphores[othercore].intend_wake = 1;
    core_semaphores[othercore].stay_awake = 1;

    /* If it intends to sleep, wait until it does or aborts */
    while (core_semaphores[othercore].intend_sleep != 0 &&
           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);

    /* If sleeping, wake it up */
    if (PROC_STAT & PROC_SLEEPING(othercore))
        wake_core(othercore);

    /* Done with wake procedure */
    core_semaphores[othercore].intend_wake = 0;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#endif /* CPU type */

#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}

/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place after entering switch_thread and before
 * changing the processor, since switch_thread may not do a normal return
 * because the stack being used for anything the compiler saved will not belong
 * to the thread's destination core and it may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in the
 * non-volatile registers if it is saved there before the call since the
 * compiler's order of operations cannot be known for certain.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    flush_icache();
    /* Stash thread in r4 slot */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}

/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill-behavior regarding
 * naked functions written in C where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 */
static void __attribute__((naked))
    switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
     * Stack access also isn't permitted until restoring the original stack and
     * context. */
    asm volatile (
        "stmfd  sp!, { r4-r12, lr }    \n" /* Stack all non-volatile context on current core */
        "ldr    r2, =idle_stacks       \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr    r2, [r2, r0, lsl #2]   \n"
        "add    r2, r2, %0*4           \n"
        "stmfd  r2!, { sp }            \n" /* save original stack pointer on idle stack */
        "mov    sp, r2                 \n" /* switch stacks */
        "adr    r2, 1f                 \n" /* r2 = new core restart address */
        "str    r2, [r1, #40]          \n" /* thread->context.start = r2 */
        "ldr    pc, =switch_thread     \n" /* r0 = thread after call - see load_context */
    "1:                                \n"
        "ldr    sp, [r0, #32]          \n" /* Reload original sp from context structure */
        "mov    r1, #0                 \n" /* Clear start address */
        "str    r1, [r0, #40]          \n"
        "ldr    r0, =invalidate_icache \n" /* Invalidate new core's cache */
        "mov    lr, pc                 \n"
        "bx     r0                     \n"
        "ldmfd  sp!, { r4-r12, pc }    \n" /* Restore non-volatile context to new core and return */
        ".ltorg                        \n" /* Dump constant pool */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}

/*---------------------------------------------------------------------------
 * Do any device-specific inits for the threads and synchronize the kernel
 * initializations.
 *---------------------------------------------------------------------------
 */
static void core_thread_init(unsigned int core)
{
    if (core == CPU)
    {
        /* Wake up coprocessor and let it initialize kernel and threads */
#ifdef CPU_PP502x
        MBX_MSG_CLR = 0x3f;
#endif
        wake_core(COP);
        /* Sleep until COP has finished */
        sleep_core(CPU);
    }
    else
    {
        /* Wake the CPU and return */
        wake_core(CPU);
    }
}
#endif /* NUM_CORES */

#elif CONFIG_CPU == S3C2440

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* FIQ also changes the CLKCON register so FIQ must be disabled
       when changing it here */
    asm volatile (
        "mrs    r0, cpsr        \n"
        "orr    r2, r0, #0x40   \n" /* Disable FIQ */
        "bic    r0, r0, #0x80   \n" /* Prepare IRQ enable */
        "msr    cpsr_c, r2      \n"
        "mov    r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
        "ldr    r2, [r1, #0xc]  \n" /* Set IDLE bit */
        "orr    r2, r2, #4      \n"
        "str    r2, [r1, #0xc]  \n"
        "msr    cpsr_c, r0      \n" /* Enable IRQ, restore FIQ */
        "mov    r2, #0          \n" /* wait for IDLE */
    "1:                         \n"
        "add    r2, r2, #1      \n"
        "cmp    r2, #10         \n"
        "bne    1b              \n"
        "orr    r2, r0, #0xc0   \n" /* Disable IRQ, FIQ */
        "msr    cpsr_c, r2      \n"
        "ldr    r2, [r1, #0xc]  \n" /* Reset IDLE bit */
        "bic    r2, r2, #4      \n"
        "str    r2, [r1, #0xc]  \n"
        "msr    cpsr_c, r0      \n" /* Enable IRQ, restore FIQ */
        : : : "r0", "r1", "r2");
}
#elif defined(CPU_TCC77X)
static inline void core_sleep(void)
{
    #warning TODO: Implement core_sleep
    enable_irq();
}
#elif defined(CPU_TCC780X)
static inline void core_sleep(void)
{
    /* Single core only for now. Use the generic ARMv5 wait for IRQ */
    asm volatile (
        "mov    r0, #0                \n"
        "mcr    p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#elif CONFIG_CPU == IMX31L
static inline void core_sleep(void)
{
    asm volatile (
        "mov    r0, #0                \n"
        "mcr    p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#elif CONFIG_CPU == DM320
static inline void core_sleep(void)
{
    asm volatile (
        "mov    r0, #0                \n"
        "mcr    p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#else
static inline void core_sleep(void)
{
    #warning core_sleep not implemented, battery life will be decreased
    enable_irq();
}
#endif /* CONFIG_CPU == */

#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
    "start_thread:            \n" /* Start here - no naked attribute */
        "move.l %a0, %macsr   \n" /* Set initial mac status reg */
        "lea.l  48(%a1), %a1  \n"
        "move.l (%a1)+, %sp   \n" /* Set initial stack */
        "move.l (%a1), %a2    \n" /* Fetch thread function pointer */
        "clr.l  (%a1)         \n" /* Mark thread running */
        "jsr    (%a2)         \n" /* Call thread function */
    );
    thread_exit();
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run.
 */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0] = (uint32_t)&(thread)->context,     \
       (thread)->context.d[1] = (uint32_t)start_thread,           \
       (thread)->context.start = (uint32_t)(function); })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l  52(%0), %%d0                   \n" /* Get start address */
        "beq.b   1f                             \n" /* NULL -> already running */
        "movem.l (%0), %%a0-%%a2                \n" /* a0=macsr, a1=context, a2=start_thread */
        "jmp     (%%a2)                         \n" /* Start the thread */
    "1:                                         \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0, %%macsr                  \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* Supervisor mode, interrupts enabled upon wakeup */
    asm volatile ("stop #0x2000");
};

#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* r8 = context */
    asm volatile (
    "_start_thread:            \n" /* Start here - no naked attribute */
        "mov.l  @(4, r8), r0   \n" /* Fetch thread function pointer */
        "mov.l  @(28, r8), r15 \n" /* Set initial sp */
        "mov    #0, r1         \n" /* Start the thread */
        "jsr    @r0            \n"
        "mov.l  r1, @(36, r8)  \n" /* Clear start address */
    );
    thread_exit();
}

/* Place context pointer in r8 slot, function pointer in r9 slot, and
 * start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function),         \
       (thread)->context.start = (uint32_t)start_thread; })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add    #36, %0   \n" /* Start at last reg. By the time routine */
        "sts.l  pr, @-%0  \n" /* is done, %0 will have the original value */
        "mov.l  r15,@-%0  \n"
        "mov.l  r14,@-%0  \n"
        "mov.l  r13,@-%0  \n"
        "mov.l  r12,@-%0  \n"
        "mov.l  r11,@-%0  \n"
        "mov.l  r10,@-%0  \n"
        "mov.l  r9, @-%0  \n"
        "mov.l  r8, @-%0  \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @(36, %0), r0 \n" /* Get start address */
        "tst    r0, r0        \n"
        "bt     .running      \n" /* NULL -> already running */
        "jmp    @r0           \n" /* r8 = context */
    ".running:                \n"
        "mov.l  @%0+, r8      \n" /* Executes in delay slot and outside it */
        "mov.l  @%0+, r9      \n"
        "mov.l  @%0+, r10     \n"
        "mov.l  @%0+, r11     \n"
        "mov.l  @%0+, r12     \n"
        "mov.l  @%0+, r13     \n"
        "mov.l  @%0+, r14     \n"
        "mov.l  @%0+, r15     \n"
        "lds.l  @%0+, pr      \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    asm volatile (
        "and.b  #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
        "mov    #0, r1            \n" /* Enable interrupts */
        "ldc    r1, sr            \n" /* Following instruction cannot be interrupted */
        "sleep                    \n" /* Execute standby */
        : : "z"(&SBYCR-GBR) : "r1");
}

#endif /* CONFIG_CPU == */

/*
 * End Processor-specific section
 ***************************************************************************/

#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}
static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}
#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}
#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
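
/* Usage sketch (illustrative): THREAD_ASSERT compiles to nothing unless
 * THREAD_EXTRA_CHECKS is enabled, so checks like the hypothetical one below
 * cost nothing in normal builds.
 *
 *   THREAD_ASSERT(thread->state == STATE_RUNNING,
 *                 "switch_thread -> not running", thread);
 */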

/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&thread->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else
#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })
#endif

/* RTR list */
#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })

#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))

#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))

#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif

/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *    +->+---+->+---+->+---+->+---+--+
 *    |                              |
 *    +------------------------------+
 *---------------------------------------------------------------------------
 */

/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}
1106
Miika Pekkarinena85044b2006-09-16 16:18:11 +00001107/*---------------------------------------------------------------------------
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001108 * Removes a thread from a list of threads. Uses the "l" links.
1109 *---------------------------------------------------------------------------
1110 */
1111static void remove_from_list_l(struct thread_entry **list,
1112 struct thread_entry *thread)
1113{
1114 struct thread_entry *prev, *next;
1115
1116 next = thread->l.next;
1117
1118 if (thread == next)
1119 {
1120 /* The only item */
1121 *list = NULL;
1122 return;
1123 }
1124
1125 if (thread == *list)
1126 {
1127 /* List becomes next item */
1128 *list = next;
1129 }
1130
1131 prev = thread->l.prev;
1132
1133 /* Fix links to jump over the removed entry. */
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001134 next->l.prev = prev;
Michael Sevakis27cf6772008-03-25 02:34:12 +00001135 prev->l.next = next;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001136}
1137
1138/*---------------------------------------------------------------------------
Michael Sevakis27cf6772008-03-25 02:34:12 +00001139 * Timeout list structure - circular reverse (to make "remove item" O(1)),
1140 * NULL-terminated forward (to ease the far more common forward traversal):
1141 * +------------------------------+
1142 * | |
1143 * +--+---+<-+---+<-+---+<-+---+<-+
1144 * Head->| T | | T | | T | | T |
1145 * +---+->+---+->+---+->+---+-X
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001146 *---------------------------------------------------------------------------
1147 */
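
/* The hybrid shape is deliberate: the backward links stay circular so that
 * remove_from_list_tmo() can reach the tail through Head->tmo.prev in O(1),
 * while the far more frequent forward walk in check_tmo_threads() ends at a
 * simple NULL test instead of checking for wrap-around. */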
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001148
1149/*---------------------------------------------------------------------------
1150 * Add a thread to the core's timeout list by linking the pointers in its
1151 * tmo structure.
1152 *---------------------------------------------------------------------------
1153 */
1154static void add_to_list_tmo(struct thread_entry *thread)
1155{
Michael Sevakis27cf6772008-03-25 02:34:12 +00001156 struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
1157 THREAD_ASSERT(thread->tmo.prev == NULL,
1158 "add_to_list_tmo->already listed", thread);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001159
Michael Sevakis27cf6772008-03-25 02:34:12 +00001160 thread->tmo.next = NULL;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001161
Michael Sevakis27cf6772008-03-25 02:34:12 +00001162 if (tmo == NULL)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001163 {
Michael Sevakis27cf6772008-03-25 02:34:12 +00001164 /* Insert into unoccupied list */
1165 thread->tmo.prev = thread;
1166 cores[IF_COP_CORE(thread->core)].timeout = thread;
1167 return;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001168 }
1169
Michael Sevakis27cf6772008-03-25 02:34:12 +00001170 /* Insert last */
1171 thread->tmo.prev = tmo->tmo.prev;
1172 tmo->tmo.prev->tmo.next = thread;
1173 tmo->tmo.prev = thread;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001174}
1175
1176/*---------------------------------------------------------------------------
1177 * Remove a thread from the core's timeout list by unlinking the pointers in
1178 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
1179 * is cancelled.
1180 *---------------------------------------------------------------------------
1181 */
1182static void remove_from_list_tmo(struct thread_entry *thread)
1183{
Michael Sevakis27cf6772008-03-25 02:34:12 +00001184 struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
1185 struct thread_entry *prev = thread->tmo.prev;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001186 struct thread_entry *next = thread->tmo.next;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001187
Michael Sevakis27cf6772008-03-25 02:34:12 +00001188 THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001189
1190 if (next != NULL)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001191 next->tmo.prev = prev;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001192
Michael Sevakis27cf6772008-03-25 02:34:12 +00001193 if (thread == *list)
1194 {
1195 /* List becomes next item and empty if next == NULL */
1196 *list = next;
1197 /* Mark as unlisted */
1198 thread->tmo.prev = NULL;
1199 }
1200 else
1201 {
1202 if (next == NULL)
1203 (*list)->tmo.prev = prev;
1204 prev->tmo.next = next;
1205 /* Mark as unlisted */
1206 thread->tmo.prev = NULL;
1207 }
1208}
1209
1210
1211#ifdef HAVE_PRIORITY_SCHEDULING
1212/*---------------------------------------------------------------------------
1213 * Priority distribution structure (one category for each possible priority):
1214 *
1215 * +----+----+----+ ... +-----+
1216 * hist: | F0 | F1 | F2 | | F31 |
1217 * +----+----+----+ ... +-----+
1218 * mask: | b0 | b1 | b2 | | b31 |
1219 * +----+----+----+ ... +-----+
1220 *
1221 * F = count of threads at priority category n (frequency)
1222 * b = bitmask of non-zero priority categories (occupancy)
1223 *
1224 * / if F[n] != 0 : 1
1225 * b[n] = |
1226 * \ else : 0
1227 *
1228 *---------------------------------------------------------------------------
1229 * Basic priority inheritance protocol (PIP):
1230 *
1231 * Mn = mutex n, Tn = thread n
1232 *
1233 * A lower priority thread inherits the priority of the highest priority
1234 * thread blocked waiting for it to complete an action (such as release a
1235 * mutex or respond to a message via queue_send):
1236 *
1237 * 1) T2->M1->T1
1238 *
1239 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
1240 * priority than T1, then T1 inherits the priority of T2.
1241 *
1242 * 2) T3
1243 * \/
1244 * T2->M1->T1
1245 *
1246 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
1247 * T1 inherits the higher priority of T2 and T3.
1248 *
1249 * 3) T3->M2->T2->M1->T1
1250 *
1251 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
1252 * then T1 inherits the priority of T3 through T2.
1253 *
1254 * Blocking chains can grow arbitrarily complex (though it's best that they
1255 * not form at all very often :) and are built up from these units.
1256 *---------------------------------------------------------------------------
1257 */
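
/* Worked example of the bookkeeping above (illustrative numbers): with one
 * entry at priority 1 and two at priority 5,
 *
 *     hist[1] = 1, hist[5] = 2, every other count 0
 *     mask    = (1 << 1) | (1 << 5) = 0x22
 *
 * find_first_set_bit(mask) returns 1 - the numerically lowest, therefore
 * highest, occupied priority - without scanning the whole histogram. */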
1258
1259/*---------------------------------------------------------------------------
1260 * Increment frequency at category "priority"
1261 *---------------------------------------------------------------------------
1262 */
1263static inline unsigned int prio_add_entry(
1264 struct priority_distribution *pd, int priority)
1265{
1266 unsigned int count;
1267 /* Enough size/instruction count difference for ARM makes it worth it to
1268 * use different code (192 bytes for ARM). Only thing better is ASM. */
1269#ifdef CPU_ARM
1270 count = pd->hist[priority];
1271 if (++count == 1)
1272 pd->mask |= 1 << priority;
1273 pd->hist[priority] = count;
1274#else /* This one's better for Coldfire */
1275 if ((count = ++pd->hist[priority]) == 1)
1276 pd->mask |= 1 << priority;
1277#endif
1278
1279 return count;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001280}
1281
1282/*---------------------------------------------------------------------------
Michael Sevakis27cf6772008-03-25 02:34:12 +00001283 * Decrement frequency at category "priority"
1284 *---------------------------------------------------------------------------
1285 */
1286static inline unsigned int prio_subtract_entry(
1287 struct priority_distribution *pd, int priority)
1288{
1289 unsigned int count;
1290
1291#ifdef CPU_ARM
1292 count = pd->hist[priority];
1293 if (--count == 0)
1294 pd->mask &= ~(1 << priority);
1295 pd->hist[priority] = count;
1296#else
1297 if ((count = --pd->hist[priority]) == 0)
1298 pd->mask &= ~(1 << priority);
1299#endif
1300
1301 return count;
1302}
1303
1304/*---------------------------------------------------------------------------
1305 * Remove from one category and add to another
1306 *---------------------------------------------------------------------------
1307 */
1308static inline void prio_move_entry(
1309 struct priority_distribution *pd, int from, int to)
1310{
1311 uint32_t mask = pd->mask;
1312
1313#ifdef CPU_ARM
1314 unsigned int count;
1315
1316 count = pd->hist[from];
1317 if (--count == 0)
1318 mask &= ~(1 << from);
1319 pd->hist[from] = count;
1320
1321 count = pd->hist[to];
1322 if (++count == 1)
1323 mask |= 1 << to;
1324 pd->hist[to] = count;
1325#else
1326 if (--pd->hist[from] == 0)
1327 mask &= ~(1 << from);
1328
1329 if (++pd->hist[to] == 1)
1330 mask |= 1 << to;
1331#endif
1332
1333 pd->mask = mask;
1334}
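
/* A minimal standalone sketch of the same histogram-plus-bitmask technique,
 * exercising the operations above. The "demo_" names are hypothetical and
 * not part of this file's API, so the block is left disabled: */
#if 0
#include <assert.h>
#include <stdint.h>

static int demo_find_first_set_bit(uint32_t mask)
{
    int bit = 0;                        /* caller guarantees mask != 0 */
    while ((mask & 1) == 0) { mask >>= 1; bit++; }
    return bit;
}

static void demo(void)
{
    struct { unsigned int hist[32]; uint32_t mask; } pd = { {0}, 0 };

    /* two entries arrive at priority 5, one at priority 1 */
    if (++pd.hist[5] == 1) pd.mask |= 1u << 5;
    if (++pd.hist[5] == 1) pd.mask |= 1u << 5;  /* bit already set */
    if (++pd.hist[1] == 1) pd.mask |= 1u << 1;

    assert(demo_find_first_set_bit(pd.mask) == 1);  /* highest priority */

    /* the priority 1 entry leaves; its bit clears only when count hits 0 */
    if (--pd.hist[1] == 0) pd.mask &= ~(1u << 1);

    assert(demo_find_first_set_bit(pd.mask) == 5);
}
#endif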
1335
1336/*---------------------------------------------------------------------------
1337 * Change the priority and rtr entry for a running thread
1338 *---------------------------------------------------------------------------
1339 */
1340static inline void set_running_thread_priority(
1341 struct thread_entry *thread, int priority)
1342{
1343 const unsigned int core = IF_COP_CORE(thread->core);
1344 RTR_LOCK(core);
1345 rtr_move_entry(core, thread->priority, priority);
1346 thread->priority = priority;
1347 RTR_UNLOCK(core);
1348}
1349
1350/*---------------------------------------------------------------------------
1351 * Finds the highest priority thread in a list of threads. If the list is
1352 * empty, PRIORITY_IDLE is returned.
1353 *
1354 * A struct priority_distribution could be embedded within each object
1355 * instead of scanning the threads remaining in the list; as a compromise,
1356 * that per-object memory overhead is avoided at the cost of a slight
1357 * speed penalty under high contention.
1358 *---------------------------------------------------------------------------
1359 */
1360static int find_highest_priority_in_list_l(
1361 struct thread_entry * const thread)
1362{
1363 if (thread != NULL)
1364 {
1365 /* Go through the list until ending up back at the initial thread */
1366 int highest_priority = thread->priority;
1367 struct thread_entry *curr = thread;
1368
1369 do
1370 {
1371 int priority = curr->priority;
1372
1373 if (priority < highest_priority)
1374 highest_priority = priority;
1375
1376 curr = curr->l.next;
1377 }
1378 while (curr != thread);
1379
1380 return highest_priority;
1381 }
1382
1383 return PRIORITY_IDLE;
1384}
1385
1386/*---------------------------------------------------------------------------
1387 * Register priority with the blocking system and bubble it down the chain,
1388 * if any, until we reach the end or find something already equal or higher.
1389 *
1390 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
1391 * targets but that same action also guarantees a circular block anyway and
1392 * those are prevented, right? :-)
1393 *---------------------------------------------------------------------------
1394 */
1395static struct thread_entry *
1396 blocker_inherit_priority(struct thread_entry *current)
1397{
1398 const int priority = current->priority;
1399 struct blocker *bl = current->blocker;
1400 struct thread_entry * const tstart = current;
1401 struct thread_entry *bl_t = bl->thread;
1402
1403 /* Blocker cannot change since the object protection is held */
1404 LOCK_THREAD(bl_t);
1405
1406 for (;;)
1407 {
1408 struct thread_entry *next;
1409 int bl_pr = bl->priority;
1410
1411 if (priority >= bl_pr)
1412 break; /* Object priority already high enough */
1413
1414 bl->priority = priority;
1415
1416 /* Add this one */
1417 prio_add_entry(&bl_t->pdist, priority);
1418
1419 if (bl_pr < PRIORITY_IDLE)
1420 {
1421 /* Not first waiter - subtract old one */
1422 prio_subtract_entry(&bl_t->pdist, bl_pr);
1423 }
1424
1425 if (priority >= bl_t->priority)
1426 break; /* Thread priority high enough */
1427
1428 if (bl_t->state == STATE_RUNNING)
1429 {
1430 /* Blocking thread is a running thread therefore there are no
1431 * further blockers. Change the "run queue" on which it
1432 * resides. */
1433 set_running_thread_priority(bl_t, priority);
1434 break;
1435 }
1436
1437 bl_t->priority = priority;
1438
1439 /* If blocking thread has a blocker, apply transitive inheritance */
1440 bl = bl_t->blocker;
1441
1442 if (bl == NULL)
1443 break; /* End of chain or object doesn't support inheritance */
1444
1445 next = bl->thread;
1446
1447 if (next == tstart)
1448 break; /* Full-circle - deadlock! */
1449
1450 UNLOCK_THREAD(current);
1451
1452#if NUM_CORES > 1
1453 for (;;)
1454 {
1455 LOCK_THREAD(next);
1456
1457 /* Blocker could change - retest condition */
1458 if (bl->thread == next)
1459 break;
1460
1461 UNLOCK_THREAD(next);
1462 next = bl->thread;
1463 }
1464#endif
1465 current = bl_t;
1466 bl_t = next;
1467 }
1468
1469 UNLOCK_THREAD(bl_t);
1470
1471 return current;
1472}
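
/* Illustrative trace of transitive case 3) from the comment block above,
 * with assumed priorities: T1 and T2 start at 10 and T3, at priority 2,
 * blocks on M2 (T3->M2->T2->M1->T1):
 *
 *   pass 1: bl = M2, bl_t = T2. M2's priority and T2's pdist/priority drop
 *           to 2; T2 is itself blocked on M1, so follow bl_t->blocker.
 *   pass 2: bl = M1, bl_t = T1. T1 is boosted to 2 as well; T1 is RUNNING,
 *           so set_running_thread_priority() re-files its rtr entry and the
 *           walk ends.
 *
 * Locks are taken hand-over-hand down the chain, which is why the
 * NUM_CORES > 1 retest of bl->thread above is necessary. */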
1473
1474/*---------------------------------------------------------------------------
1475 * Readjust priorities when waking a thread blocked waiting for another
1476 * in essence "releasing" the thread's effect on the object owner. Can be
1477 * performed from any context.
1478 *---------------------------------------------------------------------------
1479 */
1480struct thread_entry *
1481 wakeup_priority_protocol_release(struct thread_entry *thread)
1482{
1483 const int priority = thread->priority;
1484 struct blocker *bl = thread->blocker;
1485 struct thread_entry * const tstart = thread;
1486 struct thread_entry *bl_t = bl->thread;
1487
1488 /* Blocker cannot change since object will be locked */
1489 LOCK_THREAD(bl_t);
1490
1491 thread->blocker = NULL; /* Thread not blocked */
1492
1493 for (;;)
1494 {
1495 struct thread_entry *next;
1496 int bl_pr = bl->priority;
1497
1498 if (priority > bl_pr)
1499 break; /* Object priority higher */
1500
1501 next = *thread->bqp;
1502
1503 if (next == NULL)
1504 {
1505 /* No more threads in queue */
1506 prio_subtract_entry(&bl_t->pdist, bl_pr);
1507 bl->priority = PRIORITY_IDLE;
1508 }
1509 else
1510 {
1511 /* Check list for highest remaining priority */
1512 int queue_pr = find_highest_priority_in_list_l(next);
1513
1514 if (queue_pr == bl_pr)
1515 break; /* Object priority not changing */
1516
1517 /* Change queue priority */
1518 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
1519 bl->priority = queue_pr;
1520 }
1521
1522 if (bl_pr > bl_t->priority)
1523 break; /* thread priority is higher */
1524
1525 bl_pr = find_first_set_bit(bl_t->pdist.mask);
1526
1527 if (bl_pr == bl_t->priority)
1528 break; /* Thread priority not changing */
1529
1530 if (bl_t->state == STATE_RUNNING)
1531 {
1532 /* No further blockers */
1533 set_running_thread_priority(bl_t, bl_pr);
1534 break;
1535 }
1536
1537 bl_t->priority = bl_pr;
1538
1539 /* If blocking thread has a blocker, apply transitive inheritance */
1540 bl = bl_t->blocker;
1541
1542 if (bl == NULL)
1543 break; /* End of chain or object doesn't support inheritance */
1544
1545 next = bl->thread;
1546
1547 if (next == tstart)
1548 break; /* Full-circle - deadlock! */
1549
1550 UNLOCK_THREAD(thread);
1551
1552#if NUM_CORES > 1
1553 for (;;)
1554 {
1555 LOCK_THREAD(next);
1556
1557 /* Blocker could change - retest condition */
1558 if (bl->thread == next)
1559 break;
1560
1561 UNLOCK_THREAD(next);
1562 next = bl->thread;
1563 }
1564#endif
1565 thread = bl_t;
1566 bl_t = next;
1567 }
1568
1569 UNLOCK_THREAD(bl_t);
1570
1571#if NUM_CORES > 1
1572 if (thread != tstart)
1573 {
1574 /* Relock original if it changed */
1575 LOCK_THREAD(tstart);
1576 }
1577#endif
1578
1579 return cores[CURRENT_CORE].running;
1580}
1581
1582/*---------------------------------------------------------------------------
1583 * Transfer ownership to a thread waiting for an object and transfer
1584 * inherited priority boost from other waiters. This algorithm knows that
1585 * blocking chains may only unblock from the very end.
1586 *
1587 * Only the owning thread itself may call this and so the assumption that
1588 * it is the running thread is made.
1589 *---------------------------------------------------------------------------
1590 */
1591struct thread_entry *
1592 wakeup_priority_protocol_transfer(struct thread_entry *thread)
1593{
1594 /* Waking thread inherits priority boost from object owner */
1595 struct blocker *bl = thread->blocker;
1596 struct thread_entry *bl_t = bl->thread;
1597 struct thread_entry *next;
1598 int bl_pr;
1599
1600 THREAD_ASSERT(thread_get_current() == bl_t,
1601 "UPPT->wrong thread", thread_get_current());
1602
1603 LOCK_THREAD(bl_t);
1604
1605 bl_pr = bl->priority;
1606
1607 /* Remove the object's boost from the owning thread */
1608 if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
1609 bl_pr <= bl_t->priority)
1610 {
1611 /* No more threads at this priority are waiting and the old level is
1612 * at least the thread level */
1613 int priority = find_first_set_bit(bl_t->pdist.mask);
1614
1615 if (priority != bl_t->priority)
1616 {
1617 /* Adjust this thread's priority */
1618 set_running_thread_priority(bl_t, priority);
1619 }
1620 }
1621
1622 next = *thread->bqp;
1623
1624 if (next == NULL)
1625 {
1626 /* Expected shortcut - no more waiters */
1627 bl_pr = PRIORITY_IDLE;
1628 }
1629 else
1630 {
1631 if (thread->priority <= bl_pr)
1632 {
1633 /* Need to scan threads remaining in queue */
1634 bl_pr = find_highest_priority_in_list_l(next);
1635 }
1636
1637 if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
1638 bl_pr < thread->priority)
1639 {
1640 /* Thread priority must be raised */
1641 thread->priority = bl_pr;
1642 }
1643 }
1644
1645 bl->thread = thread; /* This thread pwns */
1646 bl->priority = bl_pr; /* Save highest blocked priority */
1647 thread->blocker = NULL; /* Thread not blocked */
1648
1649 UNLOCK_THREAD(bl_t);
1650
1651 return bl_t;
1652}
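
/* Usage note: this is the wakeup protocol suited to ownership objects such
 * as mutexes - on unlock with waiters queued, the object and any remaining
 * priority boost pass straight to the woken thread instead of being torn
 * down and rebuilt. */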
1653
1654/*---------------------------------------------------------------------------
1655 * No threads must be blocked waiting for this thread except to await its exit.
1656 * The alternative is more elaborate cleanup and object registration code.
1657 * Check this for risk of silent data corruption when objects with
1658 * inheritable blocking are abandoned by the owner - not precise but may
1659 * catch something.
1660 *---------------------------------------------------------------------------
1661 */
Bertrik Sikkene15f8a22008-05-03 08:35:14 +00001662static void check_for_obj_waiters(const char *function, struct thread_entry *thread)
Michael Sevakis27cf6772008-03-25 02:34:12 +00001663{
1664 /* Only one bit in the mask should be set, with a frequency of 1: the
1665 * thread's own base priority (mask & (mask - 1) tests for >1 bit set) */
1666 uint32_t mask = thread->pdist.mask;
1667 if ((mask & (mask - 1)) != 0 ||
1668 thread->pdist.hist[find_first_set_bit(mask)] > 1)
1669 {
1670 char name[32];
1671 thread_get_name(name, 32, thread);
1672 panicf("%s->%s with obj. waiters", function, name);
1673 }
1674}
1675#endif /* HAVE_PRIORITY_SCHEDULING */
1676
1677/*---------------------------------------------------------------------------
1678 * Move a thread back to a running state on its core.
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001679 *---------------------------------------------------------------------------
1680 */
1681static void core_schedule_wakeup(struct thread_entry *thread)
1682{
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001683 const unsigned int core = IF_COP_CORE(thread->core);
Michael Sevakis27cf6772008-03-25 02:34:12 +00001684
1685 RTR_LOCK(core);
1686
1687 thread->state = STATE_RUNNING;
1688
1689 add_to_list_l(&cores[core].running, thread);
1690 rtr_add_entry(core, thread->priority);
1691
1692 RTR_UNLOCK(core);
1693
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001694#if NUM_CORES > 1
1695 if (core != CURRENT_CORE)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001696 core_wake(core);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001697#endif
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001698}
1699
1700/*---------------------------------------------------------------------------
1701 * Check the core's timeout list when at least one thread is due to wake.
1702 * Filtering for the condition is done before making the call. Resets the
1703 * tick when the next check will occur.
1704 *---------------------------------------------------------------------------
1705 */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001706void check_tmo_threads(void)
Björn Stenbergc4d8d972003-02-14 09:44:34 +00001707{
Michael Sevakisa690ebb2007-07-30 16:44:36 +00001708 const unsigned int core = CURRENT_CORE;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001709 const long tick = current_tick; /* snapshot the current tick */
1710 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1711 struct thread_entry *next = cores[core].timeout;
1712
1713 /* If there are no processes waiting for a timeout, just keep the check
1714 tick from falling into the past. */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001715
1716 /* Break the loop once we have walked through the list of all
1717 * sleeping processes or have removed them all. */
1718 while (next != NULL)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001719 {
Michael Sevakis27cf6772008-03-25 02:34:12 +00001720 /* Check sleeping threads. Allow interrupts between checks. */
Michael Sevakisaf395f42008-03-26 01:50:41 +00001721 enable_irq();
Michael Sevakis27cf6772008-03-25 02:34:12 +00001722
1723 struct thread_entry *curr = next;
1724
1725 next = curr->tmo.next;
1726
1727 /* Lock thread slot against explicit wakeup */
Michael Sevakisaf395f42008-03-26 01:50:41 +00001728 disable_irq();
Michael Sevakis27cf6772008-03-25 02:34:12 +00001729 LOCK_THREAD(curr);
1730
1731 unsigned state = curr->state;
1732
1733 if (state < TIMEOUT_STATE_FIRST)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001734 {
Michael Sevakis27cf6772008-03-25 02:34:12 +00001735 /* Cleanup threads no longer on a timeout but still on the
1736 * list. */
1737 remove_from_list_tmo(curr);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001738 }
Michael Sevakis27cf6772008-03-25 02:34:12 +00001739 else if (TIME_BEFORE(tick, curr->tmo_tick))
1740 {
1741 /* Timeout still pending - this will be the usual case */
1742 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1743 {
1744 /* Earliest timeout found so far - move the next check up
1745 to its time */
1746 next_tmo_check = curr->tmo_tick;
1747 }
1748 }
1749 else
1750 {
1751 /* Sleep timeout has been reached so bring the thread back to
1752 * life again. */
1753 if (state == STATE_BLOCKED_W_TMO)
1754 {
1755#if NUM_CORES > 1
1756 /* Lock the waiting thread's kernel object */
1757 struct corelock *ocl = curr->obj_cl;
1758
1759 if (corelock_try_lock(ocl) == 0)
1760 {
1761 /* Need to retry in the correct order though the need is
1762 * unlikely */
1763 UNLOCK_THREAD(curr);
1764 corelock_lock(ocl);
1765 LOCK_THREAD(curr);
1766
1767 if (curr->state != STATE_BLOCKED_W_TMO)
1768 {
1769 /* Thread was woken or removed explicitly while slot
1770 * was unlocked */
1771 corelock_unlock(ocl);
1772 remove_from_list_tmo(curr);
1773 UNLOCK_THREAD(curr);
1774 continue;
1775 }
1776 }
1777#endif /* NUM_CORES */
1778
1779 remove_from_list_l(curr->bqp, curr);
1780
1781#ifdef HAVE_WAKEUP_EXT_CB
1782 if (curr->wakeup_ext_cb != NULL)
1783 curr->wakeup_ext_cb(curr);
1784#endif
1785
1786#ifdef HAVE_PRIORITY_SCHEDULING
1787 if (curr->blocker != NULL)
1788 wakeup_priority_protocol_release(curr);
1789#endif
1790 corelock_unlock(ocl);
1791 }
1792 /* else state == STATE_SLEEPING */
1793
1794 remove_from_list_tmo(curr);
1795
1796 RTR_LOCK(core);
1797
1798 curr->state = STATE_RUNNING;
1799
1800 add_to_list_l(&cores[core].running, curr);
1801 rtr_add_entry(core, curr->priority);
1802
1803 RTR_UNLOCK(core);
1804 }
1805
1806 UNLOCK_THREAD(curr);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001807 }
1808
1809 cores[core].next_tmo_check = next_tmo_check;
1810}
1811
1812/*---------------------------------------------------------------------------
1813 * Performs operations that must be done before blocking a thread but after
Michael Sevakis27cf6772008-03-25 02:34:12 +00001814 * the state is saved.
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001815 *---------------------------------------------------------------------------
1816 */
Michael Sevakis32a531b2008-01-19 13:27:47 +00001817#if NUM_CORES > 1
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001818static inline void run_blocking_ops(
Michael Sevakis608c5472008-01-19 13:47:26 +00001819 unsigned int core, struct thread_entry *thread)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001820{
Michael Sevakis27cf6772008-03-25 02:34:12 +00001821 struct thread_blk_ops *ops = &cores[core].blk_ops;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001822 const unsigned flags = ops->flags;
1823
Michael Sevakis27cf6772008-03-25 02:34:12 +00001824 if (flags == TBOP_CLEAR)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001825 return;
1826
Michael Sevakis27cf6772008-03-25 02:34:12 +00001827 switch (flags)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001828 {
Michael Sevakis27cf6772008-03-25 02:34:12 +00001829 case TBOP_SWITCH_CORE:
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001830 core_switch_blk_op(core, thread);
Michael Sevakis27cf6772008-03-25 02:34:12 +00001831 /* Fall-through */
1832 case TBOP_UNLOCK_CORELOCK:
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001833 corelock_unlock(ops->cl_p);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001834 break;
1835 }
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001836
Michael Sevakis27cf6772008-03-25 02:34:12 +00001837 ops->flags = TBOP_CLEAR;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001838}
Michael Sevakis32a531b2008-01-19 13:27:47 +00001839#endif /* NUM_CORES > 1 */
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001840
Miika Pekkarinena85044b2006-09-16 16:18:11 +00001841#ifdef RB_PROFILE
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001842void profile_thread(void)
1843{
1844 profstart(cores[CURRENT_CORE].running - threads);
1845}
Miika Pekkarinena85044b2006-09-16 16:18:11 +00001846#endif
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001847
1848/*---------------------------------------------------------------------------
1849 * Prepares a thread to block on an object's list and/or for a specified
Michael Sevakis27cf6772008-03-25 02:34:12 +00001850 * duration - expects object and slot to be appropriately locked if needed
1851 * and interrupts to be masked.
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001852 *---------------------------------------------------------------------------
1853 */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001854static inline void block_thread_on_l(struct thread_entry *thread,
1855 unsigned state)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001856{
1857 /* If inlined, unreachable branches will be pruned with no size penalty
Michael Sevakis27cf6772008-03-25 02:34:12 +00001858 because state is passed as a constant parameter. */
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001859 const unsigned int core = IF_COP_CORE(thread->core);
1860
1861 /* Remove the thread from the list of running threads. */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001862 RTR_LOCK(core);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001863 remove_from_list_l(&cores[core].running, thread);
Michael Sevakis27cf6772008-03-25 02:34:12 +00001864 rtr_subtract_entry(core, thread->priority);
1865 RTR_UNLOCK(core);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001866
1867 /* Add a timeout to the block if not infinite */
1868 switch (state)
1869 {
1870 case STATE_BLOCKED:
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001871 case STATE_BLOCKED_W_TMO:
1872 /* Put the thread into a new list of inactive threads. */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001873 add_to_list_l(thread->bqp, thread);
1874
1875 if (state == STATE_BLOCKED)
1876 break;
1877
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001878 /* Fall-through */
1879 case STATE_SLEEPING:
1880 /* If this thread times out sooner than any other thread, update
1881 next_tmo_check to its timeout */
1882 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1883 {
1884 cores[core].next_tmo_check = thread->tmo_tick;
1885 }
1886
1887 if (thread->tmo.prev == NULL)
1888 {
1889 add_to_list_tmo(thread);
1890 }
1891 /* else thread was never removed from list - just keep it there */
1892 break;
1893 }
1894
Michael Sevakis27cf6772008-03-25 02:34:12 +00001895 /* Remember the next thread about to block. */
1896 cores[core].block_task = thread;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001897
Michael Sevakis27cf6772008-03-25 02:34:12 +00001898 /* Report new state. */
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001899 thread->state = state;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001900}
1901
1902/*---------------------------------------------------------------------------
1903 * Switch thread in round robin fashion for any given priority. Any thread
1904 * that removed itself from the running list first must specify itself in
1905 * the parameter.
1906 *
1907 * INTERNAL: Intended for use by kernel and not for programs.
1908 *---------------------------------------------------------------------------
1909 */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001910void switch_thread(void)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001911{
1912 const unsigned int core = CURRENT_CORE;
Michael Sevakis27cf6772008-03-25 02:34:12 +00001913 struct thread_entry *block = cores[core].block_task;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001914 struct thread_entry *thread = cores[core].running;
1915
Michael Sevakis27cf6772008-03-25 02:34:12 +00001916 /* Get context to save - next thread to run is unknown until all wakeups
1917 * are evaluated */
1918 if (block != NULL)
1919 {
1920 cores[core].block_task = NULL;
1921
1922#if NUM_CORES > 1
1923 if (thread == block)
1924 {
1925 /* This was the last thread running and another core woke us before
1926 * reaching here. Force next thread selection to give tmo threads or
1927 * other threads woken before this block a first chance. */
1928 block = NULL;
1929 }
1930 else
1931#endif
1932 {
1933 /* Blocking task is the old one */
1934 thread = block;
1935 }
1936 }
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001937
1938#ifdef RB_PROFILE
Michael Sevakis27cf6772008-03-25 02:34:12 +00001939 profile_thread_stopped(thread - threads);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001940#endif
Michael Sevakis165f62d2007-03-26 03:24:36 +00001941
Miika Pekkarinena85044b2006-09-16 16:18:11 +00001942 /* Begin task switching by saving our current context so that we can
1943 * restore the state of the current thread later to the point prior
1944 * to this call. */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001945 store_context(&thread->context);
Miika Pekkarinena85044b2006-09-16 16:18:11 +00001946
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001947 /* Check if the current thread stack is overflown */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001948 if (thread->stack[0] != DEADBEEF)
1949 thread_stkov(thread);
Michael Sevakis43c15922006-12-16 18:35:12 +00001950
Michael Sevakis32a531b2008-01-19 13:27:47 +00001951#if NUM_CORES > 1
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001952 /* Run any blocking operations requested before switching/sleeping */
Michael Sevakis27cf6772008-03-25 02:34:12 +00001953 run_blocking_ops(core, thread);
Michael Sevakis32a531b2008-01-19 13:27:47 +00001954#endif
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00001955
Miika Pekkarinena85044b2006-09-16 16:18:11 +00001956#ifdef HAVE_PRIORITY_SCHEDULING
Michael Sevakis606d9d02008-06-03 04:23:09 +00001957 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
Michael Sevakis27cf6772008-03-25 02:34:12 +00001958 /* Reset the value of thread's skip count */
Michael Sevakis606d9d02008-06-03 04:23:09 +00001959 thread->skip_count = 0;
Michael Sevakis27cf6772008-03-25 02:34:12 +00001960#endif
Michael Sevakisbbe3f1f2008-02-28 17:40:18 +00001961
Miika Pekkarinena85044b2006-09-16 16:18:11 +00001962 for (;;)
1963 {
Michael Sevakis27cf6772008-03-25 02:34:12 +00001964 /* If there are threads on a timeout and the earliest wakeup is due,
1965 * check the list and wake any threads that need to start running
1966 * again. */
1967 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
Miika Pekkarinen66258a32007-03-26 16:55:17 +00001968 {
Michael Sevakis27cf6772008-03-25 02:34:12 +00001969 check_tmo_threads();
Miika Pekkarinen66258a32007-03-26 16:55:17 +00001970 }
Brandon Low8a828922006-11-11 05:33:24 +00001971
Michael Sevakisaf395f42008-03-26 01:50:41 +00001972 disable_irq();
Michael Sevakis27cf6772008-03-25 02:34:12 +00001973 RTR_LOCK(core);
1974
1975 thread = cores[core].running;
1976
1977 if (thread == NULL)
1978 {
1979 /* Enter sleep mode to reduce power usage - woken up on interrupt
1980 * or wakeup request from another core - expected to enable
1981 * interrupts. */
1982 RTR_UNLOCK(core);
1983 core_sleep(IF_COP(core));
1984 }
1985 else
1986 {
1987#ifdef HAVE_PRIORITY_SCHEDULING
1988 /* Select the new task based on priorities and the last time a
1989 * process got CPU time relative to the highest priority runnable
1990 * task. */
1991 struct priority_distribution *pd = &cores[core].rtr;
1992 int max = find_first_set_bit(pd->mask);
1993
1994 if (block == NULL)
1995 {
1996 /* Not switching on a block, tentatively select next thread */
1997 thread = thread->l.next;
1998 }
1999
2000 for (;;)
2001 {
2002 int priority = thread->priority;
2003 int diff;
2004
2005 /* This ridiculously simple method of aging seems to work
2006 * suspiciously well. It does tend to reward CPU hogs (under-
2007 * yielding) but that's generally not desirable at all. On the
2008 * plus side it penalizes, relative to other threads, excess
2009 * yielding which is good if some high priority thread is
2010 * performing no useful work such as polling for a device to be
2011 * ready. Of course, aging is only employed when higher and lower
2012 * priority threads are runnable. The highest priority runnable
2013 * thread(s) are never skipped. */
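                /* Worked numbers: with max = 1, a thread at priority 4 has
                 * diff = 3 and is passed over until its skip_count exceeds
                 * diff*diff = 9, i.e. it finally runs on the 10th
                 * consideration despite higher priority work being ready. */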
2014 if (priority <= max ||
Michael Sevakis606d9d02008-06-03 04:23:09 +00002015 IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
Michael Sevakis27cf6772008-03-25 02:34:12 +00002016 (diff = priority - max, ++thread->skip_count > diff*diff))
2017 {
2018 cores[core].running = thread;
2019 break;
2020 }
2021
2022 thread = thread->l.next;
2023 }
Michael Sevakisbbe3f1f2008-02-28 17:40:18 +00002024#else
Michael Sevakis27cf6772008-03-25 02:34:12 +00002025 /* Without priority use a simple FCFS algorithm */
2026 if (block == NULL)
2027 {
2028 /* Not switching on a block, select next thread */
2029 thread = thread->l.next;
2030 cores[core].running = thread;
2031 }
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002032#endif /* HAVE_PRIORITY_SCHEDULING */
2033
Michael Sevakis27cf6772008-03-25 02:34:12 +00002034 RTR_UNLOCK(core);
Michael Sevakisaf395f42008-03-26 01:50:41 +00002035 enable_irq();
Michael Sevakis27cf6772008-03-25 02:34:12 +00002036 break;
2037 }
2038 }
2039
Miika Pekkarinena85044b2006-09-16 16:18:11 +00002040 /* And finally give control to the next thread. */
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002041 load_context(&thread->context);
2042
Miika Pekkarinena85044b2006-09-16 16:18:11 +00002043#ifdef RB_PROFILE
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002044 profile_thread_started(thread - threads);
Miika Pekkarinena85044b2006-09-16 16:18:11 +00002045#endif
Björn Stenbergc4d8d972003-02-14 09:44:34 +00002046}
2047
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002048/*---------------------------------------------------------------------------
Michael Sevakis27cf6772008-03-25 02:34:12 +00002049 * Sleeps a thread for at least a specified number of ticks with zero being
2050 * a wait until the next tick.
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002051 *
2052 * INTERNAL: Intended for use by kernel and not for programs.
2053 *---------------------------------------------------------------------------
2054 */
2055void sleep_thread(int ticks)
Miika Pekkarinena85044b2006-09-16 16:18:11 +00002056{
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002057 struct thread_entry *current = cores[CURRENT_CORE].running;
Brandon Low8a828922006-11-11 05:33:24 +00002058
Michael Sevakis27cf6772008-03-25 02:34:12 +00002059 LOCK_THREAD(current);
Brandon Low8a828922006-11-11 05:33:24 +00002060
Michael Sevakis27cf6772008-03-25 02:34:12 +00002061 /* Set our timeout, remove from run list and join timeout list. */
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002062 current->tmo_tick = current_tick + ticks + 1;
Michael Sevakis27cf6772008-03-25 02:34:12 +00002063 block_thread_on_l(current, STATE_SLEEPING);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002064
Michael Sevakis27cf6772008-03-25 02:34:12 +00002065 UNLOCK_THREAD(current);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002066}
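
/* Expected calling pattern (a sketch; the kernel's sleep() wrapper is
 * assumed to look roughly like this): interrupts must already be masked and
 * the caller still has to yield explicitly -
 *
 *     disable_irq();
 *     sleep_thread(HZ/10);   // wake after roughly 100ms; tmo_tick rounds up
 *     switch_thread();
 */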
2067
2068/*---------------------------------------------------------------------------
2069 * Indefinitely block a thread on a blocking queue for explicit wakeup.
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002070 *
2071 * INTERNAL: Intended for use by kernel objects and not for programs.
2072 *---------------------------------------------------------------------------
2073 */
Michael Sevakis27cf6772008-03-25 02:34:12 +00002074void block_thread(struct thread_entry *current)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002075{
Michael Sevakis27cf6772008-03-25 02:34:12 +00002076 /* Set the state to blocked and take us off of the run queue until we
2077 * are explicitly woken */
2078 LOCK_THREAD(current);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002079
Michael Sevakis27cf6772008-03-25 02:34:12 +00002080 /* Set the list for explicit wakeup */
2081 block_thread_on_l(current, STATE_BLOCKED);
Michael Sevakis43c15922006-12-16 18:35:12 +00002082
Michael Sevakis27cf6772008-03-25 02:34:12 +00002083#ifdef HAVE_PRIORITY_SCHEDULING
2084 if (current->blocker != NULL)
2085 {
2086 /* Object supports PIP */
2087 current = blocker_inherit_priority(current);
2088 }
Brandon Low8a828922006-11-11 05:33:24 +00002089#endif
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002090
Michael Sevakis27cf6772008-03-25 02:34:12 +00002091 UNLOCK_THREAD(current);
Brandon Low8a828922006-11-11 05:33:24 +00002092}
2093
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002094/*---------------------------------------------------------------------------
2095 * Block a thread on a blocking queue for a specified time interval or until
2096 * explicitly woken - whichever happens first.
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002097 *
2098 * INTERNAL: Intended for use by kernel objects and not for programs.
2099 *---------------------------------------------------------------------------
2100 */
Michael Sevakis27cf6772008-03-25 02:34:12 +00002101void block_thread_w_tmo(struct thread_entry *current, int timeout)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002102{
Miika Pekkarinena85044b2006-09-16 16:18:11 +00002103 /* Get the entry for the current running thread. */
Michael Sevakis27cf6772008-03-25 02:34:12 +00002104 LOCK_THREAD(current);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002105
Brandon Low8a828922006-11-11 05:33:24 +00002106 /* Set the state to blocked with the specified timeout */
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002107 current->tmo_tick = current_tick + timeout;
Michael Sevakis27cf6772008-03-25 02:34:12 +00002108
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002109 /* Set the list for explicit wakeup */
Michael Sevakis27cf6772008-03-25 02:34:12 +00002110 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
Brandon Low8a828922006-11-11 05:33:24 +00002111
Michael Sevakis27cf6772008-03-25 02:34:12 +00002112#ifdef HAVE_PRIORITY_SCHEDULING
2113 if (current->blocker != NULL)
2114 {
2115 /* Object supports PIP */
2116 current = blocker_inherit_priority(current);
2117 }
2118#endif
Brandon Low8a828922006-11-11 05:33:24 +00002119
Michael Sevakis27cf6772008-03-25 02:34:12 +00002120 UNLOCK_THREAD(current);
Miika Pekkarinena85044b2006-09-16 16:18:11 +00002121}
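
/* Sketch of the block/wakeup round trip for a kernel object (the queue "q",
 * its corelock and the field usage are illustrative; cf. the queue code in
 * kernel.c):
 *
 *     current->bqp = &q->queue;          // list to block on
 *     disable_irq();
 *     corelock_lock(&q->cl);
 *     block_thread_w_tmo(current, HZ);   // give up after ~1 second
 *     corelock_unlock(&q->cl);
 *     switch_thread();
 *
 * Another thread later calls wakeup_thread(&q->queue) under the same
 * corelock to release the waiter early; otherwise check_tmo_threads()
 * revives it when the timeout expires. */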
2122
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002123/*---------------------------------------------------------------------------
Michael Sevakis27cf6772008-03-25 02:34:12 +00002124 * Explicitly wake up a thread on a blocking queue. Only affects threads in
2125 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
2126 *
2127 * This code should be considered a critical section by the caller meaning
2128 * that the object's corelock should be held.
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002129 *
2130 * INTERNAL: Intended for use by kernel objects and not for programs.
2131 *---------------------------------------------------------------------------
2132 */
Michael Sevakis27cf6772008-03-25 02:34:12 +00002133unsigned int wakeup_thread(struct thread_entry **list)
Michael Sevakis43c15922006-12-16 18:35:12 +00002134{
Michael Sevakis27cf6772008-03-25 02:34:12 +00002135 struct thread_entry *thread = *list;
2136 unsigned int result = THREAD_NONE;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002137
2138 /* Check if there is a blocked thread at all. */
Michael Sevakis27cf6772008-03-25 02:34:12 +00002139 if (thread == NULL)
2140 return result;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002141
Michael Sevakis27cf6772008-03-25 02:34:12 +00002142 LOCK_THREAD(thread);
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002143
2144 /* Determine thread's current state. */
Michael Sevakis27cf6772008-03-25 02:34:12 +00002145 switch (thread->state)
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002146 {
2147 case STATE_BLOCKED:
2148 case STATE_BLOCKED_W_TMO:
Michael Sevakis27cf6772008-03-25 02:34:12 +00002149 remove_from_list_l(list, thread);
2150
2151 result = THREAD_OK;
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002152
2153#ifdef HAVE_PRIORITY_SCHEDULING
Michael Sevakis27cf6772008-03-25 02:34:12 +00002154 struct thread_entry *current;
2155 struct blocker *bl = thread->blocker;
2156
2157 if (bl == NULL)
2158 {
2159 /* No inheritance - just boost the thread by aging */
Michael Sevakis606d9d02008-06-03 04:23:09 +00002160 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
2161 thread->skip_count = thread->priority;
Michael Sevakis27cf6772008-03-25 02:34:12 +00002162 current = cores[CURRENT_CORE].running;
2163 }
2164 else
2165 {
2166 /* Call the specified unblocking PIP */
2167 current = bl->wakeup_protocol(thread);
2168 }
2169
2170 if (current != NULL && thread->priority < current->priority
2171 IF_COP( && thread->core == current->core ))
2172 {
2173 /* Woken thread is higher priority and exists on the same CPU core;
2174 * recommend a task switch. Knowing if this is an interrupt call
2175 * would be helpful here. */
2176 result |= THREAD_SWITCH;
2177 }
2178#endif /* HAVE_PRIORITY_SCHEDULING */
2179
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002180 core_schedule_wakeup(thread);
Michael Sevakis27cf6772008-03-25 02:34:12 +00002181 break;
2182
2183 /* Nothing to do. State is not blocked. */
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002184#if THREAD_EXTRA_CHECKS
Michael Sevakis27cf6772008-03-25 02:34:12 +00002185 default:
Michael Sevakisa9b2fb52007-10-16 01:25:17 +00002186 THREAD_PANICF("wakeup_thread->block invalid", thread);
2187