/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 by Daniel Ankers
 *
 * PP5002 and PP502x SoC threading support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
/* Support a special workaround object for large-sector disks */
#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
#endif

#if NUM_CORES == 1
/* Single-core variants for FORCE_SINGLE_CORE */
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}

/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey (because this is it) */
}
#else /* NUM_CORES > 1 */
/** Model-generic PP dual-core code **/
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];

static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

/* Core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}
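
/* Layout assumed by the assembly below, inferred from the byte offsets it
 * uses (the authoritative definition lives in the kernel headers, so treat
 * this as a sketch, not the definition):
 *
 *   struct corelock
 *   {
 *       volatile unsigned char myl[NUM_CORES];  -- bytes 0..1, per-core flags
 *       volatile unsigned char turn;            -- byte 2, whose turn to wait
 *   };
 *
 * PROCESSOR_ID reads 0x55 on the CPU and 0xaa on the COP, so "id >> 7"
 * yields an array index of 0 or 1 and "id ^ 0xff" yields the other core's
 * ID - the complementary-bitmask trick the lock comments refer to. */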

#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void __attribute__((naked)) corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
    "1:                              \n"
        "ldrb   r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "ldrb   r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp    r3, r1               \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
        : "r1", "r2", "r3"
    );
    (void)cl;
}

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r3, r0               \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors   r0, r0, r2           \n" /* yes? lock acquired */
        "bxne   lr                   \n"
        "ldrb   r0, [r3, #2]         \n" /* || cl->turn == core? */
        "ands   r0, r0, r1           \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx     lr                   \n" /* return result */
        : : "i"(&PROCESSOR_ID)
        : "r1", "r2", "r3"
    );
    return 0;
    (void)cl;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void __attribute__((naked)) corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
        : "r1", "r2"
    );
    (void)cl;
}

#else /* C versions for reference */

void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    for (;;)
    {
        if (cl->myl[othercore] == 0 || cl->turn == core)
            break;
    }
}

int corelock_try_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    if (cl->myl[othercore] == 0 || cl->turn == core)
    {
        return 1;
    }

    cl->myl[core] = 0;
    return 0;
}

void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */
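
/* Usage sketch (illustrative only, kept out of the build): a corelock
 * serializes a short critical section shared by CPU and COP. The names
 * example_cl, shared_counter and example_increment are hypothetical. */
#if 0
static struct corelock example_cl IBSS_ATTR;
static volatile int shared_counter IBSS_ATTR;

static void example_increment(void)
{
    corelock_lock(&example_cl);   /* spin until the other core is out */
    shared_counter++;             /* only one core executes this at a time */
    corelock_unlock(&example_cl);
}
#endif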

/*---------------------------------------------------------------------------
 * Do any device-specific inits for the threads and synchronize the kernel
 * initializations.
 *---------------------------------------------------------------------------
 */
static void INIT_ATTR core_thread_init(unsigned int core)
{
    if (core == CPU)
    {
        /* Wake up coprocessor and let it initialize kernel and threads */
#ifdef CPU_PP502x
        MBX_MSG_CLR = 0x3f;
#endif
        wake_core(COP);
        /* Sleep until COP has finished */
        sleep_core(CPU);
    }
    else
    {
        /* Wake the CPU and return */
        wake_core(CPU);
    }
}
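
/* Startup handshake summarized from the code above (a reading of it, not a
 * separate spec): the CPU clears any stale mailbox bits on PP502x, wakes
 * the COP, and sleeps; the COP performs its kernel/thread initialization
 * and, when it reaches this function with core == COP, wakes the CPU again
 * so both cores leave init together. */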

/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core then calls
 * the final exit routine to actually finish removing the thread from the
 * scheduler.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void __attribute__((noreturn,always_inline))
    thread_final_exit(struct thread_entry *current)
{
    asm volatile (
        "cmp    %1, #0               \n" /* CPU? */
        "ldrne  r0, =cpucache_flush  \n" /* No? write back data */
        "movne  lr, pc               \n"
        "bxne   r0                   \n"
        "mov    r0, %0               \n" /* copy thread parameter */
        "mov    sp, %2               \n" /* switch to idle stack  */
        "bl     thread_final_exit_do \n" /* finish removal */
        : : "r"(current),
            "r"(current->core),
            "r"(&idle_stacks[current->core][IDLE_STACK_WORDS])
        : "r0", "r1", "r2", "r3", "ip", "lr"); /* Because of flush call,
                                                  force inputs out
                                                  of scratch regs */
    while (1);
}

/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place before changing the processor and after
 * having entered switch_thread, since switch_thread may not do a normal
 * return: the stack holding anything the compiler saved will not belong to
 * the thread's destination core and may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in
 * the nonvolatile registers before the call, since the compiler's order of
 * operations cannot be known for certain.
 *---------------------------------------------------------------------------
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    cpucache_flush();
    /* Stash thread in r4 slot */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}
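
/* How the destination core consumes these slots (assuming the load_context
 * behavior referenced by the "see load_context" comment below): when
 * context.start is nonzero, load_context loads r0 from the r4 slot (the
 * thread pointer) and jumps to the stashed restart address instead of
 * restoring a normal register frame, so no stale stack is ever touched. */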

/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill-behavior regarding
 * naked functions written in C, where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 *---------------------------------------------------------------------------
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked))
    switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently
     * predictable. Stack access also isn't permitted until restoring the
     * original stack and context. */
    asm volatile (
        "stmfd  sp!, { r4-r11, lr }      \n" /* Stack all non-volatile context on current core */
        "ldr    r2, =idle_stacks         \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr    r2, [r2, r0, lsl #2]     \n"
        "add    r2, r2, %0*4             \n"
        "stmfd  r2!, { sp }              \n" /* save original stack pointer on idle stack */
        "mov    sp, r2                   \n" /* switch stacks */
        "adr    r2, 1f                   \n" /* r2 = new core restart address */
        "str    r2, [r1, #40]            \n" /* thread->context.start = r2 */
        "ldr    pc, =switch_thread       \n" /* r0 = thread after call - see load_context */
    "1:                                  \n"
        "ldr    sp, [r0, #32]            \n" /* Reload original sp from context structure */
        "mov    r1, #0                   \n" /* Clear start address */
        "str    r1, [r0, #40]            \n"
        "ldr    r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
        "mov    lr, pc                   \n"
        "bx     r0                       \n"
        "ldmfd  sp!, { r4-r11, pc }      \n" /* Restore non-volatile context to new core and return */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}
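
/* Flow of the switch, as the asm above implements it: the departing core
 * saves r4-r11/lr on the current stack, pushes the old sp onto the target
 * core's idle stack and adopts that stack, points thread->context.start at
 * label 1, then enters switch_thread. When the thread is next scheduled on
 * the destination core, load_context jumps to label 1 with r0 = thread; the
 * original sp is reloaded, start is cleared, the new core's cache is
 * invalidated, and the saved registers are restored. */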

/** PP-model-specific dual-core code **/

#if CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
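
/* Handshake carried by these flags, as implemented by both variants below:
 * the sleeper sets intend_sleep, commits to sleep only while stay_awake is
 * clear, then clears both flags and spins until intend_wake drops; the
 * waker sets intend_wake and stay_awake, waits for the sleeper to either
 * commit or abort, wakes it if it slept, and finally clears intend_wake. */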

#if 1 /* Select ASM */
/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(unsigned int core)
{
    asm volatile (
        "mov    r0, #1                     \n" /* Signal intent to sleep */
        "strb   r0, [%[sem], #2]           \n"
        "ldrb   r0, [%[sem], #1]           \n" /* && stay_awake == 0? */
        "cmp    r0, #0                     \n"
        "bne    2f                         \n"
        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
         * that the correct alternative is executed. Don't change the order
         * of the next 4 instructions! */
        "tst    pc, #0x0c                  \n"
        "mov    r0, #0xca                  \n"
        "strne  r0, [%[ctl], %[c], lsl #2] \n"
        "streq  r0, [%[ctl], %[c], lsl #2] \n"
        "nop                               \n" /* nops needed because of pipeline */
        "nop                               \n"
        "nop                               \n"
    "2:                                    \n"
        "mov    r0, #0                     \n" /* Clear stay_awake and sleep intent */
        "strb   r0, [%[sem], #1]           \n"
        "strb   r0, [%[sem], #2]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldrb   r0, [%[sem], #0]           \n"
        "cmp    r0, #0                     \n"
        "bne    1b                         \n"
        :
        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
          [ctl]"r"(&CPU_CTL)
        : "r0"
    );
    enable_irq();
}

/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already destined. FIQ, IRQ should be disabled before calling.
 *---------------------------------------------------------------------------
 */
void core_wake(unsigned int othercore)
{
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                \n" /* Disable IRQ */
        "orr    r1, r3, #0x80           \n"
        "msr    cpsr_c, r1              \n"
        "mov    r1, #1                  \n" /* Signal intent to wake other core */
        "orr    r1, r1, r1, lsl #8      \n" /* and set stay_awake */
        "strh   r1, [%[sem], #0]        \n"
        "mov    r2, #0x8000             \n"
    "1:                                 \n" /* If it intends to sleep, let it first */
        "ldrb   r1, [%[sem], #2]        \n" /* intend_sleep != 0 ? */
        "cmp    r1, #1                  \n"
        "ldr    r1, [%[st]]             \n" /* && not sleeping ? */
        "tsteq  r1, r2, lsr %[oc]       \n"
        "beq    1b                      \n" /* Wait for sleep or wake */
        "tst    r1, r2, lsr %[oc]       \n"
        "ldrne  r2, =0xcf004054         \n" /* If sleeping, wake it */
        "movne  r1, #0xce               \n"
        "strne  r1, [r2, %[oc], lsl #2] \n"
        "mov    r1, #0                  \n" /* Done with wake procedure */
        "strb   r1, [%[sem], #0]        \n"
        "msr    cpsr_c, r3              \n" /* Restore IRQ */
        :
        : [sem]"r"(&core_semaphores[othercore]),
          [st]"r"(&PROC_STAT),
          [oc]"r"(othercore)
        : "r1", "r2", "r3"
    );
}

#else /* C version for reference */

static inline void core_sleep(unsigned int core)
{
    /* Signal intent to sleep */
    core_semaphores[core].intend_sleep = 1;

    /* Something waking or other processor intends to wake us? */
    if (core_semaphores[core].stay_awake == 0)
    {
        sleep_core(core);
    }

    /* Signal wake - clear wake flag */
    core_semaphores[core].stay_awake = 0;
    core_semaphores[core].intend_sleep = 0;

    /* Wait for other processor to finish wake procedure */
    while (core_semaphores[core].intend_wake != 0);

    /* Enable IRQ */
    enable_irq();
}

void core_wake(unsigned int othercore)
{
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    core_semaphores[othercore].intend_wake = 1;
    core_semaphores[othercore].stay_awake = 1;

    /* If it intends to sleep, wait until it does or aborts */
    while (core_semaphores[othercore].intend_sleep != 0 &&
           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);

    /* If sleeping, wake it up */
    if (PROC_STAT & PROC_SLEEPING(othercore))
        wake_core(othercore);

    /* Done with wake procedure */
    core_semaphores[othercore].intend_wake = 0;
    restore_irq(oldlevel);
}

#endif /* ASM/C selection */

#elif defined (CPU_PP502x)

#if 1 /* Select ASM */
/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(unsigned int core)
{
    asm volatile (
        "mov    r0, #4                     \n" /* r0 = 0x4 << core */
        "mov    r0, r0, lsl %[c]           \n"
        "str    r0, [%[mbx], #4]           \n" /* signal intent to sleep */
        "ldr    r1, [%[mbx], #0]           \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst    r1, r0, lsl #2             \n"
        "moveq  r1, #0x80000000            \n" /* Then sleep */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "moveq  r1, #0                     \n" /* Clear control reg */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "orr    r1, r0, r0, lsl #2         \n" /* Signal intent to wake - clear wake flag */
        "str    r1, [%[mbx], #8]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldr    r1, [%[mbx], #0]           \n"
        "tst    r1, r0, lsr #2             \n"
        "bne    1b                         \n"
        :
        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
    enable_irq();
}

/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already destined. FIQ, IRQ should be disabled before calling.
 *---------------------------------------------------------------------------
 */
void core_wake(unsigned int othercore)
{
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                    \n" /* Disable IRQ */
        "orr    r1, r3, #0x80               \n"
        "msr    cpsr_c, r1                  \n"
        "mov    r2, #0x11                   \n" /* r2 = (0x11 << othercore) */
        "mov    r2, r2, lsl %[oc]           \n" /* Signal intent to wake othercore */
        "str    r2, [%[mbx], #4]            \n"
    "1:                                     \n" /* If it intends to sleep, let it first */
        "ldr    r1, [%[mbx], #0]            \n" /* (MBX_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor    r1, r1, #0xc                \n"
        "tst    r1, r2, lsr #2              \n"
        "ldr    r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq  r1, #0x80000000             \n"
        "beq    1b                          \n" /* Wait for sleep or wake */
        "tst    r1, #0x80000000             \n" /* If sleeping, wake it */
        "movne  r1, #0x0                    \n"
        "strne  r1, [%[ctl], %[oc], lsl #2] \n"
        "mov    r1, r2, lsr #4              \n"
        "str    r1, [%[mbx], #8]            \n" /* Done with wake procedure */
        "msr    cpsr_c, r3                  \n" /* Restore IRQ */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
}

#else /* C version for reference */

static inline void core_sleep(unsigned int core)
{
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        sleep_core(core);
        wake_core(core);
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));
    enable_irq();
}

void core_wake(unsigned int othercore)
{
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    restore_irq(oldlevel);
}

#endif /* ASM/C selection */
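
/* Mailbox bit assignment used above, shifted by the core number (read off
 * the masks in this file rather than from a datasheet):
 *   0x01 << core : wake procedure in progress (like intend_wake)
 *   0x04 << core : core intends to sleep (like intend_sleep)
 *   0x10 << core : another core requests it stay awake (like stay_awake)
 * MBX_MSG_SET sets bits, MBX_MSG_CLR clears them, MBX_MSG_STAT reads them. */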

#endif /* CPU_PPxxxx */

/* Keep the constant pool in range of the inline ASM above: .ltorg makes the
 * assembler emit the accumulated literals here so "ldr rX, =..." loads stay
 * within their pc-relative addressing range. */
static void __attribute__((naked, used)) dump_ltorg(void)
{
    asm volatile (".ltorg");
}

#endif /* NUM_CORES */