From 4ab4d11d74927f2485b0399664c390607326312d Mon Sep 17 00:00:00 2001 From: NicJA Date: Thu, 7 May 2015 15:46:18 +0000 Subject: [PATCH] wip commit. remove IdleTask pointer (uneeded). fixes to various comments. rework scheduler code to correctly move the running / next task around when appropriate. simplify some parts of the code by using the existing macros/in-lines. git-svn-id: https://svn.aros.org/svn/aros/trunk/AROS@50618 fb15a70f-31f2-0310-bbcc-cdcc74a49acc --- arch/arm-native/exec/platform_init.c | 6 +- arch/arm-native/kernel/kernel_cpu.c | 21 ++-- arch/arm-native/kernel/kernel_scheduler.c | 176 ++++++++++++++++-------------- arch/arm-native/kernel/platform_bcm2708.c | 102 +++++++++-------- arch/arm-native/kernel/tls.h | 4 - 5 files changed, 160 insertions(+), 149 deletions(-) diff --git a/arch/arm-native/exec/platform_init.c b/arch/arm-native/exec/platform_init.c index a8cdd3751a..1be4d8ea4b 100644 --- a/arch/arm-native/exec/platform_init.c +++ b/arch/arm-native/exec/platform_init.c @@ -35,7 +35,7 @@ int Exec_ARMCPUInit(struct ExecBase *SysBase) BootTask = GET_THIS_TASK; - D(bug("[Exec] Exec_ARMCPUInit[%02d]: %s @ 0x%p\n", cpunum, BootTask->tc_Node.ln_Name, BootTask)); + D(bug("[Exec] Exec_ARMCPUInit[%02d]: launched from %s @ 0x%p\n", cpunum, BootTask->tc_Node.ln_Name, BootTask)); if (cpunum == 0) { @@ -55,9 +55,7 @@ int Exec_ARMCPUInit(struct ExecBase *SysBase) if (CPUIdleTask) { - CPUIdleTask->tc_State = TS_WAIT; - TLS_SET(IdleTask, CPUIdleTask); - D(bug("[Exec] Exec_ARMCPUInit[%02d]: %s Task @ 0x%p\n", cpunum, CPUIdleTask->tc_Node.ln_Name, CPUIdleTask)); + D(bug("[Exec] Exec_ARMCPUInit[%02d]: %s Task created @ 0x%p\n", cpunum, CPUIdleTask->tc_Node.ln_Name, CPUIdleTask)); } return TRUE; diff --git a/arch/arm-native/kernel/kernel_cpu.c b/arch/arm-native/kernel/kernel_cpu.c index df461a37d7..12779b6e60 100644 --- a/arch/arm-native/kernel/kernel_cpu.c +++ b/arch/arm-native/kernel/kernel_cpu.c @@ -28,6 +28,7 @@ #include "kernel_intr.h" #define D(x) +#define DSCHED(x) #define DREGS(x) uint32_t __arm_affinitymask __attribute__((section(".data"))) = 1; @@ -250,13 +251,15 @@ void cpu_Switch(regs_t *regs) struct Task *task; UQUAD timeCur; struct timeval timeVal; - - D(bug("[Kernel] cpu_Switch()\n")); +#if defined(__AROSEXEC_SMP__) || defined(DEBUG) + int cpunum = GetCPUNumber(); +#endif + + DSCHED(bug("[Kernel] cpu_Switch(%02d)\n", cpunum)); task = GET_THIS_TASK; - /* Copy current task's context into the ETask structure */ - /* Restore the task's state */ + /* Cache running task's context */ STORE_TASKSTATE(task, regs) if (__arm_arosintern.ARMI_GetTime) @@ -280,7 +283,7 @@ void cpu_Dispatch(regs_t *regs) struct Task *task; - D(bug("[Kernel] cpu_Dispatch(%02d)\n", cpunum)); + DSCHED(bug("[Kernel] cpu_Dispatch(%02d)\n", cpunum)); /* Break Disable() if needed */ if (SysBase->IDNestCnt >= 0) { @@ -288,12 +291,13 @@ void cpu_Dispatch(regs_t *regs) ((uint32_t *)regs)[13] &= ~0x80; } - if (!(task = core_Dispatch())) + while (!(task = core_Dispatch())) { - task = TLS_GET(IdleTask); + DSCHED(bug("[Kernel] cpu_Dispatch[%02d]: Nothing to run - idling\n", cpunum)); + asm volatile("wfi"); } - D(bug("[Kernel] cpu_Dispatch[%02d]: 0x%p [R ] '%s'\n", cpunum, task, task->tc_Node.ln_Name)); + DSCHED(bug("[Kernel] cpu_Dispatch[%02d]: 0x%p [R ] '%s'\n", cpunum, task, task->tc_Node.ln_Name)); /* Restore the task's state */ RESTORE_TASKSTATE(task, regs) @@ -324,6 +328,7 @@ void cpu_Dispatch(regs_t *regs) AROS_UFC1(void, task->tc_Launch, AROS_UFCA(struct ExecBase *, SysBase, A6)); } + /* Leave interrupt and jump 
to the new task */ } void cpu_DumpRegs(regs_t *regs) diff --git a/arch/arm-native/kernel/kernel_scheduler.c b/arch/arm-native/kernel/kernel_scheduler.c index e742203a24..377bdc3c43 100644 --- a/arch/arm-native/kernel/kernel_scheduler.c +++ b/arch/arm-native/kernel/kernel_scheduler.c @@ -14,6 +14,8 @@ #include #include +#include "kernel_cpu.h" + #include #include @@ -22,45 +24,58 @@ #include "exec_intern.h" -#define D(x) +#define DSCHED(x) -/* - * Schedule the currently running task away. Put it into the TaskReady list - * in some smart way. This function is subject of change and it will be probably replaced - * by some plugin system in the future - */ +/* Check if the currently running task on this cpu should be rescheduled.. */ BOOL core_Schedule(void) { struct Task *task = GET_THIS_TASK; + BOOL corereschedule = TRUE; - D(bug("[KRN:BCM2708] core_Schedule()\n")); + DSCHED(bug("[KRN:BCM2708] core_Schedule()\n")); SysBase->AttnResched &= ~ARF_AttnSwitch; /* If task has pending exception, reschedule it so that the dispatcher may handle the exception */ if (!(task->tc_Flags & TF_EXCEPT)) { - BYTE pri; - +#if defined(__AROSEXEC_SMP__) + KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, + SPINLOCK_MODE_READ); +#endif /* Is the TaskReady empty? If yes, then the running task is the only one. Let it work */ if (IsListEmpty(&SysBase->TaskReady)) - return FALSE; - + corereschedule = FALSE; + else + { + struct Task *nexttask; #if defined(__AROSEXEC_SMP__) - KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, SPINLOCK_MODE_READ); + uint32_t cpumask; + uint32_t tmp; + + asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp)); + cpumask = (1 << (tmp & 3)); #endif - /* Does the TaskReady list contains tasks with priority equal or lower than current task? - * If so, then check further... */ - pri = ((struct Task*)GetHead(&SysBase->TaskReady))->tc_Node.ln_Pri; - if (pri <= task->tc_Node.ln_Pri) - { - /* If the running task did not used it's whole quantum yet, let it work */ - if (!(SysBase->SysFlags & SFF_QuantumOver)) + /* + If there are tasks ready for this cpu that have equal or lower priority, + and the current task has used its alloted time - reschedule so they can run + */ + for (nexttask = (struct Task *)GetHead(&SysBase->TaskReady); nexttask != NULL; nexttask = (struct Task *)GetSucc(nexttask)) { #if defined(__AROSEXEC_SMP__) - KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock); + if ((GetIntETask(nexttask)->iet_CpuAffinity & cpumask) == cpumask) + { +#endif + if (nexttask->tc_Node.ln_Pri <= task->tc_Node.ln_Pri) + { + /* If the running task did not used it's whole quantum yet, let it work */ + if (!(SysBase->SysFlags & SFF_QuantumOver)) + corereschedule = FALSE; + } + break; +#if defined(__AROSEXEC_SMP__) + } #endif - return FALSE; } } #if defined(__AROSEXEC_SMP__) @@ -68,59 +83,54 @@ BOOL core_Schedule(void) #endif } - /* - * If we got here, then the rescheduling is necessary. - * Put the task into the TaskReady list. 
- */ - D(bug("[KRN:BCM2708] Setting task 0x%p (%s) to READY\n", task, task->tc_Node.ln_Name)); -#if defined(__AROSEXEC_SMP__) - KrnSpinLock(&PrivExecBase(SysBase)->TaskRunningSpinLock, SPINLOCK_MODE_WRITE); - Remove(&task->tc_Node); - KrnSpinUnLock(&PrivExecBase(SysBase)->TaskRunningSpinLock); -#endif - task->tc_State = TS_READY; -#if defined(__AROSEXEC_SMP__) - KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, SPINLOCK_MODE_WRITE); -#endif - Enqueue(&SysBase->TaskReady, &task->tc_Node); -#if defined(__AROSEXEC_SMP__) - KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock); -#endif + DSCHED + ( + if (corereschedule) + bug("[KRN:BCM2708] Setting task 0x%p (%s) to READY\n", task, task->tc_Node.ln_Name); + ) - /* Select new task to run */ - return TRUE; + return corereschedule; } -/* Actually switch away from the task */ +/* Switch the currently running task on this cpu to ready state */ void core_Switch(void) { struct Task *task = GET_THIS_TASK; - D(bug("[KRN:BCM2708] core_Switch(): Old task = %p (%s)\n", task, task->tc_Node.ln_Name)); + DSCHED(bug("[KRN:BCM2708] core_Switch(): Old task = %p (%s)\n", task, task->tc_Node.ln_Name)); - if (task->tc_SPReg <= task->tc_SPLower || task->tc_SPReg > task->tc_SPUpper) - { - bug("[KRN:BCM2708] Task %s went out of stack limits\n", task->tc_Node.ln_Name); - bug("[KRN:BCM2708] Lower %p, upper %p, SP %p\n", task->tc_SPLower, task->tc_SPUpper, task->tc_SPReg); - /* - * Suspend the task to stop it from causing more harm. In some rare cases, if the task is holding - * lock on some global/library semaphore it will most likelly mean immenent freeze. In most cases - * however, user will be shown an alert. - */ #if defined(__AROSEXEC_SMP__) - KrnSpinLock(&PrivExecBase(SysBase)->TaskRunningSpinLock, SPINLOCK_MODE_WRITE); + KrnSpinLock(&PrivExecBase(SysBase)->TaskRunningSpinLock, + SPINLOCK_MODE_WRITE); #endif - Remove(&task->tc_Node); + Remove(&task->tc_Node); #if defined(__AROSEXEC_SMP__) - KrnSpinUnLock(&PrivExecBase(SysBase)->TaskRunningSpinLock); + KrnSpinUnLock(&PrivExecBase(SysBase)->TaskRunningSpinLock); #endif + /* if the current task has gone out of stack bounds, suspend it to prevent further damage to the system */ + if (task->tc_SPReg <= task->tc_SPLower || task->tc_SPReg > task->tc_SPUpper) + { + bug("[KRN:BCM2708] Task %s went out of stack limits\n", task->tc_Node.ln_Name); + bug("[KRN:BCM2708] Lower %p, upper %p, SP %p\n", task->tc_SPLower, task->tc_SPUpper, task->tc_SPReg); + task->tc_SigWait = 0; task->tc_State = TS_WAIT; +#if defined(__AROSEXEC_SMP__) + KrnSpinLock(&PrivExecBase(SysBase)->TaskWaitSpinLock, + SPINLOCK_MODE_WRITE); +#endif Enqueue(&SysBase->TaskWait, &task->tc_Node); +#if defined(__AROSEXEC_SMP__) + KrnSpinUnLock(&PrivExecBase(SysBase)->TaskWaitSpinLock); +#endif Alert(AN_StackProbe); } + else + { + task->tc_State = TS_READY; + } task->tc_IDNestCnt = SysBase->IDNestCnt; @@ -128,44 +138,52 @@ void core_Switch(void) AROS_UFC1NR(void, task->tc_Switch, AROS_UFCA(struct ExecBase *, SysBase, A6)); } -/* - * Task dispatcher. 
Basically it may be the same one no matter - * what scheduling algorithm is used (except SysBase->Elapsed reloading) - */ +/* Dispatch a "new" ready task on this cpu */ struct Task *core_Dispatch(void) { - struct Task *task; + struct Task *newtask; + struct Task *task = GET_THIS_TASK; +#if defined(__AROSEXEC_SMP__) uint32_t cpumask; - uint32_t tmp; - - D(bug("[KRN:BCM2708] core_Dispatch()\n")); + int cpunum = GetCPUNumber(); +#endif - asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp)); - cpumask = (1 << (tmp & 3)); + DSCHED(bug("[KRN:BCM2708] core_Dispatch()\n")); #if defined(__AROSEXEC_SMP__) - KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, SPINLOCK_MODE_WRITE); + cpumask = (1 << cpunum); + + KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, + SPINLOCK_MODE_WRITE); #endif - for (task = (struct Task *)GetHead(&SysBase->TaskReady); task != NULL; task = (struct Task *)GetSucc(task)) + for (newtask = (struct Task *)GetHead(&SysBase->TaskReady); newtask != NULL; newtask = (struct Task *)GetSucc(newtask)) { #if defined(__AROSEXEC_SMP__) - if ((GetIntETask(task)->iet_CpuAffinity & cpumask) == cpumask) + if ((GetIntETask(newtask)->iet_CpuAffinity & cpumask) == cpumask) { #endif - Remove(&task->tc_Node); + Remove(&newtask->tc_Node); break; #if defined(__AROSEXEC_SMP__) } #endif } + + if ((newtask == NULL) && (task != NULL)) + newtask = task; + + if ((task != NULL) && (task->tc_State == TS_READY) && (newtask != task)) + { + Enqueue(&SysBase->TaskReady, &task->tc_Node); + } #if defined(__AROSEXEC_SMP__) KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock); #endif - if (!task) + if (!newtask) { /* Is the list of ready tasks empty? Well, go idle. */ - D(bug("[KRN:BCM2708] No ready tasks, entering sleep mode\n")); + DSCHED(bug("[KRN:BCM2708] No ready tasks, entering sleep mode\n")); /* * Idle counter is incremented every time when we enter here, @@ -178,26 +196,22 @@ struct Task *core_Dispatch(void) } SysBase->DispCount++; - SysBase->IDNestCnt = task->tc_IDNestCnt; - SET_THIS_TASK(task); + SysBase->IDNestCnt = newtask->tc_IDNestCnt; + SET_THIS_TASK(newtask); SysBase->Elapsed = SysBase->Quantum; SysBase->SysFlags &= ~SFF_QuantumOver; - task->tc_State = TS_RUN; + newtask->tc_State = TS_RUN; - D(bug("[KRN:BCM2708] New task = %p (%s)\n", task, task->tc_Node.ln_Name)); + DSCHED(bug("[KRN:BCM2708] New task = %p (%s)\n", newtask, newtask->tc_Node.ln_Name)); /* Check the stack of the task we are about to launch. 
*/ - if (task->tc_SPReg <= task->tc_SPLower || task->tc_SPReg > task->tc_SPUpper) + if (newtask->tc_SPReg <= newtask->tc_SPLower || newtask->tc_SPReg > newtask->tc_SPUpper) { /* Don't let the task run, switch it away (raising Alert) and dispatch another task */ core_Switch(); return core_Dispatch(); } - if (task->tc_Flags & TF_LAUNCH) - AROS_UFC1NR(void, task->tc_Launch, AROS_UFCA(struct ExecBase *, SysBase, A6)); - - /* Leave interrupt and jump to the new task */ - return task; + return newtask; } diff --git a/arch/arm-native/kernel/platform_bcm2708.c b/arch/arm-native/kernel/platform_bcm2708.c index 9a65348b20..09d26b8682 100644 --- a/arch/arm-native/kernel/platform_bcm2708.c +++ b/arch/arm-native/kernel/platform_bcm2708.c @@ -74,9 +74,9 @@ static void bcm2708_init(APTR _kernelBase, APTR _sysBase) void *trampoline_dst = (void *)BOOTMEMADDR(bm_mctrampoline); uint32_t trampoline_length = (uintptr_t)&mpcore_end - (uintptr_t)mpcore_trampoline; uint32_t trampoline_data_offset = (uintptr_t)&mpcore_pde - (uintptr_t)mpcore_trampoline; - int core; - uint32_t *core_stack; - uint32_t *core_fiq_stack; + int cpu; + uint32_t *cpu_stack; + uint32_t *cpu_fiq_stack; uint32_t tmp; tls_t *__tls; @@ -91,37 +91,37 @@ static void bcm2708_init(APTR _kernelBase, APTR _sysBase) ((uint32_t *)(trampoline_dst + trampoline_data_offset))[0] = tmp; // pde ((uint32_t *)(trampoline_dst + trampoline_data_offset))[1] = (uint32_t)cpu_Register; - for (core = 1; core < 4; core ++) + for (cpu = 1; cpu < 4; cpu ++) { - core_stack = (uint32_t *)AllocMem(AROS_STACKSIZE*sizeof(uint32_t), MEMF_CLEAR); /* MEMF_PRIVATE */ - ((uint32_t *)(trampoline_dst + trampoline_data_offset))[2] = (uint32_t)&core_stack[AROS_STACKSIZE-sizeof(IPTR)]; + cpu_stack = (uint32_t *)AllocMem(AROS_STACKSIZE*sizeof(uint32_t), MEMF_CLEAR); /* MEMF_PRIVATE */ + ((uint32_t *)(trampoline_dst + trampoline_data_offset))[2] = (uint32_t)&cpu_stack[AROS_STACKSIZE-sizeof(IPTR)]; - core_fiq_stack = (uint32_t *)AllocMem(1024*sizeof(uint32_t), MEMF_CLEAR); /* MEMF_PRIVATE */ - ((uint32_t *)(trampoline_dst + trampoline_data_offset))[4] = (uint32_t)&core_fiq_stack[1024-sizeof(IPTR)]; + cpu_fiq_stack = (uint32_t *)AllocMem(1024*sizeof(uint32_t), MEMF_CLEAR); /* MEMF_PRIVATE */ + ((uint32_t *)(trampoline_dst + trampoline_data_offset))[4] = (uint32_t)&cpu_fiq_stack[1024-sizeof(IPTR)]; - __tls = (tls_t *)AllocMem(sizeof(tls_t), MEMF_CLEAR); /* MEMF_PRIVATE */ + __tls = (tls_t *)AllocMem(sizeof(tls_t) + sizeof(struct cpu_ipidata), MEMF_CLEAR); /* MEMF_PRIVATE */ __tls->SysBase = _sysBase; __tls->KernelBase = _kernelBase; __tls->ThisTask = NULL; arm_flush_cache(((uint32_t)__tls) & ~63, 512); ((uint32_t *)(trampoline_dst + trampoline_data_offset))[3] = (uint32_t)__tls; - D(bug("[KRN:BCM2708] %s: Attempting to wake core #%d\n", __PRETTY_FUNCTION__, core)); - D(bug("[KRN:BCM2708] %s: core #%d stack @ 0x%p (sp=0x%p)\n", __PRETTY_FUNCTION__, core, core_stack, ((uint32_t *)(trampoline_dst + trampoline_data_offset))[2])); - D(bug("[KRN:BCM2708] %s: core #%d fiq stack @ 0x%p (sp=0x%p)\n", __PRETTY_FUNCTION__, core, core_fiq_stack, ((uint32_t *)(trampoline_dst + trampoline_data_offset))[4])); - D(bug("[KRN:BCM2708] %s: core #%d tls @ 0x%p\n", __PRETTY_FUNCTION__, core, ((uint32_t *)(trampoline_dst + trampoline_data_offset))[3])); + D(bug("[KRN:BCM2708] %s: Attempting to wake CPU #%d\n", __PRETTY_FUNCTION__, cpu)); + D(bug("[KRN:BCM2708] %s: CPU #%d Stack @ 0x%p (sp=0x%p)\n", __PRETTY_FUNCTION__, cpu, cpu_stack, ((uint32_t *)(trampoline_dst + trampoline_data_offset))[2])); + 
D(bug("[KRN:BCM2708] %s: CPU #%d FIQ Stack @ 0x%p (sp=0x%p)\n", __PRETTY_FUNCTION__, cpu, cpu_fiq_stack, ((uint32_t *)(trampoline_dst + trampoline_data_offset))[4])); + D(bug("[KRN:BCM2708] %s: CPU #%d TLS @ 0x%p\n", __PRETTY_FUNCTION__, cpu, ((uint32_t *)(trampoline_dst + trampoline_data_offset))[3])); arm_flush_cache((uint32_t)trampoline_dst, 512); /* Lock the startup spinlock */ KrnSpinLock(&startup_lock, SPINLOCK_MODE_WRITE); - /* Wake up the core */ - *((uint32_t *)(BCM2836_MAILBOX3_SET0 + (0x10 * core))) = (uint32_t)trampoline_dst; + /* Wake up the cpu */ + *((uint32_t *)(BCM2836_MAILBOX3_SET0 + (0x10 * cpu))) = (uint32_t)trampoline_dst; /* * Try to obtain spinlock again. - * This should put this core to sleep since the locked was already obtained. Once the core startup + * This should put this cpu to sleep since the locked was already obtained. Once the cpu startup * is ready, it will call KrnSpinUnLock too */ KrnSpinLock(&startup_lock, SPINLOCK_MODE_WRITE); @@ -130,31 +130,29 @@ static void bcm2708_init(APTR _kernelBase, APTR _sysBase) } } -static void bcm2708_init_core(APTR _kernelBase, APTR _sysBase) +static void bcm2708_init_cpu(APTR _kernelBase, APTR _sysBase) { struct ExecBase *SysBase = (struct ExecBase *)_sysBase; struct KernelBase *KernelBase = (struct KernelBase *)_kernelBase; - struct cpu_ipidata *core_ipidata; - uint32_t tmp; - - asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp)); +#if defined(__AROSEXEC_SMP__) + tls_t *__tls = TLS_PTR_GET(); +#endif + int cpunum = GetCPUNumber(); - D(bug("[KRN:BCM2708] %s(%d)\n", __PRETTY_FUNCTION__, (tmp & 0x3))); + D(bug("[KRN:BCM2708] %s(%d)\n", __PRETTY_FUNCTION__, cpunum)); /* Clear all pending FIQ sources on mailboxes */ - *((uint32_t *)(BCM2836_MAILBOX0_CLR0 + (16 * (tmp & 0x3)))) = 0xffffffff; - *((uint32_t *)(BCM2836_MAILBOX1_CLR0 + (16 * (tmp & 0x3)))) = 0xffffffff; - *((uint32_t *)(BCM2836_MAILBOX2_CLR0 + (16 * (tmp & 0x3)))) = 0xffffffff; - *((uint32_t *)(BCM2836_MAILBOX3_CLR0 + (16 * (tmp & 0x3)))) = 0xffffffff; + *((uint32_t *)(BCM2836_MAILBOX0_CLR0 + (16 * cpunum))) = 0xffffffff; + *((uint32_t *)(BCM2836_MAILBOX1_CLR0 + (16 * cpunum))) = 0xffffffff; + *((uint32_t *)(BCM2836_MAILBOX2_CLR0 + (16 * cpunum))) = 0xffffffff; + *((uint32_t *)(BCM2836_MAILBOX3_CLR0 + (16 * cpunum))) = 0xffffffff; #if defined(__AROSEXEC_SMP__) - if ((core_ipidata = (struct cpu_ipidata *)AllocMem(sizeof(struct cpu_ipidata), MEMF_CLEAR)) != NULL) - { - bcm2708_cpuipid[(tmp & 0x3)] = core_ipidata; + bcm2708_cpuipid[cpunum] = (unsigned int)__tls + sizeof(tls_t); + D(bug("[KRN:BCM2708] %s: cpu #%d IPI data @ 0x%p\n", __PRETTY_FUNCTION__, cpunum, bcm2708_cpuipid[cpunum])); - // enable FIQ mailbox interupt - *((uint32_t *)(BCM2836_MAILBOX_INT_CTRL0 + (0x4 * (tmp & 0x3)))) = 0x10; - } + // enable FIQ mailbox interupt + *((uint32_t *)(BCM2836_MAILBOX_INT_CTRL0 + (0x4 * cpunum))) = 0x10; #endif } @@ -175,15 +173,16 @@ static void bcm2807_irq_init(void) static void bcm2807_send_ipi(uint32_t ipi, uint32_t ipi_data, uint32_t cpumask) { - int i = 0; - for (i = 0; i < 4; i++) + int cpu, mbno = 0; + + for (cpu = 0; cpu < 4; cpu++) { #if defined(__AROSEXEC_SMP__) - if ((cpumask & (1 << i)) && bcm2708_cpuipid[i]) + if ((cpumask & (1 << cpu)) && bcm2708_cpuipid[cpu]) { /* TODO: check which mailbox is available and use it */ - bcm2708_cpuipid[i]->ipi_data[0] = ipi_data; - *((uint32_t *)(BCM2836_MAILBOX0_SET0 + (0x10 * i))) = ipi; + bcm2708_cpuipid[cpu]->ipi_data[mbno] = ipi_data; + *((uint32_t *)(BCM2836_MAILBOX0_SET0 + (0x10 * cpu))) = ipi; } #endif } @@ 
-274,30 +273,29 @@ static void bcm2807_irq_process() static void bcm2807_fiq_process() { - uint32_t tmp, fiq, fiq_data; - int i; - - asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp)); + int cpunum = GetCPUNumber(); + uint32_t fiq, fiq_data; + int mbno; - DFIQ(bug("[KRN:BCM2708] %s(%d)\n", __PRETTY_FUNCTION__, (tmp & 0x3))); + DFIQ(bug("[KRN:BCM2708] %s(%d)\n", __PRETTY_FUNCTION__, cpunum)); - fiq = *((uint32_t *)(BCM2836_FIQ_PEND0 + (0x4 * (tmp & 0x3)))); + fiq = *((uint32_t *)(BCM2836_FIQ_PEND0 + (0x4 * cpunum))); - DFIQ(bug("[KRN:BCM2708] %s: Core #%d FIQ %x\n", __PRETTY_FUNCTION__, (tmp & 0x3), fiq)); + DFIQ(bug("[KRN:BCM2708] %s: CPU #%d FIQ %x\n", __PRETTY_FUNCTION__, cpunum, fiq)); if (fiq) { - for (i=0; i < 4; i++) + for (mbno=0; mbno < 4; mbno++) { - if (fiq & (0x10 << i)) + if (fiq & (0x10 << mbno)) { - fiq_data = *((uint32_t *)(BCM2836_MAILBOX0_CLR0 + 4*i + (16 * (tmp & 0x3)))); - DFIQ(bug("[KRN:BCM2708] %s: Mailbox%d: FIQ Data %08x\n", __PRETTY_FUNCTION__, i, fiq_data)); + fiq_data = *((uint32_t *)(BCM2836_MAILBOX0_CLR0 + 4 * mbno + (16 * cpunum))); + DFIQ(bug("[KRN:BCM2708] %s: Mailbox%d: FIQ Data %08x\n", __PRETTY_FUNCTION__, mbno, fiq_data)); #if defined(__AROSEXEC_SMP__) - if (bcm2708_cpuipid[(tmp & 0x3)]) - handle_ipi(fiq_data, bcm2708_cpuipid[(tmp & 0x3)]->ipi_data[0]); + if (bcm2708_cpuipid[cpunum]) + handle_ipi(fiq_data, bcm2708_cpuipid[cpunum]->ipi_data[0]); #endif - *((uint32_t *)(BCM2836_MAILBOX0_CLR0 + 4*i + (16 * (tmp & 0x3)))) = 0xffffffff; + *((uint32_t *)(BCM2836_MAILBOX0_CLR0 + 4 * mbno + (16 * cpunum))) = 0xffffffff; } } } @@ -440,7 +438,7 @@ static IPTR bcm2708_probe(struct ARM_Implementation *krnARMImpl, struct TagItem { /* bcm2836 uses armv7 */ krnARMImpl->ARMI_PeripheralBase = (APTR)BCM2836_PERIPHYSBASE; - krnARMImpl->ARMI_InitCore = &bcm2708_init_core; + krnARMImpl->ARMI_InitCore = &bcm2708_init_cpu; krnARMImpl->ARMI_FIQProcess = &bcm2807_fiq_process; krnARMImpl->ARMI_SendIPI = &bcm2807_send_ipi; } diff --git a/arch/arm-native/kernel/tls.h b/arch/arm-native/kernel/tls.h index 1c016db6be..e409a93954 100644 --- a/arch/arm-native/kernel/tls.h +++ b/arch/arm-native/kernel/tls.h @@ -6,10 +6,6 @@ typedef struct tls struct ExecBase *SysBase; void * *KernelBase; /* Base of kernel.resource */ struct Task *ThisTask; /* Currently running task on this core */ -#if (1) - // TODO: to be removed since it isnt needed .. - struct Task *IdleTask; -#endif } tls_t; #define TLS_OFFSET(name) ((char *)&(((tls_t *)0)->name)-(char *)0) -- 2.11.4.GIT
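
Illustrative sketch (not part of the patch): the reworked core_Schedule() above now scans SysBase->TaskReady (under TaskReadySpinLock in the __AROSEXEC_SMP__ build) for the first ready task whose iet_CpuAffinity matches the current CPU, and only keeps the running task when that candidate does not outrank it and the quantum has not expired; if nothing on the list is eligible for this CPU, core_Dispatch() simply re-selects the running task. The standalone C program below models that decision so it can be compiled and run anywhere. All names here (mock_task, should_reschedule, cpu_affinity, quantum_over) are illustrative stand-ins, not AROS APIs; the real code uses struct Task, GetIntETask()->iet_CpuAffinity, SysBase->SysFlags & SFF_QuantumOver, and reads the CPU number from MPIDR.

    /* Model of the rescheduling decision in the reworked core_Schedule(). */
    #include <stdbool.h>
    #include <stdio.h>

    struct mock_task {
        const char       *name;
        signed char       pri;          /* stands in for tc_Node.ln_Pri       */
        unsigned int      cpu_affinity; /* stands in for iet_CpuAffinity      */
        struct mock_task *next;         /* ready list, highest priority first */
    };

    static bool should_reschedule(const struct mock_task *running,
                                  const struct mock_task *ready_head,
                                  int cpunum, bool quantum_over)
    {
        /* The real code derives cpunum/cpumask from MPIDR:
           mrc p15, 0, rX, c0, c0, 5 (low two bits on BCM2836). */
        unsigned int cpumask = 1u << cpunum;
        bool reschedule = true;
        const struct mock_task *t;

        /* Empty ready list: the running task is the only one, let it work. */
        if (ready_head == NULL)
            return false;

        for (t = ready_head; t != NULL; t = t->next)
        {
            /* Skip tasks this CPU is not allowed to run. */
            if ((t->cpu_affinity & cpumask) != cpumask)
                continue;

            /*
             * First eligible candidate (the list is priority-sorted): keep the
             * running task only if the candidate does not outrank it and the
             * running task's quantum has not expired yet.
             */
            if (t->pri <= running->pri && !quantum_over)
                reschedule = false;
            break;
        }

        /*
         * If no ready task matched this CPU's mask, 'reschedule' stays true;
         * in the patch, core_Dispatch() then re-selects the running task.
         */
        return reschedule;
    }

    int main(void)
    {
        /* Ready list: "shell" (pri 0, CPU 0 only) -> "idle" (pri -120, any CPU). */
        struct mock_task idle  = { "idle",  -120, 0xF, NULL  };
        struct mock_task shell = { "shell",    0, 0x1, &idle };
        struct mock_task work  = { "work",     0, 0xF, NULL  };  /* currently running */

        printf("%d\n", should_reschedule(&work, &shell, 0, true));   /* 1: equal priority, quantum over */
        printf("%d\n", should_reschedule(&work, &shell, 1, true));   /* 1: lower-priority idle may run  */
        printf("%d\n", should_reschedule(&work, &shell, 1, false));  /* 0: quantum left, keep running   */
        return 0;
    }

The idle-task removal in exec/platform_init.c and tls.h is what makes the wfi loop in cpu_Dispatch() necessary: with no per-CPU idle task to fall back on, the dispatcher now waits for an interrupt and retries core_Dispatch() until a ready task appears.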
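
Illustrative sketch (not part of the patch): platform_bcm2708.c now carves each CPU's IPI data out of the same MEMF_CLEAR allocation as its TLS block (bcm2708_cpuipid[cpunum] = __tls + sizeof(tls_t)), and bcm2807_send_ipi() stores the payload there before poking BCM2836_MAILBOX0_SET0 + 0x10 * cpu. The sketch below models that layout and the send path with calloc and a printf standing in for AllocMem and the MMIO write; the tls_t fields match tls.h above, while the size of ipi_data[] and the helper names (init_cpu_storage, send_ipi, cpu_ipid) are assumptions for illustration only.

    /* Model of the per-CPU TLS + IPI-data layout and the IPI send path. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct tls {
        void *SysBase;
        void *KernelBase;
        void *ThisTask;
    } tls_t;

    struct cpu_ipidata {
        unsigned int ipi_data[4];   /* one slot per mailbox (size assumed) */
    };

    #define MAX_CPUS 4

    static tls_t              *cpu_tls[MAX_CPUS];   /* handed to the woken core in the real code */
    static struct cpu_ipidata *cpu_ipid[MAX_CPUS];  /* plays the role of bcm2708_cpuipid[]       */

    static int init_cpu_storage(int cpu)
    {
        /* Single zeroed allocation: TLS block followed by the IPI data block. */
        tls_t *tls = calloc(1, sizeof(tls_t) + sizeof(struct cpu_ipidata));
        if (tls == NULL)
            return -1;

        cpu_tls[cpu]  = tls;
        cpu_ipid[cpu] = (struct cpu_ipidata *)((char *)tls + sizeof(tls_t));
        return 0;
    }

    /*
     * Stand-in for bcm2807_send_ipi(): stash the payload in the target CPU's
     * IPI data slot, then (on real hardware) write the IPI number to
     * BCM2836_MAILBOX0_SET0 + 0x10 * cpu to raise the FIQ. The MMIO write is
     * replaced by a printf here.
     */
    static void send_ipi(unsigned int ipi, unsigned int ipi_data, unsigned int cpumask)
    {
        int cpu, mbno = 0;   /* patch TODO: pick an available mailbox instead of 0 */

        for (cpu = 0; cpu < MAX_CPUS; cpu++)
        {
            if ((cpumask & (1u << cpu)) && cpu_ipid[cpu])
            {
                cpu_ipid[cpu]->ipi_data[mbno] = ipi_data;
                printf("mailbox0 of cpu %d <- ipi %u (data %#x)\n", cpu, ipi, ipi_data);
            }
        }
    }

    int main(void)
    {
        for (int cpu = 0; cpu < MAX_CPUS; cpu++)
            if (init_cpu_storage(cpu) != 0)
                return 1;

        send_ipi(1, 0xdeadbeef, 0x6);   /* signal CPUs 1 and 2 */
        return 0;
    }

On the receive side, bcm2807_fiq_process() reads BCM2836_FIQ_PEND0 for the local CPU, picks up the payload from the same cpu_ipidata block, hands it to handle_ipi(), and clears the mailbox, which is why the per-CPU pointer must be set up in bcm2708_init_cpu() before the FIQ mailbox interrupt is enabled.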