/*
    Copyright © 2015, The AROS Development Team. All rights reserved.
*/
7 #include <exec/alerts.h>
8 #include <exec/execbase.h>
9 #include <exec/lists.h>
10 #include <proto/exec.h>
11 #include <proto/kernel.h>
13 //#include <kernel_base.h>
14 #include <kernel_debug.h>
15 #include <kernel_scheduler.h>
17 #include "kernel_cpu.h"
19 #include <exec_platform.h>
21 #include <aros/types/spinlock_s.h>
25 #include "exec_intern.h"
29 /* Check if the currently running task on this cpu should be rescheduled.. */
30 BOOL
core_Schedule(void)
32 struct Task
*task
= GET_THIS_TASK
;
33 BOOL corereschedule
= TRUE
;
35 DSCHED(bug("[KRN:BCM2708] core_Schedule()\n"));
37 SysBase
->AttnResched
&= ~ARF_AttnSwitch
;
39 /* If task has pending exception, reschedule it so that the dispatcher may handle the exception */
40 if (!(task
->tc_Flags
& TF_EXCEPT
))
42 #if defined(__AROSEXEC_SMP__)
43 KrnSpinLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
, NULL
,
46 /* Is the TaskReady empty? If yes, then the running task is the only one. Let it work */
47 if (IsListEmpty(&SysBase
->TaskReady
))
48 corereschedule
= FALSE
;
51 struct Task
*nexttask
;
52 #if defined(__AROSEXEC_SMP__)
53 int cpunum
= GetCPUNumber();
54 uint32_t cpumask
= (1 << cpunum
);
57 If there are tasks ready for this cpu that have equal or lower priority,
58 and the current task has used its alloted time - reschedule so they can run
60 for (nexttask
= (struct Task
*)GetHead(&SysBase
->TaskReady
); nexttask
!= NULL
; nexttask
= (struct Task
*)GetSucc(nexttask
))
62 #if defined(__AROSEXEC_SMP__)
63 if ((GetIntETask(nexttask
)->iet_CpuAffinity
& cpumask
) == cpumask
)
66 if (nexttask
->tc_Node
.ln_Pri
<= task
->tc_Node
.ln_Pri
)
68 /* If the running task did not used it's whole quantum yet, let it work */
69 if (!(SysBase
->SysFlags
& SFF_QuantumOver
))
70 corereschedule
= FALSE
;
73 #if defined(__AROSEXEC_SMP__)
78 #if defined(__AROSEXEC_SMP__)
79 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
86 bug("[KRN:BCM2708] Setting task 0x%p (%s) to READY\n", task
, task
->tc_Node
.ln_Name
);
89 return corereschedule
;
92 /* Switch the currently running task on this cpu to ready state */
93 void core_Switch(void)
95 struct Task
*task
= GET_THIS_TASK
;
97 DSCHED(bug("[KRN:BCM2708] core_Switch(): Old task = %p (%s)\n", task
, task
->tc_Node
.ln_Name
));
99 if (task
->tc_State
== TS_RUN
)
101 #if defined(__AROSEXEC_SMP__)
102 KrnSpinLock(&PrivExecBase(SysBase
)->TaskRunningSpinLock
, NULL
,
103 SPINLOCK_MODE_WRITE
);
105 Remove(&task
->tc_Node
);
106 #if defined(__AROSEXEC_SMP__)
107 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskRunningSpinLock
);
109 task
->tc_State
= TS_READY
;
112 /* if the current task has gone out of stack bounds, suspend it to prevent further damage to the system */
113 if (task
->tc_SPReg
<= task
->tc_SPLower
|| task
->tc_SPReg
> task
->tc_SPUpper
)
115 bug("[KRN:BCM2708] Task %s went out of stack limits\n", task
->tc_Node
.ln_Name
);
116 bug("[KRN:BCM2708] Lower %p, upper %p, SP %p\n", task
->tc_SPLower
, task
->tc_SPUpper
, task
->tc_SPReg
);
118 task
->tc_SigWait
= 0;
119 task
->tc_State
= TS_WAIT
;
120 #if defined(__AROSEXEC_SMP__)
121 KrnSpinLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
, NULL
,
122 SPINLOCK_MODE_WRITE
);
124 Enqueue(&SysBase
->TaskWait
, &task
->tc_Node
);
125 #if defined(__AROSEXEC_SMP__)
126 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
);
129 Alert(AN_StackProbe
);
132 task
->tc_IDNestCnt
= SysBase
->IDNestCnt
;
134 if (task
->tc_Flags
& TF_SWITCH
)
135 AROS_UFC1NR(void, task
->tc_Switch
, AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
138 /* Dispatch a "new" ready task on this cpu */
139 struct Task
*core_Dispatch(void)
141 struct Task
*newtask
;
142 struct Task
*task
= GET_THIS_TASK
;
143 #if defined(__AROSEXEC_SMP__)
144 int cpunum
= GetCPUNumber();
145 uint32_t cpumask
= (1 << cpunum
);
148 DSCHED(bug("[KRN:BCM2708] core_Dispatch()\n"));
150 #if defined(__AROSEXEC_SMP__)
151 KrnSpinLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
, NULL
,
152 SPINLOCK_MODE_WRITE
);
154 for (newtask
= (struct Task
*)GetHead(&SysBase
->TaskReady
); newtask
!= NULL
; newtask
= (struct Task
*)GetSucc(newtask
))
156 #if defined(__AROSEXEC_SMP__)
157 if ((GetIntETask(newtask
)->iet_CpuAffinity
& cpumask
) == cpumask
)
160 Remove(&newtask
->tc_Node
);
162 #if defined(__AROSEXEC_SMP__)
171 else if ((task
->tc_State
== TS_READY
) && (task
!= newtask
))
172 Enqueue(&SysBase
->TaskReady
, &task
->tc_Node
);
174 #if defined(__AROSEXEC_SMP__)
175 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
179 (newtask
->tc_State
== TS_READY
) ||
180 (newtask
->tc_State
== TS_RUN
))
182 SysBase
->DispCount
++;
183 SysBase
->IDNestCnt
= newtask
->tc_IDNestCnt
;
184 SET_THIS_TASK(newtask
);
185 SysBase
->Elapsed
= SysBase
->Quantum
;
186 SysBase
->SysFlags
&= ~SFF_QuantumOver
;
188 /* Check the stack of the task we are about to launch. */
189 if ((newtask
->tc_SPReg
<= newtask
->tc_SPLower
) ||
190 (newtask
->tc_SPReg
> newtask
->tc_SPUpper
))
191 newtask
->tc_State
= TS_WAIT
;
193 newtask
->tc_State
= TS_RUN
;
198 #if defined(__AROSEXEC_SMP__)
199 if (newtask
->tc_State
== TS_SPIN
)
201 /* move it to the spinning list */
202 KrnSpinLock(&PrivExecBase(SysBase
)->TaskSpinningLock
, NULL
,
203 SPINLOCK_MODE_WRITE
);
204 AddHead(&PrivExecBase(SysBase
)->TaskSpinning
, &newtask
->tc_Node
);
205 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
209 /* if the task shouldnt run - force a reschedulre.. */
210 if (newtask
->tc_State
!= TS_RUN
)
213 newtask
= core_Dispatch();
217 DSCHED(bug("[KRN:BCM2708] Dispatching Task @ %p (%s)\n", newtask
, newtask
->tc_Node
.ln_Name
));
222 /* Is the list of ready tasks empty? Well, go idle. */
223 DSCHED(bug("[KRN:BCM2708] No ready Task(s) - entering sleep mode\n"));
226 * Idle counter is incremented every time when we enter here,
227 * not only once. This is correct.
229 SysBase
->IdleCount
++;
230 SysBase
->AttnResched
|= ARF_AttnSwitch
;