/*
    Copyright © 2015, The AROS Development Team. All rights reserved.
*/
7 #include <exec/alerts.h>
8 #include <exec/execbase.h>
9 #include <exec/lists.h>
10 #include <proto/exec.h>
11 #include <proto/kernel.h>
13 //#include <kernel_base.h>
14 #include <kernel_debug.h>
15 #include <kernel_scheduler.h>
17 #include "kernel_cpu.h"
19 #include <exec_platform.h>
21 #include <aros/types/spinlock_s.h>
25 #include "exec_intern.h"
29 /* Check if the currently running task on this cpu should be rescheduled.. */
30 BOOL
core_Schedule(void)
32 struct Task
*task
= GET_THIS_TASK
;
33 BOOL corereschedule
= TRUE
;
35 DSCHED(bug("[Kernel] core_Schedule()\n"));
37 SysBase
->AttnResched
&= ~ARF_AttnSwitch
;
39 /* If task has pending exception, reschedule it so that the dispatcher may handle the exception */
40 if (!(task
->tc_Flags
& TF_EXCEPT
))
42 #if defined(__AROSEXEC_SMP__)
43 KrnSpinLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
, NULL
,
46 /* Is the TaskReady empty? If yes, then the running task is the only one. Let it work */
47 if (IsListEmpty(&SysBase
->TaskReady
))
48 corereschedule
= FALSE
;
51 struct Task
*nexttask
;
52 #if defined(__AROSEXEC_SMP__)
53 int cpunum
= GetCPUNumber();
54 uint32_t cpumask
= (1 << cpunum
);
57 If there are tasks ready for this cpu that have equal or lower priority,
58 and the current task has used its alloted time - reschedule so they can run
60 for (nexttask
= (struct Task
*)GetHead(&SysBase
->TaskReady
); nexttask
!= NULL
; nexttask
= (struct Task
*)GetSucc(nexttask
))
62 #if defined(__AROSEXEC_SMP__)
63 if ((GetIntETask(nexttask
)->iet_CpuAffinity
& cpumask
) == cpumask
)
66 if (nexttask
->tc_Node
.ln_Pri
<= task
->tc_Node
.ln_Pri
)
68 /* If the running task did not used it's whole quantum yet, let it work */
69 if (!(SysBase
->SysFlags
& SFF_QuantumOver
))
70 corereschedule
= FALSE
;
73 #if defined(__AROSEXEC_SMP__)
78 #if defined(__AROSEXEC_SMP__)
79 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
86 bug("[Kernel] '%s' @ 0x%p needs rescheduled ..\n", task
->tc_Node
.ln_Name
, task
);
89 return corereschedule
;
92 /* Switch the currently running task on this cpu to ready state */
93 void core_Switch(void)
95 struct Task
*task
= GET_THIS_TASK
;
97 DSCHED(bug("[Kernel] core_Switch()\n"));
99 if (task
->tc_State
== TS_RUN
)
101 DSCHED(bug("[Kernel] Switching away from '%s' @ 0x%p\n", task
->tc_Node
.ln_Name
, task
));
102 #if defined(__AROSEXEC_SMP__)
103 KrnSpinLock(&PrivExecBase(SysBase
)->TaskRunningSpinLock
, NULL
,
104 SPINLOCK_MODE_WRITE
);
106 Remove(&task
->tc_Node
);
107 #if defined(__AROSEXEC_SMP__)
108 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskRunningSpinLock
);
110 task
->tc_State
= TS_READY
;
112 /* if the current task has gone out of stack bounds, suspend it to prevent further damage to the system */
113 if (task
->tc_SPReg
<= task
->tc_SPLower
|| task
->tc_SPReg
> task
->tc_SPUpper
)
115 bug("[Kernel] '%s' @ 0x%p went out of stack limits\n", task
->tc_Node
.ln_Name
, task
);
116 bug("[Kernel] Lower 0x%p, upper 0x%p, SP 0x%p\n", task
->tc_SPLower
, task
->tc_SPUpper
, task
->tc_SPReg
);
118 task
->tc_SigWait
= 0;
119 task
->tc_State
= TS_WAIT
;
120 #if defined(__AROSEXEC_SMP__)
121 KrnSpinLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
, NULL
,
122 SPINLOCK_MODE_WRITE
);
124 Enqueue(&SysBase
->TaskWait
, &task
->tc_Node
);
125 #if defined(__AROSEXEC_SMP__)
126 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
);
129 Alert(AN_StackProbe
);
132 task
->tc_IDNestCnt
= SysBase
->IDNestCnt
;
134 if (task
->tc_Flags
& TF_SWITCH
)
135 AROS_UFC1NR(void, task
->tc_Switch
, AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
137 if (task
->tc_State
== TS_READY
)
139 DSCHED(bug("[Kernel] Setting '%s' @ 0x%p as ready\n", task
->tc_Node
.ln_Name
, task
));
140 #if defined(__AROSEXEC_SMP__)
141 KrnSpinLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
, NULL
,
142 SPINLOCK_MODE_WRITE
);
144 Enqueue(&SysBase
->TaskReady
, &task
->tc_Node
);
145 #if defined(__AROSEXEC_SMP__)
146 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
152 /* Dispatch a "new" ready task on this cpu */
153 struct Task
*core_Dispatch(void)
155 struct Task
*newtask
;
156 struct Task
*task
= GET_THIS_TASK
;
157 #if defined(__AROSEXEC_SMP__)
158 int cpunum
= GetCPUNumber();
159 uint32_t cpumask
= (1 << cpunum
);
162 DSCHED(bug("[Kernel] core_Dispatch()\n"));
164 #if defined(__AROSEXEC_SMP__)
165 KrnSpinLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
, NULL
,
166 SPINLOCK_MODE_WRITE
);
168 for (newtask
= (struct Task
*)GetHead(&SysBase
->TaskReady
); newtask
!= NULL
; newtask
= (struct Task
*)GetSucc(newtask
))
170 #if defined(__AROSEXEC_SMP__)
171 if ((GetIntETask(newtask
)->iet_CpuAffinity
& cpumask
) == cpumask
)
174 Remove(&newtask
->tc_Node
);
176 #if defined(__AROSEXEC_SMP__)
180 #if defined(__AROSEXEC_SMP__)
181 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
184 if ((task
) && (!newtask
))
188 (newtask
->tc_State
== TS_READY
) ||
189 (newtask
->tc_State
== TS_RUN
))
191 DSCHED(bug("[Kernel] Preparing to run '%s' @ 0x%p\n", newtask
->tc_Node
.ln_Name
, newtask
));
193 SysBase
->DispCount
++;
194 SysBase
->IDNestCnt
= newtask
->tc_IDNestCnt
;
195 SET_THIS_TASK(newtask
);
196 SysBase
->Elapsed
= SysBase
->Quantum
;
197 SysBase
->SysFlags
&= ~SFF_QuantumOver
;
199 /* Check the stack of the task we are about to launch. */
200 if ((newtask
->tc_SPReg
<= newtask
->tc_SPLower
) ||
201 (newtask
->tc_SPReg
> newtask
->tc_SPUpper
))
202 newtask
->tc_State
= TS_WAIT
;
204 newtask
->tc_State
= TS_RUN
;
209 BOOL launchtask
= TRUE
;
210 #if defined(__AROSEXEC_SMP__)
211 if (newtask
->tc_State
== TS_SPIN
)
213 /* move it to the spinning list */
214 KrnSpinLock(&PrivExecBase(SysBase
)->TaskSpinningLock
, NULL
,
215 SPINLOCK_MODE_WRITE
);
216 AddHead(&PrivExecBase(SysBase
)->TaskSpinning
, &newtask
->tc_Node
);
217 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskSpinningLock
);
221 if (newtask
->tc_State
== TS_WAIT
)
223 #if defined(__AROSEXEC_SMP__)
224 KrnSpinLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
, NULL
,
225 SPINLOCK_MODE_WRITE
);
227 Enqueue(&SysBase
->TaskWait
, &task
->tc_Node
);
228 #if defined(__AROSEXEC_SMP__)
229 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
);
236 /* if the new task shouldnt run - force a reschedule.. */
237 DSCHED(bug("[Kernel] Skipping '%s' @ 0x%p (state %08x)\n", newtask
->tc_Node
.ln_Name
, newtask
, newtask
->tc_State
));
240 newtask
= core_Dispatch();
244 DSCHED(bug("[Kernel] Launching '%s' @ 0x%p (state %08x)\n", newtask
->tc_Node
.ln_Name
, newtask
, newtask
->tc_State
));
249 /* Go idle if there is nothing to do ... */
250 DSCHED(bug("[Kernel] No ready Task(s) - entering sleep mode\n"));
253 * Idle counter is incremented every time when we enter here,
254 * not only once. This is correct.
256 SysBase
->IdleCount
++;
257 SysBase
->AttnResched
|= ARF_AttnSwitch
;