/*
    Copyright © 1995-2015, The AROS Development Team. All rights reserved.
*/
6 #include <asm/mpc5200b.h>
7 #include <aros/kernel.h>
8 #include <aros/libcall.h>
9 #include <exec/execbase.h>
10 #include <hardware/intbits.h>
12 #include "exec_intern.h"
16 #include "kernel_intern.h"
18 AROS_LH0(KRN_SchedType
, KrnGetScheduler
,
19 struct KernelBase
*, KernelBase
, 1, Kernel
)
28 AROS_LH1(void, KrnSetScheduler
,
29 AROS_LHA(KRN_SchedType
, sched
, D0
),
30 struct KernelBase
*, KernelBase
, 2, Kernel
)
34 /* Cannot set scheduler yet */
39 AROS_LH0(void, KrnCause
,
40 struct KernelBase
*, KernelBase
, 3, Kernel
)
44 asm volatile("li %%r3,%0; sc"::"i"(SC_CAUSE
):"memory","r3");
49 AROS_LH0(void , KrnDispatch
,
50 struct KernelBase
*, KernelBase
, 4, Kernel
)
55 asm volatile("li %%r3,%0; sc"::"i"(SC_DISPATCH
):"memory","r3");
60 AROS_LH0(void, KrnSwitch
,
61 struct KernelBase
*, KernelBase
, 5, Kernel
)
65 asm volatile("li %%r3,%0; sc"::"i"(SC_SWITCH
):"memory","r3");
70 AROS_LH0(void, KrnSchedule
,
71 struct KernelBase
*, KernelBase
, 6, Kernel
)
75 asm volatile("li %%r3,%0; sc"::"i"(SC_SCHEDULE
):"memory","r3");
81 * Task dispatcher. Basically it may be the same one no matter what scheduling algorithm is used
83 void core_Dispatch(regs_t
*regs
)
85 volatile struct ExecBase
*SysBase
= getSysBase();
90 wrmsr(rdmsr() & ~MSR_EE
);
93 * Is the list of ready tasks empty? Well, increment the idle switch cound and halt CPU.
94 * It should be extended by some plugin mechanism which would put CPU and whole machine
95 * into some more sophisticated sleep states (ACPI?)
97 while (IsListEmpty(&SysBase
->TaskReady
))
99 // SysBase->IdleCount++;
100 SysBase
->AttnResched
|= ARF_AttnSwitch
;
102 //D(bug("[KRN] TaskReady list empty. Sleeping for a while...\n"));
103 /* Sleep almost forever ;) */
105 wrmsr(rdmsr() | MSR_EE
);
106 asm volatile("sync");
107 // wrmsr(rdmsr() | MSR_POW);
108 // asm volatile("isync");
110 if (SysBase
->SysFlags
& SFF_SoftInt
)
116 SysBase
->DispCount
++;
118 /* Get the first task from the TaskReady list, and populate it's settings through Sysbase */
119 task
= (struct Task
*)REMHEAD(&SysBase
->TaskReady
);
120 SysBase
->ThisTask
= task
;
121 SysBase
->Elapsed
= SysBase
->Quantum
;
122 SysBase
->SysFlags
&= ~0x2000;
123 task
->tc_State
= TS_RUN
;
124 SysBase
->IDNestCnt
= task
->tc_IDNestCnt
;
126 //D(bug("[KRN] New task = %p (%s)\n", task, task->tc_Node.ln_Name));
128 /* Handle tasks's flags */
129 if (task
->tc_Flags
& TF_EXCEPT
)
132 /* Store the launch time */
133 GetIntETask(task
)->iet_private1
= mftbu();
135 if (task
->tc_Flags
& TF_LAUNCH
)
137 AROS_UFC1(void, task
->tc_Launch
,
138 AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
141 /* Restore the task's state */
142 regs
= task
->tc_UnionETask
.tc_ETask
->et_RegFrame
;
144 if (SysBase
->IDNestCnt
< 0)
145 regs
->srr1
|= MSR_EE
;
147 /* Copy the fpu, mmx, xmm state */
148 #warning FIXME: Change to the lazy saving of the FPU state!!!!
149 #warning TODO: No FPU support yet!!!!!!! Yay, it sucks! :-D
153 regs
->srr1
&= ~MSR_POW
;
154 /* Leave interrupt and jump to the new task */
155 core_LeaveInterrupt(regs
);
158 extern struct Task
*idle_task
;
160 void core_Switch(regs_t
*regs
)
162 struct ExecBase
*SysBase
= getSysBase();
164 context_t
*ctx
= (context_t
*)regs
;
167 /* Disable interrupts for a while */
168 wrmsr(rdmsr() & ~MSR_EE
);
170 task
= SysBase
->ThisTask
;
172 //D(bug("[KRN] Old task = %p (%s)\n", task, task->tc_Node.ln_Name));
174 /* Copy current task's context into the ETask structure */
175 memmove(task
->tc_UnionETask
.tc_ETask
->et_RegFrame
, regs
, sizeof(context_t
));
177 /* Copy the fpu, mmx, xmm state */
179 #warning FIXME: Change to the lazy saving of the FPU state!!!!
180 #warning TODO: Write the damn FPU handling at all!!!!!!!! ;-D LOL
182 /* store IDNestCnt into tasks's structure */
183 task
->tc_IDNestCnt
= SysBase
->IDNestCnt
;
184 task
->tc_SPReg
= regs
->gpr
[1];
186 /* And enable interrupts */
187 SysBase
->IDNestCnt
= -1;
189 // if (task->tc_Node.ln_Pri < 127)
190 // task->tc_Node.ln_Pri++;
192 // if (SysBase->Elapsed <= 1)
193 // if (task->tc_Node.ln_Pri > -125)
194 // task->tc_Node.ln_Pri--;
196 // wrmsr(rdmsr() | MSR_EE);
200 /* Task says byebye. Update the CPU Time now. */
201 GetIntETask(task
)->iet_CpuTime
+= mftbu() - GetIntETask(task
)->iet_private1
;
203 /* TF_SWITCH flag set? Call the switch routine */
204 if (task
->tc_Flags
& TF_SWITCH
)
206 AROS_UFC1(void, task
->tc_Switch
,
207 AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
215 * Schedule the currently running task away. Put it into the TaskReady list
216 * in some smart way. This function is subject of change and it will be probably replaced
217 * by some plugin system in the future
219 void core_Schedule(regs_t
*regs
)
221 struct ExecBase
*SysBase
= getSysBase();
226 /* Disable interrupts for a while */
227 wrmsr(rdmsr() & ~MSR_EE
); // CLI
229 task
= SysBase
->ThisTask
;
231 /* Clear the pending switch flag. */
232 SysBase
->AttnResched
&= ~ARF_AttnSwitch
;
234 /* If task has pending exception, reschedule it so that the dispatcher may handle the exception */
235 if (!(task
->tc_Flags
& TF_EXCEPT
))
237 /* Is the TaskReady empty? If yes, then the running task is the only one. Let it work */
238 if (IsListEmpty(&SysBase
->TaskReady
))
239 core_LeaveInterrupt(regs
);
241 /* Does the TaskReady list contains tasks with priority equal or lower than current task?
242 * If so, then check further... */
243 if (((struct Task
*)GetHead(&SysBase
->TaskReady
))->tc_Node
.ln_Pri
<= task
->tc_Node
.ln_Pri
)
245 /* If the running task did not used it's whole quantum yet, let it work */
246 if (!(SysBase
->SysFlags
& 0x2000))
248 core_LeaveInterrupt(regs
);
254 if (task
!= idle_task
)
256 /* almost no CPU time used? Good. Increase the pri */
257 if (SysBase
->Elapsed
== SysBase
->Quantum
)
259 // if (task->tc_Node.ln_Pri < (GetIntETask(task)->iet_OrigPri) + 5)
260 // task->tc_Node.ln_Pri++;
264 if (task
->tc_Node
.ln_Pri
> (GetIntETask(task
)->iet_OrigPri
) - 5)
265 task
->tc_Node
.ln_Pri
--;
271 * If we got here, then the rescheduling is necessary.
272 * Put the task into the TaskReady list.
274 task
->tc_State
= TS_READY
;
275 Enqueue(&SysBase
->TaskReady
, (struct Node
*)task
);
278 /* Select new task to run */
283 * Leave the interrupt. This function receives the register frame used to leave the supervisor
284 * mode. It never returns and reschedules the task if it was asked for.
286 void core_ExitInterrupt(regs_t
*regs
)
288 /* Powermode was on? Turn it off now */
289 regs
->srr1
&= ~MSR_POW
;
291 /* Going back into supervisor mode? Then exit immediatelly */
292 if (!(regs
->srr1
& MSR_PR
))
294 core_LeaveInterrupt(regs
);
298 /* Prepare to go back into user mode */
299 struct ExecBase
*SysBase
= getSysBase();
303 /* Soft interrupt requested? It's high time to do it */
304 if (SysBase
->SysFlags
& SFF_SoftInt
)
307 /* If task switching is disabled, leave immediatelly */
308 if (SysBase
->TDNestCnt
>= 0)
310 core_LeaveInterrupt(regs
);
315 * Do not disturb task if it's not necessary.
316 * Reschedule only if switch pending flag is set. Exit otherwise.
318 if (SysBase
->AttnResched
& ARF_AttnSwitch
)
323 core_LeaveInterrupt(regs
);
327 core_LeaveInterrupt(regs
);
331 void core_Cause(struct ExecBase
*SysBase
)
333 struct IntVector
*iv
= &SysBase
->IntVects
[INTB_SOFTINT
];
335 /* If the SoftInt vector in SysBase is set, call it. It will do the rest for us */
338 AROS_INTC1(iv
->iv_Code
, iv
->iv_Data
);