2 Copyright © 1995-2011, The AROS Development Team. All rights reserved.
6 #include <asm/mpc5200b.h>
7 #include <aros/kernel.h>
8 #include <aros/libcall.h>
9 #include <exec/execbase.h>
10 #include <hardware/intbits.h>
12 #include "exec_intern.h"
16 #include "kernel_intern.h"
/*
 * KrnGetScheduler (kernel.resource LVO 1)
 * Returns the KRN_SchedType describing the scheduler currently in use.
 * NOTE(review): the function body (original lines 20-27) is missing from
 * this extract; only the AROS_LH0 header is visible.
 */
18 AROS_LH0(KRN_SchedType
, KrnGetScheduler
,
19 struct KernelBase
*, KernelBase
, 1, Kernel
)
/*
 * KrnSetScheduler (kernel.resource LVO 2)
 * Requests a change of scheduling algorithm (sched passed in D0).
 * Per the comment retained below, this is currently a no-op: the scheduler
 * cannot be changed yet.
 */
28 AROS_LH1(void, KrnSetScheduler
,
29 AROS_LHA(KRN_SchedType
, sched
, D0
),
30 struct KernelBase
*, KernelBase
, 2, Kernel
)
34 /* Cannot set scheduler yet */
/*
 * KrnCause (kernel.resource LVO 3)
 * Triggers software interrupt processing by entering supervisor mode:
 * loads the SC_CAUSE syscall number into r3 and executes 'sc'.
 * The asm clobbers r3 and declares a memory barrier so the compiler cannot
 * reorder memory accesses around the syscall.
 */
39 AROS_LH0(void, KrnCause
,
40 struct KernelBase
*, KernelBase
, 3, Kernel
)
44 asm volatile("li %%r3,%0; sc"::"i"(SC_CAUSE
):"memory","r3");
/*
 * KrnDispatch (kernel.resource LVO 4)
 * Enters supervisor mode via the SC_DISPATCH syscall (r3 = syscall number,
 * 'sc' instruction) so that the kernel-side dispatcher can run.
 */
49 AROS_LH0(void , KrnDispatch
,
50 struct KernelBase
*, KernelBase
, 4, Kernel
)
55 asm volatile("li %%r3,%0; sc"::"i"(SC_DISPATCH
):"memory","r3");
/*
 * KrnSwitch (kernel.resource LVO 5)
 * Enters supervisor mode via the SC_SWITCH syscall to save the current
 * task's context kernel-side (see core_Switch below).
 */
60 AROS_LH0(void, KrnSwitch
,
61 struct KernelBase
*, KernelBase
, 5, Kernel
)
65 asm volatile("li %%r3,%0; sc"::"i"(SC_SWITCH
):"memory","r3");
/*
 * KrnSchedule (kernel.resource LVO 6)
 * Enters supervisor mode via the SC_SCHEDULE syscall to run the
 * rescheduling decision (see core_Schedule below).
 */
70 AROS_LH0(void, KrnSchedule
,
71 struct KernelBase
*, KernelBase
, 6, Kernel
)
75 asm volatile("li %%r3,%0; sc"::"i"(SC_SCHEDULE
):"memory","r3");
81 * Task dispatcher. Basically it may be the same one no matter what scheduling algorithm is used
/*
 * core_Dispatch: pick the next ready task and switch the CPU to it.
 * Runs in supervisor mode with 'regs' pointing at the register frame of the
 * interrupted context; it does not return normally but exits through
 * core_LeaveInterrupt(regs) into the newly selected task.
 * NOTE(review): this extract is missing several original lines (braces,
 * the local 'task' declaration, and the softint-handling body around
 * original line 110) - do not treat the visible text as complete.
 */
83 void core_Dispatch(regs_t
*regs
)
85 volatile struct ExecBase
*SysBase
= getSysBase();
/* Mask external interrupts (clear MSR_EE) while the task lists are touched. */
90 wrmsr(rdmsr() & ~MSR_EE
);
93 * Is the list of ready tasks empty? Well, increment the idle switch cound and halt CPU.
94 * It should be extended by some plugin mechanism which would put CPU and whole machine
95 * into some more sophisticated sleep states (ACPI?)
97 while (IsListEmpty(&SysBase
->TaskReady
))
99 // SysBase->IdleCount++;
/* Remember that a switch is wanted, then idle with interrupts re-enabled
 * until something becomes ready. */
100 SysBase
->AttnResched
|= ARF_AttnSwitch
;
102 //D(bug("[KRN] TaskReady list empty. Sleeping for a while...\n"));
103 /* Sleep almost forever ;) */
105 wrmsr(rdmsr() | MSR_EE
);
106 asm volatile("sync");
107 // wrmsr(rdmsr() | MSR_POW);
108 // asm volatile("isync");
/* A software interrupt may have been requested while idling; handle it
 * before retrying the ready list (handler body missing from extract). */
110 if (SysBase
->SysFlags
& SFF_SoftInt
)
/* Account one more dispatch. */
116 SysBase
->DispCount
++;
118 /* Get the first task from the TaskReady list, and populate it's settings through Sysbase */
119 task
= (struct Task
*)REMHEAD(&SysBase
->TaskReady
);
120 SysBase
->ThisTask
= task
;
/* Reset the time slice; Elapsed counts down from Quantum elsewhere. */
121 SysBase
->Elapsed
= SysBase
->Quantum
;
/* 0x2000 appears to be a "quantum used up" flag in SysFlags (cleared here,
 * tested in core_Schedule) - TODO confirm against exec headers. */
122 SysBase
->SysFlags
&= ~0x2000;
123 task
->tc_State
= TS_RUN
;
/* Restore the task's interrupt-disable nesting level. */
124 SysBase
->IDNestCnt
= task
->tc_IDNestCnt
;
126 //D(bug("[KRN] New task = %p (%s)\n", task, task->tc_Node.ln_Name));
128 /* Handle tasks's flags */
/* Pending task exception? (handling body missing from this extract). */
129 if (task
->tc_Flags
& TF_EXCEPT
)
132 /* Store the launch time */
/* iet_private1 holds the timebase-upper value at launch; core_Switch uses
 * it to accumulate iet_CpuTime. */
133 GetIntETask(task
)->iet_private1
= mftbu();
/* TF_LAUNCH: call the task's tc_Launch hook with SysBase in A6. */
135 if (task
->tc_Flags
& TF_LAUNCH
)
137 AROS_UFC1(void, task
->tc_Launch
,
138 AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
141 /* Restore the task's state */
/* From here on, 'regs' points at the NEW task's saved register frame. */
142 regs
= task
->tc_UnionETask
.tc_ETask
->et_RegFrame
;
/* IDNestCnt < 0 means interrupts enabled for this task: set MSR_EE in the
 * saved SRR1 so rfi returns with external interrupts on. */
144 if (SysBase
->IDNestCnt
< 0)
145 regs
->srr1
|= MSR_EE
;
147 /* Copy the fpu, mmx, xmm state */
148 #warning FIXME: Change to the lazy saving of the FPU state!!!!
149 #warning TODO: No FPU support yet!!!!!!! Yay, it sucks! :-D
/* Never return to the task with the power-saving bit set. */
153 regs
->srr1
&= ~MSR_POW
;
154 /* Leave interrupt and jump to the new task */
155 core_LeaveInterrupt(regs
);
158 extern struct Task
*idle_task
;
/*
 * core_Switch: save the current task's context away so the dispatcher can
 * later select a (possibly different) task to run.
 * NOTE(review): braces and the local 'task' declaration are missing from
 * this extract; 'ctx' is computed but its use is not visible here.
 */
160 void core_Switch(regs_t
*regs
)
162 struct ExecBase
*SysBase
= getSysBase();
164 context_t
*ctx
= (context_t
*)regs
;
167 /* Disable interrupts for a while */
168 wrmsr(rdmsr() & ~MSR_EE
);
170 task
= SysBase
->ThisTask
;
172 //D(bug("[KRN] Old task = %p (%s)\n", task, task->tc_Node.ln_Name));
174 /* Copy current task's context into the ETask structure */
/* memmove (not memcpy): source and destination could overlap. */
175 memmove(task
->tc_UnionETask
.tc_ETask
->et_RegFrame
, regs
, sizeof(context_t
));
177 /* Copy the fpu, mmx, xmm state */
179 #warning FIXME: Change to the lazy saving of the FPU state!!!!
180 #warning TODO: Write the damn FPU handling at all!!!!!!!! ;-D LOL
182 /* store IDNestCnt into tasks's structure */
183 task
->tc_IDNestCnt
= SysBase
->IDNestCnt
;
/* Save the stack pointer: on PPC the stack pointer lives in GPR1. */
184 task
->tc_SPReg
= regs
->gpr
[1];
186 /* And enable interrupts */
/* -1 = interrupts-enabled state for the (now taskless) system context. */
187 SysBase
->IDNestCnt
= -1;
189 // if (task->tc_Node.ln_Pri < 127)
190 // task->tc_Node.ln_Pri++;
192 // if (SysBase->Elapsed <= 1)
193 // if (task->tc_Node.ln_Pri > -125)
194 // task->tc_Node.ln_Pri--;
196 // wrmsr(rdmsr() | MSR_EE);
198 /* Task says byebye. Update the CPU Time now. */
/* iet_private1 was stamped with mftbu() at dispatch; the difference is the
 * time this task just spent on the CPU. */
199 GetIntETask(task
)->iet_CpuTime
+= mftbu() - GetIntETask(task
)->iet_private1
;
201 /* TF_SWITCH flag set? Call the switch routine */
202 if (task
->tc_Flags
& TF_SWITCH
)
204 AROS_UFC1(void, task
->tc_Switch
,
205 AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
213 * Schedule the currently running task away. Put it into the TaskReady list
214 * in some smart way. This function is subject of change and it will be probably replaced
215 * by some plugin system in the future
/*
 * core_Schedule: decide whether the running task keeps the CPU or is put
 * back on the TaskReady list for the dispatcher to pick a successor.
 * Exits early through core_LeaveInterrupt(regs) whenever the current task
 * may continue running.
 * NOTE(review): braces, the local 'task' declaration, and the final call
 * that selects the new task (after the "Select new task to run" comment)
 * are missing from this extract.
 */
217 void core_Schedule(regs_t
*regs
)
219 struct ExecBase
*SysBase
= getSysBase();
224 /* Disable interrupts for a while */
225 wrmsr(rdmsr() & ~MSR_EE
); // CLI
227 task
= SysBase
->ThisTask
;
229 /* Clear the pending switch flag. */
230 SysBase
->AttnResched
&= ~ARF_AttnSwitch
;
232 /* If task has pending exception, reschedule it so that the dispatcher may handle the exception */
233 if (!(task
->tc_Flags
& TF_EXCEPT
))
235 /* Is the TaskReady empty? If yes, then the running task is the only one. Let it work */
236 if (IsListEmpty(&SysBase
->TaskReady
))
237 core_LeaveInterrupt(regs
);
239 /* Does the TaskReady list contains tasks with priority equal or lower than current task?
240 * If so, then check further... */
/* TaskReady is priority-ordered (Enqueue), so GetHead yields the
 * highest-priority ready task. */
241 if (((struct Task
*)GetHead(&SysBase
->TaskReady
))->tc_Node
.ln_Pri
<= task
->tc_Node
.ln_Pri
)
243 /* If the running task did not used it's whole quantum yet, let it work */
/* 0x2000 = "quantum used up" flag; cleared in core_Dispatch - TODO confirm
 * the flag name against exec headers. */
244 if (!(SysBase
->SysFlags
& 0x2000))
246 core_LeaveInterrupt(regs
);
/* Dynamic priority adjustment - never applied to the idle task. */
252 if (task
!= idle_task
)
254 /* almost no CPU time used? Good. Increase the pri */
255 if (SysBase
->Elapsed
== SysBase
->Quantum
)
257 // if (task->tc_Node.ln_Pri < (GetIntETask(task)->iet_OrigPri) + 5)
258 // task->tc_Node.ln_Pri++;
/* Used its whole quantum: decay priority, but never more than 5 below the
 * task's original priority. */
262 if (task
->tc_Node
.ln_Pri
> (GetIntETask(task
)->iet_OrigPri
) - 5)
263 task
->tc_Node
.ln_Pri
--;
269 * If we got here, then the rescheduling is necessary.
270 * Put the task into the TaskReady list.
272 task
->tc_State
= TS_READY
;
/* Enqueue inserts by ln_Pri, keeping TaskReady priority-sorted. */
273 Enqueue(&SysBase
->TaskReady
, (struct Node
*)task
);
276 /* Select new task to run */
281 * Leave the interrupt. This function receives the register frame used to leave the supervisor
282 * mode. It never returns and reschedules the task if it was asked for.
/*
 * core_ExitInterrupt: common exit path from an interrupt. Receives the
 * register frame used to leave supervisor mode; never returns - every path
 * ends in core_LeaveInterrupt(regs) (or, per the missing lines, in the
 * softint/reschedule handlers).
 * NOTE(review): braces and the bodies of the SFF_SoftInt and ARF_AttnSwitch
 * branches are missing from this extract.
 */
284 void core_ExitInterrupt(regs_t
*regs
)
286 /* Powermode was on? Turn it off now */
287 regs
->srr1
&= ~MSR_POW
;
289 /* Going back into supervisor mode? Then exit immediatelly */
/* MSR_PR clear in the saved SRR1 means the interrupted context was
 * privileged - no rescheduling in that case. */
290 if (!(regs
->srr1
& MSR_PR
))
292 core_LeaveInterrupt(regs
);
296 /* Prepare to go back into user mode */
297 struct ExecBase
*SysBase
= getSysBase();
301 /* Soft interrupt requested? It's high time to do it */
302 if (SysBase
->SysFlags
& SFF_SoftInt
)
305 /* If task switching is disabled, leave immediatelly */
/* TDNestCnt >= 0 means Forbid() is in effect. */
306 if (SysBase
->TDNestCnt
>= 0)
308 core_LeaveInterrupt(regs
);
313 * Do not disturb task if it's not necessary.
314 * Reschedule only if switch pending flag is set. Exit otherwise.
316 if (SysBase
->AttnResched
& ARF_AttnSwitch
)
321 core_LeaveInterrupt(regs
);
325 core_LeaveInterrupt(regs
);
/*
 * core_Cause: kernel-side software interrupt delivery. Looks up the
 * INTB_SOFTINT vector in SysBase->IntVects and, per the comment below,
 * invokes its handler (iv_Code) with its user data (iv_Data).
 * NOTE(review): the guard that checks the vector is actually set (original
 * lines 334-335) is missing from this extract.
 */
329 void core_Cause(struct ExecBase
*SysBase
)
331 struct IntVector
*iv
= &SysBase
->IntVects
[INTB_SOFTINT
];
333 /* If the SoftInt vector in SysBase is set, call it. It will do the rest for us */
336 AROS_UFIC1(iv
->iv_Code
, iv
->iv_Data
);