disable cputime accounting until it is corrected
[AROS.git] arch/ppc-chrp/efika/kernel/scheduler.c
/*
    Copyright © 1995-2011, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <asm/mpc5200b.h>
#include <aros/kernel.h>
#include <aros/libcall.h>
#include <exec/execbase.h>
#include <hardware/intbits.h>

#include "exec_intern.h"
#include "etask.h"
#include "syscall.h"

#include "kernel_intern.h"
AROS_LH0(KRN_SchedType, KrnGetScheduler,
         struct KernelBase *, KernelBase, 1, Kernel)
{
    AROS_LIBFUNC_INIT

    return SCHED_RR;

    AROS_LIBFUNC_EXIT
}

AROS_LH1(void, KrnSetScheduler,
         AROS_LHA(KRN_SchedType, sched, D0),
         struct KernelBase *, KernelBase, 2, Kernel)
{
    AROS_LIBFUNC_INIT

    /* Cannot set scheduler yet */

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnCause,
         struct KernelBase *, KernelBase, 3, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_CAUSE):"memory","r3");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnDispatch,
         struct KernelBase *, KernelBase, 4, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_DISPATCH):"memory","r3");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnSwitch,
         struct KernelBase *, KernelBase, 5, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_SWITCH):"memory","r3");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnSchedule,
         struct KernelBase *, KernelBase, 6, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_SCHEDULE):"memory","r3");

    AROS_LIBFUNC_EXIT
}
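/*
 * Note: the Krn* entry points above do not reschedule directly. Each one loads a
 * syscall ID (SC_CAUSE, SC_DISPATCH, SC_SWITCH, SC_SCHEDULE, from the included
 * syscall.h) into r3 and executes the PowerPC 'sc' instruction, which traps into
 * supervisor mode. The system call exception handler (not part of this file) is
 * then expected to route the request to the matching core_* routine below,
 * passing the interrupted context as the regs_t frame.
 */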
/*
 * Task dispatcher. Basically it may be the same one no matter
 * what scheduling algorithm is used.
 */
void core_Dispatch(regs_t *regs)
{
    volatile struct ExecBase *SysBase = getSysBase();
    struct Task *task;

    if (SysBase)
    {
        wrmsr(rdmsr() & ~MSR_EE);

        /*
         * Is the list of ready tasks empty? Well, increment the idle switch count and halt the CPU.
         * This should be extended by some plugin mechanism which would put the CPU and the whole
         * machine into some more sophisticated sleep states (ACPI?).
         */
        while (IsListEmpty(&SysBase->TaskReady))
        {
            // SysBase->IdleCount++;
            SysBase->AttnResched |= ARF_AttnSwitch;
            //D(bug("[KRN] TaskReady list empty. Sleeping for a while...\n"));

            /* Sleep almost forever ;) */
            wrmsr(rdmsr() | MSR_EE);
            asm volatile("sync");
            // wrmsr(rdmsr() | MSR_POW);
            // asm volatile("isync");

            if (SysBase->SysFlags & SFF_SoftInt)
            {
                core_Cause(SysBase);
            }
        }

        SysBase->DispCount++;

        /* Get the first task from the TaskReady list, and populate its settings through SysBase */
        task = (struct Task *)REMHEAD(&SysBase->TaskReady);
        SysBase->ThisTask = task;
        SysBase->Elapsed = SysBase->Quantum;
        SysBase->SysFlags &= ~0x2000;   /* clear the quantum-expired bit tested in core_Schedule() */
        task->tc_State = TS_RUN;
        SysBase->IDNestCnt = task->tc_IDNestCnt;

        //D(bug("[KRN] New task = %p (%s)\n", task, task->tc_Node.ln_Name));

        /* Handle the task's flags */
        if (task->tc_Flags & TF_EXCEPT)
            Exception();

        /* Store the launch time */
        GetIntETask(task)->iet_private1 = mftbu();

        if (task->tc_Flags & TF_LAUNCH)
        {
            AROS_UFC1(void, task->tc_Launch,
                      AROS_UFCA(struct ExecBase *, SysBase, A6));
        }

        /* Restore the task's state */
        regs = task->tc_UnionETask.tc_ETask->et_RegFrame;

        if (SysBase->IDNestCnt < 0)
            regs->srr1 |= MSR_EE;

        /* Copy the fpu, mmx, xmm state */
#warning FIXME: Change to the lazy saving of the FPU state!!!!
#warning TODO: No FPU support yet!!!!!!! Yay, it sucks! :-D
    }

    regs->srr1 &= ~MSR_POW;
    /* Leave interrupt and jump to the new task */
    core_LeaveInterrupt(regs);
}
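/*
 * CPU time accounting: core_Dispatch() above stamps the task's launch time
 * (mftbu(), the upper time base word) into iet_private1; core_Switch() below
 * would add the elapsed difference to iet_CpuTime, but that update is currently
 * compiled out (#if (0)) until the accounting is corrected.
 */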
extern struct Task *idle_task;
void core_Switch(regs_t *regs)
{
    struct ExecBase *SysBase = getSysBase();
    struct Task *task;
    context_t *ctx = (context_t *)regs;

    if (SysBase)
    {
        /* Disable interrupts for a while */
        wrmsr(rdmsr() & ~MSR_EE);

        task = SysBase->ThisTask;

        //D(bug("[KRN] Old task = %p (%s)\n", task, task->tc_Node.ln_Name));

        /* Copy the current task's context into the ETask structure */
        memmove(task->tc_UnionETask.tc_ETask->et_RegFrame, regs, sizeof(context_t));

        /* Copy the fpu, mmx, xmm state */
#warning FIXME: Change to the lazy saving of the FPU state!!!!
#warning TODO: Write the damn FPU handling at all!!!!!!!! ;-D LOL

        /* Store IDNestCnt in the task's structure */
        task->tc_IDNestCnt = SysBase->IDNestCnt;
        task->tc_SPReg = regs->gpr[1];

        /* And enable interrupts */
        SysBase->IDNestCnt = -1;
        // if (task->tc_Node.ln_Pri < 127)
        //     task->tc_Node.ln_Pri++;

        // if (SysBase->Elapsed <= 1)
        //     if (task->tc_Node.ln_Pri > -125)
        //         task->tc_Node.ln_Pri--;

        // wrmsr(rdmsr() | MSR_EE);

        // warning: fixme
#if (0)
        /* Task says bye-bye. Update the CPU time now. */
        GetIntETask(task)->iet_CpuTime += mftbu() - GetIntETask(task)->iet_private1;
#endif
        /* TF_SWITCH flag set? Call the switch routine */
        if (task->tc_Flags & TF_SWITCH)
        {
            AROS_UFC1(void, task->tc_Switch,
                      AROS_UFCA(struct ExecBase *, SysBase, A6));
        }
    }

    core_Dispatch(regs);
}
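/*
 * core_Switch() only saves the outgoing task's context and bookkeeping; it does
 * not pick a successor itself. Selection of the next task to run always happens
 * in core_Dispatch(), which it falls through to unconditionally.
 */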
/*
 * Schedule the currently running task away. Put it into the TaskReady list
 * in some smart way. This function is subject to change and will probably be
 * replaced by some plugin system in the future.
 */
void core_Schedule(regs_t *regs)
{
    struct ExecBase *SysBase = getSysBase();
    struct Task *task;

    if (SysBase)
    {
        /* Disable interrupts for a while */
        wrmsr(rdmsr() & ~MSR_EE); // CLI

        task = SysBase->ThisTask;

        /* Clear the pending switch flag. */
        SysBase->AttnResched &= ~ARF_AttnSwitch;

        /* If the task has a pending exception, reschedule it so that the dispatcher may handle the exception */
        if (!(task->tc_Flags & TF_EXCEPT))
        {
            /* Is TaskReady empty? If yes, the running task is the only one. Let it work */
            if (IsListEmpty(&SysBase->TaskReady))
                core_LeaveInterrupt(regs);

            /* Does the TaskReady list contain tasks with priority equal to or lower than the current task?
             * If so, then check further... */
            if (((struct Task *)GetHead(&SysBase->TaskReady))->tc_Node.ln_Pri <= task->tc_Node.ln_Pri)
            {
                /* If the running task has not used its whole quantum yet, let it work */
                if (!(SysBase->SysFlags & 0x2000))
                {
                    core_LeaveInterrupt(regs);
                }
            }
        }

#if 0
        if (task != idle_task)
        {
            /* Almost no CPU time used? Good. Increase the pri */
            if (SysBase->Elapsed == SysBase->Quantum)
            {
                // if (task->tc_Node.ln_Pri < (GetIntETask(task)->iet_OrigPri) + 5)
                //     task->tc_Node.ln_Pri++;
            }
            else
            {
                if (task->tc_Node.ln_Pri > (GetIntETask(task)->iet_OrigPri) - 5)
                    task->tc_Node.ln_Pri--;
            }
        }
#endif

        /*
         * If we got here, then rescheduling is necessary.
         * Put the task into the TaskReady list.
         */
        task->tc_State = TS_READY;
        Enqueue(&SysBase->TaskReady, (struct Node *)task);
    }

    /* Select the new task to run */
    core_Switch(regs);
}
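/*
 * Together with KrnGetScheduler() returning SCHED_RR, the checks above implement
 * round-robin scheduling within a priority level: the running task is requeued
 * only when a higher-priority task is ready, its quantum has expired
 * (SysFlags & 0x2000), or a task exception is pending. Enqueue() inserts it by
 * ln_Pri, so core_Dispatch() always picks the highest-priority ready task next.
 */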
/*
 * Leave the interrupt. This function receives the register frame used to leave the supervisor
 * mode. It never returns and reschedules the task if it was asked for.
 */
void core_ExitInterrupt(regs_t *regs)
{
    /* Powermode was on? Turn it off now */
    regs->srr1 &= ~MSR_POW;

    /* Going back into supervisor mode? Then exit immediately */
    if (!(regs->srr1 & MSR_PR))
    {
        core_LeaveInterrupt(regs);
    }
    else
    {
        /* Prepare to go back into user mode */
        struct ExecBase *SysBase = getSysBase();

        if (SysBase)
        {
            /* Soft interrupt requested? It's high time to do it */
            if (SysBase->SysFlags & SFF_SoftInt)
                core_Cause(SysBase);

            /* If task switching is disabled, leave immediately */
            if (SysBase->TDNestCnt >= 0)
            {
                core_LeaveInterrupt(regs);
            }
            else
            {
                /*
                 * Do not disturb the task if it's not necessary.
                 * Reschedule only if the switch pending flag is set. Exit otherwise.
                 */
                if (SysBase->AttnResched & ARF_AttnSwitch)
                {
                    core_Schedule(regs);
                }
                else
                    core_LeaveInterrupt(regs);
            }
        }
        else
            core_LeaveInterrupt(regs);
    }
}
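/*
 * Software interrupt delivery: core_Cause() below does not run SoftInts itself;
 * it simply invokes the INTB_SOFTINT vector from SysBase->IntVects (iv_Code with
 * iv_Data), which is expected to process the pending software interrupts in the
 * usual exec fashion.
 */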
void core_Cause(struct ExecBase *SysBase)
{
    struct IntVector *iv = &SysBase->IntVects[INTB_SOFTINT];

    /* If the SoftInt vector in SysBase is set, call it. It will do the rest for us */
    if (iv->iv_Code)
    {
        AROS_INTC1(iv->iv_Code, iv->iv_Data);
    }
}