[AROS.git] arch/arm-native/kernel/kernel_scheduler.c
/*
    Copyright © 2015, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <exec/alerts.h>
#include <exec/execbase.h>
#include <exec/lists.h>
#include <proto/exec.h>

#include <kernel_base.h>
#include <kernel_debug.h>
#include <kernel_scheduler.h>

#include <exec_platform.h>

#include <etask.h>

#define D(x)
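
/*
 * Note (not part of the original file): D() is the usual AROS debug switch.
 * Defined empty here, it compiles the bug() traces below out of the build;
 * redefining it as a pass-through enables them:
 *
 *     #define D(x) x
 */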

/*
 * Schedule the currently running task away. Put it into the TaskReady list
 * in some smart way. This function is subject to change and will probably be
 * replaced by a plugin system in the future.
 */
BOOL core_Schedule(void)
{
    struct Task *task = GET_THIS_TASK;

    D(bug("[KRN:BCM2708] core_Schedule()\n"));

    SysBase->AttnResched &= ~ARF_AttnSwitch;

    /* If the task has a pending exception, reschedule it so that the dispatcher may handle the exception */
    if (!(task->tc_Flags & TF_EXCEPT))
    {
        BYTE pri;

        /* Is the TaskReady list empty? If yes, the running task is the only one. Let it work */
        if (IsListEmpty(&SysBase->TaskReady))
            return FALSE;

        //## TODO: Lock TaskReady access (READ)
        /* Does the TaskReady list contain tasks with priority equal to or lower than the current task?
         * If so, check further... */
        pri = ((struct Task *)GetHead(&SysBase->TaskReady))->tc_Node.ln_Pri;
        if (pri <= task->tc_Node.ln_Pri)
        {
            /* If the running task has not used up its whole quantum yet, let it work */
            if (!(SysBase->SysFlags & SFF_QuantumOver))
            {
                //## TODO: Unlock TaskReady access
                return FALSE;
            }
        }
        //## TODO: Unlock TaskReady access
    }

    /*
     * If we got here, rescheduling is necessary.
     * Put the task into the TaskReady list.
     */
    D(bug("[KRN:BCM2708] Setting task 0x%p (%s) to READY\n", task, task->tc_Node.ln_Name));
    task->tc_State = TS_READY;
    //## TODO: Lock TaskReady access (WRITE)
    Enqueue(&SysBase->TaskReady, &task->tc_Node);
    //## TODO: Unlock TaskReady access

    /* Select new task to run */
    return TRUE;
}
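
/*
 * Illustrative sketch, not part of the original file: how an interrupt exit
 * path might chain the three routines in this file (core_Switch() and
 * core_Dispatch() are declared in kernel_scheduler.h). The name
 * core_SwitchSketch is hypothetical; the real arm-native code wraps these
 * calls in assembly that saves and restores the CPU context.
 */
struct Task *core_SwitchSketch(void)
{
    struct Task *next = GET_THIS_TASK;

    /* core_Schedule() returns TRUE only when the running task must yield */
    if (core_Schedule())
    {
        core_Switch();          /* put the old task away (may raise an Alert) */
        next = core_Dispatch(); /* NULL means: idle until the next interrupt  */
    }

    return next;
}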

/* Actually switch away from the task */
void core_Switch(void)
{
    struct Task *task = GET_THIS_TASK;

    D(bug("[KRN:BCM2708] core_Switch(): Old task = %p (%s)\n", task, task->tc_Node.ln_Name));

    if (task->tc_SPReg <= task->tc_SPLower || task->tc_SPReg > task->tc_SPUpper)
    {
        bug("[KRN:BCM2708] Task %s went out of stack limits\n", task->tc_Node.ln_Name);
        bug("[KRN:BCM2708] Lower %p, upper %p, SP %p\n", task->tc_SPLower, task->tc_SPUpper, task->tc_SPReg);

        /*
         * Suspend the task to stop it from causing more harm. In some rare cases, if the task is
         * holding a lock on some global/library semaphore, this will most likely mean an imminent
         * freeze. In most cases, however, the user will be shown an alert.
         */
        //## TODO: Lock "Tasks" list access (WRITE)
        Remove(&task->tc_Node);
        //## TODO: Unlock access

        task->tc_SigWait = 0;
        task->tc_State = TS_WAIT;
        Enqueue(&SysBase->TaskWait, &task->tc_Node);

        Alert(AN_StackProbe);
    }

    task->tc_IDNestCnt = SysBase->IDNestCnt;

    if (task->tc_Flags & TF_SWITCH)
        AROS_UFC1NR(void, task->tc_Switch, AROS_UFCA(struct ExecBase *, SysBase, A6));
}
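
/*
 * Illustrative sketch, not part of the original file: installing a switch
 * hook so that core_Switch() above invokes it whenever the task loses the
 * CPU. AROS_UFH1/AROS_UFHA come from <aros/asmcall.h>; MySwitchHook and the
 * installation snippet are hypothetical.
 */
AROS_UFH1(void, MySwitchHook,
          AROS_UFHA(struct ExecBase *, SysBase, A6))
{
    AROS_USERFUNC_INIT

    /* e.g. save per-task coprocessor state before the CPU is given away */

    AROS_USERFUNC_EXIT
}

/*
 * Installed from the task's own context, for example:
 *
 *     struct Task *me = GET_THIS_TASK;
 *     me->tc_Switch = (APTR)MySwitchHook;
 *     me->tc_Flags |= TF_SWITCH;
 */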

/*
 * Task dispatcher. Basically it may be the same no matter
 * which scheduling algorithm is used (except for the SysBase->Elapsed reload).
 */
struct Task *core_Dispatch(void)
{
    struct Task *task;
    uint32_t cpumask;
    uint32_t tmp;

    D(bug("[KRN:BCM2708] core_Dispatch()\n"));

    /* Read MPIDR; the low bits give the index of the core we are running on */
    asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp));
    cpumask = (1 << (tmp & 3));

    //## TODO: Lock TaskReady access (READ)
    for (task = (struct Task *)GetHead(&SysBase->TaskReady); task != NULL; task = (struct Task *)GetSucc(task))
    {
#if defined(__AROSEXEC_SMP__)
        /* Skip tasks whose affinity mask excludes this core */
        if ((GetIntETask(task)->iet_CpuAffinity & cpumask) == cpumask)
        {
#endif
            //## TODO: switch TaskReady lock to WRITE
            Remove(&task->tc_Node);
            break;
#if defined(__AROSEXEC_SMP__)
        }
#endif
    }
    //## TODO: Unlock TaskReady access

    if (!task)
    {
        /* Is the list of ready tasks empty? Well, go idle. */
        D(bug("[KRN:BCM2708] No ready tasks, entering sleep mode\n"));

        /*
         * The idle counter is incremented every time we get here,
         * not only once. This is correct.
         */
        SysBase->IdleCount++;
        SysBase->AttnResched |= ARF_AttnSwitch;

        return NULL;
    }

    SysBase->DispCount++;
    SysBase->IDNestCnt = task->tc_IDNestCnt;
    SET_THIS_TASK(task);
    SysBase->Elapsed = SysBase->Quantum;
    SysBase->SysFlags &= ~SFF_QuantumOver;
    task->tc_State = TS_RUN;

    D(bug("[KRN:BCM2708] New task = %p (%s)\n", task, task->tc_Node.ln_Name));

    /* Check the stack of the task we are about to launch */
    if (task->tc_SPReg <= task->tc_SPLower || task->tc_SPReg > task->tc_SPUpper)
    {
        /* Don't let the task run; switch it away (raising an Alert) and dispatch another task */
        core_Switch();
        return core_Dispatch();
    }

    if (task->tc_Flags & TF_LAUNCH)
        AROS_UFC1NR(void, task->tc_Launch, AROS_UFCA(struct ExecBase *, SysBase, A6));

    /* Leave the interrupt and jump to the new task */
    return task;
}
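
/*
 * Illustrative sketch, not part of the original file: pinning a task to a
 * single core so that the __AROSEXEC_SMP__ affinity test in core_Dispatch()
 * above selects it only there. iet_CpuAffinity is treated as a plain bit
 * mask, matching its use in this file; the helper name is hypothetical.
 */
#if defined(__AROSEXEC_SMP__)
static inline void Sketch_PinTaskToCore(struct Task *task, unsigned core)
{
    /* bit n set == the task may run on core n */
    GetIntETask(task)->iet_CpuAffinity = (1 << core);
}

/* usage: Sketch_PinTaskToCore(task, 0); restricts the task to core #0 */
#endif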