/*
 * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
 * Copyright (c) 2019-2020 Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28 #ifndef _LINUX_SCHED_H_
29 #define _LINUX_SCHED_H_
31 #include <linux/capability.h>
32 #include <linux/threads.h>
33 #include <linux/kernel.h>
34 #include <linux/types.h>
35 #include <linux/jiffies.h>
36 #include <linux/rbtree.h>
37 #include <linux/thread_info.h>
38 #include <linux/cpumask.h>
39 #include <linux/errno.h>
40 #include <linux/mm_types.h>
41 #include <linux/preempt.h>
45 #include <linux/smp.h>
46 #include <linux/compiler.h>
47 #include <linux/completion.h>
48 #include <linux/pid.h>
49 #include <linux/rcupdate.h>
50 #include <linux/rculist.h>
52 #include <linux/time.h>
53 #include <linux/timer.h>
54 #include <linux/hrtimer.h>
55 #include <linux/llist.h>
56 #include <linux/gfp.h>
58 #include <asm/processor.h>
60 #include <linux/spinlock.h>
62 #include <sys/param.h>
63 #include <sys/systm.h>
65 #include <sys/sched.h>
66 #include <sys/signal2.h>
68 #include <machine/cpu.h>
/*
 * Linux task states.  TASK_NORMAL matches either sleeping state and is
 * used by wakeup paths that do not care how the task went to sleep.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2

#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* Sentinel meaning "sleep forever" for schedule_timeout() */
#define MAX_SCHEDULE_TIMEOUT	LONG_MAX

/* Executable-name length, mapped to the native DragonFly limit */
#define TASK_COMM_LEN		MAXCOMLEN
83 struct thread
*dfly_td
;
85 struct mm_struct
*mm
; /* mirror copy in p->p_linux_mm */
88 /* kthread-specific data */
89 unsigned long kt_flags
;
90 int (*kt_fn
)(void *data
);
94 /* executable name without path */
95 char comm
[TASK_COMM_LEN
];
97 atomic_t usage_counter
;
99 struct spinlock kt_spin
;
/* Set the run state of the current task (no implied memory barrier). */
#define __set_current_state(state_value)	current->state = (state_value);

/*
 * NOTE(review): the original wrapper may have issued a memory barrier
 * after the store (Linux semantics) — confirm against the repository copy.
 */
#define set_current_state(state_value)		\
do {						\
	__set_current_state(state_value);	\
} while (0)
111 * schedule_timeout: puts the current thread to sleep until timeout
112 * if its state allows it to.
115 schedule_timeout(signed long timeout
)
117 unsigned long time_before
, time_after
;
122 kprintf("schedule_timeout(): timeout cannot be negative\n");
127 * Indefinite wait if timeout is MAX_SCHEDULE_TIMEOUT, but we are
128 * also translating to an integer. The first conditional will
129 * cover both but to code defensively test both.
131 if (timeout
>= INT_MAX
|| timeout
== MAX_SCHEDULE_TIMEOUT
)
136 spin_lock(¤t
->kt_spin
);
138 switch (current
->state
) {
139 case TASK_INTERRUPTIBLE
:
141 ssleep(current
, ¤t
->kt_spin
, PCATCH
, "lstim", timo
);
143 slept
= time_after
- time_before
;
144 ret
= timeout
- slept
;
148 case TASK_UNINTERRUPTIBLE
:
149 ssleep(current
, ¤t
->kt_spin
, 0, "lstim", timo
);
153 * Task has been flagged running before we could
156 * XXX should be able to remove this ssleep(), have it
157 * here to protect against live-locks in case we mess
158 * up the task->state.
160 ssleep(current
, ¤t
->kt_spin
, 0, "lst1", 1);
164 spin_unlock(¤t
->kt_spin
);
167 if (timeout
== MAX_SCHEDULE_TIMEOUT
)
168 ret
= MAX_SCHEDULE_TIMEOUT
;
170 current
->state
= TASK_RUNNING
;
177 (void)schedule_timeout(MAX_SCHEDULE_TIMEOUT
);
180 static inline signed long
181 schedule_timeout_uninterruptible(signed long timeout
)
183 __set_current_state(TASK_UNINTERRUPTIBLE
);
184 return schedule_timeout(timeout
);
/* No separate I/O wait accounting here: plain schedule_timeout(). */
static inline long
io_schedule_timeout(signed long timeout)
{
	return schedule_timeout(timeout);
}
194 * local_clock: fast time source, monotonic on the same cpu
196 static inline uint64_t
202 return (ts
.tv_sec
* NSEC_PER_SEC
) + ts
.tv_nsec
;
212 wake_up_process(struct task_struct
*tsk
)
217 * Among other things, this function is supposed to act as
221 spin_lock(&tsk
->kt_spin
);
223 tsk
->state
= TASK_RUNNING
;
224 spin_unlock(&tsk
->kt_spin
);
225 /* if (ostate != TASK_RUNNING) */
228 return 1; /* Always indicate the process was woken up */
232 signal_pending(struct task_struct
*p
)
234 struct thread
*t
= p
->dfly_td
;
236 /* Some kernel threads do not have lwp, t->td_lwp can be NULL */
237 if (t
->td_lwp
== NULL
)
240 return CURSIG(t
->td_lwp
);
244 fatal_signal_pending(struct task_struct
*p
)
246 struct thread
*t
= p
->dfly_td
;
247 sigset_t pending_set
;
249 /* Some kernel threads do not have lwp, t->td_lwp can be NULL */
250 if (t
->td_lwp
== NULL
)
253 pending_set
= lwp_sigpend(t
->td_lwp
);
254 return SIGISMEMBER(pending_set
, SIGKILL
);
258 signal_pending_state(long state
, struct task_struct
*p
)
260 if (state
& TASK_INTERRUPTIBLE
)
261 return (signal_pending(p
));
263 return (fatal_signal_pending(p
));
/* Explicit rescheduling in order to reduce latency */
/*
 * Deliver sig to process p.
 * NOTE(review): body lost in extraction; ksignal() is the natural
 * DragonFly mapping — confirm against the repository copy.  priv unused.
 */
static inline int
send_sig(int sig, struct proc *p, int priv)
{
	ksignal(p, sig);
	return 0;
}
static inline void
set_need_resched(void)
{
	/* do nothing for now */
	/* used on ttm_bo_reserve failures */
}
291 return any_resched_wanted();
/* Accept and ignore scheduling-policy requests (always "success"). */
static inline int
sched_setscheduler_nocheck(struct task_struct *ts,
			   int policy, const struct sched_param *param)
{
	/* We do not allow different thread scheduling policies */
	return 0;
}
303 pagefault_disabled(void)
305 return (curthread
->td_flags
& TDF_NOFAULT
);
309 mmgrab(struct mm_struct
*mm
)
311 atomic_inc(&mm
->mm_count
);
314 #endif /* _LINUX_SCHED_H_ */