sys/dev/drm/include/linux/sched.h

/*
 * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
 * Copyright (c) 2019-2020 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SCHED_H_
#define _LINUX_SCHED_H_

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>

#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>

#include <linux/time.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/llist.h>
#include <linux/gfp.h>

#include <asm/processor.h>

#include <linux/spinlock.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signal2.h>

#include <machine/cpu.h>

struct seq_file;

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2

#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

#define MAX_SCHEDULE_TIMEOUT	LONG_MAX

#define TASK_COMM_LEN		MAXCOMLEN

struct task_struct {
	struct thread *dfly_td;
	volatile long state;
	struct mm_struct *mm;	/* mirror copy in p->p_linux_mm */
	int prio;

	/* kthread-specific data */
	unsigned long kt_flags;
	int (*kt_fn)(void *data);
	void *kt_fndata;
	int kt_exitvalue;

	/* executable name without path */
	char comm[TASK_COMM_LEN];

	atomic_t usage_counter;
	pid_t pid;
	struct spinlock kt_spin;
};

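/*
 * __set_current_state() updates the task state with a plain store;
 * set_current_state() additionally issues mb() so the state change is
 * visible before any following condition test.
 */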
#define __set_current_state(state_value)	current->state = (state_value);

#define set_current_state(state_value)		\
do {						\
	__set_current_state(state_value);	\
	mb();					\
} while (0)

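/*
 * Typical caller pattern (an illustrative sketch only, not part of this
 * header): mark the task interruptible, re-check the wakeup condition,
 * then sleep with a timeout.  A return value of 0 from schedule_timeout()
 * means the full timeout elapsed.  "my_condition" and "my_timeout" are
 * hypothetical placeholders.
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!my_condition) {
 *		if (schedule_timeout(my_timeout) == 0)
 *			break;
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */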
/*
 * schedule_timeout: puts the current thread to sleep until the timeout
 * expires, if its state allows it to.
 */
static inline long
schedule_timeout(signed long timeout)
{
	unsigned long time_before, time_after;
	long slept, ret = 0;
	int timo;

	if (timeout < 0) {
		kprintf("schedule_timeout(): timeout cannot be negative\n");
		goto done;
	}

	/*
	 * MAX_SCHEDULE_TIMEOUT means an indefinite wait, and we also have
	 * to narrow the timeout to an int.  The first conditional covers
	 * both cases, but test both to be defensive.
	 */
	if (timeout >= INT_MAX || timeout == MAX_SCHEDULE_TIMEOUT)
		timo = 0;
	else
		timo = timeout;

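	/*
	 * kt_spin is passed to ssleep() as an interlock: the state test
	 * below and the actual sleep happen atomically with respect to
	 * wake_up_process(), which takes the same spinlock before setting
	 * the state back to TASK_RUNNING.
	 */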
	spin_lock(&current->kt_spin);

	switch (current->state) {
	case TASK_INTERRUPTIBLE:
		time_before = ticks;
		ssleep(current, &current->kt_spin, PCATCH, "lstim", timo);
		time_after = ticks;
		slept = time_after - time_before;
		ret = timeout - slept;
		if (ret < 0)
			ret = 0;
		break;
	case TASK_UNINTERRUPTIBLE:
		ssleep(current, &current->kt_spin, 0, "lstim", timo);
		break;
	default:
		/*
		 * Task has been flagged running before we could
		 * enter the sleep.
		 *
		 * XXX should be able to remove this ssleep(), have it
		 * here to protect against live-locks in case we mess
		 * up the task->state.
		 */
		ssleep(current, &current->kt_spin, 0, "lst1", 1);
		break;
	}

	spin_unlock(&current->kt_spin);

done:
	if (timeout == MAX_SCHEDULE_TIMEOUT)
		ret = MAX_SCHEDULE_TIMEOUT;

	current->state = TASK_RUNNING;
	return ret;
}

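/*
 * schedule: sleep indefinitely, honoring the current task state.
 */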
static inline void
schedule(void)
{
	(void)schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

static inline signed long
schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}

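/*
 * io_schedule_timeout: identical to schedule_timeout() here; no separate
 * I/O-wait accounting is done.
 */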
static inline long
io_schedule_timeout(signed long timeout)
{
	return schedule_timeout(timeout);
}

/*
 * local_clock: fast time source, monotonic on the same cpu
 */
static inline uint64_t
local_clock(void)
{
	struct timespec ts;

	getnanouptime(&ts);
	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
}

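/*
 * yield: voluntarily give up the cpu via the LWKT scheduler.
 */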
static inline void
yield(void)
{
	lwkt_yield();
}

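/*
 * wake_up_process: force the task back to TASK_RUNNING and wake any
 * sleeper blocked on it.
 */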
static inline int
wake_up_process(struct task_struct *tsk)
{
	long ostate;

	/*
	 * Among other things, this function is supposed to act as
	 * a barrier
	 */
	smp_wmb();
	spin_lock(&tsk->kt_spin);
	ostate = tsk->state;
	tsk->state = TASK_RUNNING;
	spin_unlock(&tsk->kt_spin);
	/* if (ostate != TASK_RUNNING) */
	wakeup(tsk);

	return 1;	/* Always indicate the process was woken up */
}

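/*
 * signal_pending: check for a pending signal via CURSIG(); kernel threads
 * without an lwp never have signals pending.
 */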
static inline int
signal_pending(struct task_struct *p)
{
	struct thread *t = p->dfly_td;

	/* Some kernel threads do not have lwp, t->td_lwp can be NULL */
	if (t->td_lwp == NULL)
		return 0;

	return CURSIG(t->td_lwp);
}

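/*
 * fatal_signal_pending: only a pending SIGKILL counts as fatal.
 */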
static inline int
fatal_signal_pending(struct task_struct *p)
{
	struct thread *t = p->dfly_td;
	sigset_t pending_set;

	/* Some kernel threads do not have lwp, t->td_lwp can be NULL */
	if (t->td_lwp == NULL)
		return 0;

	pending_set = lwp_sigpend(t->td_lwp);
	return SIGISMEMBER(pending_set, SIGKILL);
}

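/*
 * signal_pending_state: interruptible sleeps react to any pending signal,
 * all other states only to SIGKILL.
 */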
static inline int
signal_pending_state(long state, struct task_struct *p)
{
	if (state & TASK_INTERRUPTIBLE)
		return (signal_pending(p));
	else
		return (fatal_signal_pending(p));
}

/* Explicit rescheduling in order to reduce latency */
static inline int
cond_resched(void)
{
	lwkt_yield();
	return 0;
}

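/*
 * send_sig: deliver a signal to a process; the priv argument is ignored.
 * Note that this shim takes a struct proc rather than a task_struct.
 */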
static inline int
send_sig(int sig, struct proc *p, int priv)
{
	ksignal(p, sig);
	return 0;
}

static inline void
set_need_resched(void)
{
	/* do nothing for now */
	/* used on ttm_bo_reserve failures */
}

static inline bool
need_resched(void)
{
	return any_resched_wanted();
}

static inline int
sched_setscheduler_nocheck(struct task_struct *ts,
			   int policy, const struct sched_param *param)
{
	/* We do not allow different thread scheduling policies */
	return 0;
}

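/*
 * pagefault_disabled: true while the current thread runs in a no-fault
 * section (TDF_NOFAULT).
 */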
static inline int
pagefault_disabled(void)
{
	return (curthread->td_flags & TDF_NOFAULT);
}

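/*
 * mmgrab: take an additional reference on the mm_struct.
 */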
static inline void
mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

#endif	/* _LINUX_SCHED_H_ */