#include <asm/param.h>	/* for HZ */

#include <linux/config.h>
#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/ptrace.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_IDLETASK	0x00001000	/* set if new pid should be 0 (kernel only) */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* parent wants no child-exit signal */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
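/*
 * Example (an illustrative sketch, not part of this header): callers
 * typically hand CLONE_KERNEL to kernel_thread() when spawning a
 * kernel-only thread, e.g.
 *
 *	pid = kernel_thread(worker_fn, NULL, CLONE_KERNEL);
 *
 * where worker_fn is a hypothetical int (*)(void *) function and
 * kernel_thread() is provided by the architecture code.
 */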
/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    the low order bits of the count.
 */
extern unsigned long avenrun[];		/* Load averages */
#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */
#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
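/*
 * Worked example (a sketch of the timer-tick usage; nr_active is a
 * hypothetical count of runnable tasks supplied by the caller): every
 * LOAD_FREQ ticks the kernel scales the count to fixed-point and folds
 * it into each average:
 *
 *	unsigned long active = nr_active * FIXED_1;
 *	CALC_LOAD(avenrun[0], EXP_1, active);	(1-minute average)
 *	CALC_LOAD(avenrun[1], EXP_5, active);	(5-minute average)
 *	CALC_LOAD(avenrun[2], EXP_15, active);	(15-minute average)
 *
 * Each step computes load = (load*exp + n*(FIXED_1-exp)) >> FSHIFT,
 * an exponentially-decaying average in 11-bit fixed point.
 */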
#define CT_TO_SECS(x)	((x) / HZ)
#define CT_TO_USECS(x)	(((x) % HZ) * 1000000/HZ)
extern int nr_threads;

DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>

#include <asm/processor.h>
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		4
#define TASK_ZOMBIE		8
#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	set_mb(current->state, (state_value))
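/*
 * Typical sleep/wake idiom (an illustrative sketch; `condition' stands
 * for whatever event the caller is waiting on):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * set_mb() implies a memory barrier, so the state change is visible
 * before another CPU tests it when delivering a wakeup.
 */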
/*
 * Scheduling policies
 */
#define SCHED_NORMAL	0

#include <linux/spinlock.h>
/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
typedef struct task_struct task_t;
extern void sched_init(void);
extern void init_idle(task_t *idle, int cpu);
extern void show_state(void);
extern void show_trace(unsigned long *stack);
extern void show_stack(unsigned long *stack);
extern void show_regs(struct pt_regs *);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void update_one_process(struct task_struct *p, unsigned long user,
			       unsigned long system, int cpu);
extern void scheduler_tick(int user_tick, int system);
extern unsigned long cache_decay_ticks;

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);
/* Maximum number of active map areas.. This is a random (large) number */
#define MAX_MAP_COUNT	(65536)

#include <linux/aio.h>
struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	unsigned long free_area_cache;		/* first hole */
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects task page tables and mm->rss */

	struct list_head mmlist;		/* List of all active mm's. These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long rss, total_vm, locked_vm;
	unsigned long def_flags;
	unsigned long cpu_vm_mask;
	unsigned long swap_address;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* coredumping support */
	struct completion *core_startup_done, core_done;

	rwlock_t ioctx_list_lock;
	struct kioctx *ioctx_list;

	struct kioctx default_kioctx;
};
extern int mmlist_nr;
struct signal_struct {
	struct k_sigaction action[_NSIG];

	/* current thread group signal load-balancing target: */

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* thread group exit support */
	struct task_struct *group_exit_task;
};
/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL tasks are
 * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values
 * are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space. This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
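/*
 * Worked example (the numbers follow from the defines above): MAX_PRIO
 * is 100 + 40 = 140, so RT tasks occupy prio 0..99 and SCHED_NORMAL
 * tasks prio 100..139. In the scheduler's nice-to-prio mapping,
 * nice -20 lands on prio 100, nice 0 on prio 120 and nice +19 on
 * prio 139.
 */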
/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */

	/* Hash table maintenance information */
	struct list_head uidhash_list;
};
#define get_current_user() ({ 				\
	struct user_struct *__user = current->user;	\
	atomic_inc(&__user->__count);			\
	__user; })
extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)
typedef struct prio_array prio_array_t;
struct backing_dev_info;
struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	struct thread_info *thread_info;

	unsigned long flags;	/* per process flags, defined below */
	unsigned long ptrace;

	int lock_depth;		/* Lock depth */

	int prio, static_prio;
	struct list_head run_list;

	unsigned long sleep_avg;
	unsigned long sleep_timestamp;

	unsigned long policy;
	unsigned long cpus_allowed;
	unsigned int time_slice, first_time_slice;

	struct list_head tasks;
	struct list_head ptrace_children;
	struct list_head ptrace_list;

	struct mm_struct *mm, *active_mm;

	struct linux_binfmt *binfmt;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */

	unsigned long personality;

	/* boolean value for session group leader */

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->parent->pid)
	 */
	struct task_struct *real_parent;	/* real parent process (when being debugged) */
	struct task_struct *parent;		/* parent process */
	struct list_head children;		/* list of my children */
	struct list_head sibling;		/* linkage in my parent's children list */
	struct task_struct *group_leader;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];

	wait_queue_head_t wait_chldexit;	/* for wait4() */
	struct completion *vfork_done;		/* for vfork() */
	int *set_child_tid;			/* CLONE_CHILD_SETTID */
	int *clear_child_tid;			/* CLONE_CHILD_CLEARTID */

	unsigned long rt_priority;
	unsigned long it_real_value, it_prof_value, it_virt_value;
	unsigned long it_real_incr, it_prof_incr, it_virt_incr;
	struct timer_list real_timer;
	unsigned long utime, stime, cutime, cstime;
	unsigned long start_time;
	/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
	/* process credentials */
	uid_t uid, euid, suid, fsuid;
	gid_t gid, egid, sgid, fsgid;
	gid_t groups[NGROUPS];
	kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
	int keep_capabilities:1;
	struct user_struct *user;

	struct rlimit rlim[RLIM_NLIMITS];
	unsigned short used_math;

	/* file system info */
	int link_count, total_link_count;
	struct tty_struct *tty;	/* NULL if no tty */
	unsigned int locks;	/* How many file locks are being held */

	struct sysv_sem sysvsem;
	/* CPU-specific state of this task */
	struct thread_struct thread;
	/* filesystem information */
	struct fs_struct *fs;
	/* open file information */
	struct files_struct *files;

	struct namespace *namespace;
	/* signal handlers */
	struct signal_struct *sig;

	sigset_t blocked, real_blocked;
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;

	int (*notifier)(void *priv);

	sigset_t *notifier_mask;

	/* Thread group tracking */

	/* Protection of (de-)allocation: mm, files, fs, tty */
	spinlock_t alloc_lock;
	/* context-switch lock */
	spinlock_t switch_lock;

	/* journalling filesystem info */

	struct dentry *proc_dentry;
	struct backing_dev_info *backing_dev_info;

	unsigned long ptrace_message;
};
extern void __put_task_struct(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
#define put_task_struct(tsk) \
do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
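/*
 * Illustrative sketch: code that keeps a task pointer beyond the
 * region covered by tasklist_lock pins it with a reference first:
 *
 *	get_task_struct(p);
 *	read_unlock(&tasklist_lock);
 *	...use p...
 *	put_task_struct(p);	(last reference frees p via __put_task_struct())
 */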
/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486 */
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_MEMDIE	0x00001000	/* Killed for out-of-memory */
#define PF_FLUSHER	0x00002000	/* responsible for disk writeback */

#define PF_FREEZE	0x00004000	/* this task should be frozen for suspend */
#define PF_IOTHREAD	0x00008000	/* this thread is needed for doing I/O to swap */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
/*
 * Ptrace flags
 */
#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD	0x00000004
#define PT_PTRACE_CAP	0x00000008	/* ptracer can follow suid-exec */
#define PT_TRACE_FORK	0x00000010
#define PT_TRACE_VFORK	0x00000020
#define PT_TRACE_CLONE	0x00000040
#define PT_TRACE_EXEC	0x00000080
#ifdef CONFIG_SMP
extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
#else
# define set_cpus_allowed(p, new_mask) do { } while (0)
#endif
#ifdef CONFIG_NUMA
extern void sched_balance_exec(void);
extern void node_nr_running_init(void);
#else
#define sched_balance_exec()   {}
#define node_nr_running_init() {}
#endif
extern void set_user_nice(task_t *p, long nice);
extern int task_prio(task_t *p);
extern int task_nice(task_t *p);
extern int task_curr(task_t *p);
extern int idle_cpu(int cpu);
/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;
#ifndef INIT_THREAD_SIZE
# define INIT_THREAD_SIZE	2048*sizeof(long)
#endif

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[INIT_THREAD_SIZE/sizeof(long)];
};
extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct task_struct *find_task_by_pid(int pid);
/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
extern void free_uid(struct user_struct *);
#include <asm/current.h>

extern unsigned long itimer_ticks;
extern unsigned long itimer_next;
extern void do_timer(struct pt_regs *);
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
extern void FASTCALL(sched_exit(task_t * p));

asmlinkage long sys_wait4(pid_t pid, unsigned int * stat_addr, int options, struct rusage * ru);
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern ATTRIB_NORET void cpu_idle(void);
extern void release_task(struct task_struct * p);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *);
extern void sig_exit(int, int, struct siginfo *);
extern int dequeue_signal(sigset_t *mask, siginfo_t *info);
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_sl_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void notify_parent(struct task_struct *, int);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int __broadcast_thread_group(struct task_struct *p, int sig);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
/* True if we are on the alternate signal stack. */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}
#ifdef CONFIG_SECURITY
/* code is in security.c */
extern int capable(int cap);
#else
static inline int capable(int cap)
{
	if (cap_raised(current->cap_effective, cap)) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
#endif
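/*
 * Typical usage (an illustrative sketch): privileged operations bail
 * out early when the capability check fails, e.g.
 *
 *	if (!capable(CAP_SYS_ADMIN))
 *		return -EPERM;
 *
 * A successful check sets PF_SUPERPRIV as a side effect, recording
 * that the process used super-user privileges.
 */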
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

extern struct mm_struct * start_lazy_tlb(void);
extern void end_lazy_tlb(struct mm_struct *mm);
/* mmdrop drops the mm and the page tables */
extern inline void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *);
extern void reparent_to_init(void);
extern void daemonize(void);
extern task_t *child_reaper;

extern int do_execve(char *, char **, char **, struct pt_regs *);
extern struct task_struct *do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *);

#ifdef CONFIG_SMP
extern void wait_task_inactive(task_t * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif
extern void kick_if_running(task_t * p);
#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p, parent)	list_add_tail(&(p)->sibling,&(parent)->children)

#define REMOVE_LINKS(p) do {			\
	if (thread_group_leader(p))		\
		list_del_init(&(p)->tasks);	\
	remove_parent(p);			\
	} while (0)

#define SET_LINKS(p) do {			\
	if (thread_group_leader(p))		\
		list_add_tail(&(p)->tasks,&init_task.tasks);	\
	add_parent(p, (p)->parent);		\
	} while (0)
#define next_task(p)	list_entry((p)->tasks.next, struct task_struct, tasks)
#define prev_task(p)	list_entry((p)->tasks.prev, struct task_struct, tasks)
#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
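/*
 * Illustrative sketch: walking every process while holding the
 * tasklist lock for reading:
 *
 *	struct task_struct *p;
 *
 *	read_lock(&tasklist_lock);
 *	for_each_process(p)
 *		printk("pid %d\n", p->pid);
 *	read_unlock(&tasklist_lock);
 */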
/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
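/*
 * Illustrative sketch (target is a hypothetical pid): note the
 * matching macro pair, and the goto rather than break to leave the
 * double loop early:
 *
 *	task_t *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (t->pid == target)
 *			goto found;
 *	} while_each_thread(g, t);
 * found:
 *	read_unlock(&tasklist_lock);
 */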
extern task_t * FASTCALL(next_thread(task_t *p));

#define thread_group_leader(p)	(p->pid == p->tgid)
static inline int thread_group_empty(task_t *p)
{
	struct pid *pid = p->pids[PIDTYPE_TGID].pidptr;

	return pid->task_list.next->next == &pid->task_list;
}
#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))
extern void unhash_process(struct task_struct *p);
/* Protects ->fs, ->files, ->mm, and synchronises with wait4(). Nests inside tasklist_lock */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm. User must release
 * the mm via mmput() after use.
 */
static inline struct mm_struct * get_task_mm(struct task_struct * task)
{
	struct mm_struct * mm;

	task_lock(task);
	mm = task->mm;
	if (mm)
		atomic_inc(&mm->mm_users);
	task_unlock(task);

	return mm;
}
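/*
 * Illustrative sketch of the acquire/release pairing the comment above
 * requires:
 *
 *	struct mm_struct *mm = get_task_mm(p);
 *
 *	if (mm) {
 *		...inspect mm under our own mm_users reference...
 *		mmput(mm);
 *	}
 */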
/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(tsk->thread_info,flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(tsk->thread_info,flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(tsk->thread_info,flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(tsk->thread_info,flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(tsk->thread_info,flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
extern void __cond_resched(void);
static inline void cond_resched(void)
{
	if (need_resched())
		__cond_resched();
}
/*
 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
static inline void cond_resched_lock(spinlock_t * lock)
{
	if (need_resched()) {
		_raw_spin_unlock(lock);
		preempt_enable_no_resched();
		__cond_resched();
		spin_lock(lock);
	}
}
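/*
 * Illustrative sketch (my_lock and the work helpers are hypothetical):
 * a long loop under a spinlock can yield the CPU periodically:
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_one_unit();
 *		cond_resched_lock(&my_lock);
 *	}
 *	spin_unlock(&my_lock);
 *
 * The lock is always held again by the time cond_resched_lock()
 * returns, so the loop invariant is preserved.
 */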
/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   Callers must hold sig->siglock.  */

extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
extern void recalc_sigpending(void);
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(struct task_struct *p)
{
	return p->thread_info->cpu;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	p->thread_info->cpu = cpu;
}

#else

static inline unsigned int task_cpu(struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */
#endif /* __KERNEL__ */