/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  'fork.c' contains the help-routines for the 'fork' system call
 *  (see also entry.S and others).
 *  Fork is rather simple, once you get the hang of it, but the memory
 *  management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK
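
/*
 * Note: the cap above comes from the futex code; a task's ID must fit in
 * the TID field of a robust-futex word, so the thread count is bounded by
 * FUTEX_TID_MASK.
 */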
/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}
void __weak arch_release_task_struct(struct task_struct *tsk)
{
}
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif
void __weak arch_release_thread_stack(unsigned long *stack)
{
}

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);
		if (!s)
			continue;

		tsk->stack_vm_area = s;
		return s->addr;
	}

	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP, PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
#endif
}
static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	if (task_stack_vm_area(tsk)) {
		int i;

		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_cmpxchg(cached_stacks[i],
					NULL, tsk->stack_vm_area) != NULL)
				continue;

			return;
		}

		vfree_atomic(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
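
/*
 * Charge (account == 1) or uncharge (account == -1) the pages backing a
 * task's kernel stack against the NR_KERNEL_STACK_KB zone counter and the
 * owning memcg's kernel-stack statistic.
 */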
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			mod_zone_page_state(page_zone(vm->pages[i]),
					    NR_KERNEL_STACK_KB,
					    PAGE_SIZE / 1024 * account);
		}

		/* All stack pages belong to the same memcg. */
		mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
				     account * (THREAD_SIZE / 1024));
	} else {
		/*
		 * All stack pages are in the same zone and belong to the
		 * same memcg.
		 */
		struct page *first_page = virt_to_page(stack);

		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
				    THREAD_SIZE / 1024 * account);

		mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
				     account * (THREAD_SIZE / 1024));
	}
}
static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	arch_release_thread_stack(tsk->stack);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}
#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif
void free_task(struct task_struct *tsk)
{
#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	cgroup_free(tsk);
	task_numa_free(tsk);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);
void __init __weak arch_task_cache_init(void) { }
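
/*
 * set_max_threads() derives the global thread limit from available memory:
 * thread structures may use roughly one eighth of RAM, clamped to the
 * [MIN_THREADS, MAX_THREADS] range and to the caller's suggestion.
 */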
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif
void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);

	/* create a slab on which task_structs can be allocated */
	task_struct_cachep = kmem_cache_create("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++) {
		init_user_ns.ucount_max[i] = max_threads/2;
	}

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	lockdep_init_task(&init_task);
}
int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}
void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}
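
/*
 * dup_task_struct() allocates a task_struct and a kernel stack on the
 * requested NUMA node and copies the parent's task_struct into it; the new
 * task is not yet linked anywhere and is not runnable at this point.
 */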
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	atomic_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;

	account_kernel_stack(tsk, 1);

	kcov_task_init(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}
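
/*
 * dup_mmap() below walks the parent's VMA list and duplicates every mapping
 * into the child mm: VM_DONTCOPY areas are skipped, file-backed VMAs are
 * linked into the file's interval tree, and copy_page_range() copies the
 * page tables (typically sharing pages copy-on-write).
 */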
#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);

	uprobe_start_dup_mmap();
	if (down_write_killable(&oldmm->mmap_sem)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;

fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	up_write(&oldmm->mmap_sem);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);
#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_mm_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}
static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

	if (atomic_long_read(&mm->nr_ptes))
		pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
				atomic_long_read(&mm->nr_ptes));
	if (mm_nr_pmds(mm))
		pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
				mm_nr_pmds(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	set_bit(MMF_OOM_SKIP, &mm->flags);
	mmdrop(mm);
}
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);
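
/*
 * mmput_async() drops an mm reference from contexts that must not sleep by
 * deferring the final __mmput() to a workqueue.
 */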
#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
#endif
/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve task is single
 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file)
		get_file(new_exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file)
		fput(old_exe_file);
}
/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}
EXPORT_SYMBOL(get_mm_exe_file);
/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
EXPORT_SYMBOL(get_task_exe_file);
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			mmget(mm);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}
static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk, mm->user_ns))
		goto fail_nomem;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;
}
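
/*
 * copy_mm() decides whether the child shares the parent's mm (CLONE_VM,
 * i.e. threads) or receives a full copy via dup_mm().
 */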
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	/* initialize the new vmacache entries */
	vmacache_flush(tsk);

	if (clone_flags & CLONE_VM) {
		mmget(oldmm);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;
	struct io_context *new_ioc;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		ioc_task_link(ioc);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
		if (unlikely(!new_ioc))
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
		put_io_context(new_ioc);
	}
#endif
	return 0;
}
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;

	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		/*
		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
		 * without an RCU grace period, see __lock_task_sighand().
		 */
		kmem_cache_free(sighand_cachep, sighand);
	}
}
#ifdef CONFIG_POSIX_TIMERS
/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
		sig->cputimer.running = true;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
#else
static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
#endif
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	seqlock_init(&sig->stats_lock);
	prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
	INIT_LIST_HEAD(&sig->posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;
#endif

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}
static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying thread
	 * flags and before we took the sighand lock, we have
	 * to manually enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_tsk_thread_flag(p, TIF_SECCOMP);
#endif
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}
static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	p->pi_waiters = RB_ROOT;
	p->pi_waiters_leftmost = NULL;
	p->pi_top_task = NULL;
	p->pi_blocked_on = NULL;
#endif
}
#ifdef CONFIG_POSIX_TIMERS
/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = 0;
	tsk->cputime_expires.virt_exp = 0;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
#else
static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
#endif
static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	task->pids[type].pid = pid;
}
static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static __latent_entropy struct task_struct *copy_process(
					unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace,
					unsigned long tls,
					int node)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group with the forking task.
	 */
	if (clone_flags & CLONE_THREAD) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) !=
				current->nsproxy->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current, node);
	if (!p)
		goto fork_out;

	/*
	 * This _must_ happen before we call free_task(), i.e. before we jump
	 * to any of the bad_fork_* labels. This is to avoid freeing
	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
	 * kernel threads (PF_KTHREAD).
	 */
	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	p->utimescaled = p->stimescaled = 0;
#endif
	prev_cputime_init(&p->prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_init(&p->vtime.seqcount);
	p->vtime.starttime = 0;
	p->vtime.state = VTIME_INACTIVE;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->start_time = ktime_get_ns();
	p->real_start_time = ktime_get_boot_ns();
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_threadgroup_lock;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif

	p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
	lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = security_task_alloc(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_security;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_thread;
		}
	}

#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		sas_ss_reset(p);

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->exit_signal = -1;
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		if (clone_flags & CLONE_PARENT)
			p->exit_signal = current->group_leader->exit_signal;
		else
			p->exit_signal = (clone_flags & CSIGNAL);
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;

	cgroup_threadgroup_change_begin(current);
	/*
	 * Ensure that the cgroup subsystem policies allow the new process to be
	 * forked. It should be noted that the new process's css_set can be changed
	 * between here and cgroup_post_fork() if an organisation operation is in
	 * progress.
	 */
	retval = cgroup_can_fork(p);
	if (retval)
		goto bad_fork_free_pid;

	/*
	 * Make it visible to the rest of the system, but dont wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	klp_copy_process(p);

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		retval = -ERESTARTNOINTR;
		goto bad_fork_cancel_cgroup;
	}
	if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
		retval = -ENOMEM;
		goto bad_fork_cancel_cgroup;
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	cgroup_threadgroup_change_end(current);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	return p;

bad_fork_cancel_cgroup:
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p);
bad_fork_free_pid:
	cgroup_threadgroup_change_end(current);
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	p->state = TASK_DEAD;
	put_task_stack(p);
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node); /* not really needed */
		links[type].pid = &init_struct_pid;
	}
}
struct task_struct *fork_idle(int cpu)
{
	struct task_struct *task;
	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
			    cpu_to_node(cpu));
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long _do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr,
	      unsigned long tls)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
	add_latent_entropy();
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
#ifndef CONFIG_HAVE_COPY_THREAD_TLS
/* For compatibility with architectures that call do_fork directly rather than
 * using the syscall entry points below. */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	return _do_fork(clone_flags, stack_start, stack_size,
			parent_tidptr, child_tidptr, 0);
}
#endif
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
		(unsigned long)arg, NULL, NULL, 0);
}
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
#else
	/* can not support in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
			0, NULL, NULL, 0);
}
#endif
#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
}
#endif
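
/*
 * Note: the CLONE_BACKWARDS* variants above only differ in the order of the
 * newsp/flags/tid/tls arguments expected by legacy architectures; all of
 * them end up in the same _do_fork() call.
 */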
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;
				leader = child;
				goto down;
			}
		}
	}

up:
	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto up;
	}
out:
	read_unlock(&tasklist_lock);
}
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPU's we can ever have.  The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
	mmap_init();
	nsproxy_cache_init();
}
/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare.  Note that unsharing the address space or the
	 * signal handlers also need to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (atomic_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}
/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}
/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing a signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}
int sysctl_max_threads(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = MIN_THREADS;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_max_threads(threads);

	return 0;
}