// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
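
/*
 * Illustrative only: a minimal userspace sketch of entering each mode via
 * prctl(2).  Constants come from the UAPI headers; error handling is
 * omitted and @prog stands in for a caller-populated struct sock_fprog.
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);		// mode 1
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);	// mode 2
 */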
#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
        refcount_t usage;
        bool log;
        struct seccomp_filter *prev;
        struct bpf_prog *prog;
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
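
/*
 * For reference: struct sock_filter is 8 bytes in the classic BPF UAPI
 * (u16 code, u8 jt, u8 jf, u32 k), so the cap above works out to
 * (1 << 18) / 8 = 32768 instructions along any path through the tree.
 */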

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
        struct task_struct *task = current;
        struct pt_regs *regs = task_pt_regs(task);
        unsigned long args[6];

        sd->nr = syscall_get_nr(task, regs);
        sd->arch = syscall_get_arch();
        syscall_get_arguments(task, regs, 0, 6, args);
        sd->args[0] = args[0];
        sd->args[1] = args[1];
        sd->args[2] = args[2];
        sd->args[3] = args[3];
        sd->args[4] = args[4];
        sd->args[5] = args[5];
        sd->instruction_pointer = KSTK_EIP(task);
}
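
/*
 * For reference, the UAPI layout being populated above (<linux/seccomp.h>):
 *
 *	struct seccomp_data {
 *		int nr;
 *		__u32 arch;
 *		__u64 instruction_pointer;
 *		__u64 args[6];
 *	};
 */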

/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
        int pc;

        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
                u16 code = ftest->code;
                u32 k = ftest->k;

                switch (code) {
                case BPF_LD | BPF_W | BPF_ABS:
                        ftest->code = BPF_LDX | BPF_W | BPF_ABS;
                        /* 32-bit aligned and not out of bounds. */
                        if (k >= sizeof(struct seccomp_data) || k & 3)
                                return -EINVAL;
                        continue;
                case BPF_LD | BPF_W | BPF_LEN:
                        ftest->code = BPF_LD | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                case BPF_LDX | BPF_W | BPF_LEN:
                        ftest->code = BPF_LDX | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                /* Explicitly include allowed calls. */
                case BPF_RET | BPF_K:
                case BPF_RET | BPF_A:
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_NEG:
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
                case BPF_MISC | BPF_TAX:
                case BPF_MISC | BPF_TXA:
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
                case BPF_STX:
                case BPF_JMP | BPF_JA:
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        continue;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}
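
/*
 * A minimal sketch of a classic BPF program that passes the checks above:
 * the only load is a 32-bit aligned read within struct seccomp_data, and
 * both exits are RET instructions.  AUDIT_ARCH_X86_64 stands in for the
 * caller's architecture.
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, arch)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 */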

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
static u32 seccomp_run_filters(const struct seccomp_data *sd,
                               struct seccomp_filter **match)
{
        struct seccomp_data sd_local;
        u32 ret = SECCOMP_RET_ALLOW;
        /* Make sure cross-thread synced filter points somewhere sane. */
        struct seccomp_filter *f =
                        READ_ONCE(current->seccomp.filter);

        /* Ensure unexpected behavior doesn't result in failing open. */
        if (unlikely(WARN_ON(f == NULL)))
                return SECCOMP_RET_KILL_PROCESS;

        if (!sd) {
                populate_seccomp_data(&sd_local);
                sd = &sd_local;
        }

        /*
         * All filters in the list are evaluated and the lowest BPF return
         * value always takes priority (ignoring the DATA).
         */
        for (; f; f = f->prev) {
                u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

                if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
                        ret = cur_ret;
                        *match = f;
                }
        }
        return ret;
}
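
/*
 * Because ACTION_ONLY() compares as s32, the UAPI action values order
 * themselves from most to least restrictive:
 *
 *	KILL_PROCESS < KILL_THREAD < TRAP < ERRNO < TRACE < LOG < ALLOW
 *
 * e.g. if one filter in the list returns SECCOMP_RET_ERRNO and another
 * returns SECCOMP_RET_ALLOW, the syscall fails with the errno.
 */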
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
        assert_spin_locked(&current->sighand->siglock);

        if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
                return false;

        return true;
}

void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }

static inline void seccomp_assign_mode(struct task_struct *task,
                                       unsigned long seccomp_mode,
                                       unsigned long flags)
{
        assert_spin_locked(&task->sighand->siglock);

        task->seccomp.mode = seccomp_mode;
        /*
         * Make sure TIF_SECCOMP cannot be set before the mode (and
         * filter) is set.
         */
        smp_mb__before_atomic();
        /* Assume default seccomp processes want spec flaw mitigation. */
        if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
                arch_seccomp_spec_mitigate(task);
        set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
                       struct seccomp_filter *child)
{
        /* NULL is the root ancestor. */
        if (parent == NULL)
                return 1;
        for (; child; child = child->prev)
                if (child == parent)
                        return 1;
        return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or it did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Validate all threads being eligible for synchronization. */
        caller = current;
        for_each_thread(caller, thread) {
                pid_t failed;

                /* Skip current, since it is initiating the sync. */
                if (thread == caller)
                        continue;

                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
                    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
                     is_ancestor(thread->seccomp.filter,
                                 caller->seccomp.filter)))
                        continue;

                /* Return the first thread that cannot be synchronized. */
                failed = task_pid_vnr(thread);
                /* If the pid cannot be resolved, then return -ESRCH */
                if (unlikely(WARN_ON(failed == 0)))
                        failed = -ESRCH;
                return failed;
        }

        return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 *
 */
static inline void seccomp_sync_threads(unsigned long flags)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Synchronize all threads. */
        caller = current;
        for_each_thread(caller, thread) {
                /* Skip current, since it needs no changes. */
                if (thread == caller)
                        continue;

                /* Get a task reference for the new leaf node. */
                get_seccomp_filter(caller);
                /*
                 * Drop the task reference to the shared ancestor since
                 * current's path will hold a reference.  (This also
                 * allows a put before the assignment.)
                 */
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);

                /*
                 * Don't let an unprivileged task work around
                 * the no_new_privs restriction by creating
                 * a thread that sets it up, enters seccomp,
                 * then dies.
                 */
                if (task_no_new_privs(caller))
                        task_set_no_new_privs(thread);

                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
                                            flags);
        }
}
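
/*
 * A hedged userspace sketch of requesting this synchronization: installing
 * a filter with the TSYNC flag pulls every other thread into filter mode.
 * On conflict, seccomp(2) returns the TID of a thread that could not be
 * synchronized.  @prog stands in for a populated struct sock_fprog.
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_TSYNC, &prog);
 */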

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
        struct seccomp_filter *sfilter;
        int ret;
        const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);

        BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

        /*
         * Installing a seccomp filter requires that the task has
         * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
         * This avoids scenarios where unprivileged tasks can affect the
         * behavior of privileged children.
         */
        if (!task_no_new_privs(current) &&
            security_capable_noaudit(current_cred(), current_user_ns(),
                                     CAP_SYS_ADMIN) != 0)
                return ERR_PTR(-EACCES);

        /* Allocate a new seccomp_filter */
        sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
        if (!sfilter)
                return ERR_PTR(-ENOMEM);

        ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
                                        seccomp_check_filter, save_orig);
        if (ret < 0) {
                kfree(sfilter);
                return ERR_PTR(ret);
        }

        refcount_set(&sfilter->usage, 1);

        return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns 0 on success and non-zero otherwise.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
        struct sock_fprog fprog;
        struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
        if (in_compat_syscall()) {
                struct compat_sock_fprog fprog32;
                if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
                        goto out;
                fprog.len = fprog32.len;
                fprog.filter = compat_ptr(fprog32.filter);
        } else /* falls through to the if below. */
#endif
        if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
                goto out;
        filter = seccomp_prepare_filter(&fprog);
out:
        return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
                                  struct seccomp_filter *filter)
{
        unsigned long total_insns;
        struct seccomp_filter *walker;

        assert_spin_locked(&current->sighand->siglock);

        /* Validate resulting filter length. */
        total_insns = filter->prog->len;
        for (walker = current->seccomp.filter; walker; walker = walker->prev)
                total_insns += walker->prog->len + 4;  /* 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return -ENOMEM;

        /* If thread sync has been requested, check that it is possible. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
                int ret;

                ret = seccomp_can_sync_threads();
                if (ret)
                        return ret;
        }

        /* Set log flag, if present. */
        if (flags & SECCOMP_FILTER_FLAG_LOG)
                filter->log = true;

        /*
         * If there is an existing filter, make it the prev and don't drop its
         * task reference.
         */
        filter->prev = current->seccomp.filter;
        current->seccomp.filter = filter;

        /* Now that the new filter is in place, synchronize to all threads. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                seccomp_sync_threads(flags);

        return 0;
}

static void __get_seccomp_filter(struct seccomp_filter *filter)
{
        /* Reference count is bounded by the number of total processes. */
        refcount_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;
        if (!orig)
                return;
        __get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
        if (filter) {
                bpf_prog_destroy(filter->prog);
                kfree(filter);
        }
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
        /* Clean up single-reference branches iteratively. */
        while (orig && refcount_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
                seccomp_filter_free(freeme);
        }
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
        __put_seccomp_filter(tsk->seccomp.filter);
}

static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
        clear_siginfo(info);
        info->si_signo = SIGSYS;
        info->si_code = SYS_SECCOMP;
        info->si_call_addr = (void __user *)KSTK_EIP(current);
        info->si_errno = reason;
        info->si_arch = syscall_get_arch();
        info->si_syscall = syscall;
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
        struct siginfo info;
        seccomp_init_siginfo(&info, syscall, reason);
        force_sig_info(SIGSYS, &info, current);
}
#endif  /* CONFIG_SECCOMP_FILTER */
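
/*
 * A sketch of the userspace side of SECCOMP_RET_TRAP: a SIGSYS handler
 * installed with SA_SIGINFO can recover the syscall number and the
 * filter-supplied reason from the siginfo fields initialized above.
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		if (info->si_code == SYS_SECCOMP) {
 *			int nr = info->si_syscall;	// denied syscall
 *			int reason = info->si_errno;	// filter's 16-bit data
 *		}
 *	}
 */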

/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL_PROCESS        (1 << 0)
#define SECCOMP_LOG_KILL_THREAD         (1 << 1)
#define SECCOMP_LOG_TRAP                (1 << 2)
#define SECCOMP_LOG_ERRNO               (1 << 3)
#define SECCOMP_LOG_TRACE               (1 << 4)
#define SECCOMP_LOG_LOG                 (1 << 5)
#define SECCOMP_LOG_ALLOW               (1 << 6)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
                                    SECCOMP_LOG_KILL_THREAD  |
                                    SECCOMP_LOG_TRAP  |
                                    SECCOMP_LOG_ERRNO |
                                    SECCOMP_LOG_TRACE |
                                    SECCOMP_LOG_LOG;

static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
                               bool requested)
{
        bool log = false;

        switch (action) {
        case SECCOMP_RET_ALLOW:
                break;
        case SECCOMP_RET_TRAP:
                log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
                break;
        case SECCOMP_RET_ERRNO:
                log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
                break;
        case SECCOMP_RET_TRACE:
                log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
                break;
        case SECCOMP_RET_LOG:
                log = seccomp_actions_logged & SECCOMP_LOG_LOG;
                break;
        case SECCOMP_RET_KILL_THREAD:
                log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
                break;
        case SECCOMP_RET_KILL_PROCESS:
        default:
                log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
        }

        /*
         * Emit an audit message when the action is RET_KILL_*, RET_LOG, or the
         * FILTER_FLAG_LOG bit was set.  The admin has the ability to silence
         * any action from being logged by removing the action name from the
         * seccomp_actions_logged sysctl.
         */
        if (!log)
                return;

        audit_seccomp(syscall, signr, action);
}

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
        __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
        0, /* null terminated */
};

static void __secure_computing_strict(int this_syscall)
{
        const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
        if (in_compat_syscall())
                syscall_whitelist = get_compat_mode1_syscalls();
#endif
        do {
                if (*syscall_whitelist == this_syscall)
                        return;
        } while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
        dump_stack();
#endif
        seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
        do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
        int mode = current->seccomp.mode;

        if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
            unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
                return;

        if (mode == SECCOMP_MODE_DISABLED)
                return;
        else if (mode == SECCOMP_MODE_STRICT)
                __secure_computing_strict(this_syscall);
        else
                BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                            const bool recheck_after_trace)
{
        u32 filter_ret, action;
        struct seccomp_filter *match = NULL;
        int data;

        /*
         * Make sure that any changes to mode from another thread have
         * been seen after TIF_SECCOMP was seen.
         */
        rmb();

        filter_ret = seccomp_run_filters(sd, &match);
        data = filter_ret & SECCOMP_RET_DATA;
        action = filter_ret & SECCOMP_RET_ACTION_FULL;

        switch (action) {
        case SECCOMP_RET_ERRNO:
                /* Set low-order bits as an errno, capped at MAX_ERRNO. */
                if (data > MAX_ERRNO)
                        data = MAX_ERRNO;
                syscall_set_return_value(current, task_pt_regs(current),
                                         -data, 0);
                goto skip;

        case SECCOMP_RET_TRAP:
                /* Show the handler the original registers. */
                syscall_rollback(current, task_pt_regs(current));
                /* Let the filter pass back 16 bits of data. */
                seccomp_send_sigsys(this_syscall, data);
                goto skip;

        case SECCOMP_RET_TRACE:
                /* We've been put in this state by the ptracer already. */
                if (recheck_after_trace)
                        return 0;

                /* ENOSYS these calls if there is no tracer attached. */
                if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
                        syscall_set_return_value(current,
                                                 task_pt_regs(current),
                                                 -ENOSYS, 0);
                        goto skip;
                }

                /* Allow the BPF to provide the event message */
                ptrace_event(PTRACE_EVENT_SECCOMP, data);
                /*
                 * The delivery of a fatal signal during event
                 * notification may silently skip tracer notification,
                 * which could leave us with a potentially unmodified
                 * syscall that the tracer would have liked to have
                 * changed. Since the process is about to die, we just
                 * force the syscall to be skipped and let the signal
                 * kill the process and correctly handle any tracer exit
                 * notifications.
                 */
                if (fatal_signal_pending(current))
                        goto skip;
                /* Check if the tracer forced the syscall to be skipped. */
                this_syscall = syscall_get_nr(current, task_pt_regs(current));
                if (this_syscall < 0)
                        goto skip;

                /*
                 * Recheck the syscall, since it may have changed. This
                 * intentionally uses a NULL struct seccomp_data to force
                 * a reload of all registers. This does not goto skip since
                 * a skip would have already been reported.
                 */
                if (__seccomp_filter(this_syscall, NULL, true))
                        return -1;

                return 0;

        case SECCOMP_RET_LOG:
                seccomp_log(this_syscall, 0, action, true);
                return 0;

        case SECCOMP_RET_ALLOW:
                /*
                 * Note that the "match" filter will always be NULL for
                 * this action since SECCOMP_RET_ALLOW is the starting
                 * state in seccomp_run_filters().
                 */
                return 0;

        case SECCOMP_RET_KILL_THREAD:
        case SECCOMP_RET_KILL_PROCESS:
        default:
                seccomp_log(this_syscall, SIGSYS, action, true);
                /* Dump core only if this is the last remaining thread. */
                if (action == SECCOMP_RET_KILL_PROCESS ||
                    get_nr_threads(current) == 1) {
                        siginfo_t info;

                        /* Show the original registers in the dump. */
                        syscall_rollback(current, task_pt_regs(current));
                        /* Trigger a manual coredump since do_exit skips it. */
                        seccomp_init_siginfo(&info, this_syscall, data);
                        do_coredump(&info);
                }
                if (action == SECCOMP_RET_KILL_PROCESS)
                        do_group_exit(SIGSYS);
                else
                        do_exit(SIGSYS);
        }

        unreachable();

skip:
        seccomp_log(this_syscall, 0, action, match ? match->log : false);
        return 0;
}
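
/*
 * The tracer side of SECCOMP_RET_TRACE, sketched: a ptracer opts in with
 * PTRACE_O_TRACESECCOMP and reads the filter's SECCOMP_RET_DATA out of the
 * event message when the stop is reported.
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESECCOMP);
 *	...
 *	// on a PTRACE_EVENT_SECCOMP stop:
 *	unsigned long msg;
 *	ptrace(PTRACE_GETEVENTMSG, pid, 0, &msg);	// low 16 bits = data
 */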
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                            const bool recheck_after_trace)
{
        BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
        int mode = current->seccomp.mode;
        int this_syscall;

        if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
            unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
                return 0;

        this_syscall = sd ? sd->nr :
                syscall_get_nr(current, task_pt_regs(current));

        switch (mode) {
        case SECCOMP_MODE_STRICT:
                __secure_computing_strict(this_syscall);  /* may call do_exit */
                return 0;
        case SECCOMP_MODE_FILTER:
                return __seccomp_filter(this_syscall, sd, false);
        default:
                BUG();
        }
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
        return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
        long ret = -EINVAL;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

#ifdef TIF_NOTSC
        disable_TSC();
#endif
        seccomp_assign_mode(current, seccomp_mode, 0);
        ret = 0;

out:
        spin_unlock_irq(&current->sighand->siglock);

        return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
                                    const char __user *filter)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
        struct seccomp_filter *prepared = NULL;
        long ret = -EINVAL;

        /* Validate flags. */
        if (flags & ~SECCOMP_FILTER_FLAG_MASK)
                return -EINVAL;

        /* Prepare the new filter before holding any locks. */
        prepared = seccomp_prepare_user_filter(filter);
        if (IS_ERR(prepared))
                return PTR_ERR(prepared);

        /*
         * Make sure we cannot change seccomp or nnp state via TSYNC
         * while another thread is in the middle of calling exec.
         */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
            mutex_lock_killable(&current->signal->cred_guard_mutex))
                goto out_free;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

        ret = seccomp_attach_filter(flags, prepared);
        if (ret)
                goto out;
        /* Do not free the successfully attached filter. */
        prepared = NULL;

        seccomp_assign_mode(current, seccomp_mode, flags);
out:
        spin_unlock_irq(&current->sighand->siglock);
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
        seccomp_filter_free(prepared);
        return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
                                           const char __user *filter)
{
        return -EINVAL;
}
#endif

static long seccomp_get_action_avail(const char __user *uaction)
{
        u32 action;

        if (copy_from_user(&action, uaction, sizeof(action)))
                return -EFAULT;

        switch (action) {
        case SECCOMP_RET_KILL_PROCESS:
        case SECCOMP_RET_KILL_THREAD:
        case SECCOMP_RET_TRAP:
        case SECCOMP_RET_ERRNO:
        case SECCOMP_RET_TRACE:
        case SECCOMP_RET_LOG:
        case SECCOMP_RET_ALLOW:
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
                       const char __user *uargs)
{
        switch (op) {
        case SECCOMP_SET_MODE_STRICT:
                if (flags != 0 || uargs != NULL)
                        return -EINVAL;
                return seccomp_set_mode_strict();
        case SECCOMP_SET_MODE_FILTER:
                return seccomp_set_mode_filter(flags, uargs);
        case SECCOMP_GET_ACTION_AVAIL:
                if (flags != 0)
                        return -EINVAL;

                return seccomp_get_action_avail(uargs);
        default:
                return -EINVAL;
        }
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
                const char __user *, uargs)
{
        return do_seccomp(op, flags, uargs);
}
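
/*
 * A minimal sketch of probing the syscall from userspace, e.g. to check
 * whether this kernel knows about a given action before relying on it:
 *
 *	__u32 action = SECCOMP_RET_KILL_PROCESS;
 *	if (syscall(__NR_seccomp, SECCOMP_GET_ACTION_AVAIL, 0, &action) == 0)
 *		// the action is supported
 */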

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
        unsigned int op;
        char __user *uargs;

        switch (seccomp_mode) {
        case SECCOMP_MODE_STRICT:
                op = SECCOMP_SET_MODE_STRICT;
                /*
                 * Setting strict mode through prctl always ignored filter,
                 * so make sure it is always NULL here to pass the internal
                 * check in do_seccomp().
                 */
                uargs = NULL;
                break;
        case SECCOMP_MODE_FILTER:
                op = SECCOMP_SET_MODE_FILTER;
                uargs = filter;
                break;
        default:
                return -EINVAL;
        }

        /* prctl interface doesn't have flags, so they are always zero. */
        return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
static struct seccomp_filter *get_nth_filter(struct task_struct *task,
                                             unsigned long filter_off)
{
        struct seccomp_filter *orig, *filter;
        unsigned long count;

        /*
         * Note: this is only correct because the caller should be the (ptrace)
         * tracer of the task, otherwise lock_task_sighand is needed.
         */
        spin_lock_irq(&task->sighand->siglock);

        if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
                spin_unlock_irq(&task->sighand->siglock);
                return ERR_PTR(-EINVAL);
        }

        orig = task->seccomp.filter;
        __get_seccomp_filter(orig);
        spin_unlock_irq(&task->sighand->siglock);

        count = 0;
        for (filter = orig; filter; filter = filter->prev)
                count++;

        if (filter_off >= count) {
                filter = ERR_PTR(-ENOENT);
                goto out;
        }

        count -= filter_off;
        for (filter = orig; filter && count > 1; filter = filter->prev)
                count--;

        if (WARN_ON(count != 1 || !filter)) {
                filter = ERR_PTR(-ENOENT);
                goto out;
        }

        __get_seccomp_filter(filter);

out:
        __put_seccomp_filter(orig);
        return filter;
}

long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
                        void __user *data)
{
        struct seccomp_filter *filter;
        struct sock_fprog_kern *fprog;
        long ret;

        if (!capable(CAP_SYS_ADMIN) ||
            current->seccomp.mode != SECCOMP_MODE_DISABLED) {
                return -EACCES;
        }

        filter = get_nth_filter(task, filter_off);
        if (IS_ERR(filter))
                return PTR_ERR(filter);

        fprog = filter->prog->orig_prog;
        if (!fprog) {
                /* This must be a new non-cBPF filter, since we save
                 * every cBPF filter's orig_prog above when
                 * CONFIG_CHECKPOINT_RESTORE is enabled.
                 */
                ret = -EMEDIUMTYPE;
                goto out;
        }

        ret = fprog->len;
        if (!data)
                goto out;

        if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
                ret = -EFAULT;

out:
        __put_seccomp_filter(filter);
        return ret;
}

long seccomp_get_metadata(struct task_struct *task,
                          unsigned long size, void __user *data)
{
        long ret;
        struct seccomp_filter *filter;
        struct seccomp_metadata kmd = {};

        if (!capable(CAP_SYS_ADMIN) ||
            current->seccomp.mode != SECCOMP_MODE_DISABLED) {
                return -EACCES;
        }

        size = min_t(unsigned long, size, sizeof(kmd));

        if (size < sizeof(kmd.filter_off))
                return -EINVAL;

        if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
                return -EFAULT;

        filter = get_nth_filter(task, kmd.filter_off);
        if (IS_ERR(filter))
                return PTR_ERR(filter);

        if (filter->log)
                kmd.flags |= SECCOMP_FILTER_FLAG_LOG;

        ret = size;
        if (copy_to_user(data, &kmd, size))
                ret = -EFAULT;

        __put_seccomp_filter(filter);
        return ret;
}
#endif

#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME   "kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME    "kill_thread"
#define SECCOMP_RET_TRAP_NAME           "trap"
#define SECCOMP_RET_ERRNO_NAME          "errno"
#define SECCOMP_RET_TRACE_NAME          "trace"
#define SECCOMP_RET_LOG_NAME            "log"
#define SECCOMP_RET_ALLOW_NAME          "allow"

static const char seccomp_actions_avail[] =
                                SECCOMP_RET_KILL_PROCESS_NAME   " "
                                SECCOMP_RET_KILL_THREAD_NAME    " "
                                SECCOMP_RET_TRAP_NAME           " "
                                SECCOMP_RET_ERRNO_NAME          " "
                                SECCOMP_RET_TRACE_NAME          " "
                                SECCOMP_RET_LOG_NAME            " "
                                SECCOMP_RET_ALLOW_NAME;
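
/*
 * Reading this back through the sysctl interface, e.g.:
 *
 *	$ cat /proc/sys/kernel/seccomp/actions_avail
 *	kill_process kill_thread trap errno trace log allow
 */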

struct seccomp_log_name {
        u32             log;
        const char      *name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
        { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
        { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
        { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
        { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
        { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
        { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
        { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
        { }
};

static bool seccomp_names_from_actions_logged(char *names, size_t size,
                                              u32 actions_logged,
                                              const char *sep)
{
        const struct seccomp_log_name *cur;
        bool append_sep = false;

        for (cur = seccomp_log_names; cur->name && size; cur++) {
                ssize_t ret;

                if (!(actions_logged & cur->log))
                        continue;

                if (append_sep) {
                        ret = strscpy(names, sep, size);
                        if (ret < 0)
                                return false;

                        names += ret;
                        size -= ret;
                } else
                        append_sep = true;

                ret = strscpy(names, cur->name, size);
                if (ret < 0)
                        return false;

                names += ret;
                size -= ret;
        }

        return true;
}

static bool seccomp_action_logged_from_name(u32 *action_logged,
                                            const char *name)
{
        const struct seccomp_log_name *cur;

        for (cur = seccomp_log_names; cur->name; cur++) {
                if (!strcmp(cur->name, name)) {
                        *action_logged = cur->log;
                        return true;
                }
        }

        return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
        char *name;

        *actions_logged = 0;
        while ((name = strsep(&names, " ")) && *name) {
                u32 action_logged = 0;

                if (!seccomp_action_logged_from_name(&action_logged, name))
                        return false;

                *actions_logged |= action_logged;
        }

        return true;
}

static int read_actions_logged(struct ctl_table *ro_table, void __user *buffer,
                               size_t *lenp, loff_t *ppos)
{
        char names[sizeof(seccomp_actions_avail)];
        struct ctl_table table;

        memset(names, 0, sizeof(names));

        if (!seccomp_names_from_actions_logged(names, sizeof(names),
                                               seccomp_actions_logged, " "))
                return -EINVAL;

        table = *ro_table;
        table.data = names;
        table.maxlen = sizeof(names);
        return proc_dostring(&table, 0, buffer, lenp, ppos);
}

static int write_actions_logged(struct ctl_table *ro_table, void __user *buffer,
                                size_t *lenp, loff_t *ppos, u32 *actions_logged)
{
        char names[sizeof(seccomp_actions_avail)];
        struct ctl_table table;
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        memset(names, 0, sizeof(names));

        table = *ro_table;
        table.data = names;
        table.maxlen = sizeof(names);
        ret = proc_dostring(&table, 1, buffer, lenp, ppos);
        if (ret)
                return ret;

        if (!seccomp_actions_logged_from_names(actions_logged, table.data))
                return -EINVAL;

        if (*actions_logged & SECCOMP_LOG_ALLOW)
                return -EINVAL;

        seccomp_actions_logged = *actions_logged;
        return 0;
}

static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
                                 int ret)
{
        char names[sizeof(seccomp_actions_avail)];
        char old_names[sizeof(seccomp_actions_avail)];
        const char *new = names;
        const char *old = old_names;

        if (!audit_enabled)
                return;

        memset(names, 0, sizeof(names));
        memset(old_names, 0, sizeof(old_names));

        if (ret)
                new = "?";
        else if (!actions_logged)
                new = "(none)";
        else if (!seccomp_names_from_actions_logged(names, sizeof(names),
                                                    actions_logged, ","))
                new = "?";

        if (!old_actions_logged)
                old = "(none)";
        else if (!seccomp_names_from_actions_logged(old_names,
                                                    sizeof(old_names),
                                                    old_actions_logged, ","))
                old = "?";

        return audit_seccomp_actions_logged(new, old, !ret);
}

static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
                                          void __user *buffer, size_t *lenp,
                                          loff_t *ppos)
{
        int ret;

        if (write) {
                u32 actions_logged = 0;
                u32 old_actions_logged = seccomp_actions_logged;

                ret = write_actions_logged(ro_table, buffer, lenp, ppos,
                                           &actions_logged);
                audit_actions_logged(actions_logged, old_actions_logged, ret);
        } else
                ret = read_actions_logged(ro_table, buffer, lenp, ppos);

        return ret;
}

static struct ctl_path seccomp_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "seccomp", },
        { }
};

static struct ctl_table seccomp_sysctl_table[] = {
        {
                .procname       = "actions_avail",
                .data           = (void *) &seccomp_actions_avail,
                .maxlen         = sizeof(seccomp_actions_avail),
                .mode           = 0444,
                .proc_handler   = proc_dostring,
        },
        {
                .procname       = "actions_logged",
                .mode           = 0644,
                .proc_handler   = seccomp_actions_logged_handler,
        },
        { }
};

static int __init seccomp_sysctl_init(void)
{
        struct ctl_table_header *hdr;

        hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
        if (!hdr)
                pr_warn("seccomp: sysctl registration failed\n");
        else
                kmemleak_not_leak(hdr);

        return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */