/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */

#include <linux/security.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#define YAMA_SCOPE_DISABLED	0
#define YAMA_SCOPE_RELATIONAL	1
#define YAMA_SCOPE_CAPABILITY	2
#define YAMA_SCOPE_NO_ATTACH	3

static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
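
/*
 * Informal quick reference for the scope levels above; the in-tree Yama
 * documentation is authoritative. The value is adjusted at runtime through
 * the kernel.yama.ptrace_scope sysctl, e.g. "sysctl -w kernel.yama.ptrace_scope=1".
 *
 *   0 - DISABLED:   classic ptrace permissions only.
 *   1 - RELATIONAL: a tracer may only attach to its own descendants, to
 *                   tracees that granted it an exception with
 *                   prctl(PR_SET_PTRACER), or when it has CAP_SYS_PTRACE.
 *   2 - CAPABILITY: only tracers with CAP_SYS_PTRACE may attach.
 *   3 - NO_ATTACH:  no PTRACE_ATTACH at all; once set, the value cannot be
 *                   lowered again (see yama_dointvec_minmax() below).
 */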
/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
	struct task_struct *tracer;
	struct task_struct *tracee;
	bool invalid;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);
static void yama_relation_cleanup(struct work_struct *work);
static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
/**
 * yama_relation_cleanup - remove invalid entries from the relation list
 */
static void yama_relation_cleanup(struct work_struct *work)
{
	struct ptrace_relation *relation;

	spin_lock(&ptracer_relations_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid) {
			list_del_rcu(&relation->node);
			kfree_rcu(relation, rcu);
		}
	}
	rcu_read_unlock();
	spin_unlock(&ptracer_relations_lock);
}
/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if relationship was added, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
			    struct task_struct *tracee)
{
	struct ptrace_relation *relation, *added;

	added = kmalloc(sizeof(*added), GFP_KERNEL);
	if (!added)
		return -ENOMEM;

	added->tracee = tracee;
	added->tracer = tracer;
	added->invalid = false;

	spin_lock(&ptracer_relations_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee) {
			list_replace_rcu(&relation->node, &added->node);
			kfree_rcu(relation, rcu);
			goto out;
		}
	}

	list_add_rcu(&added->node, &ptracer_relations);

out:
	rcu_read_unlock();
	spin_unlock(&ptracer_relations_lock);
	return 0;
}
/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
			     struct task_struct *tracee)
{
	struct ptrace_relation *relation;
	bool marked = false;

	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee ||
		    (tracer && relation->tracer == tracer)) {
			relation->invalid = true;
			marked = true;
		}
	}
	rcu_read_unlock();

	if (marked)
		schedule_work(&yama_relation_work);
}
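
/*
 * Note on the exception list handling above: deleting an exception never
 * takes ptracer_relations_lock. Entries are only flagged as invalid under
 * RCU, and yama_relation_work later unlinks and frees them under the lock.
 * The ptrace checks below skip entries whose ->invalid flag is set, so a
 * flagged entry behaves as if it were already gone; this keeps the delete
 * path usable from the task_free hook without taking the spinlock.
 */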
/**
 * yama_task_free - check for task_pid to remove from exception list
 * @task: task being removed
 */
void yama_task_free(struct task_struct *task)
{
	yama_ptracer_del(task, task);
}
/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
 * does not handle the given option.
 */
int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
		    unsigned long arg4, unsigned long arg5)
{
	int rc;
	struct task_struct *myself = current;

	rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
	if (rc != -ENOSYS)
		return rc;

	switch (option) {
	case PR_SET_PTRACER:
		/* Since a thread can call prctl(), find the group leader
		 * before calling _add() or _del() on it, since we want
		 * process-level granularity of control. The tracer group
		 * leader checking is handled later when walking the ancestry
		 * at the time of PTRACE_ATTACH check.
		 */
		rcu_read_lock();
		if (!thread_group_leader(myself))
			myself = rcu_dereference(myself->group_leader);
		get_task_struct(myself);
		rcu_read_unlock();

		if (arg2 == 0) {
			yama_ptracer_del(NULL, myself);
			rc = 0;
		} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
			rc = yama_ptracer_add(NULL, myself);
		} else {
			struct task_struct *tracer;

			rcu_read_lock();
			tracer = find_task_by_vpid(arg2);
			if (tracer)
				get_task_struct(tracer);
			else
				rc = -EINVAL;
			rcu_read_unlock();

			if (tracer) {
				rc = yama_ptracer_add(tracer, myself);
				put_task_struct(tracer);
			}
		}

		put_task_struct(myself);
		break;
	}

	return rc;
}
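
/*
 * Illustrative userspace usage of the PR_SET_PTRACER handling above. This
 * is a sketch, not kernel code, and "debugger_pid" is only a placeholder
 * for the pid of the intended tracer:
 *
 *	prctl(PR_SET_PTRACER, debugger_pid, 0, 0, 0);        - allow one tracer
 *	prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0);  - allow any tracer
 *	prctl(PR_SET_PTRACER, 0, 0, 0, 0);                   - clear the exception
 */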
/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
			      struct task_struct *child)
{
	int rc = 0;
	struct task_struct *walker = child;

	if (!parent || !child)
		return 0;

	rcu_read_lock();
	if (!thread_group_leader(parent))
		parent = rcu_dereference(parent->group_leader);
	while (walker->pid > 0) {
		if (!thread_group_leader(walker))
			walker = rcu_dereference(walker->group_leader);
		if (walker == parent) {
			rc = 1;
			break;
		}
		walker = rcu_dereference(walker->real_parent);
	}
	rcu_read_unlock();

	return rc;
}
/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if tracer has a ptracer exception ancestor for tracee.
 */
static int ptracer_exception_found(struct task_struct *tracer,
				   struct task_struct *tracee)
{
	int rc = 0;
	struct ptrace_relation *relation;
	struct task_struct *parent = NULL;
	bool found = false;

	rcu_read_lock();
	if (!thread_group_leader(tracee))
		tracee = rcu_dereference(tracee->group_leader);
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee) {
			parent = relation->tracer;
			found = true;
			break;
		}
	}

	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
		rc = 1;
	rcu_read_unlock();

	return rc;
}
/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_access_check(struct task_struct *child,
			     unsigned int mode)
{
	int rc;

	/* If standard caps disallows it, so does Yama.  We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_access_check(child, mode);
	if (rc)
		return rc;

	/* require ptrace target be a child of ptracer on attach */
	if (mode == PTRACE_MODE_ATTACH) {
		switch (ptrace_scope) {
		case YAMA_SCOPE_DISABLED:
			/* No additional restrictions. */
			break;
		case YAMA_SCOPE_RELATIONAL:
			rcu_read_lock();
			if (!task_is_descendant(current, child) &&
			    !ptracer_exception_found(current, child) &&
			    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
				rc = -EPERM;
			rcu_read_unlock();
			break;
		case YAMA_SCOPE_CAPABILITY:
			rcu_read_lock();
			if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
				rc = -EPERM;
			rcu_read_unlock();
			break;
		case YAMA_SCOPE_NO_ATTACH:
		default:
			rc = -EPERM;
			break;
		}
	}

	if (rc) {
		printk_ratelimited(KERN_NOTICE
			"ptrace of pid %d was attempted by: %s (pid %d)\n",
			child->pid, current->comm, current->pid);
	}

	return rc;
}
/**
 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
 * @parent: task that will become the ptracer of the current task
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_traceme(struct task_struct *parent)
{
	int rc;

	/* If standard caps disallows it, so does Yama.  We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_traceme(parent);
	if (rc)
		return rc;

	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
	switch (ptrace_scope) {
	case YAMA_SCOPE_CAPABILITY:
		if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
			rc = -EPERM;
		break;
	case YAMA_SCOPE_NO_ATTACH:
		rc = -EPERM;
		break;
	}

	if (rc) {
		printk_ratelimited(KERN_NOTICE
			"ptraceme of pid %d was attempted by: %s (pid %d)\n",
			current->pid, parent->comm, parent->pid);
	}

	return rc;
}
#ifndef CONFIG_SECURITY_YAMA_STACKED
static struct security_operations yama_ops = {
	.name =			"yama",

	.ptrace_access_check =	yama_ptrace_access_check,
	.ptrace_traceme =	yama_ptrace_traceme,
	.task_prctl =		yama_task_prctl,
	.task_free =		yama_task_free,
};
#endif
#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc;

	if (write && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (rc)
		return rc;

	/* Lock the max value if it ever gets set. */
	if (write && *(int *)table->data == *(int *)table->extra2)
		table->extra1 = table->extra2;

	return rc;
}
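
/*
 * In other words: writes are normally clamped to [0, max_scope] via the
 * extra1/extra2 pointers below, but once the maximum (YAMA_SCOPE_NO_ATTACH)
 * has been written, extra1 is pointed at extra2 and proc_dointvec_minmax()
 * will reject any attempt to lower the value again for the rest of the boot.
 */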
static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;

struct ctl_path yama_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "yama", },
	{ }
};

static struct ctl_table yama_sysctl_table[] = {
	{
		.procname	= "ptrace_scope",
		.data		= &ptrace_scope,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= yama_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_scope,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
static __init int yama_init(void)
{
#ifndef CONFIG_SECURITY_YAMA_STACKED
	if (!security_module_enable(&yama_ops))
		return 0;
#endif

	printk(KERN_INFO "Yama: becoming mindful.\n");

#ifndef CONFIG_SECURITY_YAMA_STACKED
	if (register_security(&yama_ops))
		panic("Yama: kernel registration failed.\n");
#endif

#ifdef CONFIG_SYSCTL
	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
		panic("Yama: sysctl registration failed.\n");
#endif

	return 0;
}

security_initcall(yama_init);