// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
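/*
 * Illustrative sketch only (not driver code): using the helpers defined
 * below, a path that needs all three locks must take them in the order
 * listed above and release them in reverse:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */
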
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS	(VM_WRITE)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
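/*
 * Illustrative usage (assumes the standard module_param sysfs layout;
 * not part of the driver): these parameters appear under
 * /sys/module/binder/parameters/, e.g.:
 *
 *	echo 0x3f > /sys/module/binder/parameters/debug_mask
 *	echo 1 > /sys/module/binder/parameters/stop_on_user_error
 *
 * A write to stop_on_user_error goes through
 * binder_set_stop_on_user_error() above, which wakes any waiters on
 * binder_user_error_wait once the value drops below 2.
 */
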
static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
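/*
 * Illustrative only: binder_debug() is a no-op unless the matching bit
 * is set in debug_mask, so callers pass a category plus a printf-style
 * format, e.g.:
 *
 *	binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
 *		     proc->pid, thread->pid);
 */
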
static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
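/*
 * Sketch of the reader side this smp_wmb() pairs with (illustrative;
 * the actual consumer lives in the debugfs print path, not shown in
 * this excerpt): a reader snapshots debug_id_done, copies the entry,
 * then issues smp_rmb() and re-checks debug_id_done to detect a torn
 * read of an entry that was being recycled:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *	memcpy(&copy, e, sizeof(copy));
 *	smp_rmb();
 *	if (done && done == READ_ONCE(e->debug_id_done))
 *		;	// copy is consistent, safe to print
 */
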
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without it, threads risk
	 * waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
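/*
 * Illustrative call pattern (not new code): callers that already hold
 * proc->inner_lock use the _ilocked variant directly; everyone else
 * goes through the locking wrapper:
 *
 *	binder_enqueue_thread_work(thread, &t->work);	// takes inner lock
 *
 * versus, with proc->inner_lock already held:
 *
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 */
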
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
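/*
 * Sketch of the select-then-wake pattern the comments above describe
 * (illustrative; binder_wakeup_proc_ilocked() above is the canonical
 * form of it):
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	// thread (if non-NULL) is now off waiting_threads,
 *	// so the caller is responsible for waking it:
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *	binder_inner_proc_unlock(proc);
 */
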
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed, using %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
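/*
 * Typical tmp_refs lifetime, as used throughout this file (sketch):
 *
 *	node = binder_get_node(proc, ptr);	// takes an implicit tmp_ref
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);		// drops the tmp_ref
 *	}
 */
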
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
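/*
 * Illustrative example of the descriptor assignment above: descs are
 * handed out as the lowest unused value greater than 0 (0 is reserved
 * for the context manager's node). Walking refs_by_desc in order with
 * existing descs {1, 2, 5}, the candidate starts at 1, is bumped to 2
 * and then 3, and the loop breaks at 5, so the new ref gets desc 3.
 */
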
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	 ref to be incremented
 * @strong:	 if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
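/*
 * Sketch of the calling convention described above (illustrative):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		// operate on target_thread under its proc->inner_lock
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */
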
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups should be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
1886 * struct binder_task_work_cb - for deferred close
1888 * @twork: callback_head for task work
1889 * @fd: fd to close
1891 * Structure to pass task work to be handled after
1892 * returning from binder_ioctl() via task_work_add().
1894 struct binder_task_work_cb {
1895 struct callback_head twork;
1896 struct file *file;
1900 * binder_do_fd_close() - close list of file descriptors
1901 * @twork: callback head for task work
1903 * It is not safe to call ksys_close() during the binder_ioctl()
1904 * function if there is a chance that binder's own file descriptor
1905 * might be closed. This is to meet the requirements for using
1906 * fdget() (see comments for __fget_light()). Therefore use
1907 * task_work_add() to schedule the close operation once we have
1908 * returned from binder_ioctl(). This function is a callback
1909 * for that mechanism and drops the final reference to the
1910 * given file via fput().
1912 static void binder_do_fd_close(struct callback_head *twork)
1914 struct binder_task_work_cb *twcb = container_of(twork,
1915 struct binder_task_work_cb, twork);
1917 fput(twcb->file);
1918 kfree(twcb);
1922 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1923 * @fd: file-descriptor to close
1925 * See comments in binder_do_fd_close(). This function is used to schedule
1926 * a file-descriptor to be closed after returning from binder_ioctl().
1928 static void binder_deferred_fd_close(int fd)
1930 struct binder_task_work_cb *twcb;
1932 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1933 if (!twcb)
1934 return;
1935 init_task_work(&twcb->twork, binder_do_fd_close);
1936 twcb->file = file_close_fd(fd);
1937 if (twcb->file) {
1938 // pin it until binder_do_fd_close(); see comments there
1939 get_file(twcb->file);
1940 filp_close(twcb->file, current->files);
1941 task_work_add(current, &twcb->twork, TWA_RESUME);
1942 } else {
1943 kfree(twcb);
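/*
 * Editor's sketch: the deferred-work pattern used above, reduced to a
 * skeleton. my_deferred, my_deferred_cb and my_defer are hypothetical
 * names; init_task_work(), task_work_add() and TWA_RESUME are the
 * real kernel interfaces used by binder_deferred_fd_close().
 */
#include <linux/task_work.h>
#include <linux/slab.h>
#include <linux/sched.h>

struct my_deferred {
	struct callback_head twork;
	/* payload to act on once it is safe, i.e. after the ioctl returns */
};

static void my_deferred_cb(struct callback_head *twork)
{
	struct my_deferred *d = container_of(twork, struct my_deferred, twork);

	/* runs in task context just before returning to userspace */
	kfree(d);
}

static int my_defer(void)
{
	struct my_deferred *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	init_task_work(&d->twork, my_deferred_cb);
	return task_work_add(current, &d->twork, TWA_RESUME);
}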
1947 static void binder_transaction_buffer_release(struct binder_proc *proc,
1948 struct binder_thread *thread,
1949 struct binder_buffer *buffer,
1950 binder_size_t off_end_offset,
1951 bool is_failure)
1953 int debug_id = buffer->debug_id;
1954 binder_size_t off_start_offset, buffer_offset;
1956 binder_debug(BINDER_DEBUG_TRANSACTION,
1957 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1958 proc->pid, buffer->debug_id,
1959 buffer->data_size, buffer->offsets_size,
1960 (unsigned long long)off_end_offset);
1962 if (buffer->target_node)
1963 binder_dec_node(buffer->target_node, 1, 0);
1965 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1967 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1968 buffer_offset += sizeof(binder_size_t)) {
1969 struct binder_object_header *hdr;
1970 size_t object_size = 0;
1971 struct binder_object object;
1972 binder_size_t object_offset;
1974 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1975 buffer, buffer_offset,
1976 sizeof(object_offset)))
1977 object_size = binder_get_object(proc, NULL, buffer,
1978 object_offset, &object);
1979 if (object_size == 0) {
1980 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1981 debug_id, (u64)object_offset, buffer->data_size);
1982 continue;
1984 hdr = &object.hdr;
1985 switch (hdr->type) {
1986 case BINDER_TYPE_BINDER:
1987 case BINDER_TYPE_WEAK_BINDER: {
1988 struct flat_binder_object *fp;
1989 struct binder_node *node;
1991 fp = to_flat_binder_object(hdr);
1992 node = binder_get_node(proc, fp->binder);
1993 if (node == NULL) {
1994 pr_err("transaction release %d bad node %016llx\n",
1995 debug_id, (u64)fp->binder);
1996 break;
1998 binder_debug(BINDER_DEBUG_TRANSACTION,
1999 " node %d u%016llx\n",
2000 node->debug_id, (u64)node->ptr);
2001 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2002 0);
2003 binder_put_node(node);
2004 } break;
2005 case BINDER_TYPE_HANDLE:
2006 case BINDER_TYPE_WEAK_HANDLE: {
2007 struct flat_binder_object *fp;
2008 struct binder_ref_data rdata;
2009 int ret;
2011 fp = to_flat_binder_object(hdr);
2012 ret = binder_dec_ref_for_handle(proc, fp->handle,
2013 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2015 if (ret) {
2016 pr_err("transaction release %d bad handle %d, ret = %d\n",
2017 debug_id, fp->handle, ret);
2018 break;
2020 binder_debug(BINDER_DEBUG_TRANSACTION,
2021 " ref %d desc %d\n",
2022 rdata.debug_id, rdata.desc);
2023 } break;
2025 case BINDER_TYPE_FD: {
2027 * No need to close the file here since user-space
2028 * closes it for successfully delivered
2029 * transactions. For transactions that weren't
2030 * delivered, the new fd was never allocated so
2031 * there is no need to close and the fput on the
2032 * file is done when the transaction is torn
2033 * down.
2035 } break;
2036 case BINDER_TYPE_PTR:
2038 * Nothing to do here, this will get cleaned up when the
2039 * transaction buffer gets freed
2041 break;
2042 case BINDER_TYPE_FDA: {
2043 struct binder_fd_array_object *fda;
2044 struct binder_buffer_object *parent;
2045 struct binder_object ptr_object;
2046 binder_size_t fda_offset;
2047 size_t fd_index;
2048 binder_size_t fd_buf_size;
2049 binder_size_t num_valid;
2051 if (is_failure) {
2053 * The fd fixups have not been applied so no
2054 * fds need to be closed.
2056 continue;
2059 num_valid = (buffer_offset - off_start_offset) /
2060 sizeof(binder_size_t);
2061 fda = to_binder_fd_array_object(hdr);
2062 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2063 fda->parent,
2064 off_start_offset,
2065 NULL,
2066 num_valid);
2067 if (!parent) {
2068 pr_err("transaction release %d bad parent offset\n",
2069 debug_id);
2070 continue;
2072 fd_buf_size = sizeof(u32) * fda->num_fds;
2073 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2074 pr_err("transaction release %d invalid number of fds (%lld)\n",
2075 debug_id, (u64)fda->num_fds);
2076 continue;
2078 if (fd_buf_size > parent->length ||
2079 fda->parent_offset > parent->length - fd_buf_size) {
2080 /* No space for all file descriptors here. */
2081 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2082 debug_id, (u64)fda->num_fds);
2083 continue;
2086 * the source data for binder_buffer_object is visible
2087 * to user-space and the @buffer element is the user
2088 * pointer to the buffer_object containing the fd_array.
2089 * Convert the address to an offset relative to
2090 * the base of the transaction buffer.
2092 fda_offset = parent->buffer - buffer->user_data +
2093 fda->parent_offset;
2094 for (fd_index = 0; fd_index < fda->num_fds;
2095 fd_index++) {
2096 u32 fd;
2097 int err;
2098 binder_size_t offset = fda_offset +
2099 fd_index * sizeof(fd);
2101 err = binder_alloc_copy_from_buffer(
2102 &proc->alloc, &fd, buffer,
2103 offset, sizeof(fd));
2104 WARN_ON(err);
2105 if (!err) {
2106 binder_deferred_fd_close(fd);
2108 * Need to make sure the thread goes
2109 * back to userspace to complete the
2110 * deferred close
2112 if (thread)
2113 thread->looper_need_return = true;
2116 } break;
2117 default:
2118 pr_err("transaction release %d bad object type %x\n",
2119 debug_id, hdr->type);
2120 break;
2125 /* Clean up all the objects in the buffer */
2126 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2127 struct binder_thread *thread,
2128 struct binder_buffer *buffer,
2129 bool is_failure)
2131 binder_size_t off_end_offset;
2133 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2134 off_end_offset += buffer->offsets_size;
2136 binder_transaction_buffer_release(proc, thread, buffer,
2137 off_end_offset, is_failure);
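/*
 * Editor's worked example for the offset math above: on a 64-bit
 * kernel, a buffer with data_size = 100 and offsets_size = 16 gives
 * off_end_offset = ALIGN(100, 8) + 16 = 104 + 16 = 120, so the
 * release loop walks two binder_size_t entries, at buffer offsets
 * 104 and 112.
 */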
2140 static int binder_translate_binder(struct flat_binder_object *fp,
2141 struct binder_transaction *t,
2142 struct binder_thread *thread)
2144 struct binder_node *node;
2145 struct binder_proc *proc = thread->proc;
2146 struct binder_proc *target_proc = t->to_proc;
2147 struct binder_ref_data rdata;
2148 int ret = 0;
2150 node = binder_get_node(proc, fp->binder);
2151 if (!node) {
2152 node = binder_new_node(proc, fp);
2153 if (!node)
2154 return -ENOMEM;
2156 if (fp->cookie != node->cookie) {
2157 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2158 proc->pid, thread->pid, (u64)fp->binder,
2159 node->debug_id, (u64)fp->cookie,
2160 (u64)node->cookie);
2161 ret = -EINVAL;
2162 goto done;
2164 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2165 ret = -EPERM;
2166 goto done;
2169 ret = binder_inc_ref_for_node(target_proc, node,
2170 fp->hdr.type == BINDER_TYPE_BINDER,
2171 &thread->todo, &rdata);
2172 if (ret)
2173 goto done;
2175 if (fp->hdr.type == BINDER_TYPE_BINDER)
2176 fp->hdr.type = BINDER_TYPE_HANDLE;
2177 else
2178 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2179 fp->binder = 0;
2180 fp->handle = rdata.desc;
2181 fp->cookie = 0;
2183 trace_binder_transaction_node_to_ref(t, node, &rdata);
2184 binder_debug(BINDER_DEBUG_TRANSACTION,
2185 " node %d u%016llx -> ref %d desc %d\n",
2186 node->debug_id, (u64)node->ptr,
2187 rdata.debug_id, rdata.desc);
2188 done:
2189 binder_put_node(node);
2190 return ret;
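/*
 * Editor's sketch (userspace side, not driver code): what the
 * translation above looks like across the IPC boundary. Only uapi
 * definitions are used; fill_sender_object is a hypothetical helper.
 */
#include <linux/android/binder.h>
#include <string.h>

static void fill_sender_object(struct flat_binder_object *fp,
			       binder_uintptr_t local_ptr,
			       binder_uintptr_t local_cookie)
{
	memset(fp, 0, sizeof(*fp));
	fp->hdr.type = BINDER_TYPE_BINDER;	/* strong local node */
	fp->binder = local_ptr;
	fp->cookie = local_cookie;
	/*
	 * After binder_translate_binder() runs, the receiving process
	 * observes hdr.type == BINDER_TYPE_HANDLE, fp->handle set to a
	 * driver-assigned descriptor, and binder/cookie zeroed.
	 */
}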
2193 static int binder_translate_handle(struct flat_binder_object *fp,
2194 struct binder_transaction *t,
2195 struct binder_thread *thread)
2197 struct binder_proc *proc = thread->proc;
2198 struct binder_proc *target_proc = t->to_proc;
2199 struct binder_node *node;
2200 struct binder_ref_data src_rdata;
2201 int ret = 0;
2203 node = binder_get_node_from_ref(proc, fp->handle,
2204 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2205 if (!node) {
2206 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2207 proc->pid, thread->pid, fp->handle);
2208 return -EINVAL;
2210 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2211 ret = -EPERM;
2212 goto done;
2215 binder_node_lock(node);
2216 if (node->proc == target_proc) {
2217 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2218 fp->hdr.type = BINDER_TYPE_BINDER;
2219 else
2220 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2221 fp->binder = node->ptr;
2222 fp->cookie = node->cookie;
2223 if (node->proc)
2224 binder_inner_proc_lock(node->proc);
2225 else
2226 __acquire(&node->proc->inner_lock);
2227 binder_inc_node_nilocked(node,
2228 fp->hdr.type == BINDER_TYPE_BINDER,
2229 0, NULL);
2230 if (node->proc)
2231 binder_inner_proc_unlock(node->proc);
2232 else
2233 __release(&node->proc->inner_lock);
2234 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2235 binder_debug(BINDER_DEBUG_TRANSACTION,
2236 " ref %d desc %d -> node %d u%016llx\n",
2237 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2238 (u64)node->ptr);
2239 binder_node_unlock(node);
2240 } else {
2241 struct binder_ref_data dest_rdata;
2243 binder_node_unlock(node);
2244 ret = binder_inc_ref_for_node(target_proc, node,
2245 fp->hdr.type == BINDER_TYPE_HANDLE,
2246 NULL, &dest_rdata);
2247 if (ret)
2248 goto done;
2250 fp->binder = 0;
2251 fp->handle = dest_rdata.desc;
2252 fp->cookie = 0;
2253 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2254 &dest_rdata);
2255 binder_debug(BINDER_DEBUG_TRANSACTION,
2256 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2257 src_rdata.debug_id, src_rdata.desc,
2258 dest_rdata.debug_id, dest_rdata.desc,
2259 node->debug_id);
2261 done:
2262 binder_put_node(node);
2263 return ret;
2266 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2267 struct binder_transaction *t,
2268 struct binder_thread *thread,
2269 struct binder_transaction *in_reply_to)
2271 struct binder_proc *proc = thread->proc;
2272 struct binder_proc *target_proc = t->to_proc;
2273 struct binder_txn_fd_fixup *fixup;
2274 struct file *file;
2275 int ret = 0;
2276 bool target_allows_fd;
2278 if (in_reply_to)
2279 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2280 else
2281 target_allows_fd = t->buffer->target_node->accept_fds;
2282 if (!target_allows_fd) {
2283 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2284 proc->pid, thread->pid,
2285 in_reply_to ? "reply" : "transaction",
2286 fd);
2287 ret = -EPERM;
2288 goto err_fd_not_accepted;
2291 file = fget(fd);
2292 if (!file) {
2293 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2294 proc->pid, thread->pid, fd);
2295 ret = -EBADF;
2296 goto err_fget;
2298 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2299 if (ret < 0) {
2300 ret = -EPERM;
2301 goto err_security;
2305 * Add fixup record for this transaction. The allocation
2306 * of the fd in the target needs to be done from a
2307 * target thread.
2309 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2310 if (!fixup) {
2311 ret = -ENOMEM;
2312 goto err_alloc;
2314 fixup->file = file;
2315 fixup->offset = fd_offset;
2316 fixup->target_fd = -1;
2317 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2318 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2320 return ret;
2322 err_alloc:
2323 err_security:
2324 fput(file);
2325 err_fget:
2326 err_fd_not_accepted:
2327 return ret;
2331 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2332 * @offset: offset in target buffer to fixup
2333 * @skip_size: bytes to skip in copy (fixup will be written later)
2334 * @fixup_data: data to write at fixup offset
2335 * @node: list node
2337 * This is used for the pointer fixup list (pf) which is created and consumed
2338 * during binder_transaction() and is only accessed locally. No
2339 * locking is necessary.
2341 * The list is ordered by @offset.
2343 struct binder_ptr_fixup {
2344 binder_size_t offset;
2345 size_t skip_size;
2346 binder_uintptr_t fixup_data;
2347 struct list_head node;
2351 * struct binder_sg_copy - scatter-gather data to be copied
2352 * @offset: offset in target buffer
2353 * @sender_uaddr: user address in source buffer
2354 * @length: bytes to copy
2355 * @node: list node
2357 * This is used for the sg copy list (sgc) which is created and consumed
2358 * during binder_transaction() and is only accessed locally. No
2359 * locking is necessary.
2361 * The list is ordered by @offset.
2363 struct binder_sg_copy {
2364 binder_size_t offset;
2365 const void __user *sender_uaddr;
2366 size_t length;
2367 struct list_head node;
2371 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2372 * @alloc: binder_alloc associated with @buffer
2373 * @buffer: binder buffer in target process
2374 * @sgc_head: list_head of scatter-gather copy list
2375 * @pf_head: list_head of pointer fixup list
2377 * Processes all elements of @sgc_head, applying fixups from @pf_head
2378 * and copying the scatter-gather data from the source process' user
2379 * buffer to the target's buffer. It is expected that the list creation
2380 * and processing all occurs during binder_transaction() so these lists
2381 * are only accessed in local context.
2383 * Return: 0=success, else -errno
2385 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2386 struct binder_buffer *buffer,
2387 struct list_head *sgc_head,
2388 struct list_head *pf_head)
2390 int ret = 0;
2391 struct binder_sg_copy *sgc, *tmpsgc;
2392 struct binder_ptr_fixup *tmppf;
2393 struct binder_ptr_fixup *pf =
2394 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2395 node);
2397 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2398 size_t bytes_copied = 0;
2400 while (bytes_copied < sgc->length) {
2401 size_t copy_size;
2402 size_t bytes_left = sgc->length - bytes_copied;
2403 size_t offset = sgc->offset + bytes_copied;
2406 * We copy up to the fixup (pointed to by pf)
2408 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2409 : bytes_left;
2410 if (!ret && copy_size)
2411 ret = binder_alloc_copy_user_to_buffer(
2412 alloc, buffer,
2413 offset,
2414 sgc->sender_uaddr + bytes_copied,
2415 copy_size);
2416 bytes_copied += copy_size;
2417 if (copy_size != bytes_left) {
2418 BUG_ON(!pf);
2419 /* we stopped at a fixup offset */
2420 if (pf->skip_size) {
2422 * we are just skipping. This is for
2423 * BINDER_TYPE_FDA where the translated
2424 * fds will be fixed up when we get
2425 * to target context.
2427 bytes_copied += pf->skip_size;
2428 } else {
2429 /* apply the fixup indicated by pf */
2430 if (!ret)
2431 ret = binder_alloc_copy_to_buffer(
2432 alloc, buffer,
2433 pf->offset,
2434 &pf->fixup_data,
2435 sizeof(pf->fixup_data));
2436 bytes_copied += sizeof(pf->fixup_data);
2438 list_del(&pf->node);
2439 kfree(pf);
2440 pf = list_first_entry_or_null(pf_head,
2441 struct binder_ptr_fixup, node);
2444 list_del(&sgc->node);
2445 kfree(sgc);
2447 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2448 BUG_ON(pf->skip_size == 0);
2449 list_del(&pf->node);
2450 kfree(pf);
2452 BUG_ON(!list_empty(sgc_head));
2454 return ret > 0 ? -EINVAL : ret;
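/*
 * Editor's sketch (standalone userspace model): the interleaved
 * copy-and-fixup walk above, without the skip_size (BINDER_TYPE_FDA)
 * case. toy_fix and toy_copy_with_fixups are hypothetical; fixups
 * must be sorted by offset, non-overlapping and fit inside len,
 * mirroring the pf list.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct toy_fix {
	size_t offset;		/* where in dst the pointer lives */
	uint64_t value;		/* translated value to write there */
};

static void toy_copy_with_fixups(uint8_t *dst, const uint8_t *src,
				 size_t len, const struct toy_fix *fix,
				 size_t nfix)
{
	size_t pos = 0, i = 0;

	while (pos < len) {
		size_t stop = (i < nfix) ? fix[i].offset : len;

		memcpy(dst + pos, src + pos, stop - pos); /* up to fixup */
		pos = stop;
		if (i < nfix) {
			/* write the fixup instead of the source bytes */
			memcpy(dst + pos, &fix[i].value, sizeof(fix[i].value));
			pos += sizeof(fix[i].value);
			i++;
		}
	}
}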
2458 * binder_cleanup_deferred_txn_lists() - free specified lists
2459 * @sgc_head: list_head of scatter-gather copy list
2460 * @pf_head: list_head of pointer fixup list
2462 * Called to clean up @sgc_head and @pf_head if there is an
2463 * error.
2465 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2466 struct list_head *pf_head)
2468 struct binder_sg_copy *sgc, *tmpsgc;
2469 struct binder_ptr_fixup *pf, *tmppf;
2471 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2472 list_del(&sgc->node);
2473 kfree(sgc);
2475 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2476 list_del(&pf->node);
2477 kfree(pf);
2482 * binder_defer_copy() - queue a scatter-gather buffer for copy
2483 * @sgc_head: list_head of scatter-gather copy list
2484 * @offset: binder buffer offset in target process
2485 * @sender_uaddr: user address in source process
2486 * @length: bytes to copy
2488 * Specify a scatter-gather block to be copied. The actual copy must
2489 * be deferred until all the needed fixups are identified and queued.
2490 * Then the copy and fixups are done together so un-translated values
2491 * from the source are never visible in the target buffer.
2493 * We are guaranteed that repeated calls to this function will have
2494 * monotonically increasing @offset values so the list will naturally
2495 * be ordered.
2497 * Return: 0=success, else -errno
2499 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2500 const void __user *sender_uaddr, size_t length)
2502 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2504 if (!bc)
2505 return -ENOMEM;
2507 bc->offset = offset;
2508 bc->sender_uaddr = sender_uaddr;
2509 bc->length = length;
2510 INIT_LIST_HEAD(&bc->node);
2513 * We are guaranteed that the deferred copies are in-order
2514 * so just add to the tail.
2516 list_add_tail(&bc->node, sgc_head);
2518 return 0;
2522 * binder_add_fixup() - queue a fixup to be applied to sg copy
2523 * @pf_head: list_head of binder ptr fixup list
2524 * @offset: binder buffer offset in target process
2525 * @fixup: bytes to be copied for fixup
2526 * @skip_size: bytes to skip when copying (fixup will be applied later)
2528 * Add the specified fixup to a list ordered by @offset. When copying
2529 * the scatter-gather buffers, the fixup will be copied instead of
2530 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2531 * will be applied later (in target process context), so we just skip
2532 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2533 * value in @fixup.
2535 * This function is called *mostly* in @offset order, but there are
2536 * exceptions. Since out-of-order inserts are relatively uncommon,
2537 * we insert the new element by searching backward from the tail of
2538 * the list.
2540 * Return: 0=success, else -errno
2542 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2543 binder_uintptr_t fixup, size_t skip_size)
2545 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2546 struct binder_ptr_fixup *tmppf;
2548 if (!pf)
2549 return -ENOMEM;
2551 pf->offset = offset;
2552 pf->fixup_data = fixup;
2553 pf->skip_size = skip_size;
2554 INIT_LIST_HEAD(&pf->node);
2556 /* Fixups are *mostly* added in-order, but there are some
2557 * exceptions. Look backwards through list for insertion point.
2559 list_for_each_entry_reverse(tmppf, pf_head, node) {
2560 if (tmppf->offset < pf->offset) {
2561 list_add(&pf->node, &tmppf->node);
2562 return 0;
2566 * if we get here, then the new offset is the lowest so
2567 * insert at the head
2569 list_add(&pf->node, pf_head);
2570 return 0;
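/*
 * Editor's sketch: the reverse-search ordered insert above, isolated
 * on a plain kernel list. toy_ent and toy_insert_sorted are
 * hypothetical; because list_add() links the new entry directly after
 * the given one, inserting after the first smaller entry (searching
 * from the tail) keeps the list sorted by offset.
 */
#include <linux/list.h>

struct toy_ent {
	unsigned long offset;
	struct list_head node;
};

static void toy_insert_sorted(struct list_head *head, struct toy_ent *e)
{
	struct toy_ent *pos;

	list_for_each_entry_reverse(pos, head, node) {
		if (pos->offset < e->offset) {
			list_add(&e->node, &pos->node);	/* after pos */
			return;
		}
	}
	list_add(&e->node, head);	/* smallest offset: new head */
}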
2573 static int binder_translate_fd_array(struct list_head *pf_head,
2574 struct binder_fd_array_object *fda,
2575 const void __user *sender_ubuffer,
2576 struct binder_buffer_object *parent,
2577 struct binder_buffer_object *sender_uparent,
2578 struct binder_transaction *t,
2579 struct binder_thread *thread,
2580 struct binder_transaction *in_reply_to)
2582 binder_size_t fdi, fd_buf_size;
2583 binder_size_t fda_offset;
2584 const void __user *sender_ufda_base;
2585 struct binder_proc *proc = thread->proc;
2586 int ret;
2588 if (fda->num_fds == 0)
2589 return 0;
2591 fd_buf_size = sizeof(u32) * fda->num_fds;
2592 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2593 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2594 proc->pid, thread->pid, (u64)fda->num_fds);
2595 return -EINVAL;
2597 if (fd_buf_size > parent->length ||
2598 fda->parent_offset > parent->length - fd_buf_size) {
2599 /* No space for all file descriptors here. */
2600 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2601 proc->pid, thread->pid, (u64)fda->num_fds);
2602 return -EINVAL;
2605 * the source data for binder_buffer_object is visible
2606 * to user-space and the @buffer element is the user
2607 * pointer to the buffer_object containing the fd_array.
2608 * Convert the address to an offset relative to
2609 * the base of the transaction buffer.
2611 fda_offset = parent->buffer - t->buffer->user_data +
2612 fda->parent_offset;
2613 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2614 fda->parent_offset;
2616 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2617 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2618 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2619 proc->pid, thread->pid);
2620 return -EINVAL;
2622 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2623 if (ret)
2624 return ret;
2626 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2627 u32 fd;
2628 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2629 binder_size_t sender_uoffset = fdi * sizeof(fd);
2631 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2632 if (!ret)
2633 ret = binder_translate_fd(fd, offset, t, thread,
2634 in_reply_to);
2635 if (ret)
2636 return ret > 0 ? -EINVAL : ret;
2638 return 0;
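/*
 * Editor's worked example for the offset conversion above: if the
 * parent buffer object was placed at parent->buffer =
 * t->buffer->user_data + 0x40 and fda->parent_offset = 8, then
 * fda_offset = 0x48, and fd number i is read from and written back at
 * transaction-buffer offset 0x48 + i * sizeof(u32).
 */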
2641 static int binder_fixup_parent(struct list_head *pf_head,
2642 struct binder_transaction *t,
2643 struct binder_thread *thread,
2644 struct binder_buffer_object *bp,
2645 binder_size_t off_start_offset,
2646 binder_size_t num_valid,
2647 binder_size_t last_fixup_obj_off,
2648 binder_size_t last_fixup_min_off)
2650 struct binder_buffer_object *parent;
2651 struct binder_buffer *b = t->buffer;
2652 struct binder_proc *proc = thread->proc;
2653 struct binder_proc *target_proc = t->to_proc;
2654 struct binder_object object;
2655 binder_size_t buffer_offset;
2656 binder_size_t parent_offset;
2658 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2659 return 0;
2661 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2662 off_start_offset, &parent_offset,
2663 num_valid);
2664 if (!parent) {
2665 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2666 proc->pid, thread->pid);
2667 return -EINVAL;
2670 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2671 parent_offset, bp->parent_offset,
2672 last_fixup_obj_off,
2673 last_fixup_min_off)) {
2674 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2675 proc->pid, thread->pid);
2676 return -EINVAL;
2679 if (parent->length < sizeof(binder_uintptr_t) ||
2680 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2681 /* No space for a pointer here! */
2682 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2683 proc->pid, thread->pid);
2684 return -EINVAL;
2687 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2689 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2693 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2694 * @t1: the pending async txn in the frozen process
2695 * @t2: the new async txn to supersede the outdated pending one
2697 * Return: true if t2 can supersede t1
2698 * false if t2 cannot supersede t1
2700 static bool binder_can_update_transaction(struct binder_transaction *t1,
2701 struct binder_transaction *t2)
2703 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2704 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2705 return false;
2706 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2707 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2708 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2709 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2710 return true;
2711 return false;
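/*
 * Editor's worked example for the predicate above: two one-way
 * transactions sent with TF_UPDATE_TXN to the same frozen process,
 * carrying the same code, flags, sender pid and target node -- the
 * newer one may supersede the queued one. If either lacks
 * TF_ONE_WAY | TF_UPDATE_TXN, or any of those fields differ, both
 * remain queued.
 */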
2715 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2716 * @t: new async transaction
2717 * @target_list: list to find outdated transaction
2719 * Return: the outdated transaction if found
2720 * NULL if no outdated transaction can be found
2722 * Requires the proc->inner_lock to be held.
2724 static struct binder_transaction *
2725 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2726 struct list_head *target_list)
2728 struct binder_work *w;
2730 list_for_each_entry(w, target_list, entry) {
2731 struct binder_transaction *t_queued;
2733 if (w->type != BINDER_WORK_TRANSACTION)
2734 continue;
2735 t_queued = container_of(w, struct binder_transaction, work);
2736 if (binder_can_update_transaction(t_queued, t))
2737 return t_queued;
2739 return NULL;
2743 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2744 * @t: transaction to send
2745 * @proc: process to send the transaction to
2746 * @thread: thread in @proc to send the transaction to (may be NULL)
2748 * This function queues a transaction to the specified process. It will try
2749 * to find a thread in the target process to handle the transaction and
2750 * wake it up. If no thread is found, the work is queued to the proc
2751 * waitqueue.
2753 * If the @thread parameter is not NULL, the transaction is always queued
2754 * to the waitlist of that specific thread.
2756 * Return: 0 if the transaction was successfully queued
2757 * BR_DEAD_REPLY if the target process or thread is dead
2758 * BR_FROZEN_REPLY if the target process or thread is frozen and
2759 * the sync transaction was rejected
2760 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2761 * and the async transaction was successfully queued
2763 static int binder_proc_transaction(struct binder_transaction *t,
2764 struct binder_proc *proc,
2765 struct binder_thread *thread)
2767 struct binder_node *node = t->buffer->target_node;
2768 bool oneway = !!(t->flags & TF_ONE_WAY);
2769 bool pending_async = false;
2770 struct binder_transaction *t_outdated = NULL;
2771 bool frozen = false;
2773 BUG_ON(!node);
2774 binder_node_lock(node);
2775 if (oneway) {
2776 BUG_ON(thread);
2777 if (node->has_async_transaction)
2778 pending_async = true;
2779 else
2780 node->has_async_transaction = true;
2783 binder_inner_proc_lock(proc);
2784 if (proc->is_frozen) {
2785 frozen = true;
2786 proc->sync_recv |= !oneway;
2787 proc->async_recv |= oneway;
2790 if ((frozen && !oneway) || proc->is_dead ||
2791 (thread && thread->is_dead)) {
2792 binder_inner_proc_unlock(proc);
2793 binder_node_unlock(node);
2794 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2797 if (!thread && !pending_async)
2798 thread = binder_select_thread_ilocked(proc);
2800 if (thread) {
2801 binder_enqueue_thread_work_ilocked(thread, &t->work);
2802 } else if (!pending_async) {
2803 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2804 } else {
2805 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2806 t_outdated = binder_find_outdated_transaction_ilocked(t,
2807 &node->async_todo);
2808 if (t_outdated) {
2809 binder_debug(BINDER_DEBUG_TRANSACTION,
2810 "txn %d supersedes %d\n",
2811 t->debug_id, t_outdated->debug_id);
2812 list_del_init(&t_outdated->work.entry);
2813 proc->outstanding_txns--;
2816 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2819 if (!pending_async)
2820 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2822 proc->outstanding_txns++;
2823 binder_inner_proc_unlock(proc);
2824 binder_node_unlock(node);
2827 * To reduce potential contention, free the outdated transaction and
2828 * buffer after releasing the locks.
2830 if (t_outdated) {
2831 struct binder_buffer *buffer = t_outdated->buffer;
2833 t_outdated->buffer = NULL;
2834 buffer->transaction = NULL;
2835 trace_binder_transaction_update_buffer_release(buffer);
2836 binder_release_entire_buffer(proc, NULL, buffer, false);
2837 binder_alloc_free_buf(&proc->alloc, buffer);
2838 kfree(t_outdated);
2839 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2842 if (oneway && frozen)
2843 return BR_TRANSACTION_PENDING_FROZEN;
2845 return 0;
2849 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2850 * @node: struct binder_node for which to get refs
2851 * @procp: returns @node->proc if valid
2852 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2854 * User-space normally keeps the node alive when creating a transaction
2855 * since it has a reference to the target. The local strong ref keeps it
2856 * alive if the sending process dies before the target process processes
2857 * the transaction. If the source process is malicious or has a reference
2858 * counting bug, relying on the local strong ref can fail.
2860 * Since user-space can cause the local strong ref to go away, we also take
2861 * a tmpref on the node to ensure it survives while we are constructing
2862 * the transaction. We also need a tmpref on the proc while we are
2863 * constructing the transaction, so we take that here as well.
2865 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
2866 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2867 * target proc has died, @error is set to BR_DEAD_REPLY.
2869 static struct binder_node *binder_get_node_refs_for_txn(
2870 struct binder_node *node,
2871 struct binder_proc **procp,
2872 uint32_t *error)
2874 struct binder_node *target_node = NULL;
2876 binder_node_inner_lock(node);
2877 if (node->proc) {
2878 target_node = node;
2879 binder_inc_node_nilocked(node, 1, 0, NULL);
2880 binder_inc_node_tmpref_ilocked(node);
2881 node->proc->tmp_ref++;
2882 *procp = node->proc;
2883 } else
2884 *error = BR_DEAD_REPLY;
2885 binder_node_inner_unlock(node);
2887 return target_node;
2890 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2891 uint32_t command, int32_t param)
2893 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2895 if (!from) {
2896 /* annotation for sparse */
2897 __release(&from->proc->inner_lock);
2898 return;
2901 /* don't override existing errors */
2902 if (from->ee.command == BR_OK)
2903 binder_set_extended_error(&from->ee, id, command, param);
2904 binder_inner_proc_unlock(from->proc);
2905 binder_thread_dec_tmpref(from);
2908 static void binder_transaction(struct binder_proc *proc,
2909 struct binder_thread *thread,
2910 struct binder_transaction_data *tr, int reply,
2911 binder_size_t extra_buffers_size)
2913 int ret;
2914 struct binder_transaction *t;
2915 struct binder_work *w;
2916 struct binder_work *tcomplete;
2917 binder_size_t buffer_offset = 0;
2918 binder_size_t off_start_offset, off_end_offset;
2919 binder_size_t off_min;
2920 binder_size_t sg_buf_offset, sg_buf_end_offset;
2921 binder_size_t user_offset = 0;
2922 struct binder_proc *target_proc = NULL;
2923 struct binder_thread *target_thread = NULL;
2924 struct binder_node *target_node = NULL;
2925 struct binder_transaction *in_reply_to = NULL;
2926 struct binder_transaction_log_entry *e;
2927 uint32_t return_error = 0;
2928 uint32_t return_error_param = 0;
2929 uint32_t return_error_line = 0;
2930 binder_size_t last_fixup_obj_off = 0;
2931 binder_size_t last_fixup_min_off = 0;
2932 struct binder_context *context = proc->context;
2933 int t_debug_id = atomic_inc_return(&binder_last_id);
2934 ktime_t t_start_time = ktime_get();
2935 char *secctx = NULL;
2936 u32 secctx_sz = 0;
2937 struct list_head sgc_head;
2938 struct list_head pf_head;
2939 const void __user *user_buffer = (const void __user *)
2940 (uintptr_t)tr->data.ptr.buffer;
2941 INIT_LIST_HEAD(&sgc_head);
2942 INIT_LIST_HEAD(&pf_head);
2944 e = binder_transaction_log_add(&binder_transaction_log);
2945 e->debug_id = t_debug_id;
2946 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2947 e->from_proc = proc->pid;
2948 e->from_thread = thread->pid;
2949 e->target_handle = tr->target.handle;
2950 e->data_size = tr->data_size;
2951 e->offsets_size = tr->offsets_size;
2952 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2954 binder_inner_proc_lock(proc);
2955 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2956 binder_inner_proc_unlock(proc);
2958 if (reply) {
2959 binder_inner_proc_lock(proc);
2960 in_reply_to = thread->transaction_stack;
2961 if (in_reply_to == NULL) {
2962 binder_inner_proc_unlock(proc);
2963 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2964 proc->pid, thread->pid);
2965 return_error = BR_FAILED_REPLY;
2966 return_error_param = -EPROTO;
2967 return_error_line = __LINE__;
2968 goto err_empty_call_stack;
2970 if (in_reply_to->to_thread != thread) {
2971 spin_lock(&in_reply_to->lock);
2972 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2973 proc->pid, thread->pid, in_reply_to->debug_id,
2974 in_reply_to->to_proc ?
2975 in_reply_to->to_proc->pid : 0,
2976 in_reply_to->to_thread ?
2977 in_reply_to->to_thread->pid : 0);
2978 spin_unlock(&in_reply_to->lock);
2979 binder_inner_proc_unlock(proc);
2980 return_error = BR_FAILED_REPLY;
2981 return_error_param = -EPROTO;
2982 return_error_line = __LINE__;
2983 in_reply_to = NULL;
2984 goto err_bad_call_stack;
2986 thread->transaction_stack = in_reply_to->to_parent;
2987 binder_inner_proc_unlock(proc);
2988 binder_set_nice(in_reply_to->saved_priority);
2989 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2990 if (target_thread == NULL) {
2991 /* annotation for sparse */
2992 __release(&target_thread->proc->inner_lock);
2993 binder_txn_error("%d:%d reply target not found\n",
2994 thread->pid, proc->pid);
2995 return_error = BR_DEAD_REPLY;
2996 return_error_line = __LINE__;
2997 goto err_dead_binder;
2999 if (target_thread->transaction_stack != in_reply_to) {
3000 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3001 proc->pid, thread->pid,
3002 target_thread->transaction_stack ?
3003 target_thread->transaction_stack->debug_id : 0,
3004 in_reply_to->debug_id);
3005 binder_inner_proc_unlock(target_thread->proc);
3006 return_error = BR_FAILED_REPLY;
3007 return_error_param = -EPROTO;
3008 return_error_line = __LINE__;
3009 in_reply_to = NULL;
3010 target_thread = NULL;
3011 goto err_dead_binder;
3013 target_proc = target_thread->proc;
3014 target_proc->tmp_ref++;
3015 binder_inner_proc_unlock(target_thread->proc);
3016 } else {
3017 if (tr->target.handle) {
3018 struct binder_ref *ref;
3021 * There must already be a strong ref
3022 * on this node. If so, do a strong
3023 * increment on the node to ensure it
3024 * stays alive until the transaction is
3025 * done.
3027 binder_proc_lock(proc);
3028 ref = binder_get_ref_olocked(proc, tr->target.handle,
3029 true);
3030 if (ref) {
3031 target_node = binder_get_node_refs_for_txn(
3032 ref->node, &target_proc,
3033 &return_error);
3034 } else {
3035 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3036 proc->pid, thread->pid, tr->target.handle);
3037 return_error = BR_FAILED_REPLY;
3039 binder_proc_unlock(proc);
3040 } else {
3041 mutex_lock(&context->context_mgr_node_lock);
3042 target_node = context->binder_context_mgr_node;
3043 if (target_node)
3044 target_node = binder_get_node_refs_for_txn(
3045 target_node, &target_proc,
3046 &return_error);
3047 else
3048 return_error = BR_DEAD_REPLY;
3049 mutex_unlock(&context->context_mgr_node_lock);
3050 if (target_node && target_proc->pid == proc->pid) {
3051 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3052 proc->pid, thread->pid);
3053 return_error = BR_FAILED_REPLY;
3054 return_error_param = -EINVAL;
3055 return_error_line = __LINE__;
3056 goto err_invalid_target_handle;
3059 if (!target_node) {
3060 binder_txn_error("%d:%d cannot find target node\n",
3061 thread->pid, proc->pid);
3063 * return_error is set above
3065 return_error_param = -EINVAL;
3066 return_error_line = __LINE__;
3067 goto err_dead_binder;
3069 e->to_node = target_node->debug_id;
3070 if (WARN_ON(proc == target_proc)) {
3071 binder_txn_error("%d:%d self transactions not allowed\n",
3072 thread->pid, proc->pid);
3073 return_error = BR_FAILED_REPLY;
3074 return_error_param = -EINVAL;
3075 return_error_line = __LINE__;
3076 goto err_invalid_target_handle;
3078 if (security_binder_transaction(proc->cred,
3079 target_proc->cred) < 0) {
3080 binder_txn_error("%d:%d transaction credentials failed\n",
3081 thread->pid, proc->pid);
3082 return_error = BR_FAILED_REPLY;
3083 return_error_param = -EPERM;
3084 return_error_line = __LINE__;
3085 goto err_invalid_target_handle;
3087 binder_inner_proc_lock(proc);
3089 w = list_first_entry_or_null(&thread->todo,
3090 struct binder_work, entry);
3091 if (!(tr->flags & TF_ONE_WAY) && w &&
3092 w->type == BINDER_WORK_TRANSACTION) {
3094 * Do not allow new outgoing transaction from a
3095 * thread that has a transaction at the head of
3096 * its todo list. Only need to check the head
3097 * because binder_select_thread_ilocked picks a
3098 * thread from proc->waiting_threads to enqueue
3099 * the transaction, and nothing is queued to the
3100 * todo list while the thread is on waiting_threads.
3102 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3103 proc->pid, thread->pid);
3104 binder_inner_proc_unlock(proc);
3105 return_error = BR_FAILED_REPLY;
3106 return_error_param = -EPROTO;
3107 return_error_line = __LINE__;
3108 goto err_bad_todo_list;
3111 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3112 struct binder_transaction *tmp;
3114 tmp = thread->transaction_stack;
3115 if (tmp->to_thread != thread) {
3116 spin_lock(&tmp->lock);
3117 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3118 proc->pid, thread->pid, tmp->debug_id,
3119 tmp->to_proc ? tmp->to_proc->pid : 0,
3120 tmp->to_thread ?
3121 tmp->to_thread->pid : 0);
3122 spin_unlock(&tmp->lock);
3123 binder_inner_proc_unlock(proc);
3124 return_error = BR_FAILED_REPLY;
3125 return_error_param = -EPROTO;
3126 return_error_line = __LINE__;
3127 goto err_bad_call_stack;
3129 while (tmp) {
3130 struct binder_thread *from;
3132 spin_lock(&tmp->lock);
3133 from = tmp->from;
3134 if (from && from->proc == target_proc) {
3135 atomic_inc(&from->tmp_ref);
3136 target_thread = from;
3137 spin_unlock(&tmp->lock);
3138 break;
3140 spin_unlock(&tmp->lock);
3141 tmp = tmp->from_parent;
3144 binder_inner_proc_unlock(proc);
3146 if (target_thread)
3147 e->to_thread = target_thread->pid;
3148 e->to_proc = target_proc->pid;
3150 /* TODO: reuse incoming transaction for reply */
3151 t = kzalloc(sizeof(*t), GFP_KERNEL);
3152 if (t == NULL) {
3153 binder_txn_error("%d:%d cannot allocate transaction\n",
3154 thread->pid, proc->pid);
3155 return_error = BR_FAILED_REPLY;
3156 return_error_param = -ENOMEM;
3157 return_error_line = __LINE__;
3158 goto err_alloc_t_failed;
3160 INIT_LIST_HEAD(&t->fd_fixups);
3161 binder_stats_created(BINDER_STAT_TRANSACTION);
3162 spin_lock_init(&t->lock);
3164 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3165 if (tcomplete == NULL) {
3166 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3167 thread->pid, proc->pid);
3168 return_error = BR_FAILED_REPLY;
3169 return_error_param = -ENOMEM;
3170 return_error_line = __LINE__;
3171 goto err_alloc_tcomplete_failed;
3173 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3175 t->debug_id = t_debug_id;
3176 t->start_time = t_start_time;
3178 if (reply)
3179 binder_debug(BINDER_DEBUG_TRANSACTION,
3180 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3181 proc->pid, thread->pid, t->debug_id,
3182 target_proc->pid, target_thread->pid,
3183 (u64)tr->data.ptr.buffer,
3184 (u64)tr->data.ptr.offsets,
3185 (u64)tr->data_size, (u64)tr->offsets_size,
3186 (u64)extra_buffers_size);
3187 else
3188 binder_debug(BINDER_DEBUG_TRANSACTION,
3189 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3190 proc->pid, thread->pid, t->debug_id,
3191 target_proc->pid, target_node->debug_id,
3192 (u64)tr->data.ptr.buffer,
3193 (u64)tr->data.ptr.offsets,
3194 (u64)tr->data_size, (u64)tr->offsets_size,
3195 (u64)extra_buffers_size);
3197 if (!reply && !(tr->flags & TF_ONE_WAY))
3198 t->from = thread;
3199 else
3200 t->from = NULL;
3201 t->from_pid = proc->pid;
3202 t->from_tid = thread->pid;
3203 t->sender_euid = task_euid(proc->tsk);
3204 t->to_proc = target_proc;
3205 t->to_thread = target_thread;
3206 t->code = tr->code;
3207 t->flags = tr->flags;
3208 t->priority = task_nice(current);
3210 if (target_node && target_node->txn_security_ctx) {
3211 u32 secid;
3212 size_t added_size;
3214 security_cred_getsecid(proc->cred, &secid);
3215 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3216 if (ret) {
3217 binder_txn_error("%d:%d failed to get security context\n",
3218 thread->pid, proc->pid);
3219 return_error = BR_FAILED_REPLY;
3220 return_error_param = ret;
3221 return_error_line = __LINE__;
3222 goto err_get_secctx_failed;
3224 added_size = ALIGN(secctx_sz, sizeof(u64));
3225 extra_buffers_size += added_size;
3226 if (extra_buffers_size < added_size) {
3227 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3228 thread->pid, proc->pid);
3229 return_error = BR_FAILED_REPLY;
3230 return_error_param = -EINVAL;
3231 return_error_line = __LINE__;
3232 goto err_bad_extra_size;
3236 trace_binder_transaction(reply, t, target_node);
3238 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3239 tr->offsets_size, extra_buffers_size,
3240 !reply && (t->flags & TF_ONE_WAY));
3241 if (IS_ERR(t->buffer)) {
3242 char *s;
3244 ret = PTR_ERR(t->buffer);
3245 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3246 : (ret == -ENOSPC) ? ": no space left"
3247 : (ret == -ENOMEM) ? ": memory allocation failed"
3248 : "";
3249 binder_txn_error("cannot allocate buffer%s", s);
3251 return_error_param = PTR_ERR(t->buffer);
3252 return_error = return_error_param == -ESRCH ?
3253 BR_DEAD_REPLY : BR_FAILED_REPLY;
3254 return_error_line = __LINE__;
3255 t->buffer = NULL;
3256 goto err_binder_alloc_buf_failed;
3258 if (secctx) {
3259 int err;
3260 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3261 ALIGN(tr->offsets_size, sizeof(void *)) +
3262 ALIGN(extra_buffers_size, sizeof(void *)) -
3263 ALIGN(secctx_sz, sizeof(u64));
3265 t->security_ctx = t->buffer->user_data + buf_offset;
3266 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3267 t->buffer, buf_offset,
3268 secctx, secctx_sz);
3269 if (err) {
3270 t->security_ctx = 0;
3271 WARN_ON(1);
3273 security_release_secctx(secctx, secctx_sz);
3274 secctx = NULL;
3276 t->buffer->debug_id = t->debug_id;
3277 t->buffer->transaction = t;
3278 t->buffer->target_node = target_node;
3279 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3280 trace_binder_transaction_alloc_buf(t->buffer);
3282 if (binder_alloc_copy_user_to_buffer(
3283 &target_proc->alloc,
3284 t->buffer,
3285 ALIGN(tr->data_size, sizeof(void *)),
3286 (const void __user *)
3287 (uintptr_t)tr->data.ptr.offsets,
3288 tr->offsets_size)) {
3289 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3290 proc->pid, thread->pid);
3291 return_error = BR_FAILED_REPLY;
3292 return_error_param = -EFAULT;
3293 return_error_line = __LINE__;
3294 goto err_copy_data_failed;
3296 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3297 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3298 proc->pid, thread->pid, (u64)tr->offsets_size);
3299 return_error = BR_FAILED_REPLY;
3300 return_error_param = -EINVAL;
3301 return_error_line = __LINE__;
3302 goto err_bad_offset;
3304 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3305 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3306 proc->pid, thread->pid,
3307 (u64)extra_buffers_size);
3308 return_error = BR_FAILED_REPLY;
3309 return_error_param = -EINVAL;
3310 return_error_line = __LINE__;
3311 goto err_bad_offset;
3313 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3314 buffer_offset = off_start_offset;
3315 off_end_offset = off_start_offset + tr->offsets_size;
3316 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3317 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3318 ALIGN(secctx_sz, sizeof(u64));
3319 off_min = 0;
3320 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3321 buffer_offset += sizeof(binder_size_t)) {
3322 struct binder_object_header *hdr;
3323 size_t object_size;
3324 struct binder_object object;
3325 binder_size_t object_offset;
3326 binder_size_t copy_size;
3328 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3329 &object_offset,
3330 t->buffer,
3331 buffer_offset,
3332 sizeof(object_offset))) {
3333 binder_txn_error("%d:%d copy offset from buffer failed\n",
3334 thread->pid, proc->pid);
3335 return_error = BR_FAILED_REPLY;
3336 return_error_param = -EINVAL;
3337 return_error_line = __LINE__;
3338 goto err_bad_offset;
3342 * Copy the source user buffer up to the next object
3343 * that will be processed.
3345 copy_size = object_offset - user_offset;
3346 if (copy_size && (user_offset > object_offset ||
3347 binder_alloc_copy_user_to_buffer(
3348 &target_proc->alloc,
3349 t->buffer, user_offset,
3350 user_buffer + user_offset,
3351 copy_size))) {
3352 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3353 proc->pid, thread->pid);
3354 return_error = BR_FAILED_REPLY;
3355 return_error_param = -EFAULT;
3356 return_error_line = __LINE__;
3357 goto err_copy_data_failed;
3359 object_size = binder_get_object(target_proc, user_buffer,
3360 t->buffer, object_offset, &object);
3361 if (object_size == 0 || object_offset < off_min) {
3362 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3363 proc->pid, thread->pid,
3364 (u64)object_offset,
3365 (u64)off_min,
3366 (u64)t->buffer->data_size);
3367 return_error = BR_FAILED_REPLY;
3368 return_error_param = -EINVAL;
3369 return_error_line = __LINE__;
3370 goto err_bad_offset;
3373 * Set offset to the next buffer fragment to be
3374 * copied
3376 user_offset = object_offset + object_size;
3378 hdr = &object.hdr;
3379 off_min = object_offset + object_size;
3380 switch (hdr->type) {
3381 case BINDER_TYPE_BINDER:
3382 case BINDER_TYPE_WEAK_BINDER: {
3383 struct flat_binder_object *fp;
3385 fp = to_flat_binder_object(hdr);
3386 ret = binder_translate_binder(fp, t, thread);
3388 if (ret < 0 ||
3389 binder_alloc_copy_to_buffer(&target_proc->alloc,
3390 t->buffer,
3391 object_offset,
3392 fp, sizeof(*fp))) {
3393 binder_txn_error("%d:%d translate binder failed\n",
3394 thread->pid, proc->pid);
3395 return_error = BR_FAILED_REPLY;
3396 return_error_param = ret;
3397 return_error_line = __LINE__;
3398 goto err_translate_failed;
3400 } break;
3401 case BINDER_TYPE_HANDLE:
3402 case BINDER_TYPE_WEAK_HANDLE: {
3403 struct flat_binder_object *fp;
3405 fp = to_flat_binder_object(hdr);
3406 ret = binder_translate_handle(fp, t, thread);
3407 if (ret < 0 ||
3408 binder_alloc_copy_to_buffer(&target_proc->alloc,
3409 t->buffer,
3410 object_offset,
3411 fp, sizeof(*fp))) {
3412 binder_txn_error("%d:%d translate handle failed\n",
3413 thread->pid, proc->pid);
3414 return_error = BR_FAILED_REPLY;
3415 return_error_param = ret;
3416 return_error_line = __LINE__;
3417 goto err_translate_failed;
3419 } break;
3421 case BINDER_TYPE_FD: {
3422 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3423 binder_size_t fd_offset = object_offset +
3424 (uintptr_t)&fp->fd - (uintptr_t)fp;
3425 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3426 thread, in_reply_to);
3428 fp->pad_binder = 0;
3429 if (ret < 0 ||
3430 binder_alloc_copy_to_buffer(&target_proc->alloc,
3431 t->buffer,
3432 object_offset,
3433 fp, sizeof(*fp))) {
3434 binder_txn_error("%d:%d translate fd failed\n",
3435 thread->pid, proc->pid);
3436 return_error = BR_FAILED_REPLY;
3437 return_error_param = ret;
3438 return_error_line = __LINE__;
3439 goto err_translate_failed;
3441 } break;
3442 case BINDER_TYPE_FDA: {
3443 struct binder_object ptr_object;
3444 binder_size_t parent_offset;
3445 struct binder_object user_object;
3446 size_t user_parent_size;
3447 struct binder_fd_array_object *fda =
3448 to_binder_fd_array_object(hdr);
3449 size_t num_valid = (buffer_offset - off_start_offset) /
3450 sizeof(binder_size_t);
3451 struct binder_buffer_object *parent =
3452 binder_validate_ptr(target_proc, t->buffer,
3453 &ptr_object, fda->parent,
3454 off_start_offset,
3455 &parent_offset,
3456 num_valid);
3457 if (!parent) {
3458 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3459 proc->pid, thread->pid);
3460 return_error = BR_FAILED_REPLY;
3461 return_error_param = -EINVAL;
3462 return_error_line = __LINE__;
3463 goto err_bad_parent;
3465 if (!binder_validate_fixup(target_proc, t->buffer,
3466 off_start_offset,
3467 parent_offset,
3468 fda->parent_offset,
3469 last_fixup_obj_off,
3470 last_fixup_min_off)) {
3471 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3472 proc->pid, thread->pid);
3473 return_error = BR_FAILED_REPLY;
3474 return_error_param = -EINVAL;
3475 return_error_line = __LINE__;
3476 goto err_bad_parent;
3479 * We need to read the user version of the parent
3480 * object to get the original user offset
3482 user_parent_size =
3483 binder_get_object(proc, user_buffer, t->buffer,
3484 parent_offset, &user_object);
3485 if (user_parent_size != sizeof(user_object.bbo)) {
3486 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3487 proc->pid, thread->pid,
3488 user_parent_size,
3489 sizeof(user_object.bbo));
3490 return_error = BR_FAILED_REPLY;
3491 return_error_param = -EINVAL;
3492 return_error_line = __LINE__;
3493 goto err_bad_parent;
3495 ret = binder_translate_fd_array(&pf_head, fda,
3496 user_buffer, parent,
3497 &user_object.bbo, t,
3498 thread, in_reply_to);
3499 if (!ret)
3500 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3501 t->buffer,
3502 object_offset,
3503 fda, sizeof(*fda));
3504 if (ret) {
3505 binder_txn_error("%d:%d translate fd array failed\n",
3506 thread->pid, proc->pid);
3507 return_error = BR_FAILED_REPLY;
3508 return_error_param = ret > 0 ? -EINVAL : ret;
3509 return_error_line = __LINE__;
3510 goto err_translate_failed;
3512 last_fixup_obj_off = parent_offset;
3513 last_fixup_min_off =
3514 fda->parent_offset + sizeof(u32) * fda->num_fds;
3515 } break;
3516 case BINDER_TYPE_PTR: {
3517 struct binder_buffer_object *bp =
3518 to_binder_buffer_object(hdr);
3519 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3520 size_t num_valid;
3522 if (bp->length > buf_left) {
3523 binder_user_error("%d:%d got transaction with too large buffer\n",
3524 proc->pid, thread->pid);
3525 return_error = BR_FAILED_REPLY;
3526 return_error_param = -EINVAL;
3527 return_error_line = __LINE__;
3528 goto err_bad_offset;
3530 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3531 (const void __user *)(uintptr_t)bp->buffer,
3532 bp->length);
3533 if (ret) {
3534 binder_txn_error("%d:%d deferred copy failed\n",
3535 thread->pid, proc->pid);
3536 return_error = BR_FAILED_REPLY;
3537 return_error_param = ret;
3538 return_error_line = __LINE__;
3539 goto err_translate_failed;
3541 /* Fixup buffer pointer to target proc address space */
3542 bp->buffer = t->buffer->user_data + sg_buf_offset;
3543 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3545 num_valid = (buffer_offset - off_start_offset) /
3546 sizeof(binder_size_t);
3547 ret = binder_fixup_parent(&pf_head, t,
3548 thread, bp,
3549 off_start_offset,
3550 num_valid,
3551 last_fixup_obj_off,
3552 last_fixup_min_off);
3553 if (ret < 0 ||
3554 binder_alloc_copy_to_buffer(&target_proc->alloc,
3555 t->buffer,
3556 object_offset,
3557 bp, sizeof(*bp))) {
3558 binder_txn_error("%d:%d failed to fixup parent\n",
3559 thread->pid, proc->pid);
3560 return_error = BR_FAILED_REPLY;
3561 return_error_param = ret;
3562 return_error_line = __LINE__;
3563 goto err_translate_failed;
3565 last_fixup_obj_off = object_offset;
3566 last_fixup_min_off = 0;
3567 } break;
3568 default:
3569 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3570 proc->pid, thread->pid, hdr->type);
3571 return_error = BR_FAILED_REPLY;
3572 return_error_param = -EINVAL;
3573 return_error_line = __LINE__;
3574 goto err_bad_object_type;
3577 /* Done processing objects, copy the rest of the buffer */
3578 if (binder_alloc_copy_user_to_buffer(
3579 &target_proc->alloc,
3580 t->buffer, user_offset,
3581 user_buffer + user_offset,
3582 tr->data_size - user_offset)) {
3583 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3584 proc->pid, thread->pid);
3585 return_error = BR_FAILED_REPLY;
3586 return_error_param = -EFAULT;
3587 return_error_line = __LINE__;
3588 goto err_copy_data_failed;
3591 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3592 &sgc_head, &pf_head);
3593 if (ret) {
3594 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3595 proc->pid, thread->pid);
3596 return_error = BR_FAILED_REPLY;
3597 return_error_param = ret;
3598 return_error_line = __LINE__;
3599 goto err_copy_data_failed;
3601 if (t->buffer->oneway_spam_suspect)
3602 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3603 else
3604 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3605 t->work.type = BINDER_WORK_TRANSACTION;
3607 if (reply) {
3608 binder_enqueue_thread_work(thread, tcomplete);
3609 binder_inner_proc_lock(target_proc);
3610 if (target_thread->is_dead) {
3611 return_error = BR_DEAD_REPLY;
3612 binder_inner_proc_unlock(target_proc);
3613 goto err_dead_proc_or_thread;
3615 BUG_ON(t->buffer->async_transaction != 0);
3616 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3617 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3618 target_proc->outstanding_txns++;
3619 binder_inner_proc_unlock(target_proc);
3620 wake_up_interruptible_sync(&target_thread->wait);
3621 binder_free_transaction(in_reply_to);
3622 } else if (!(t->flags & TF_ONE_WAY)) {
3623 BUG_ON(t->buffer->async_transaction != 0);
3624 binder_inner_proc_lock(proc);
3626 * Defer the TRANSACTION_COMPLETE, so we don't return to
3627 * userspace immediately; this allows the target process to
3628 * immediately start processing this transaction, reducing
3629 * latency. We will then return the TRANSACTION_COMPLETE when
3630 * the target replies (or there is an error).
3632 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3633 t->need_reply = 1;
3634 t->from_parent = thread->transaction_stack;
3635 thread->transaction_stack = t;
3636 binder_inner_proc_unlock(proc);
3637 return_error = binder_proc_transaction(t,
3638 target_proc, target_thread);
3639 if (return_error) {
3640 binder_inner_proc_lock(proc);
3641 binder_pop_transaction_ilocked(thread, t);
3642 binder_inner_proc_unlock(proc);
3643 goto err_dead_proc_or_thread;
3645 } else {
3646 BUG_ON(target_node == NULL);
3647 BUG_ON(t->buffer->async_transaction != 1);
3648 return_error = binder_proc_transaction(t, target_proc, NULL);
3650 * Let the caller know when async transaction reaches a frozen
3651 * process and is put in a pending queue, waiting for the target
3652 * process to be unfrozen.
3654 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3655 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3656 binder_enqueue_thread_work(thread, tcomplete);
3657 if (return_error &&
3658 return_error != BR_TRANSACTION_PENDING_FROZEN)
3659 goto err_dead_proc_or_thread;
3661 if (target_thread)
3662 binder_thread_dec_tmpref(target_thread);
3663 binder_proc_dec_tmpref(target_proc);
3664 if (target_node)
3665 binder_dec_node_tmpref(target_node);
3667 * write barrier to synchronize with initialization
3668 * of log entry
3670 smp_wmb();
3671 WRITE_ONCE(e->debug_id_done, t_debug_id);
3672 return;
3674 err_dead_proc_or_thread:
3675 binder_txn_error("%d:%d dead process or thread\n",
3676 thread->pid, proc->pid);
3677 return_error_line = __LINE__;
3678 binder_dequeue_work(proc, tcomplete);
3679 err_translate_failed:
3680 err_bad_object_type:
3681 err_bad_offset:
3682 err_bad_parent:
3683 err_copy_data_failed:
3684 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3685 binder_free_txn_fixups(t);
3686 trace_binder_transaction_failed_buffer_release(t->buffer);
3687 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3688 buffer_offset, true);
3689 if (target_node)
3690 binder_dec_node_tmpref(target_node);
3691 target_node = NULL;
3692 t->buffer->transaction = NULL;
3693 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3694 err_binder_alloc_buf_failed:
3695 err_bad_extra_size:
3696 if (secctx)
3697 security_release_secctx(secctx, secctx_sz);
3698 err_get_secctx_failed:
3699 kfree(tcomplete);
3700 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3701 err_alloc_tcomplete_failed:
3702 if (trace_binder_txn_latency_free_enabled())
3703 binder_txn_latency_free(t);
3704 kfree(t);
3705 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3706 err_alloc_t_failed:
3707 err_bad_todo_list:
3708 err_bad_call_stack:
3709 err_empty_call_stack:
3710 err_dead_binder:
3711 err_invalid_target_handle:
3712 if (target_node) {
3713 binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

3717 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3718 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3719 proc->pid, thread->pid, reply ? "reply" :
3720 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3721 target_proc ? target_proc->pid : 0,
3722 target_thread ? target_thread->pid : 0,
3723 t_debug_id, return_error, return_error_param,
3724 (u64)tr->data_size, (u64)tr->offsets_size,
3725 return_error_line);
3727 if (target_thread)
3728 binder_thread_dec_tmpref(target_thread);
3729 if (target_proc)
3730 binder_proc_dec_tmpref(target_proc);
	{
		struct binder_transaction_log_entry *fe;

3735 e->return_error = return_error;
3736 e->return_error_param = return_error_param;
3737 e->return_error_line = return_error_line;
3738 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3739 *fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
3744 smp_wmb();
3745 WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

3749 BUG_ON(thread->return_error.cmd != BR_OK);
3750 if (in_reply_to) {
3751 binder_set_txn_from_error(in_reply_to, t_debug_id,
3752 return_error, return_error_param);
3753 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3754 binder_enqueue_thread_work(thread, &thread->return_error.work);
3755 binder_send_failed_reply(in_reply_to, return_error);
3756 } else {
3757 binder_inner_proc_lock(proc);
3758 binder_set_extended_error(&thread->ee, t_debug_id,
3759 return_error, return_error_param);
3760 binder_inner_proc_unlock(proc);
3761 thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}

/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If buffer for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer, bool is_failure)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
3788 if (buffer->async_transaction && buffer->target_node) {
3789 struct binder_node *buf_node;
3790 struct binder_work *w;
3792 buf_node = buffer->target_node;
3793 binder_node_inner_lock(buf_node);
3794 BUG_ON(!buf_node->has_async_transaction);
3795 BUG_ON(buf_node->proc != proc);
3796 w = binder_dequeue_work_head_ilocked(
3797 &buf_node->async_todo);
3798 if (!w) {
3799 buf_node->has_async_transaction = false;
3800 } else {
3801 binder_enqueue_work_ilocked(
3802 w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
3808 binder_release_entire_buffer(proc, thread, buffer, is_failure);
	binder_alloc_free_buf(&proc->alloc, buffer);
}

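/**
 * binder_thread_write() - consume BC_* commands from a userspace buffer
 * @proc:	binder proc this thread belongs to
 * @thread:	binder thread issuing the commands
 * @binder_buffer: userspace address of the command buffer
 * @size:	size of the command buffer in bytes
 * @consumed:	bytes already processed; advanced as commands are
 *		handled so userspace can resume after a partial write
 *
 * Commands are processed until the buffer is exhausted or a pending
 * return error is set on the thread. Returns 0 on success or a
 * negative errno if a command could not be copied in or was invalid.
 */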
3812 static int binder_thread_write(struct binder_proc *proc,
3813 struct binder_thread *thread,
3814 binder_uintptr_t binder_buffer, size_t size,
			       binder_size_t *consumed)
{
3817 uint32_t cmd;
3818 struct binder_context *context = proc->context;
3819 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3820 void __user *ptr = buffer + *consumed;
3821 void __user *end = buffer + size;
3823 while (ptr < end && thread->return_error.cmd == BR_OK) {
3824 int ret;
3826 if (get_user(cmd, (uint32_t __user *)ptr))
3827 return -EFAULT;
3828 ptr += sizeof(uint32_t);
3829 trace_binder_command(cmd);
3830 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3831 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3832 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
3835 switch (cmd) {
3836 case BC_INCREFS:
3837 case BC_ACQUIRE:
3838 case BC_RELEASE:
3839 case BC_DECREFS: {
3840 uint32_t target;
3841 const char *debug_string;
3842 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3843 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3844 struct binder_ref_data rdata;
3846 if (get_user(target, (uint32_t __user *)ptr))
3847 return -EFAULT;
3849 ptr += sizeof(uint32_t);
3850 ret = -1;
3851 if (increment && !target) {
3852 struct binder_node *ctx_mgr_node;
3854 mutex_lock(&context->context_mgr_node_lock);
3855 ctx_mgr_node = context->binder_context_mgr_node;
3856 if (ctx_mgr_node) {
3857 if (ctx_mgr_node->proc == proc) {
3858 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3859 proc->pid, thread->pid);
3860 mutex_unlock(&context->context_mgr_node_lock);
					return -EINVAL;
				}
				ret = binder_inc_ref_for_node(
						proc, ctx_mgr_node,
						strong, NULL, &rdata);
			}
			mutex_unlock(&context->context_mgr_node_lock);
		}
		if (ret)
3870 ret = binder_update_ref_for_handle(
3871 proc, target, increment, strong,
3872 &rdata);
3873 if (!ret && rdata.desc != target) {
3874 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3875 proc->pid, thread->pid,
					target, rdata.desc);
			}
3878 switch (cmd) {
3879 case BC_INCREFS:
3880 debug_string = "IncRefs";
3881 break;
3882 case BC_ACQUIRE:
3883 debug_string = "Acquire";
3884 break;
3885 case BC_RELEASE:
3886 debug_string = "Release";
3887 break;
3888 case BC_DECREFS:
3889 default:
3890 debug_string = "DecRefs";
				break;
			}
3893 if (ret) {
3894 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3895 proc->pid, thread->pid, debug_string,
3896 strong, target, ret);
				break;
			}
3899 binder_debug(BINDER_DEBUG_USER_REFS,
3900 "%d:%d %s ref %d desc %d s %d w %d\n",
3901 proc->pid, thread->pid, debug_string,
3902 rdata.debug_id, rdata.desc, rdata.strong,
3903 rdata.weak);
			break;
		}
3906 case BC_INCREFS_DONE:
3907 case BC_ACQUIRE_DONE: {
3908 binder_uintptr_t node_ptr;
3909 binder_uintptr_t cookie;
3910 struct binder_node *node;
3911 bool free_node;
3913 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3914 return -EFAULT;
3915 ptr += sizeof(binder_uintptr_t);
3916 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3917 return -EFAULT;
3918 ptr += sizeof(binder_uintptr_t);
3919 node = binder_get_node(proc, node_ptr);
3920 if (node == NULL) {
3921 binder_user_error("%d:%d %s u%016llx no match\n",
3922 proc->pid, thread->pid,
3923 cmd == BC_INCREFS_DONE ?
3924 "BC_INCREFS_DONE" :
3925 "BC_ACQUIRE_DONE",
3926 (u64)node_ptr);
				break;
			}
3929 if (cookie != node->cookie) {
3930 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3931 proc->pid, thread->pid,
3932 cmd == BC_INCREFS_DONE ?
3933 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3934 (u64)node_ptr, node->debug_id,
3935 (u64)cookie, (u64)node->cookie);
3936 binder_put_node(node);
				break;
			}
3939 binder_node_inner_lock(node);
3940 if (cmd == BC_ACQUIRE_DONE) {
3941 if (node->pending_strong_ref == 0) {
3942 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3943 proc->pid, thread->pid,
3944 node->debug_id);
3945 binder_node_inner_unlock(node);
3946 binder_put_node(node);
					break;
				}
3949 node->pending_strong_ref = 0;
3950 } else {
3951 if (node->pending_weak_ref == 0) {
3952 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3953 proc->pid, thread->pid,
3954 node->debug_id);
3955 binder_node_inner_unlock(node);
3956 binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
3961 free_node = binder_dec_node_nilocked(node,
3962 cmd == BC_ACQUIRE_DONE, 0);
3963 WARN_ON(free_node);
3964 binder_debug(BINDER_DEBUG_USER_REFS,
3965 "%d:%d %s node %d ls %d lw %d tr %d\n",
3966 proc->pid, thread->pid,
3967 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3968 node->debug_id, node->local_strong_refs,
3969 node->local_weak_refs, node->tmp_refs);
3970 binder_node_inner_unlock(node);
3971 binder_put_node(node);
			break;
		}
3974 case BC_ATTEMPT_ACQUIRE:
3975 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3976 return -EINVAL;
3977 case BC_ACQUIRE_RESULT:
3978 pr_err("BC_ACQUIRE_RESULT not supported\n");
3979 return -EINVAL;
3981 case BC_FREE_BUFFER: {
3982 binder_uintptr_t data_ptr;
3983 struct binder_buffer *buffer;
3985 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3986 return -EFAULT;
3987 ptr += sizeof(binder_uintptr_t);
3989 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3990 data_ptr);
3991 if (IS_ERR_OR_NULL(buffer)) {
3992 if (PTR_ERR(buffer) == -EPERM) {
3993 binder_user_error(
3994 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3995 proc->pid, thread->pid,
3996 (u64)data_ptr);
3997 } else {
3998 binder_user_error(
3999 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4000 proc->pid, thread->pid,
					(u64)data_ptr);
				}
				break;
			}
4005 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4006 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4007 proc->pid, thread->pid, (u64)data_ptr,
4008 buffer->debug_id,
4009 buffer->transaction ? "active" : "finished");
4010 binder_free_buf(proc, thread, buffer, false);
			break;
		}

4014 case BC_TRANSACTION_SG:
4015 case BC_REPLY_SG: {
4016 struct binder_transaction_data_sg tr;
4018 if (copy_from_user(&tr, ptr, sizeof(tr)))
4019 return -EFAULT;
4020 ptr += sizeof(tr);
4021 binder_transaction(proc, thread, &tr.transaction_data,
4022 cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
4025 case BC_TRANSACTION:
4026 case BC_REPLY: {
4027 struct binder_transaction_data tr;
4029 if (copy_from_user(&tr, ptr, sizeof(tr)))
4030 return -EFAULT;
4031 ptr += sizeof(tr);
4032 binder_transaction(proc, thread, &tr,
4033 cmd == BC_REPLY, 0);
			break;
		}

4037 case BC_REGISTER_LOOPER:
4038 binder_debug(BINDER_DEBUG_THREADS,
4039 "%d:%d BC_REGISTER_LOOPER\n",
4040 proc->pid, thread->pid);
4041 binder_inner_proc_lock(proc);
4042 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4043 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4044 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4045 proc->pid, thread->pid);
4046 } else if (proc->requested_threads == 0) {
4047 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4048 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4049 proc->pid, thread->pid);
4050 } else {
4051 proc->requested_threads--;
				proc->requested_threads_started++;
			}
4054 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4055 binder_inner_proc_unlock(proc);
4056 break;
4057 case BC_ENTER_LOOPER:
4058 binder_debug(BINDER_DEBUG_THREADS,
4059 "%d:%d BC_ENTER_LOOPER\n",
4060 proc->pid, thread->pid);
4061 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4062 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4063 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
						   proc->pid, thread->pid);
			}
4066 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4067 break;
4068 case BC_EXIT_LOOPER:
4069 binder_debug(BINDER_DEBUG_THREADS,
4070 "%d:%d BC_EXIT_LOOPER\n",
4071 proc->pid, thread->pid);
4072 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4073 break;
4075 case BC_REQUEST_DEATH_NOTIFICATION:
4076 case BC_CLEAR_DEATH_NOTIFICATION: {
4077 uint32_t target;
4078 binder_uintptr_t cookie;
4079 struct binder_ref *ref;
4080 struct binder_ref_death *death = NULL;
4082 if (get_user(target, (uint32_t __user *)ptr))
4083 return -EFAULT;
4084 ptr += sizeof(uint32_t);
4085 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4086 return -EFAULT;
4087 ptr += sizeof(binder_uintptr_t);
4088 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
4093 death = kzalloc(sizeof(*death), GFP_KERNEL);
4094 if (death == NULL) {
4095 WARN_ON(thread->return_error.cmd !=
4096 BR_OK);
4097 thread->return_error.cmd = BR_ERROR;
4098 binder_enqueue_thread_work(
4099 thread,
4100 &thread->return_error.work);
4101 binder_debug(
4102 BINDER_DEBUG_FAILED_TRANSACTION,
4103 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4104 proc->pid, thread->pid);
					break;
				}
			}
4108 binder_proc_lock(proc);
4109 ref = binder_get_ref_olocked(proc, target, false);
4110 if (ref == NULL) {
4111 binder_user_error("%d:%d %s invalid ref %d\n",
4112 proc->pid, thread->pid,
4113 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4114 "BC_REQUEST_DEATH_NOTIFICATION" :
4115 "BC_CLEAR_DEATH_NOTIFICATION",
4116 target);
4117 binder_proc_unlock(proc);
4118 kfree(death);
				break;
			}

4122 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4123 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4124 proc->pid, thread->pid,
4125 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4126 "BC_REQUEST_DEATH_NOTIFICATION" :
4127 "BC_CLEAR_DEATH_NOTIFICATION",
4128 (u64)cookie, ref->data.debug_id,
4129 ref->data.desc, ref->data.strong,
4130 ref->data.weak, ref->node->debug_id);
4132 binder_node_lock(ref->node);
4133 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4134 if (ref->death) {
4135 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4136 proc->pid, thread->pid);
4137 binder_node_unlock(ref->node);
4138 binder_proc_unlock(proc);
4139 kfree(death);
					break;
				}
4142 binder_stats_created(BINDER_STAT_DEATH);
4143 INIT_LIST_HEAD(&death->work.entry);
4144 death->cookie = cookie;
4145 ref->death = death;
4146 if (ref->node->proc == NULL) {
4147 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4149 binder_inner_proc_lock(proc);
4150 binder_enqueue_work_ilocked(
4151 &ref->death->work, &proc->todo);
4152 binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
4155 } else {
4156 if (ref->death == NULL) {
4157 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4158 proc->pid, thread->pid);
4159 binder_node_unlock(ref->node);
4160 binder_proc_unlock(proc);
					break;
				}
4163 death = ref->death;
4164 if (death->cookie != cookie) {
4165 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4166 proc->pid, thread->pid,
4167 (u64)death->cookie,
4168 (u64)cookie);
4169 binder_node_unlock(ref->node);
4170 binder_proc_unlock(proc);
					break;
				}
4173 ref->death = NULL;
4174 binder_inner_proc_lock(proc);
4175 if (list_empty(&death->work.entry)) {
4176 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4177 if (thread->looper &
4178 (BINDER_LOOPER_STATE_REGISTERED |
4179 BINDER_LOOPER_STATE_ENTERED))
4180 binder_enqueue_thread_work_ilocked(
4181 thread,
4182 &death->work);
4183 else {
4184 binder_enqueue_work_ilocked(
4185 &death->work,
4186 &proc->todo);
4187 binder_wakeup_proc_ilocked(
							proc);
					}
4190 } else {
4191 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
4196 binder_node_unlock(ref->node);
4197 binder_proc_unlock(proc);
4198 } break;
4199 case BC_DEAD_BINDER_DONE: {
4200 struct binder_work *w;
4201 binder_uintptr_t cookie;
4202 struct binder_ref_death *death = NULL;
4204 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4205 return -EFAULT;
4207 ptr += sizeof(cookie);
4208 binder_inner_proc_lock(proc);
4209 list_for_each_entry(w, &proc->delivered_death,
4210 entry) {
4211 struct binder_ref_death *tmp_death =
4212 container_of(w,
4213 struct binder_ref_death,
4214 work);
4216 if (tmp_death->cookie == cookie) {
4217 death = tmp_death;
					break;
				}
			}
4221 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4222 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4223 proc->pid, thread->pid, (u64)cookie,
4224 death);
4225 if (death == NULL) {
4226 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4227 proc->pid, thread->pid, (u64)cookie);
4228 binder_inner_proc_unlock(proc);
				break;
			}
4231 binder_dequeue_work_ilocked(&death->work);
4232 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4233 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4234 if (thread->looper &
4235 (BINDER_LOOPER_STATE_REGISTERED |
4236 BINDER_LOOPER_STATE_ENTERED))
4237 binder_enqueue_thread_work_ilocked(
4238 thread, &death->work);
4239 else {
4240 binder_enqueue_work_ilocked(
4241 &death->work,
4242 &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
4246 binder_inner_proc_unlock(proc);
4247 } break;
4249 default:
4250 pr_err("%d:%d unknown command %u\n",
4251 proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

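/*
 * Record a BR_* return command in the global, per-proc and
 * per-thread statistics.
 */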
4259 static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
4262 trace_binder_return(cmd);
4263 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4264 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4265 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

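/*
 * Write a node command (cmd word plus node ptr/cookie pair) to the
 * userspace read buffer and advance *ptrp past it. Returns -EFAULT
 * if any of the three words cannot be copied out.
 */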
4270 static int binder_put_node_cmd(struct binder_proc *proc,
4271 struct binder_thread *thread,
4272 void __user **ptrp,
4273 binder_uintptr_t node_ptr,
4274 binder_uintptr_t node_cookie,
4275 int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
4278 void __user *ptr = *ptrp;
4280 if (put_user(cmd, (uint32_t __user *)ptr))
4281 return -EFAULT;
4282 ptr += sizeof(uint32_t);
4284 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4285 return -EFAULT;
4286 ptr += sizeof(binder_uintptr_t);
4288 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4289 return -EFAULT;
4290 ptr += sizeof(binder_uintptr_t);
4292 binder_stat_br(proc, thread, cmd);
4293 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4294 proc->pid, thread->pid, cmd_name, node_debug_id,
4295 (u64)node_ptr, (u64)node_cookie);
4297 *ptrp = ptr;
	return 0;
}

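/*
 * Block in a freezable, interruptible wait until this thread has work
 * (or, when @do_proc_work is true, until the process has work).
 * Returns -EINTR if the wait was interrupted by a signal.
 */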
4301 static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
4304 DEFINE_WAIT(wait);
4305 struct binder_proc *proc = thread->proc;
4306 int ret = 0;
4308 binder_inner_proc_lock(proc);
4309 for (;;) {
4310 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4311 if (binder_has_work_ilocked(thread, do_proc_work))
4312 break;
4313 if (do_proc_work)
4314 list_add(&thread->waiting_thread_node,
4315 &proc->waiting_threads);
4316 binder_inner_proc_unlock(proc);
4317 schedule();
4318 binder_inner_proc_lock(proc);
4319 list_del_init(&thread->waiting_thread_node);
4320 if (signal_pending(current)) {
4321 ret = -EINTR;
			break;
		}
	}
4325 finish_wait(&thread->wait, &wait);
4326 binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds first and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
4347 struct binder_txn_fd_fixup *fixup, *tmp;
4348 int ret = 0;
4350 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4351 int fd = get_unused_fd_flags(O_CLOEXEC);
4353 if (fd < 0) {
4354 binder_debug(BINDER_DEBUG_TRANSACTION,
4355 "failed fd fixup txn %d fd %d\n",
4356 t->debug_id, fd);
4357 ret = -ENOMEM;
			goto err;
		}
4360 binder_debug(BINDER_DEBUG_TRANSACTION,
4361 "fd fixup txn %d fd %d\n",
4362 t->debug_id, fd);
4363 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4364 fixup->target_fd = fd;
4365 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4366 fixup->offset, &fd,
4367 sizeof(u32))) {
4368 ret = -EINVAL;
			goto err;
		}
	}
4372 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4373 fd_install(fixup->target_fd, fixup->file);
4374 list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

4378 return ret;
4380 err:
4381 binder_free_txn_fixups(t);
	return ret;
}

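/**
 * binder_thread_read() - fill a userspace buffer with BR_* commands
 * @proc:	binder proc this thread belongs to
 * @thread:	binder thread draining its todo list
 * @binder_buffer: userspace address of the read buffer
 * @size:	size of the read buffer in bytes
 * @consumed:	updated with the number of bytes written
 * @non_block:	if set, return -EAGAIN instead of blocking when no
 *		work is available
 *
 * Work items are dequeued from the thread's todo list (and, for a
 * thread that is available for process work, from the proc's todo
 * list) and translated into BR_* commands until a transaction is
 * delivered or the buffer cannot hold another full transaction.
 */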
4385 static int binder_thread_read(struct binder_proc *proc,
4386 struct binder_thread *thread,
4387 binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
4390 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4391 void __user *ptr = buffer + *consumed;
4392 void __user *end = buffer + size;
4394 int ret = 0;
4395 int wait_for_proc_work;
4397 if (*consumed == 0) {
4398 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4399 return -EFAULT;
		ptr += sizeof(uint32_t);
	}

4403 retry:
4404 binder_inner_proc_lock(proc);
4405 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4406 binder_inner_proc_unlock(proc);
4408 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4410 trace_binder_wait_for_work(wait_for_proc_work,
4411 !!thread->transaction_stack,
4412 !binder_worklist_empty(proc, &thread->todo));
4413 if (wait_for_proc_work) {
4414 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4415 BINDER_LOOPER_STATE_ENTERED))) {
4416 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4417 proc->pid, thread->pid, thread->looper);
4418 wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

4424 if (non_block) {
4425 if (!binder_has_work(thread, wait_for_proc_work))
4426 ret = -EAGAIN;
4427 } else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

4431 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4433 if (ret)
4434 return ret;
4436 while (1) {
4437 uint32_t cmd;
4438 struct binder_transaction_data_secctx tr;
4439 struct binder_transaction_data *trd = &tr.transaction_data;
4440 struct binder_work *w = NULL;
4441 struct list_head *list = NULL;
4442 struct binder_transaction *t = NULL;
4443 struct binder_thread *t_from;
4444 size_t trsize = sizeof(*trd);
4446 binder_inner_proc_lock(proc);
4447 if (!binder_worklist_empty_ilocked(&thread->todo))
4448 list = &thread->todo;
4449 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4450 wait_for_proc_work)
4451 list = &proc->todo;
4452 else {
4453 binder_inner_proc_unlock(proc);
4455 /* no data added */
4456 if (ptr - buffer == 4 && !thread->looper_need_return)
4457 goto retry;
			break;
		}

4461 if (end - ptr < sizeof(tr) + 4) {
4462 binder_inner_proc_unlock(proc);
			break;
		}
4465 w = binder_dequeue_work_head_ilocked(list);
4466 if (binder_worklist_empty_ilocked(&thread->todo))
4467 thread->process_todo = false;
4469 switch (w->type) {
4470 case BINDER_WORK_TRANSACTION: {
4471 binder_inner_proc_unlock(proc);
4472 t = container_of(w, struct binder_transaction, work);
4473 } break;
4474 case BINDER_WORK_RETURN_ERROR: {
4475 struct binder_error *e = container_of(
4476 w, struct binder_error, work);
4478 WARN_ON(e->cmd == BR_OK);
4479 binder_inner_proc_unlock(proc);
4480 if (put_user(e->cmd, (uint32_t __user *)ptr))
4481 return -EFAULT;
4482 cmd = e->cmd;
4483 e->cmd = BR_OK;
4484 ptr += sizeof(uint32_t);
4486 binder_stat_br(proc, thread, cmd);
4487 } break;
4488 case BINDER_WORK_TRANSACTION_COMPLETE:
4489 case BINDER_WORK_TRANSACTION_PENDING:
4490 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4491 if (proc->oneway_spam_detection_enabled &&
4492 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4493 cmd = BR_ONEWAY_SPAM_SUSPECT;
4494 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4495 cmd = BR_TRANSACTION_PENDING_FROZEN;
4496 else
4497 cmd = BR_TRANSACTION_COMPLETE;
4498 binder_inner_proc_unlock(proc);
4499 kfree(w);
4500 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4501 if (put_user(cmd, (uint32_t __user *)ptr))
4502 return -EFAULT;
4503 ptr += sizeof(uint32_t);
4505 binder_stat_br(proc, thread, cmd);
4506 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4507 "%d:%d BR_TRANSACTION_COMPLETE\n",
4508 proc->pid, thread->pid);
4509 } break;
4510 case BINDER_WORK_NODE: {
4511 struct binder_node *node = container_of(w, struct binder_node, work);
4512 int strong, weak;
4513 binder_uintptr_t node_ptr = node->ptr;
4514 binder_uintptr_t node_cookie = node->cookie;
4515 int node_debug_id = node->debug_id;
4516 int has_weak_ref;
4517 int has_strong_ref;
4518 void __user *orig_ptr = ptr;
4520 BUG_ON(proc != node->proc);
4521 strong = node->internal_strong_refs ||
4522 node->local_strong_refs;
4523 weak = !hlist_empty(&node->refs) ||
4524 node->local_weak_refs ||
4525 node->tmp_refs || strong;
4526 has_strong_ref = node->has_strong_ref;
4527 has_weak_ref = node->has_weak_ref;
4529 if (weak && !has_weak_ref) {
4530 node->has_weak_ref = 1;
4531 node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
4534 if (strong && !has_strong_ref) {
4535 node->has_strong_ref = 1;
4536 node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
4539 if (!strong && has_strong_ref)
4540 node->has_strong_ref = 0;
4541 if (!weak && has_weak_ref)
4542 node->has_weak_ref = 0;
4543 if (!weak && !strong) {
4544 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4545 "%d:%d node %d u%016llx c%016llx deleted\n",
4546 proc->pid, thread->pid,
4547 node_debug_id,
4548 (u64)node_ptr,
4549 (u64)node_cookie);
4550 rb_erase(&node->rb_node, &proc->nodes);
4551 binder_inner_proc_unlock(proc);
4552 binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
4562 binder_node_unlock(node);
4563 binder_free_node(node);
4564 } else
4565 binder_inner_proc_unlock(proc);
4567 if (weak && !has_weak_ref)
4568 ret = binder_put_node_cmd(
4569 proc, thread, &ptr, node_ptr,
4570 node_cookie, node_debug_id,
4571 BR_INCREFS, "BR_INCREFS");
4572 if (!ret && strong && !has_strong_ref)
4573 ret = binder_put_node_cmd(
4574 proc, thread, &ptr, node_ptr,
4575 node_cookie, node_debug_id,
4576 BR_ACQUIRE, "BR_ACQUIRE");
4577 if (!ret && !strong && has_strong_ref)
4578 ret = binder_put_node_cmd(
4579 proc, thread, &ptr, node_ptr,
4580 node_cookie, node_debug_id,
4581 BR_RELEASE, "BR_RELEASE");
4582 if (!ret && !weak && has_weak_ref)
4583 ret = binder_put_node_cmd(
4584 proc, thread, &ptr, node_ptr,
4585 node_cookie, node_debug_id,
4586 BR_DECREFS, "BR_DECREFS");
4587 if (orig_ptr == ptr)
4588 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4589 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4590 proc->pid, thread->pid,
4591 node_debug_id,
4592 (u64)node_ptr,
4593 (u64)node_cookie);
4594 if (ret)
4595 return ret;
4596 } break;
4597 case BINDER_WORK_DEAD_BINDER:
4598 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4599 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4600 struct binder_ref_death *death;
4601 uint32_t cmd;
4602 binder_uintptr_t cookie;
4604 death = container_of(w, struct binder_ref_death, work);
4605 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4606 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4607 else
4608 cmd = BR_DEAD_BINDER;
4609 cookie = death->cookie;
4611 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4612 "%d:%d %s %016llx\n",
4613 proc->pid, thread->pid,
4614 cmd == BR_DEAD_BINDER ?
4615 "BR_DEAD_BINDER" :
4616 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4617 (u64)cookie);
4618 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4619 binder_inner_proc_unlock(proc);
4620 kfree(death);
4621 binder_stats_deleted(BINDER_STAT_DEATH);
4622 } else {
4623 binder_enqueue_work_ilocked(
4624 w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
4627 if (put_user(cmd, (uint32_t __user *)ptr))
4628 return -EFAULT;
4629 ptr += sizeof(uint32_t);
4630 if (put_user(cookie,
4631 (binder_uintptr_t __user *)ptr))
4632 return -EFAULT;
4633 ptr += sizeof(binder_uintptr_t);
4634 binder_stat_br(proc, thread, cmd);
4635 if (cmd == BR_DEAD_BINDER)
4636 goto done; /* DEAD_BINDER notifications can cause transactions */
4637 } break;
4638 default:
4639 binder_inner_proc_unlock(proc);
4640 pr_err("%d:%d: bad work type %d\n",
4641 proc->pid, thread->pid, w->type);
			break;
		}

4645 if (!t)
4646 continue;
4648 BUG_ON(t->buffer == NULL);
4649 if (t->buffer->target_node) {
4650 struct binder_node *target_node = t->buffer->target_node;
4652 trd->target.ptr = target_node->ptr;
4653 trd->cookie = target_node->cookie;
4654 t->saved_priority = task_nice(current);
4655 if (t->priority < target_node->min_priority &&
4656 !(t->flags & TF_ONE_WAY))
4657 binder_set_nice(t->priority);
4658 else if (!(t->flags & TF_ONE_WAY) ||
4659 t->saved_priority > target_node->min_priority)
4660 binder_set_nice(target_node->min_priority);
4661 cmd = BR_TRANSACTION;
4662 } else {
4663 trd->target.ptr = 0;
4664 trd->cookie = 0;
			cmd = BR_REPLY;
		}
4667 trd->code = t->code;
4668 trd->flags = t->flags;
4669 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4671 t_from = binder_get_txn_from(t);
4672 if (t_from) {
4673 struct task_struct *sender = t_from->proc->tsk;
4675 trd->sender_pid =
4676 task_tgid_nr_ns(sender,
4677 task_active_pid_ns(current));
4678 } else {
			trd->sender_pid = 0;
		}

4682 ret = binder_apply_fd_fixups(proc, t);
4683 if (ret) {
4684 struct binder_buffer *buffer = t->buffer;
4685 bool oneway = !!(t->flags & TF_ONE_WAY);
4686 int tid = t->debug_id;
4688 if (t_from)
4689 binder_thread_dec_tmpref(t_from);
4690 buffer->transaction = NULL;
4691 binder_cleanup_transaction(t, "fd fixups failed",
4692 BR_FAILED_REPLY);
4693 binder_free_buf(proc, thread, buffer, true);
4694 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4695 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4696 proc->pid, thread->pid,
4697 oneway ? "async " :
4698 (cmd == BR_REPLY ? "reply " : ""),
4699 tid, BR_FAILED_REPLY, ret, __LINE__);
4700 if (cmd == BR_REPLY) {
4701 cmd = BR_FAILED_REPLY;
4702 if (put_user(cmd, (uint32_t __user *)ptr))
4703 return -EFAULT;
4704 ptr += sizeof(uint32_t);
4705 binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
4710 trd->data_size = t->buffer->data_size;
4711 trd->offsets_size = t->buffer->offsets_size;
4712 trd->data.ptr.buffer = t->buffer->user_data;
4713 trd->data.ptr.offsets = trd->data.ptr.buffer +
4714 ALIGN(t->buffer->data_size,
4715 sizeof(void *));
4717 tr.secctx = t->security_ctx;
4718 if (t->security_ctx) {
4719 cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
4722 if (put_user(cmd, (uint32_t __user *)ptr)) {
4723 if (t_from)
4724 binder_thread_dec_tmpref(t_from);
4726 binder_cleanup_transaction(t, "put_user failed",
4727 BR_FAILED_REPLY);
			return -EFAULT;
		}
4731 ptr += sizeof(uint32_t);
4732 if (copy_to_user(ptr, &tr, trsize)) {
4733 if (t_from)
4734 binder_thread_dec_tmpref(t_from);
4736 binder_cleanup_transaction(t, "copy_to_user failed",
4737 BR_FAILED_REPLY);
			return -EFAULT;
		}
4741 ptr += trsize;
4743 trace_binder_transaction_received(t);
4744 binder_stat_br(proc, thread, cmd);
4745 binder_debug(BINDER_DEBUG_TRANSACTION,
4746 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4747 proc->pid, thread->pid,
4748 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4749 (cmd == BR_TRANSACTION_SEC_CTX) ?
4750 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4751 t->debug_id, t_from ? t_from->proc->pid : 0,
4752 t_from ? t_from->pid : 0, cmd,
4753 t->buffer->data_size, t->buffer->offsets_size,
4754 (u64)trd->data.ptr.buffer,
4755 (u64)trd->data.ptr.offsets);
4757 if (t_from)
4758 binder_thread_dec_tmpref(t_from);
4759 t->buffer->allow_user_free = 1;
4760 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4761 binder_inner_proc_lock(thread->proc);
4762 t->to_parent = thread->transaction_stack;
4763 t->to_thread = thread;
4764 thread->transaction_stack = t;
4765 binder_inner_proc_unlock(thread->proc);
4766 } else {
			binder_free_transaction(t);
		}
		break;
	}

done:
4774 *consumed = ptr - buffer;
4775 binder_inner_proc_lock(proc);
4776 if (proc->requested_threads == 0 &&
4777 list_empty(&thread->proc->waiting_threads) &&
4778 proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
4782 proc->requested_threads++;
4783 binder_inner_proc_unlock(proc);
4784 binder_debug(BINDER_DEBUG_THREADS,
4785 "%d:%d BR_SPAWN_LOOPER\n",
4786 proc->pid, thread->pid);
4787 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4788 return -EFAULT;
4789 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4790 } else
4791 binder_inner_proc_unlock(proc);
	return 0;
}

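/*
 * Drain @list, releasing each undelivered work item according to its
 * type. Items are dequeued one at a time under the proc inner lock,
 * which is dropped before each item is cleaned up.
 */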
4795 static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
4798 struct binder_work *w;
4799 enum binder_work_type wtype;
4801 while (1) {
4802 binder_inner_proc_lock(proc);
4803 w = binder_dequeue_work_head_ilocked(list);
4804 wtype = w ? w->type : 0;
4805 binder_inner_proc_unlock(proc);
4806 if (!w)
4807 return;
4809 switch (wtype) {
4810 case BINDER_WORK_TRANSACTION: {
4811 struct binder_transaction *t;
4813 t = container_of(w, struct binder_transaction, work);
4815 binder_cleanup_transaction(t, "process died.",
4816 BR_DEAD_REPLY);
4817 } break;
4818 case BINDER_WORK_RETURN_ERROR: {
4819 struct binder_error *e = container_of(
4820 w, struct binder_error, work);
4822 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4823 "undelivered TRANSACTION_ERROR: %u\n",
4824 e->cmd);
4825 } break;
4826 case BINDER_WORK_TRANSACTION_PENDING:
4827 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4828 case BINDER_WORK_TRANSACTION_COMPLETE: {
4829 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4830 "undelivered TRANSACTION_COMPLETE\n");
4831 kfree(w);
4832 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4833 } break;
4834 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4835 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4836 struct binder_ref_death *death;
4838 death = container_of(w, struct binder_ref_death, work);
4839 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4840 "undelivered death notification, %016llx\n",
4841 (u64)death->cookie);
4842 kfree(death);
4843 binder_stats_deleted(BINDER_STAT_DEATH);
4844 } break;
4845 case BINDER_WORK_NODE:
4846 break;
4847 default:
4848 pr_err("unexpected work type, %d, not freed\n",
4849 wtype);
			break;
		}
	}
}

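/*
 * Find the binder_thread for current in proc->threads, an rbtree
 * keyed by pid. If no entry exists and @new_thread is non-NULL,
 * initialize and insert @new_thread instead. The caller must hold
 * the proc inner lock.
 */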
4856 static struct binder_thread *binder_get_thread_ilocked(
	struct binder_proc *proc, struct binder_thread *new_thread)
{
4859 struct binder_thread *thread = NULL;
4860 struct rb_node *parent = NULL;
4861 struct rb_node **p = &proc->threads.rb_node;
4863 while (*p) {
4864 parent = *p;
4865 thread = rb_entry(parent, struct binder_thread, rb_node);
4867 if (current->pid < thread->pid)
4868 p = &(*p)->rb_left;
4869 else if (current->pid > thread->pid)
4870 p = &(*p)->rb_right;
4871 else
			return thread;
	}
4874 if (!new_thread)
4875 return NULL;
4876 thread = new_thread;
4877 binder_stats_created(BINDER_STAT_THREAD);
4878 thread->proc = proc;
4879 thread->pid = current->pid;
4880 atomic_set(&thread->tmp_ref, 0);
4881 init_waitqueue_head(&thread->wait);
4882 INIT_LIST_HEAD(&thread->todo);
4883 rb_link_node(&thread->rb_node, parent, p);
4884 rb_insert_color(&thread->rb_node, &proc->threads);
4885 thread->looper_need_return = true;
4886 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4887 thread->return_error.cmd = BR_OK;
4888 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4889 thread->reply_error.cmd = BR_OK;
4890 thread->ee.command = BR_OK;
4891 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

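/*
 * Look up (or lazily create) the binder_thread for current. The
 * lookup runs under the inner lock; on a miss the new thread is
 * allocated outside the lock and the lookup retried, freeing the
 * allocation if another thread won the race.
 */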
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
4897 struct binder_thread *thread;
4898 struct binder_thread *new_thread;
4900 binder_inner_proc_lock(proc);
4901 thread = binder_get_thread_ilocked(proc, NULL);
4902 binder_inner_proc_unlock(proc);
4903 if (!thread) {
4904 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4905 if (new_thread == NULL)
4906 return NULL;
4907 binder_inner_proc_lock(proc);
4908 thread = binder_get_thread_ilocked(proc, new_thread);
4909 binder_inner_proc_unlock(proc);
4910 if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
4918 struct binder_device *device;
4920 BUG_ON(!list_empty(&proc->todo));
4921 BUG_ON(!list_empty(&proc->delivered_death));
4922 if (proc->outstanding_txns)
4923 pr_warn("%s: Unexpected outstanding_txns %d\n",
4924 __func__, proc->outstanding_txns);
4925 device = container_of(proc->context, struct binder_device, context);
4926 if (refcount_dec_and_test(&device->ref)) {
4927 kfree(proc->context->name);
		kfree(device);
	}
4930 binder_alloc_deferred_release(&proc->alloc);
4931 put_task_struct(proc->tsk);
4932 put_cred(proc->cred);
4933 binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
4939 BUG_ON(!list_empty(&thread->todo));
4940 binder_stats_deleted(BINDER_STAT_THREAD);
4941 binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

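/*
 * Detach @thread from its proc, fail any transaction it was still
 * involved in, and drop its references. Returns the number of
 * transactions that were active on the thread's stack.
 */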
4945 static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
4948 struct binder_transaction *t;
4949 struct binder_transaction *send_reply = NULL;
4950 int active_transactions = 0;
4951 struct binder_transaction *last_t = NULL;
4953 binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
4960 proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
4965 atomic_inc(&thread->tmp_ref);
4966 rb_erase(&thread->rb_node, &proc->threads);
4967 t = thread->transaction_stack;
4968 if (t) {
4969 spin_lock(&t->lock);
4970 if (t->to_thread == thread)
4971 send_reply = t;
4972 } else {
		__acquire(&t->lock);
	}
4975 thread->is_dead = true;
4977 while (t) {
4978 last_t = t;
4979 active_transactions++;
4980 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4981 "release %d:%d transaction %d %s, still active\n",
4982 proc->pid, thread->pid,
4983 t->debug_id,
4984 (t->to_thread == thread) ? "in" : "out");
4986 if (t->to_thread == thread) {
4987 thread->proc->outstanding_txns--;
4988 t->to_proc = NULL;
4989 t->to_thread = NULL;
4990 if (t->buffer) {
4991 t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
4994 t = t->to_parent;
4995 } else if (t->from == thread) {
4996 t->from = NULL;
4997 t = t->from_parent;
4998 } else
4999 BUG();
5000 spin_unlock(&last_t->lock);
5001 if (t)
5002 spin_lock(&t->lock);
5003 else
			__acquire(&t->lock);
	}
5006 /* annotation for sparse, lock not acquired in last iteration above */
5007 __release(&t->lock);
	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
5013 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5014 wake_up_pollfree(&thread->wait);
5016 binder_inner_proc_unlock(thread->proc);
	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed). Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
5025 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5026 synchronize_rcu();
5028 if (send_reply)
5029 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5030 binder_release_work(proc, &thread->todo);
5031 binder_thread_dec_tmpref(thread);
	return active_transactions;
}

5035 static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
5038 struct binder_proc *proc = filp->private_data;
5039 struct binder_thread *thread = NULL;
5040 bool wait_for_proc_work;
5042 thread = binder_get_thread(proc);
5043 if (!thread)
5044 return EPOLLERR;
5046 binder_inner_proc_lock(thread->proc);
5047 thread->looper |= BINDER_LOOPER_STATE_POLL;
5048 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5050 binder_inner_proc_unlock(thread->proc);
5052 poll_wait(filp, &thread->wait, wait);
5054 if (binder_has_work(thread, wait_for_proc_work))
5055 return EPOLLIN;
	return 0;
}

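/*
 * Userspace drives this path with ioctl(fd, BINDER_WRITE_READ, &bwr).
 * As a rough illustration (not part of this driver; error handling
 * omitted), a minimal client interaction looks like:
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer	= (binder_uintptr_t)cmds,
 *		.write_size	= cmds_len,
 *		.read_buffer	= (binder_uintptr_t)returns,
 *		.read_size	= sizeof(returns),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed and bwr.read_consumed report progress
 *
 * The write half is processed before the read half, and a failed
 * write reports zero bytes read.
 */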
5060 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				   struct binder_thread *thread)
{
5063 int ret = 0;
5064 struct binder_proc *proc = filp->private_data;
5065 void __user *ubuf = (void __user *)arg;
5066 struct binder_write_read bwr;
5068 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5069 ret = -EFAULT;
		goto out;
	}
5072 binder_debug(BINDER_DEBUG_READ_WRITE,
5073 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5074 proc->pid, thread->pid,
5075 (u64)bwr.write_size, (u64)bwr.write_buffer,
5076 (u64)bwr.read_size, (u64)bwr.read_buffer);
5078 if (bwr.write_size > 0) {
5079 ret = binder_thread_write(proc, thread,
5080 bwr.write_buffer,
5081 bwr.write_size,
5082 &bwr.write_consumed);
5083 trace_binder_write_done(ret);
5084 if (ret < 0) {
5085 bwr.read_consumed = 0;
5086 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5087 ret = -EFAULT;
			goto out;
		}
	}
5091 if (bwr.read_size > 0) {
5092 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5093 bwr.read_size,
5094 &bwr.read_consumed,
5095 filp->f_flags & O_NONBLOCK);
5096 trace_binder_read_done(ret);
5097 binder_inner_proc_lock(proc);
5098 if (!binder_worklist_empty_ilocked(&proc->todo))
5099 binder_wakeup_proc_ilocked(proc);
5100 binder_inner_proc_unlock(proc);
5101 if (ret < 0) {
5102 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5103 ret = -EFAULT;
			goto out;
		}
	}
5107 binder_debug(BINDER_DEBUG_READ_WRITE,
5108 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5109 proc->pid, thread->pid,
5110 (u64)bwr.write_consumed, (u64)bwr.write_size,
5111 (u64)bwr.read_consumed, (u64)bwr.read_size);
5112 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5113 ret = -EFAULT;
		goto out;
	}
5116 out:
	return ret;
}

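/*
 * Register the caller as the context manager (the process that owns
 * handle 0) for this binder context. Fails with -EBUSY if a context
 * manager is already set, and requires approval from
 * security_binder_set_context_mgr().
 */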
5120 static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
5123 int ret = 0;
5124 struct binder_proc *proc = filp->private_data;
5125 struct binder_context *context = proc->context;
5126 struct binder_node *new_node;
5127 kuid_t curr_euid = current_euid();
5129 mutex_lock(&context->context_mgr_node_lock);
5130 if (context->binder_context_mgr_node) {
5131 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5132 ret = -EBUSY;
		goto out;
	}
5135 ret = security_binder_set_context_mgr(proc->cred);
5136 if (ret < 0)
5137 goto out;
5138 if (uid_valid(context->binder_context_mgr_uid)) {
5139 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5140 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5141 from_kuid(&init_user_ns, curr_euid),
5142 from_kuid(&init_user_ns,
5143 context->binder_context_mgr_uid));
5144 ret = -EPERM;
			goto out;
		}
5147 } else {
		context->binder_context_mgr_uid = curr_euid;
	}
5150 new_node = binder_new_node(proc, fbo);
5151 if (!new_node) {
5152 ret = -ENOMEM;
		goto out;
	}
5155 binder_node_lock(new_node);
5156 new_node->local_weak_refs++;
5157 new_node->local_strong_refs++;
5158 new_node->has_strong_ref = 1;
5159 new_node->has_weak_ref = 1;
5160 context->binder_context_mgr_node = new_node;
5161 binder_node_unlock(new_node);
5162 binder_put_node(new_node);
5163 out:
5164 mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

5168 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
			struct binder_node_info_for_ref *info)
{
5171 struct binder_node *node;
5172 struct binder_context *context = proc->context;
5173 __u32 handle = info->handle;
5175 if (info->strong_count || info->weak_count || info->reserved1 ||
5176 info->reserved2 || info->reserved3) {
5177 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5178 proc->pid);
		return -EINVAL;
	}

5182 /* This ioctl may only be used by the context manager */
5183 mutex_lock(&context->context_mgr_node_lock);
5184 if (!context->binder_context_mgr_node ||
5185 context->binder_context_mgr_node->proc != proc) {
5186 mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
5189 mutex_unlock(&context->context_mgr_node_lock);
5191 node = binder_get_node_from_ref(proc, handle, true, NULL);
5192 if (!node)
5193 return -EINVAL;
5195 info->strong_count = node->local_strong_refs +
5196 node->internal_strong_refs;
5197 info->weak_count = node->local_weak_refs;
5199 binder_put_node(node);
	return 0;
}

5204 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
5207 struct rb_node *n;
5208 binder_uintptr_t ptr = info->ptr;
5210 memset(info, 0, sizeof(*info));
5212 binder_inner_proc_lock(proc);
5213 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5214 struct binder_node *node = rb_entry(n, struct binder_node,
5215 rb_node);
5216 if (node->ptr > ptr) {
5217 info->ptr = node->ptr;
5218 info->cookie = node->cookie;
5219 info->has_strong_ref = node->has_strong_ref;
5220 info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
5224 binder_inner_proc_unlock(proc);
	return 0;
}

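/*
 * Return true if @proc has outstanding transactions or any of its
 * threads has a transaction in progress. Caller must hold the proc
 * inner lock.
 */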
static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
5231 struct rb_node *n;
5232 struct binder_thread *thread;
5234 if (proc->outstanding_txns > 0)
5235 return true;
5237 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5238 thread = rb_entry(n, struct binder_thread, rb_node);
5239 if (thread->transaction_stack)
			return true;
	}
	return false;
}

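/*
 * Freeze or unfreeze @target_proc according to @info. Illustrative
 * userspace usage (not part of this driver; error handling omitted):
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *	ioctl(binder_fd, BINDER_FREEZE, &info);
 *
 * Returns -EAGAIN, leaving the process unfrozen, if transactions are
 * still pending once the timeout expires.
 */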
5245 static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
5248 int ret = 0;
5250 if (!info->enable) {
5251 binder_inner_proc_lock(target_proc);
5252 target_proc->sync_recv = false;
5253 target_proc->async_recv = false;
5254 target_proc->is_frozen = false;
5255 binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
5264 binder_inner_proc_lock(target_proc);
5265 target_proc->sync_recv = false;
5266 target_proc->async_recv = false;
5267 target_proc->is_frozen = true;
5268 binder_inner_proc_unlock(target_proc);
5270 if (info->timeout_ms > 0)
5271 ret = wait_event_interruptible_timeout(
5272 target_proc->freeze_wait,
5273 (!target_proc->outstanding_txns),
5274 msecs_to_jiffies(info->timeout_ms));
5276 /* Check pending transactions that wait for reply */
5277 if (ret >= 0) {
5278 binder_inner_proc_lock(target_proc);
5279 if (binder_txns_pending_ilocked(target_proc))
5280 ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

5284 if (ret < 0) {
5285 binder_inner_proc_lock(target_proc);
5286 target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}

5293 static int binder_ioctl_get_freezer_info(
		struct binder_frozen_status_info *info)
{
5296 struct binder_proc *target_proc;
5297 bool found = false;
5298 __u32 txns_pending;
5300 info->sync_recv = 0;
5301 info->async_recv = 0;
5303 mutex_lock(&binder_procs_lock);
5304 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5305 if (target_proc->pid == info->pid) {
5306 found = true;
5307 binder_inner_proc_lock(target_proc);
5308 txns_pending = binder_txns_pending_ilocked(target_proc);
5309 info->sync_recv |= target_proc->sync_recv |
5310 (txns_pending << 1);
5311 info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
5315 mutex_unlock(&binder_procs_lock);
5317 if (!found)
5318 return -EINVAL;
	return 0;
}

5323 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
					   void __user *ubuf)
{
5326 struct binder_extended_error ee;
5328 binder_inner_proc_lock(thread->proc);
5329 ee = thread->ee;
5330 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5331 binder_inner_proc_unlock(thread->proc);
5333 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5334 return -EFAULT;
	return 0;
}

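/*
 * Top-level ioctl dispatcher for binder devices; every command is
 * handled on behalf of the calling thread's binder_thread, which is
 * created on first use.
 */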
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
5341 int ret;
5342 struct binder_proc *proc = filp->private_data;
5343 struct binder_thread *thread;
5344 void __user *ubuf = (void __user *)arg;
5346 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5347 proc->pid, current->pid, cmd, arg);*/
5349 binder_selftest_alloc(&proc->alloc);
5351 trace_binder_ioctl(cmd, arg);
5353 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5354 if (ret)
5355 goto err_unlocked;
5357 thread = binder_get_thread(proc);
5358 if (thread == NULL) {
5359 ret = -ENOMEM;
		goto err;
	}

5363 switch (cmd) {
5364 case BINDER_WRITE_READ:
5365 ret = binder_ioctl_write_read(filp, arg, thread);
5366 if (ret)
5367 goto err;
5368 break;
5369 case BINDER_SET_MAX_THREADS: {
5370 u32 max_threads;
5372 if (copy_from_user(&max_threads, ubuf,
5373 sizeof(max_threads))) {
5374 ret = -EINVAL;
			goto err;
		}
5377 binder_inner_proc_lock(proc);
5378 proc->max_threads = max_threads;
5379 binder_inner_proc_unlock(proc);
		break;
	}
5382 case BINDER_SET_CONTEXT_MGR_EXT: {
5383 struct flat_binder_object fbo;
5385 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5386 ret = -EINVAL;
			goto err;
		}
5389 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5390 if (ret)
5391 goto err;
		break;
	}
5394 case BINDER_SET_CONTEXT_MGR:
5395 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5396 if (ret)
5397 goto err;
5398 break;
5399 case BINDER_THREAD_EXIT:
5400 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5401 proc->pid, thread->pid);
5402 binder_thread_release(proc, thread);
5403 thread = NULL;
5404 break;
5405 case BINDER_VERSION: {
5406 struct binder_version __user *ver = ubuf;
5408 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5409 &ver->protocol_version)) {
5410 ret = -EINVAL;
			goto err;
		}
		break;
	}
5415 case BINDER_GET_NODE_INFO_FOR_REF: {
5416 struct binder_node_info_for_ref info;
5418 if (copy_from_user(&info, ubuf, sizeof(info))) {
5419 ret = -EFAULT;
			goto err;
		}

5423 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5424 if (ret < 0)
5425 goto err;
5427 if (copy_to_user(ubuf, &info, sizeof(info))) {
5428 ret = -EFAULT;
			goto err;
		}

		break;
	}
5434 case BINDER_GET_NODE_DEBUG_INFO: {
5435 struct binder_node_debug_info info;
5437 if (copy_from_user(&info, ubuf, sizeof(info))) {
5438 ret = -EFAULT;
			goto err;
		}

5442 ret = binder_ioctl_get_node_debug_info(proc, &info);
5443 if (ret < 0)
5444 goto err;
5446 if (copy_to_user(ubuf, &info, sizeof(info))) {
5447 ret = -EFAULT;
			goto err;
		}
		break;
	}
5452 case BINDER_FREEZE: {
5453 struct binder_freeze_info info;
5454 struct binder_proc **target_procs = NULL, *target_proc;
5455 int target_procs_count = 0, i = 0;
5457 ret = 0;
5459 if (copy_from_user(&info, ubuf, sizeof(info))) {
5460 ret = -EFAULT;
			goto err;
		}

5464 mutex_lock(&binder_procs_lock);
5465 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5466 if (target_proc->pid == info.pid)
				target_procs_count++;
		}

5470 if (target_procs_count == 0) {
5471 mutex_unlock(&binder_procs_lock);
5472 ret = -EINVAL;
			goto err;
		}

5476 target_procs = kcalloc(target_procs_count,
5477 sizeof(struct binder_proc *),
5478 GFP_KERNEL);
5480 if (!target_procs) {
5481 mutex_unlock(&binder_procs_lock);
5482 ret = -ENOMEM;
			goto err;
		}

5486 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5487 if (target_proc->pid != info.pid)
5488 continue;
5490 binder_inner_proc_lock(target_proc);
5491 target_proc->tmp_ref++;
5492 binder_inner_proc_unlock(target_proc);
			target_procs[i++] = target_proc;
		}
5496 mutex_unlock(&binder_procs_lock);
5498 for (i = 0; i < target_procs_count; i++) {
5499 if (ret >= 0)
5500 ret = binder_ioctl_freeze(&info,
5501 target_procs[i]);
			binder_proc_dec_tmpref(target_procs[i]);
		}

5506 kfree(target_procs);
5508 if (ret < 0)
5509 goto err;
		break;
	}
5512 case BINDER_GET_FROZEN_INFO: {
5513 struct binder_frozen_status_info info;
5515 if (copy_from_user(&info, ubuf, sizeof(info))) {
5516 ret = -EFAULT;
			goto err;
		}

5520 ret = binder_ioctl_get_freezer_info(&info);
5521 if (ret < 0)
5522 goto err;
5524 if (copy_to_user(ubuf, &info, sizeof(info))) {
5525 ret = -EFAULT;
			goto err;
		}
		break;
	}
5530 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5531 uint32_t enable;
5533 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5534 ret = -EFAULT;
			goto err;
		}
5537 binder_inner_proc_lock(proc);
5538 proc->oneway_spam_detection_enabled = (bool)enable;
5539 binder_inner_proc_unlock(proc);
		break;
	}
5542 case BINDER_GET_EXTENDED_ERROR:
5543 ret = binder_ioctl_get_extended_error(thread, ubuf);
5544 if (ret < 0)
5545 goto err;
5546 break;
5547 default:
5548 ret = -EINVAL;
		goto err;
	}
5551 ret = 0;
5552 err:
5553 if (thread)
5554 thread->looper_need_return = false;
5555 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5556 if (ret && ret != -EINTR)
5557 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5558 err_unlocked:
5559 trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
5565 struct binder_proc *proc = vma->vm_private_data;
5567 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5568 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5569 proc->pid, vma->vm_start, vma->vm_end,
5570 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
5576 struct binder_proc *proc = vma->vm_private_data;
5578 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5579 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5580 proc->pid, vma->vm_start, vma->vm_end,
5581 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5582 (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

5591 static const struct vm_operations_struct binder_vm_ops = {
5592 .open = binder_vma_open,
5593 .close = binder_vma_close,
	.fault = binder_vm_fault,
};

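/*
 * Map the binder buffer space into the caller's address space. Only
 * the thread group that opened the device may map it, and writable
 * mappings are refused (FORBIDDEN_MMAP_FLAGS), so userspace only
 * ever reads transaction buffers.
 */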
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
5599 struct binder_proc *proc = filp->private_data;
5601 if (proc->tsk != current->group_leader)
5602 return -EINVAL;
5604 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5605 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5606 __func__, proc->pid, vma->vm_start, vma->vm_end,
5607 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5608 (unsigned long)pgprot_val(vma->vm_page_prot));
5610 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5611 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5612 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
5615 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5617 vma->vm_ops = &binder_vm_ops;
5618 vma->vm_private_data = proc;
	return binder_alloc_mmap_handler(&proc->alloc, vma);
}

static int binder_open(struct inode *nodp, struct file *filp)
{
5625 struct binder_proc *proc, *itr;
5626 struct binder_device *binder_dev;
5627 struct binderfs_info *info;
5628 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5629 bool existing_pid = false;
5631 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5632 current->group_leader->pid, current->pid);
5634 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5635 if (proc == NULL)
5636 return -ENOMEM;
5637 spin_lock_init(&proc->inner_lock);
5638 spin_lock_init(&proc->outer_lock);
5639 get_task_struct(current->group_leader);
5640 proc->tsk = current->group_leader;
5641 proc->cred = get_cred(filp->f_cred);
5642 INIT_LIST_HEAD(&proc->todo);
5643 init_waitqueue_head(&proc->freeze_wait);
5644 proc->default_priority = task_nice(current);
5645 /* binderfs stashes devices in i_private */
5646 if (is_binderfs_device(nodp)) {
5647 binder_dev = nodp->i_private;
5648 info = nodp->i_sb->s_fs_info;
5649 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5650 } else {
5651 binder_dev = container_of(filp->private_data,
5652 struct binder_device, miscdev);
5654 refcount_inc(&binder_dev->ref);
5655 proc->context = &binder_dev->context;
5656 binder_alloc_init(&proc->alloc);
5658 binder_stats_created(BINDER_STAT_PROC);
5659 proc->pid = current->group_leader->pid;
5660 INIT_LIST_HEAD(&proc->delivered_death);
5661 INIT_LIST_HEAD(&proc->waiting_threads);
5662 filp->private_data = proc;
5664 mutex_lock(&binder_procs_lock);
5665 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5666 if (itr->pid == proc->pid) {
5667 existing_pid = true;
5668 break;
5671 hlist_add_head(&proc->proc_node, &binder_procs);
5672 mutex_unlock(&binder_procs_lock);
5674 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5675 char strbuf[11];
5677 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5678 /*
5679 * proc debug entries are shared between contexts.
5680 * Only create an entry for the first PID to avoid debugfs spam.
5681 * The printing code will print all contexts for a given
5682 * PID anyway, so this is not a problem.
5683 */
5684 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5685 binder_debugfs_dir_entry_proc,
5686 (void *)(unsigned long)proc->pid,
5687 &proc_fops);
5690 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5691 char strbuf[11];
5692 struct dentry *binderfs_entry;
5694 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5695 /*
5696 * Similar to debugfs, the process-specific log file is shared
5697 * between contexts. Only create it for the first PID.
5698 * This is fine since, as with debugfs, the log file will contain
5699 * information on all contexts of a given PID.
5700 */
5701 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5702 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5703 if (!IS_ERR(binderfs_entry)) {
5704 proc->binderfs_entry = binderfs_entry;
5705 } else {
5706 int error;
5708 error = PTR_ERR(binderfs_entry);
5709 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5710 strbuf, error);
5714 return 0;
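/*
 * On the non-binderfs path above, misc_open() has already stored the
 * matching struct miscdevice in filp->private_data, which is what lets
 * container_of() walk back to the enclosing binder_device. A generic
 * sketch of that pattern, with made-up names:
 */
#if 0
struct my_device {
	struct miscdevice miscdev;	/* embedded and misc_register()ed */
	int state;
};

static int my_open(struct inode *inode, struct file *filp)
{
	/* misc_open() set filp->private_data = &my_device.miscdev */
	struct my_device *dev = container_of(filp->private_data,
					     struct my_device, miscdev);

	filp->private_data = dev;	/* repoint at the full device */
	return 0;
}
#endif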
5717 static int binder_flush(struct file *filp, fl_owner_t id)
5719 struct binder_proc *proc = filp->private_data;
5721 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5723 return 0;
5726 static void binder_deferred_flush(struct binder_proc *proc)
5728 struct rb_node *n;
5729 int wake_count = 0;
5731 binder_inner_proc_lock(proc);
5732 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5733 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5735 thread->looper_need_return = true;
5736 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5737 wake_up_interruptible(&thread->wait);
5738 wake_count++;
5741 binder_inner_proc_unlock(proc);
5743 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5744 "binder_flush: %d woke %d threads\n", proc->pid,
5745 wake_count);
5748 static int binder_release(struct inode *nodp, struct file *filp)
5750 struct binder_proc *proc = filp->private_data;
5752 debugfs_remove(proc->debugfs_entry);
5754 if (proc->binderfs_entry) {
5755 binderfs_remove_file(proc->binderfs_entry);
5756 proc->binderfs_entry = NULL;
5759 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5761 return 0;
5764 static int binder_node_release(struct binder_node *node, int refs)
5766 struct binder_ref *ref;
5767 int death = 0;
5768 struct binder_proc *proc = node->proc;
5770 binder_release_work(proc, &node->async_todo);
5772 binder_node_lock(node);
5773 binder_inner_proc_lock(proc);
5774 binder_dequeue_work_ilocked(&node->work);
5775 /*
5776 * The caller must have taken a temporary ref on the node.
5777 */
5778 BUG_ON(!node->tmp_refs);
5779 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5780 binder_inner_proc_unlock(proc);
5781 binder_node_unlock(node);
5782 binder_free_node(node);
5784 return refs;
5787 node->proc = NULL;
5788 node->local_strong_refs = 0;
5789 node->local_weak_refs = 0;
5790 binder_inner_proc_unlock(proc);
5792 spin_lock(&binder_dead_nodes_lock);
5793 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5794 spin_unlock(&binder_dead_nodes_lock);
5796 hlist_for_each_entry(ref, &node->refs, node_entry) {
5797 refs++;
5798 /*
5799 * Need the node lock to synchronize
5800 * with new notification requests and the
5801 * inner lock to synchronize with queued
5802 * death notifications.
5803 */
5804 binder_inner_proc_lock(ref->proc);
5805 if (!ref->death) {
5806 binder_inner_proc_unlock(ref->proc);
5807 continue;
5810 death++;
5812 BUG_ON(!list_empty(&ref->death->work.entry));
5813 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5814 binder_enqueue_work_ilocked(&ref->death->work,
5815 &ref->proc->todo);
5816 binder_wakeup_proc_ilocked(ref->proc);
5817 binder_inner_proc_unlock(ref->proc);
5820 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5821 "node %d now dead, refs %d, death %d\n",
5822 node->debug_id, refs, death);
5823 binder_node_unlock(node);
5824 binder_put_node(node);
5826 return refs;
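/*
 * The BINDER_WORK_DEAD_BINDER items queued above reach user space as
 * BR_DEAD_BINDER on a thread that earlier registered interest with
 * BC_REQUEST_DEATH_NOTIFICATION. Rough sketch of that handshake from the
 * user-space side (buffer plumbing omitted; illustration only):
 */
#if 0
#include <stdint.h>
#include <linux/android/binder.h>

static void request_death_notification(uint32_t handle, void *recipient)
{
	struct {
		uint32_t cmd;
		struct binder_handle_cookie payload;
	} __attribute__((packed)) req = {
		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
		.payload = {
			.handle = handle,
			.cookie = (binder_uintptr_t)recipient,
		},
	};

	/* hand &req to the write side of a BINDER_WRITE_READ ioctl ... */
	/*
	 * ... the read side later yields BR_DEAD_BINDER plus this cookie,
	 * which must be acknowledged with BC_DEAD_BINDER_DONE.
	 */
}
#endif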
5829 static void binder_deferred_release(struct binder_proc *proc)
5831 struct binder_context *context = proc->context;
5832 struct rb_node *n;
5833 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5835 mutex_lock(&binder_procs_lock);
5836 hlist_del(&proc->proc_node);
5837 mutex_unlock(&binder_procs_lock);
5839 mutex_lock(&context->context_mgr_node_lock);
5840 if (context->binder_context_mgr_node &&
5841 context->binder_context_mgr_node->proc == proc) {
5842 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5843 "%s: %d context_mgr_node gone\n",
5844 __func__, proc->pid);
5845 context->binder_context_mgr_node = NULL;
5847 mutex_unlock(&context->context_mgr_node_lock);
5848 binder_inner_proc_lock(proc);
5849 /*
5850 * Make sure proc stays alive after we
5851 * remove all the threads
5852 */
5853 proc->tmp_ref++;
5855 proc->is_dead = true;
5856 proc->is_frozen = false;
5857 proc->sync_recv = false;
5858 proc->async_recv = false;
5859 threads = 0;
5860 active_transactions = 0;
5861 while ((n = rb_first(&proc->threads))) {
5862 struct binder_thread *thread;
5864 thread = rb_entry(n, struct binder_thread, rb_node);
5865 binder_inner_proc_unlock(proc);
5866 threads++;
5867 active_transactions += binder_thread_release(proc, thread);
5868 binder_inner_proc_lock(proc);
5871 nodes = 0;
5872 incoming_refs = 0;
5873 while ((n = rb_first(&proc->nodes))) {
5874 struct binder_node *node;
5876 node = rb_entry(n, struct binder_node, rb_node);
5877 nodes++;
5878 /*
5879 * take a temporary ref on the node before
5880 * calling binder_node_release() which will either
5881 * kfree() the node or call binder_put_node()
5882 */
5883 binder_inc_node_tmpref_ilocked(node);
5884 rb_erase(&node->rb_node, &proc->nodes);
5885 binder_inner_proc_unlock(proc);
5886 incoming_refs = binder_node_release(node, incoming_refs);
5887 binder_inner_proc_lock(proc);
5889 binder_inner_proc_unlock(proc);
5891 outgoing_refs = 0;
5892 binder_proc_lock(proc);
5893 while ((n = rb_first(&proc->refs_by_desc))) {
5894 struct binder_ref *ref;
5896 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5897 outgoing_refs++;
5898 binder_cleanup_ref_olocked(ref);
5899 binder_proc_unlock(proc);
5900 binder_free_ref(ref);
5901 binder_proc_lock(proc);
5903 binder_proc_unlock(proc);
5905 binder_release_work(proc, &proc->todo);
5906 binder_release_work(proc, &proc->delivered_death);
5908 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5909 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5910 __func__, proc->pid, threads, nodes, incoming_refs,
5911 outgoing_refs, active_transactions);
5913 binder_proc_dec_tmpref(proc);
5916 static void binder_deferred_func(struct work_struct *work)
5918 struct binder_proc *proc;
5920 int defer;
5922 do {
5923 mutex_lock(&binder_deferred_lock);
5924 if (!hlist_empty(&binder_deferred_list)) {
5925 proc = hlist_entry(binder_deferred_list.first,
5926 struct binder_proc, deferred_work_node);
5927 hlist_del_init(&proc->deferred_work_node);
5928 defer = proc->deferred_work;
5929 proc->deferred_work = 0;
5930 } else {
5931 proc = NULL;
5932 defer = 0;
5934 mutex_unlock(&binder_deferred_lock);
5936 if (defer & BINDER_DEFERRED_FLUSH)
5937 binder_deferred_flush(proc);
5939 if (defer & BINDER_DEFERRED_RELEASE)
5940 binder_deferred_release(proc); /* frees proc */
5941 } while (proc);
5943 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5945 static void
5946 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5948 mutex_lock(&binder_deferred_lock);
5949 proc->deferred_work |= defer;
5950 if (hlist_unhashed(&proc->deferred_work_node)) {
5951 hlist_add_head(&proc->deferred_work_node,
5952 &binder_deferred_list);
5953 schedule_work(&binder_deferred_work);
5955 mutex_unlock(&binder_deferred_lock);
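/*
 * binder_defer_work() is a coalescing pattern: flags are OR-ed under one
 * mutex, each proc is queued at most once (the hlist_unhashed() test), and
 * a single work item drains the whole list. Condensed generic shape of the
 * same idea, with illustrative names:
 */
#if 0
struct obj {
	struct hlist_node node;
	unsigned int pending;
};

static DEFINE_MUTEX(defer_lock);
static HLIST_HEAD(defer_list);
static void drain_fn(struct work_struct *work);
static DECLARE_WORK(drain_work, drain_fn);

static void queue_deferred(struct obj *o, unsigned int what)
{
	mutex_lock(&defer_lock);
	o->pending |= what;			/* merge repeated requests */
	if (hlist_unhashed(&o->node)) {		/* enqueue each obj once */
		hlist_add_head(&o->node, &defer_list);
		schedule_work(&drain_work);	/* one worker empties the list */
	}
	mutex_unlock(&defer_lock);
}
#endif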
5958 static void print_binder_transaction_ilocked(struct seq_file *m,
5959 struct binder_proc *proc,
5960 const char *prefix,
5961 struct binder_transaction *t)
5963 struct binder_proc *to_proc;
5964 struct binder_buffer *buffer = t->buffer;
5965 ktime_t current_time = ktime_get();
5967 spin_lock(&t->lock);
5968 to_proc = t->to_proc;
5969 seq_printf(m,
5970 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
5971 prefix, t->debug_id, t,
5972 t->from_pid,
5973 t->from_tid,
5974 to_proc ? to_proc->pid : 0,
5975 t->to_thread ? t->to_thread->pid : 0,
5976 t->code, t->flags, t->priority, t->need_reply,
5977 ktime_ms_delta(current_time, t->start_time));
5978 spin_unlock(&t->lock);
5980 if (proc != to_proc) {
5981 /*
5982 * Can only safely deref buffer if we are holding the
5983 * correct proc inner lock for this node
5984 */
5985 seq_puts(m, "\n");
5986 return;
5989 if (buffer == NULL) {
5990 seq_puts(m, " buffer free\n");
5991 return;
5993 if (buffer->target_node)
5994 seq_printf(m, " node %d", buffer->target_node->debug_id);
5995 seq_printf(m, " size %zd:%zd offset %lx\n",
5996 buffer->data_size, buffer->offsets_size,
5997 proc->alloc.buffer - buffer->user_data);
6000 static void print_binder_work_ilocked(struct seq_file *m,
6001 struct binder_proc *proc,
6002 const char *prefix,
6003 const char *transaction_prefix,
6004 struct binder_work *w)
6006 struct binder_node *node;
6007 struct binder_transaction *t;
6009 switch (w->type) {
6010 case BINDER_WORK_TRANSACTION:
6011 t = container_of(w, struct binder_transaction, work);
6012 print_binder_transaction_ilocked(
6013 m, proc, transaction_prefix, t);
6014 break;
6015 case BINDER_WORK_RETURN_ERROR: {
6016 struct binder_error *e = container_of(
6017 w, struct binder_error, work);
6019 seq_printf(m, "%stransaction error: %u\n",
6020 prefix, e->cmd);
6021 } break;
6022 case BINDER_WORK_TRANSACTION_COMPLETE:
6023 seq_printf(m, "%stransaction complete\n", prefix);
6024 break;
6025 case BINDER_WORK_NODE:
6026 node = container_of(w, struct binder_node, work);
6027 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6028 prefix, node->debug_id,
6029 (u64)node->ptr, (u64)node->cookie);
6030 break;
6031 case BINDER_WORK_DEAD_BINDER:
6032 seq_printf(m, "%shas dead binder\n", prefix);
6033 break;
6034 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6035 seq_printf(m, "%shas cleared dead binder\n", prefix);
6036 break;
6037 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6038 seq_printf(m, "%shas cleared death notification\n", prefix);
6039 break;
6040 default:
6041 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6042 break;
6046 static void print_binder_thread_ilocked(struct seq_file *m,
6047 struct binder_thread *thread,
6048 int print_always)
6050 struct binder_transaction *t;
6051 struct binder_work *w;
6052 size_t start_pos = m->count;
6053 size_t header_pos;
6055 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6056 thread->pid, thread->looper,
6057 thread->looper_need_return,
6058 atomic_read(&thread->tmp_ref));
6059 header_pos = m->count;
6060 t = thread->transaction_stack;
6061 while (t) {
6062 if (t->from == thread) {
6063 print_binder_transaction_ilocked(m, thread->proc,
6064 " outgoing transaction", t);
6065 t = t->from_parent;
6066 } else if (t->to_thread == thread) {
6067 print_binder_transaction_ilocked(m, thread->proc,
6068 " incoming transaction", t);
6069 t = t->to_parent;
6070 } else {
6071 print_binder_transaction_ilocked(m, thread->proc,
6072 " bad transaction", t);
6073 t = NULL;
6076 list_for_each_entry(w, &thread->todo, entry) {
6077 print_binder_work_ilocked(m, thread->proc, " ",
6078 " pending transaction", w);
6080 if (!print_always && m->count == header_pos)
6081 m->count = start_pos;
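/*
 * The start_pos/header_pos bookkeeping above is a seq_file trick: record
 * m->count before and after the header, and if the body printed nothing,
 * rewind m->count so the header disappears as well. Skeleton of the idiom
 * (pid and print_always are illustrative):
 */
#if 0
size_t start_pos = m->count;		/* before anything is emitted */
seq_printf(m, "  thread %d:\n", pid);
size_t header_pos = m->count;		/* after just the header */
/* ... conditionally print the body ... */
if (!print_always && m->count == header_pos)
	m->count = start_pos;		/* empty body: retract the header */
#endif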
6084 static void print_binder_node_nilocked(struct seq_file *m,
6085 struct binder_node *node)
6087 struct binder_ref *ref;
6088 struct binder_work *w;
6089 int count;
6091 count = hlist_count_nodes(&node->refs);
6093 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6094 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6095 node->has_strong_ref, node->has_weak_ref,
6096 node->local_strong_refs, node->local_weak_refs,
6097 node->internal_strong_refs, count, node->tmp_refs);
6098 if (count) {
6099 seq_puts(m, " proc");
6100 hlist_for_each_entry(ref, &node->refs, node_entry)
6101 seq_printf(m, " %d", ref->proc->pid);
6103 seq_puts(m, "\n");
6104 if (node->proc) {
6105 list_for_each_entry(w, &node->async_todo, entry)
6106 print_binder_work_ilocked(m, node->proc, " ",
6107 " pending async transaction", w);
6111 static void print_binder_ref_olocked(struct seq_file *m,
6112 struct binder_ref *ref)
6114 binder_node_lock(ref->node);
6115 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6116 ref->data.debug_id, ref->data.desc,
6117 ref->node->proc ? "" : "dead ",
6118 ref->node->debug_id, ref->data.strong,
6119 ref->data.weak, ref->death);
6120 binder_node_unlock(ref->node);
6123 static void print_binder_proc(struct seq_file *m,
6124 struct binder_proc *proc, int print_all)
6126 struct binder_work *w;
6127 struct rb_node *n;
6128 size_t start_pos = m->count;
6129 size_t header_pos;
6130 struct binder_node *last_node = NULL;
6132 seq_printf(m, "proc %d\n", proc->pid);
6133 seq_printf(m, "context %s\n", proc->context->name);
6134 header_pos = m->count;
6136 binder_inner_proc_lock(proc);
6137 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6138 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6139 rb_node), print_all);
6141 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6142 struct binder_node *node = rb_entry(n, struct binder_node,
6143 rb_node);
6144 if (!print_all && !node->has_async_transaction)
6145 continue;
6147 /*
6148 * take a temporary reference on the node so it
6149 * survives and isn't removed from the tree
6150 * while we print it.
6151 */
6152 binder_inc_node_tmpref_ilocked(node);
6153 /* Need to drop inner lock to take node lock */
6154 binder_inner_proc_unlock(proc);
6155 if (last_node)
6156 binder_put_node(last_node);
6157 binder_node_inner_lock(node);
6158 print_binder_node_nilocked(m, node);
6159 binder_node_inner_unlock(node);
6160 last_node = node;
6161 binder_inner_proc_lock(proc);
6163 binder_inner_proc_unlock(proc);
6164 if (last_node)
6165 binder_put_node(last_node);
6167 if (print_all) {
6168 binder_proc_lock(proc);
6169 for (n = rb_first(&proc->refs_by_desc);
6170 n != NULL;
6171 n = rb_next(n))
6172 print_binder_ref_olocked(m, rb_entry(n,
6173 struct binder_ref,
6174 rb_node_desc));
6175 binder_proc_unlock(proc);
6177 binder_alloc_print_allocated(m, &proc->alloc);
6178 binder_inner_proc_lock(proc);
6179 list_for_each_entry(w, &proc->todo, entry)
6180 print_binder_work_ilocked(m, proc, " ",
6181 " pending transaction", w);
6182 list_for_each_entry(w, &proc->delivered_death, entry) {
6183 seq_puts(m, " has delivered dead binder\n");
6184 break;
6186 binder_inner_proc_unlock(proc);
6187 if (!print_all && m->count == header_pos)
6188 m->count = start_pos;
6191 static const char * const binder_return_strings[] = {
6192 "BR_ERROR",
6193 "BR_OK",
6194 "BR_TRANSACTION",
6195 "BR_REPLY",
6196 "BR_ACQUIRE_RESULT",
6197 "BR_DEAD_REPLY",
6198 "BR_TRANSACTION_COMPLETE",
6199 "BR_INCREFS",
6200 "BR_ACQUIRE",
6201 "BR_RELEASE",
6202 "BR_DECREFS",
6203 "BR_ATTEMPT_ACQUIRE",
6204 "BR_NOOP",
6205 "BR_SPAWN_LOOPER",
6206 "BR_FINISHED",
6207 "BR_DEAD_BINDER",
6208 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6209 "BR_FAILED_REPLY",
6210 "BR_FROZEN_REPLY",
6211 "BR_ONEWAY_SPAM_SUSPECT",
6212 "BR_TRANSACTION_PENDING_FROZEN"
6215 static const char * const binder_command_strings[] = {
6216 "BC_TRANSACTION",
6217 "BC_REPLY",
6218 "BC_ACQUIRE_RESULT",
6219 "BC_FREE_BUFFER",
6220 "BC_INCREFS",
6221 "BC_ACQUIRE",
6222 "BC_RELEASE",
6223 "BC_DECREFS",
6224 "BC_INCREFS_DONE",
6225 "BC_ACQUIRE_DONE",
6226 "BC_ATTEMPT_ACQUIRE",
6227 "BC_REGISTER_LOOPER",
6228 "BC_ENTER_LOOPER",
6229 "BC_EXIT_LOOPER",
6230 "BC_REQUEST_DEATH_NOTIFICATION",
6231 "BC_CLEAR_DEATH_NOTIFICATION",
6232 "BC_DEAD_BINDER_DONE",
6233 "BC_TRANSACTION_SG",
6234 "BC_REPLY_SG",
6237 static const char * const binder_objstat_strings[] = {
6238 "proc",
6239 "thread",
6240 "node",
6241 "ref",
6242 "death",
6243 "transaction",
6244 "transaction_complete"
6247 static void print_binder_stats(struct seq_file *m, const char *prefix,
6248 struct binder_stats *stats)
6250 int i;
6252 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6253 ARRAY_SIZE(binder_command_strings));
6254 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6255 int temp = atomic_read(&stats->bc[i]);
6257 if (temp)
6258 seq_printf(m, "%s%s: %d\n", prefix,
6259 binder_command_strings[i], temp);
6262 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6263 ARRAY_SIZE(binder_return_strings));
6264 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6265 int temp = atomic_read(&stats->br[i]);
6267 if (temp)
6268 seq_printf(m, "%s%s: %d\n", prefix,
6269 binder_return_strings[i], temp);
6272 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6273 ARRAY_SIZE(binder_objstat_strings));
6274 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6275 ARRAY_SIZE(stats->obj_deleted));
6276 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6277 int created = atomic_read(&stats->obj_created[i]);
6278 int deleted = atomic_read(&stats->obj_deleted[i]);
6280 if (created || deleted)
6281 seq_printf(m, "%s%s: active %d total %d\n",
6282 prefix,
6283 binder_objstat_strings[i],
6284 created - deleted,
6285 created);
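/*
 * The BUILD_BUG_ON() calls above are compile-time guards that keep the
 * string tables in lockstep with the counter arrays, so growing the BC_/BR_
 * command sets without adding a name breaks the build rather than indexing
 * past a table. Minimal form of the idiom (names are illustrative):
 */
#if 0
enum { EV_FOO, EV_BAR, EV_MAX };
static atomic_t ev_count[EV_MAX];
static const char * const ev_name[] = { "foo", "bar" };

static void ev_dump(struct seq_file *m)
{
	/* fails to compile if the enum grows but ev_name[] does not */
	BUILD_BUG_ON(ARRAY_SIZE(ev_count) != ARRAY_SIZE(ev_name));
}
#endif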
6289 static void print_binder_proc_stats(struct seq_file *m,
6290 struct binder_proc *proc)
6292 struct binder_work *w;
6293 struct binder_thread *thread;
6294 struct rb_node *n;
6295 int count, strong, weak, ready_threads;
6296 size_t free_async_space =
6297 binder_alloc_get_free_async_space(&proc->alloc);
6299 seq_printf(m, "proc %d\n", proc->pid);
6300 seq_printf(m, "context %s\n", proc->context->name);
6301 count = 0;
6302 ready_threads = 0;
6303 binder_inner_proc_lock(proc);
6304 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6305 count++;
6307 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6308 ready_threads++;
6310 seq_printf(m, " threads: %d\n", count);
6311 seq_printf(m, " requested threads: %d+%d/%d\n"
6312 " ready threads %d\n"
6313 " free async space %zd\n", proc->requested_threads,
6314 proc->requested_threads_started, proc->max_threads,
6315 ready_threads,
6316 free_async_space);
6317 count = 0;
6318 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6319 count++;
6320 binder_inner_proc_unlock(proc);
6321 seq_printf(m, " nodes: %d\n", count);
6322 count = 0;
6323 strong = 0;
6324 weak = 0;
6325 binder_proc_lock(proc);
6326 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6327 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6328 rb_node_desc);
6329 count++;
6330 strong += ref->data.strong;
6331 weak += ref->data.weak;
6333 binder_proc_unlock(proc);
6334 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6336 count = binder_alloc_get_allocated_count(&proc->alloc);
6337 seq_printf(m, " buffers: %d\n", count);
6339 binder_alloc_print_pages(m, &proc->alloc);
6341 count = 0;
6342 binder_inner_proc_lock(proc);
6343 list_for_each_entry(w, &proc->todo, entry) {
6344 if (w->type == BINDER_WORK_TRANSACTION)
6345 count++;
6347 binder_inner_proc_unlock(proc);
6348 seq_printf(m, " pending transactions: %d\n", count);
6350 print_binder_stats(m, " ", &proc->stats);
6353 static int state_show(struct seq_file *m, void *unused)
6355 struct binder_proc *proc;
6356 struct binder_node *node;
6357 struct binder_node *last_node = NULL;
6359 seq_puts(m, "binder state:\n");
6361 spin_lock(&binder_dead_nodes_lock);
6362 if (!hlist_empty(&binder_dead_nodes))
6363 seq_puts(m, "dead nodes:\n");
6364 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6365 /*
6366 * take a temporary reference on the node so it
6367 * survives and isn't removed from the list
6368 * while we print it.
6369 */
6370 node->tmp_refs++;
6371 spin_unlock(&binder_dead_nodes_lock);
6372 if (last_node)
6373 binder_put_node(last_node);
6374 binder_node_lock(node);
6375 print_binder_node_nilocked(m, node);
6376 binder_node_unlock(node);
6377 last_node = node;
6378 spin_lock(&binder_dead_nodes_lock);
6380 spin_unlock(&binder_dead_nodes_lock);
6381 if (last_node)
6382 binder_put_node(last_node);
6384 mutex_lock(&binder_procs_lock);
6385 hlist_for_each_entry(proc, &binder_procs, proc_node)
6386 print_binder_proc(m, proc, 1);
6387 mutex_unlock(&binder_procs_lock);
6389 return 0;
6392 static int stats_show(struct seq_file *m, void *unused)
6394 struct binder_proc *proc;
6396 seq_puts(m, "binder stats:\n");
6398 print_binder_stats(m, "", &binder_stats);
6400 mutex_lock(&binder_procs_lock);
6401 hlist_for_each_entry(proc, &binder_procs, proc_node)
6402 print_binder_proc_stats(m, proc);
6403 mutex_unlock(&binder_procs_lock);
6405 return 0;
6408 static int transactions_show(struct seq_file *m, void *unused)
6410 struct binder_proc *proc;
6412 seq_puts(m, "binder transactions:\n");
6413 mutex_lock(&binder_procs_lock);
6414 hlist_for_each_entry(proc, &binder_procs, proc_node)
6415 print_binder_proc(m, proc, 0);
6416 mutex_unlock(&binder_procs_lock);
6418 return 0;
6421 static int proc_show(struct seq_file *m, void *unused)
6423 struct binder_proc *itr;
6424 int pid = (unsigned long)m->private;
6426 mutex_lock(&binder_procs_lock);
6427 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6428 if (itr->pid == pid) {
6429 seq_puts(m, "binder proc state:\n");
6430 print_binder_proc(m, itr, 1);
6433 mutex_unlock(&binder_procs_lock);
6435 return 0;
6438 static void print_binder_transaction_log_entry(struct seq_file *m,
6439 struct binder_transaction_log_entry *e)
6441 int debug_id = READ_ONCE(e->debug_id_done);
6442 /*
6443 * read barrier to guarantee debug_id_done read before
6444 * we print the log values
6445 */
6446 smp_rmb();
6447 seq_printf(m,
6448 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6449 e->debug_id, (e->call_type == 2) ? "reply" :
6450 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6451 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6452 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6453 e->return_error, e->return_error_param,
6454 e->return_error_line);
6455 /*
6456 * read barrier to guarantee debug_id_done is read after
6457 * the fields of the entry have been printed
6458 */
6459 smp_rmb();
6460 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6461 "\n" : " (incomplete)\n");
6464 static int transaction_log_show(struct seq_file *m, void *unused)
6466 struct binder_transaction_log *log = m->private;
6467 unsigned int log_cur = atomic_read(&log->cur);
6468 unsigned int count;
6469 unsigned int cur;
6470 int i;
6472 count = log_cur + 1;
6473 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6474 0 : count % ARRAY_SIZE(log->entry);
6475 if (count > ARRAY_SIZE(log->entry) || log->full)
6476 count = ARRAY_SIZE(log->entry);
6477 for (i = 0; i < count; i++) {
6478 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6480 print_binder_transaction_log_entry(m, &log->entry[index]);
6482 return 0;
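/*
 * Worked instance of the cursor math above, assuming a 32-entry ring that
 * has wrapped and atomic cur == 40 (cur starts at ~0U, so the very first
 * entry increments it to 0 and lands in slot 0):
 *
 *	count = cur + 1 = 41	-> clamped to ARRAY_SIZE(log->entry) = 32
 *	start = 41 % 32 = 9	-> slots printed: 9, 10, ..., 31, 0, ..., 8
 *
 * i.e. iteration begins at the oldest surviving entry and ends at the
 * newest.
 */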
6485 const struct file_operations binder_fops = {
6486 .owner = THIS_MODULE,
6487 .poll = binder_poll,
6488 .unlocked_ioctl = binder_ioctl,
6489 .compat_ioctl = compat_ptr_ioctl,
6490 .mmap = binder_mmap,
6491 .open = binder_open,
6492 .flush = binder_flush,
6493 .release = binder_release,
6496 DEFINE_SHOW_ATTRIBUTE(state);
6497 DEFINE_SHOW_ATTRIBUTE(stats);
6498 DEFINE_SHOW_ATTRIBUTE(transactions);
6499 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6501 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6503 .name = "state",
6504 .mode = 0444,
6505 .fops = &state_fops,
6506 .data = NULL,
6509 .name = "stats",
6510 .mode = 0444,
6511 .fops = &stats_fops,
6512 .data = NULL,
6515 .name = "transactions",
6516 .mode = 0444,
6517 .fops = &transactions_fops,
6518 .data = NULL,
6521 .name = "transaction_log",
6522 .mode = 0444,
6523 .fops = &transaction_log_fops,
6524 .data = &binder_transaction_log,
6527 .name = "failed_transaction_log",
6528 .mode = 0444,
6529 .fops = &transaction_log_fops,
6530 .data = &binder_transaction_log_failed,
6532 {} /* terminator */
6535 static int __init init_binder_device(const char *name)
6537 int ret;
6538 struct binder_device *binder_device;
6540 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6541 if (!binder_device)
6542 return -ENOMEM;
6544 binder_device->miscdev.fops = &binder_fops;
6545 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6546 binder_device->miscdev.name = name;
6548 refcount_set(&binder_device->ref, 1);
6549 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6550 binder_device->context.name = name;
6551 mutex_init(&binder_device->context.context_mgr_node_lock);
6553 ret = misc_register(&binder_device->miscdev);
6554 if (ret < 0) {
6555 kfree(binder_device);
6556 return ret;
6559 hlist_add_head(&binder_device->hlist, &binder_devices);
6561 return ret;
6564 static int __init binder_init(void)
6566 int ret;
6567 char *device_name, *device_tmp;
6568 struct binder_device *device;
6569 struct hlist_node *tmp;
6570 char *device_names = NULL;
6571 const struct binder_debugfs_entry *db_entry;
6573 ret = binder_alloc_shrinker_init();
6574 if (ret)
6575 return ret;
6577 atomic_set(&binder_transaction_log.cur, ~0U);
6578 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6580 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6582 binder_for_each_debugfs_entry(db_entry)
6583 debugfs_create_file(db_entry->name,
6584 db_entry->mode,
6585 binder_debugfs_dir_entry_root,
6586 db_entry->data,
6587 db_entry->fops);
6589 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6590 binder_debugfs_dir_entry_root);
6592 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6593 strcmp(binder_devices_param, "") != 0) {
6594 /*
6595 * Copy the module parameter string, because we don't want to
6596 * tokenize it in-place.
6597 */
6598 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6599 if (!device_names) {
6600 ret = -ENOMEM;
6601 goto err_alloc_device_names_failed;
6604 device_tmp = device_names;
6605 while ((device_name = strsep(&device_tmp, ","))) {
6606 ret = init_binder_device(device_name);
6607 if (ret)
6608 goto err_init_binder_device_failed;
6612 ret = init_binderfs();
6613 if (ret)
6614 goto err_init_binder_device_failed;
6616 return ret;
6618 err_init_binder_device_failed:
6619 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6620 misc_deregister(&device->miscdev);
6621 hlist_del(&device->hlist);
6622 kfree(device);
6625 kfree(device_names);
6627 err_alloc_device_names_failed:
6628 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6629 binder_alloc_shrinker_exit();
6631 return ret;
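/*
 * The device-name loop above is the usual strsep() idiom: the module
 * parameter is kstrdup()ed first because strsep() tokenizes destructively,
 * overwriting each ',' with a NUL and advancing its cursor. Stand-alone
 * user-space sketch of the same parse (names are illustrative):
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *names = strdup("binder,hwbinder,vndbinder");
	char *tmp = names, *name;

	while ((name = strsep(&tmp, ",")))	/* commas become NULs */
		printf("would create /dev/%s\n", name);
	free(names);	/* free the original pointer, not the cursor */
	return 0;
}
#endif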
6634 device_initcall(binder_init);
6636 #define CREATE_TRACE_POINTS
6637 #include "binder_trace.h"
6639 MODULE_LICENSE("GPL v2");