Commit 64dcfe6b84d4104d93e4baf2b5a0b3e7f2e4cc30
Committed by
Greg Kroah-Hartman
1 parent
eecddef594
Exists in
smarc-imx_3.14.28_1.0.0_ga
and in
1 other branch
staging: android: binder: fix binder interface for 64bit compat layer
The changes in this patch will fix the binder interface for use on 64bit machines and stand as the base of the 64bit compat support. The changes apply to the structures that are passed between the kernel and userspace. Most of the changes applied mirror the change to struct binder_version, where there is no need for a 64bit-wide protocol_version (on 64bit machines). The change is in line with the existing 32bit userspace (the structure has the same size) and simplifies the compat layer such that the same handler can service the BINDER_VERSION ioctl. Other changes make use of kernel types as well as user-exportable ones and fix format specifier issues. The changes do not affect the existing 32bit ABI. Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com> Acked-by: Arve Hjønnevåg <arve@android.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 2 changed files with 16 additions and 16 deletions Inline Diff
drivers/staging/android/binder.c
1 | /* binder.c | 1 | /* binder.c |
2 | * | 2 | * |
3 | * Android IPC Subsystem | 3 | * Android IPC Subsystem |
4 | * | 4 | * |
5 | * Copyright (C) 2007-2008 Google, Inc. | 5 | * Copyright (C) 2007-2008 Google, Inc. |
6 | * | 6 | * |
7 | * This software is licensed under the terms of the GNU General Public | 7 | * This software is licensed under the terms of the GNU General Public |
8 | * License version 2, as published by the Free Software Foundation, and | 8 | * License version 2, as published by the Free Software Foundation, and |
9 | * may be copied, distributed, and modified under those terms. | 9 | * may be copied, distributed, and modified under those terms. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
19 | 19 | ||
20 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
21 | #include <linux/fdtable.h> | 21 | #include <linux/fdtable.h> |
22 | #include <linux/file.h> | 22 | #include <linux/file.h> |
23 | #include <linux/freezer.h> | 23 | #include <linux/freezer.h> |
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <linux/list.h> | 25 | #include <linux/list.h> |
26 | #include <linux/miscdevice.h> | 26 | #include <linux/miscdevice.h> |
27 | #include <linux/mm.h> | 27 | #include <linux/mm.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/mutex.h> | 29 | #include <linux/mutex.h> |
30 | #include <linux/nsproxy.h> | 30 | #include <linux/nsproxy.h> |
31 | #include <linux/poll.h> | 31 | #include <linux/poll.h> |
32 | #include <linux/debugfs.h> | 32 | #include <linux/debugfs.h> |
33 | #include <linux/rbtree.h> | 33 | #include <linux/rbtree.h> |
34 | #include <linux/sched.h> | 34 | #include <linux/sched.h> |
35 | #include <linux/seq_file.h> | 35 | #include <linux/seq_file.h> |
36 | #include <linux/uaccess.h> | 36 | #include <linux/uaccess.h> |
37 | #include <linux/vmalloc.h> | 37 | #include <linux/vmalloc.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/pid_namespace.h> | 39 | #include <linux/pid_namespace.h> |
40 | 40 | ||
41 | #include "binder.h" | 41 | #include "binder.h" |
42 | #include "binder_trace.h" | 42 | #include "binder_trace.h" |
43 | 43 | ||
44 | static DEFINE_MUTEX(binder_main_lock); | 44 | static DEFINE_MUTEX(binder_main_lock); |
45 | static DEFINE_MUTEX(binder_deferred_lock); | 45 | static DEFINE_MUTEX(binder_deferred_lock); |
46 | static DEFINE_MUTEX(binder_mmap_lock); | 46 | static DEFINE_MUTEX(binder_mmap_lock); |
47 | 47 | ||
48 | static HLIST_HEAD(binder_procs); | 48 | static HLIST_HEAD(binder_procs); |
49 | static HLIST_HEAD(binder_deferred_list); | 49 | static HLIST_HEAD(binder_deferred_list); |
50 | static HLIST_HEAD(binder_dead_nodes); | 50 | static HLIST_HEAD(binder_dead_nodes); |
51 | 51 | ||
52 | static struct dentry *binder_debugfs_dir_entry_root; | 52 | static struct dentry *binder_debugfs_dir_entry_root; |
53 | static struct dentry *binder_debugfs_dir_entry_proc; | 53 | static struct dentry *binder_debugfs_dir_entry_proc; |
54 | static struct binder_node *binder_context_mgr_node; | 54 | static struct binder_node *binder_context_mgr_node; |
55 | static kuid_t binder_context_mgr_uid = INVALID_UID; | 55 | static kuid_t binder_context_mgr_uid = INVALID_UID; |
56 | static int binder_last_id; | 56 | static int binder_last_id; |
57 | static struct workqueue_struct *binder_deferred_workqueue; | 57 | static struct workqueue_struct *binder_deferred_workqueue; |
58 | 58 | ||
59 | #define BINDER_DEBUG_ENTRY(name) \ | 59 | #define BINDER_DEBUG_ENTRY(name) \ |
60 | static int binder_##name##_open(struct inode *inode, struct file *file) \ | 60 | static int binder_##name##_open(struct inode *inode, struct file *file) \ |
61 | { \ | 61 | { \ |
62 | return single_open(file, binder_##name##_show, inode->i_private); \ | 62 | return single_open(file, binder_##name##_show, inode->i_private); \ |
63 | } \ | 63 | } \ |
64 | \ | 64 | \ |
65 | static const struct file_operations binder_##name##_fops = { \ | 65 | static const struct file_operations binder_##name##_fops = { \ |
66 | .owner = THIS_MODULE, \ | 66 | .owner = THIS_MODULE, \ |
67 | .open = binder_##name##_open, \ | 67 | .open = binder_##name##_open, \ |
68 | .read = seq_read, \ | 68 | .read = seq_read, \ |
69 | .llseek = seq_lseek, \ | 69 | .llseek = seq_lseek, \ |
70 | .release = single_release, \ | 70 | .release = single_release, \ |
71 | } | 71 | } |
72 | 72 | ||
73 | static int binder_proc_show(struct seq_file *m, void *unused); | 73 | static int binder_proc_show(struct seq_file *m, void *unused); |
74 | BINDER_DEBUG_ENTRY(proc); | 74 | BINDER_DEBUG_ENTRY(proc); |
75 | 75 | ||
76 | /* This is only defined in include/asm-arm/sizes.h */ | 76 | /* This is only defined in include/asm-arm/sizes.h */ |
77 | #ifndef SZ_1K | 77 | #ifndef SZ_1K |
78 | #define SZ_1K 0x400 | 78 | #define SZ_1K 0x400 |
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | #ifndef SZ_4M | 81 | #ifndef SZ_4M |
82 | #define SZ_4M 0x400000 | 82 | #define SZ_4M 0x400000 |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) | 85 | #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) |
86 | 86 | ||
87 | #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) | 87 | #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) |
88 | 88 | ||
89 | enum { | 89 | enum { |
90 | BINDER_DEBUG_USER_ERROR = 1U << 0, | 90 | BINDER_DEBUG_USER_ERROR = 1U << 0, |
91 | BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, | 91 | BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, |
92 | BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, | 92 | BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, |
93 | BINDER_DEBUG_OPEN_CLOSE = 1U << 3, | 93 | BINDER_DEBUG_OPEN_CLOSE = 1U << 3, |
94 | BINDER_DEBUG_DEAD_BINDER = 1U << 4, | 94 | BINDER_DEBUG_DEAD_BINDER = 1U << 4, |
95 | BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, | 95 | BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, |
96 | BINDER_DEBUG_READ_WRITE = 1U << 6, | 96 | BINDER_DEBUG_READ_WRITE = 1U << 6, |
97 | BINDER_DEBUG_USER_REFS = 1U << 7, | 97 | BINDER_DEBUG_USER_REFS = 1U << 7, |
98 | BINDER_DEBUG_THREADS = 1U << 8, | 98 | BINDER_DEBUG_THREADS = 1U << 8, |
99 | BINDER_DEBUG_TRANSACTION = 1U << 9, | 99 | BINDER_DEBUG_TRANSACTION = 1U << 9, |
100 | BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, | 100 | BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, |
101 | BINDER_DEBUG_FREE_BUFFER = 1U << 11, | 101 | BINDER_DEBUG_FREE_BUFFER = 1U << 11, |
102 | BINDER_DEBUG_INTERNAL_REFS = 1U << 12, | 102 | BINDER_DEBUG_INTERNAL_REFS = 1U << 12, |
103 | BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, | 103 | BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, |
104 | BINDER_DEBUG_PRIORITY_CAP = 1U << 14, | 104 | BINDER_DEBUG_PRIORITY_CAP = 1U << 14, |
105 | BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, | 105 | BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, |
106 | }; | 106 | }; |
107 | static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | | 107 | static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | |
108 | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; | 108 | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; |
109 | module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); | 109 | module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); |
110 | 110 | ||
111 | static bool binder_debug_no_lock; | 111 | static bool binder_debug_no_lock; |
112 | module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); | 112 | module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); |
113 | 113 | ||
114 | static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); | 114 | static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); |
115 | static int binder_stop_on_user_error; | 115 | static int binder_stop_on_user_error; |
116 | 116 | ||
117 | static int binder_set_stop_on_user_error(const char *val, | 117 | static int binder_set_stop_on_user_error(const char *val, |
118 | struct kernel_param *kp) | 118 | struct kernel_param *kp) |
119 | { | 119 | { |
120 | int ret; | 120 | int ret; |
121 | ret = param_set_int(val, kp); | 121 | ret = param_set_int(val, kp); |
122 | if (binder_stop_on_user_error < 2) | 122 | if (binder_stop_on_user_error < 2) |
123 | wake_up(&binder_user_error_wait); | 123 | wake_up(&binder_user_error_wait); |
124 | return ret; | 124 | return ret; |
125 | } | 125 | } |
126 | module_param_call(stop_on_user_error, binder_set_stop_on_user_error, | 126 | module_param_call(stop_on_user_error, binder_set_stop_on_user_error, |
127 | param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); | 127 | param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); |
128 | 128 | ||
129 | #define binder_debug(mask, x...) \ | 129 | #define binder_debug(mask, x...) \ |
130 | do { \ | 130 | do { \ |
131 | if (binder_debug_mask & mask) \ | 131 | if (binder_debug_mask & mask) \ |
132 | pr_info(x); \ | 132 | pr_info(x); \ |
133 | } while (0) | 133 | } while (0) |
134 | 134 | ||
135 | #define binder_user_error(x...) \ | 135 | #define binder_user_error(x...) \ |
136 | do { \ | 136 | do { \ |
137 | if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ | 137 | if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ |
138 | pr_info(x); \ | 138 | pr_info(x); \ |
139 | if (binder_stop_on_user_error) \ | 139 | if (binder_stop_on_user_error) \ |
140 | binder_stop_on_user_error = 2; \ | 140 | binder_stop_on_user_error = 2; \ |
141 | } while (0) | 141 | } while (0) |
142 | 142 | ||
143 | enum binder_stat_types { | 143 | enum binder_stat_types { |
144 | BINDER_STAT_PROC, | 144 | BINDER_STAT_PROC, |
145 | BINDER_STAT_THREAD, | 145 | BINDER_STAT_THREAD, |
146 | BINDER_STAT_NODE, | 146 | BINDER_STAT_NODE, |
147 | BINDER_STAT_REF, | 147 | BINDER_STAT_REF, |
148 | BINDER_STAT_DEATH, | 148 | BINDER_STAT_DEATH, |
149 | BINDER_STAT_TRANSACTION, | 149 | BINDER_STAT_TRANSACTION, |
150 | BINDER_STAT_TRANSACTION_COMPLETE, | 150 | BINDER_STAT_TRANSACTION_COMPLETE, |
151 | BINDER_STAT_COUNT | 151 | BINDER_STAT_COUNT |
152 | }; | 152 | }; |
153 | 153 | ||
154 | struct binder_stats { | 154 | struct binder_stats { |
155 | int br[_IOC_NR(BR_FAILED_REPLY) + 1]; | 155 | int br[_IOC_NR(BR_FAILED_REPLY) + 1]; |
156 | int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; | 156 | int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; |
157 | int obj_created[BINDER_STAT_COUNT]; | 157 | int obj_created[BINDER_STAT_COUNT]; |
158 | int obj_deleted[BINDER_STAT_COUNT]; | 158 | int obj_deleted[BINDER_STAT_COUNT]; |
159 | }; | 159 | }; |
160 | 160 | ||
161 | static struct binder_stats binder_stats; | 161 | static struct binder_stats binder_stats; |
162 | 162 | ||
163 | static inline void binder_stats_deleted(enum binder_stat_types type) | 163 | static inline void binder_stats_deleted(enum binder_stat_types type) |
164 | { | 164 | { |
165 | binder_stats.obj_deleted[type]++; | 165 | binder_stats.obj_deleted[type]++; |
166 | } | 166 | } |
167 | 167 | ||
168 | static inline void binder_stats_created(enum binder_stat_types type) | 168 | static inline void binder_stats_created(enum binder_stat_types type) |
169 | { | 169 | { |
170 | binder_stats.obj_created[type]++; | 170 | binder_stats.obj_created[type]++; |
171 | } | 171 | } |
172 | 172 | ||
173 | struct binder_transaction_log_entry { | 173 | struct binder_transaction_log_entry { |
174 | int debug_id; | 174 | int debug_id; |
175 | int call_type; | 175 | int call_type; |
176 | int from_proc; | 176 | int from_proc; |
177 | int from_thread; | 177 | int from_thread; |
178 | int target_handle; | 178 | int target_handle; |
179 | int to_proc; | 179 | int to_proc; |
180 | int to_thread; | 180 | int to_thread; |
181 | int to_node; | 181 | int to_node; |
182 | int data_size; | 182 | int data_size; |
183 | int offsets_size; | 183 | int offsets_size; |
184 | }; | 184 | }; |
185 | struct binder_transaction_log { | 185 | struct binder_transaction_log { |
186 | int next; | 186 | int next; |
187 | int full; | 187 | int full; |
188 | struct binder_transaction_log_entry entry[32]; | 188 | struct binder_transaction_log_entry entry[32]; |
189 | }; | 189 | }; |
190 | static struct binder_transaction_log binder_transaction_log; | 190 | static struct binder_transaction_log binder_transaction_log; |
191 | static struct binder_transaction_log binder_transaction_log_failed; | 191 | static struct binder_transaction_log binder_transaction_log_failed; |
192 | 192 | ||
193 | static struct binder_transaction_log_entry *binder_transaction_log_add( | 193 | static struct binder_transaction_log_entry *binder_transaction_log_add( |
194 | struct binder_transaction_log *log) | 194 | struct binder_transaction_log *log) |
195 | { | 195 | { |
196 | struct binder_transaction_log_entry *e; | 196 | struct binder_transaction_log_entry *e; |
197 | e = &log->entry[log->next]; | 197 | e = &log->entry[log->next]; |
198 | memset(e, 0, sizeof(*e)); | 198 | memset(e, 0, sizeof(*e)); |
199 | log->next++; | 199 | log->next++; |
200 | if (log->next == ARRAY_SIZE(log->entry)) { | 200 | if (log->next == ARRAY_SIZE(log->entry)) { |
201 | log->next = 0; | 201 | log->next = 0; |
202 | log->full = 1; | 202 | log->full = 1; |
203 | } | 203 | } |
204 | return e; | 204 | return e; |
205 | } | 205 | } |
206 | 206 | ||
207 | struct binder_work { | 207 | struct binder_work { |
208 | struct list_head entry; | 208 | struct list_head entry; |
209 | enum { | 209 | enum { |
210 | BINDER_WORK_TRANSACTION = 1, | 210 | BINDER_WORK_TRANSACTION = 1, |
211 | BINDER_WORK_TRANSACTION_COMPLETE, | 211 | BINDER_WORK_TRANSACTION_COMPLETE, |
212 | BINDER_WORK_NODE, | 212 | BINDER_WORK_NODE, |
213 | BINDER_WORK_DEAD_BINDER, | 213 | BINDER_WORK_DEAD_BINDER, |
214 | BINDER_WORK_DEAD_BINDER_AND_CLEAR, | 214 | BINDER_WORK_DEAD_BINDER_AND_CLEAR, |
215 | BINDER_WORK_CLEAR_DEATH_NOTIFICATION, | 215 | BINDER_WORK_CLEAR_DEATH_NOTIFICATION, |
216 | } type; | 216 | } type; |
217 | }; | 217 | }; |
218 | 218 | ||
219 | struct binder_node { | 219 | struct binder_node { |
220 | int debug_id; | 220 | int debug_id; |
221 | struct binder_work work; | 221 | struct binder_work work; |
222 | union { | 222 | union { |
223 | struct rb_node rb_node; | 223 | struct rb_node rb_node; |
224 | struct hlist_node dead_node; | 224 | struct hlist_node dead_node; |
225 | }; | 225 | }; |
226 | struct binder_proc *proc; | 226 | struct binder_proc *proc; |
227 | struct hlist_head refs; | 227 | struct hlist_head refs; |
228 | int internal_strong_refs; | 228 | int internal_strong_refs; |
229 | int local_weak_refs; | 229 | int local_weak_refs; |
230 | int local_strong_refs; | 230 | int local_strong_refs; |
231 | void __user *ptr; | 231 | void __user *ptr; |
232 | void __user *cookie; | 232 | void __user *cookie; |
233 | unsigned has_strong_ref:1; | 233 | unsigned has_strong_ref:1; |
234 | unsigned pending_strong_ref:1; | 234 | unsigned pending_strong_ref:1; |
235 | unsigned has_weak_ref:1; | 235 | unsigned has_weak_ref:1; |
236 | unsigned pending_weak_ref:1; | 236 | unsigned pending_weak_ref:1; |
237 | unsigned has_async_transaction:1; | 237 | unsigned has_async_transaction:1; |
238 | unsigned accept_fds:1; | 238 | unsigned accept_fds:1; |
239 | unsigned min_priority:8; | 239 | unsigned min_priority:8; |
240 | struct list_head async_todo; | 240 | struct list_head async_todo; |
241 | }; | 241 | }; |
242 | 242 | ||
243 | struct binder_ref_death { | 243 | struct binder_ref_death { |
244 | struct binder_work work; | 244 | struct binder_work work; |
245 | void __user *cookie; | 245 | void __user *cookie; |
246 | }; | 246 | }; |
247 | 247 | ||
248 | struct binder_ref { | 248 | struct binder_ref { |
249 | /* Lookups needed: */ | 249 | /* Lookups needed: */ |
250 | /* node + proc => ref (transaction) */ | 250 | /* node + proc => ref (transaction) */ |
251 | /* desc + proc => ref (transaction, inc/dec ref) */ | 251 | /* desc + proc => ref (transaction, inc/dec ref) */ |
252 | /* node => refs + procs (proc exit) */ | 252 | /* node => refs + procs (proc exit) */ |
253 | int debug_id; | 253 | int debug_id; |
254 | struct rb_node rb_node_desc; | 254 | struct rb_node rb_node_desc; |
255 | struct rb_node rb_node_node; | 255 | struct rb_node rb_node_node; |
256 | struct hlist_node node_entry; | 256 | struct hlist_node node_entry; |
257 | struct binder_proc *proc; | 257 | struct binder_proc *proc; |
258 | struct binder_node *node; | 258 | struct binder_node *node; |
259 | uint32_t desc; | 259 | uint32_t desc; |
260 | int strong; | 260 | int strong; |
261 | int weak; | 261 | int weak; |
262 | struct binder_ref_death *death; | 262 | struct binder_ref_death *death; |
263 | }; | 263 | }; |
264 | 264 | ||
265 | struct binder_buffer { | 265 | struct binder_buffer { |
266 | struct list_head entry; /* free and allocated entries by address */ | 266 | struct list_head entry; /* free and allocated entries by address */ |
267 | struct rb_node rb_node; /* free entry by size or allocated entry */ | 267 | struct rb_node rb_node; /* free entry by size or allocated entry */ |
268 | /* by address */ | 268 | /* by address */ |
269 | unsigned free:1; | 269 | unsigned free:1; |
270 | unsigned allow_user_free:1; | 270 | unsigned allow_user_free:1; |
271 | unsigned async_transaction:1; | 271 | unsigned async_transaction:1; |
272 | unsigned debug_id:29; | 272 | unsigned debug_id:29; |
273 | 273 | ||
274 | struct binder_transaction *transaction; | 274 | struct binder_transaction *transaction; |
275 | 275 | ||
276 | struct binder_node *target_node; | 276 | struct binder_node *target_node; |
277 | size_t data_size; | 277 | size_t data_size; |
278 | size_t offsets_size; | 278 | size_t offsets_size; |
279 | uint8_t data[0]; | 279 | uint8_t data[0]; |
280 | }; | 280 | }; |
281 | 281 | ||
282 | enum binder_deferred_state { | 282 | enum binder_deferred_state { |
283 | BINDER_DEFERRED_PUT_FILES = 0x01, | 283 | BINDER_DEFERRED_PUT_FILES = 0x01, |
284 | BINDER_DEFERRED_FLUSH = 0x02, | 284 | BINDER_DEFERRED_FLUSH = 0x02, |
285 | BINDER_DEFERRED_RELEASE = 0x04, | 285 | BINDER_DEFERRED_RELEASE = 0x04, |
286 | }; | 286 | }; |
287 | 287 | ||
288 | struct binder_proc { | 288 | struct binder_proc { |
289 | struct hlist_node proc_node; | 289 | struct hlist_node proc_node; |
290 | struct rb_root threads; | 290 | struct rb_root threads; |
291 | struct rb_root nodes; | 291 | struct rb_root nodes; |
292 | struct rb_root refs_by_desc; | 292 | struct rb_root refs_by_desc; |
293 | struct rb_root refs_by_node; | 293 | struct rb_root refs_by_node; |
294 | int pid; | 294 | int pid; |
295 | struct vm_area_struct *vma; | 295 | struct vm_area_struct *vma; |
296 | struct mm_struct *vma_vm_mm; | 296 | struct mm_struct *vma_vm_mm; |
297 | struct task_struct *tsk; | 297 | struct task_struct *tsk; |
298 | struct files_struct *files; | 298 | struct files_struct *files; |
299 | struct hlist_node deferred_work_node; | 299 | struct hlist_node deferred_work_node; |
300 | int deferred_work; | 300 | int deferred_work; |
301 | void *buffer; | 301 | void *buffer; |
302 | ptrdiff_t user_buffer_offset; | 302 | ptrdiff_t user_buffer_offset; |
303 | 303 | ||
304 | struct list_head buffers; | 304 | struct list_head buffers; |
305 | struct rb_root free_buffers; | 305 | struct rb_root free_buffers; |
306 | struct rb_root allocated_buffers; | 306 | struct rb_root allocated_buffers; |
307 | size_t free_async_space; | 307 | size_t free_async_space; |
308 | 308 | ||
309 | struct page **pages; | 309 | struct page **pages; |
310 | size_t buffer_size; | 310 | size_t buffer_size; |
311 | uint32_t buffer_free; | 311 | uint32_t buffer_free; |
312 | struct list_head todo; | 312 | struct list_head todo; |
313 | wait_queue_head_t wait; | 313 | wait_queue_head_t wait; |
314 | struct binder_stats stats; | 314 | struct binder_stats stats; |
315 | struct list_head delivered_death; | 315 | struct list_head delivered_death; |
316 | int max_threads; | 316 | int max_threads; |
317 | int requested_threads; | 317 | int requested_threads; |
318 | int requested_threads_started; | 318 | int requested_threads_started; |
319 | int ready_threads; | 319 | int ready_threads; |
320 | long default_priority; | 320 | long default_priority; |
321 | struct dentry *debugfs_entry; | 321 | struct dentry *debugfs_entry; |
322 | }; | 322 | }; |
323 | 323 | ||
324 | enum { | 324 | enum { |
325 | BINDER_LOOPER_STATE_REGISTERED = 0x01, | 325 | BINDER_LOOPER_STATE_REGISTERED = 0x01, |
326 | BINDER_LOOPER_STATE_ENTERED = 0x02, | 326 | BINDER_LOOPER_STATE_ENTERED = 0x02, |
327 | BINDER_LOOPER_STATE_EXITED = 0x04, | 327 | BINDER_LOOPER_STATE_EXITED = 0x04, |
328 | BINDER_LOOPER_STATE_INVALID = 0x08, | 328 | BINDER_LOOPER_STATE_INVALID = 0x08, |
329 | BINDER_LOOPER_STATE_WAITING = 0x10, | 329 | BINDER_LOOPER_STATE_WAITING = 0x10, |
330 | BINDER_LOOPER_STATE_NEED_RETURN = 0x20 | 330 | BINDER_LOOPER_STATE_NEED_RETURN = 0x20 |
331 | }; | 331 | }; |
332 | 332 | ||
333 | struct binder_thread { | 333 | struct binder_thread { |
334 | struct binder_proc *proc; | 334 | struct binder_proc *proc; |
335 | struct rb_node rb_node; | 335 | struct rb_node rb_node; |
336 | int pid; | 336 | int pid; |
337 | int looper; | 337 | int looper; |
338 | struct binder_transaction *transaction_stack; | 338 | struct binder_transaction *transaction_stack; |
339 | struct list_head todo; | 339 | struct list_head todo; |
340 | uint32_t return_error; /* Write failed, return error code in read buf */ | 340 | uint32_t return_error; /* Write failed, return error code in read buf */ |
341 | uint32_t return_error2; /* Write failed, return error code in read */ | 341 | uint32_t return_error2; /* Write failed, return error code in read */ |
342 | /* buffer. Used when sending a reply to a dead process that */ | 342 | /* buffer. Used when sending a reply to a dead process that */ |
343 | /* we are also waiting on */ | 343 | /* we are also waiting on */ |
344 | wait_queue_head_t wait; | 344 | wait_queue_head_t wait; |
345 | struct binder_stats stats; | 345 | struct binder_stats stats; |
346 | }; | 346 | }; |
347 | 347 | ||
348 | struct binder_transaction { | 348 | struct binder_transaction { |
349 | int debug_id; | 349 | int debug_id; |
350 | struct binder_work work; | 350 | struct binder_work work; |
351 | struct binder_thread *from; | 351 | struct binder_thread *from; |
352 | struct binder_transaction *from_parent; | 352 | struct binder_transaction *from_parent; |
353 | struct binder_proc *to_proc; | 353 | struct binder_proc *to_proc; |
354 | struct binder_thread *to_thread; | 354 | struct binder_thread *to_thread; |
355 | struct binder_transaction *to_parent; | 355 | struct binder_transaction *to_parent; |
356 | unsigned need_reply:1; | 356 | unsigned need_reply:1; |
357 | /* unsigned is_dead:1; */ /* not used at the moment */ | 357 | /* unsigned is_dead:1; */ /* not used at the moment */ |
358 | 358 | ||
359 | struct binder_buffer *buffer; | 359 | struct binder_buffer *buffer; |
360 | unsigned int code; | 360 | unsigned int code; |
361 | unsigned int flags; | 361 | unsigned int flags; |
362 | long priority; | 362 | long priority; |
363 | long saved_priority; | 363 | long saved_priority; |
364 | kuid_t sender_euid; | 364 | kuid_t sender_euid; |
365 | }; | 365 | }; |
366 | 366 | ||
367 | static void | 367 | static void |
368 | binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); | 368 | binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); |
369 | 369 | ||
370 | static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) | 370 | static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) |
371 | { | 371 | { |
372 | struct files_struct *files = proc->files; | 372 | struct files_struct *files = proc->files; |
373 | unsigned long rlim_cur; | 373 | unsigned long rlim_cur; |
374 | unsigned long irqs; | 374 | unsigned long irqs; |
375 | 375 | ||
376 | if (files == NULL) | 376 | if (files == NULL) |
377 | return -ESRCH; | 377 | return -ESRCH; |
378 | 378 | ||
379 | if (!lock_task_sighand(proc->tsk, &irqs)) | 379 | if (!lock_task_sighand(proc->tsk, &irqs)) |
380 | return -EMFILE; | 380 | return -EMFILE; |
381 | 381 | ||
382 | rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); | 382 | rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); |
383 | unlock_task_sighand(proc->tsk, &irqs); | 383 | unlock_task_sighand(proc->tsk, &irqs); |
384 | 384 | ||
385 | return __alloc_fd(files, 0, rlim_cur, flags); | 385 | return __alloc_fd(files, 0, rlim_cur, flags); |
386 | } | 386 | } |
387 | 387 | ||
388 | /* | 388 | /* |
389 | * copied from fd_install | 389 | * copied from fd_install |
390 | */ | 390 | */ |
391 | static void task_fd_install( | 391 | static void task_fd_install( |
392 | struct binder_proc *proc, unsigned int fd, struct file *file) | 392 | struct binder_proc *proc, unsigned int fd, struct file *file) |
393 | { | 393 | { |
394 | if (proc->files) | 394 | if (proc->files) |
395 | __fd_install(proc->files, fd, file); | 395 | __fd_install(proc->files, fd, file); |
396 | } | 396 | } |
397 | 397 | ||
398 | /* | 398 | /* |
399 | * copied from sys_close | 399 | * copied from sys_close |
400 | */ | 400 | */ |
401 | static long task_close_fd(struct binder_proc *proc, unsigned int fd) | 401 | static long task_close_fd(struct binder_proc *proc, unsigned int fd) |
402 | { | 402 | { |
403 | int retval; | 403 | int retval; |
404 | 404 | ||
405 | if (proc->files == NULL) | 405 | if (proc->files == NULL) |
406 | return -ESRCH; | 406 | return -ESRCH; |
407 | 407 | ||
408 | retval = __close_fd(proc->files, fd); | 408 | retval = __close_fd(proc->files, fd); |
409 | /* can't restart close syscall because file table entry was cleared */ | 409 | /* can't restart close syscall because file table entry was cleared */ |
410 | if (unlikely(retval == -ERESTARTSYS || | 410 | if (unlikely(retval == -ERESTARTSYS || |
411 | retval == -ERESTARTNOINTR || | 411 | retval == -ERESTARTNOINTR || |
412 | retval == -ERESTARTNOHAND || | 412 | retval == -ERESTARTNOHAND || |
413 | retval == -ERESTART_RESTARTBLOCK)) | 413 | retval == -ERESTART_RESTARTBLOCK)) |
414 | retval = -EINTR; | 414 | retval = -EINTR; |
415 | 415 | ||
416 | return retval; | 416 | return retval; |
417 | } | 417 | } |
418 | 418 | ||
419 | static inline void binder_lock(const char *tag) | 419 | static inline void binder_lock(const char *tag) |
420 | { | 420 | { |
421 | trace_binder_lock(tag); | 421 | trace_binder_lock(tag); |
422 | mutex_lock(&binder_main_lock); | 422 | mutex_lock(&binder_main_lock); |
423 | trace_binder_locked(tag); | 423 | trace_binder_locked(tag); |
424 | } | 424 | } |
425 | 425 | ||
426 | static inline void binder_unlock(const char *tag) | 426 | static inline void binder_unlock(const char *tag) |
427 | { | 427 | { |
428 | trace_binder_unlock(tag); | 428 | trace_binder_unlock(tag); |
429 | mutex_unlock(&binder_main_lock); | 429 | mutex_unlock(&binder_main_lock); |
430 | } | 430 | } |
431 | 431 | ||
432 | static void binder_set_nice(long nice) | 432 | static void binder_set_nice(long nice) |
433 | { | 433 | { |
434 | long min_nice; | 434 | long min_nice; |
435 | if (can_nice(current, nice)) { | 435 | if (can_nice(current, nice)) { |
436 | set_user_nice(current, nice); | 436 | set_user_nice(current, nice); |
437 | return; | 437 | return; |
438 | } | 438 | } |
439 | min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; | 439 | min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; |
440 | binder_debug(BINDER_DEBUG_PRIORITY_CAP, | 440 | binder_debug(BINDER_DEBUG_PRIORITY_CAP, |
441 | "%d: nice value %ld not allowed use %ld instead\n", | 441 | "%d: nice value %ld not allowed use %ld instead\n", |
442 | current->pid, nice, min_nice); | 442 | current->pid, nice, min_nice); |
443 | set_user_nice(current, min_nice); | 443 | set_user_nice(current, min_nice); |
444 | if (min_nice < 20) | 444 | if (min_nice < 20) |
445 | return; | 445 | return; |
446 | binder_user_error("%d RLIMIT_NICE not set\n", current->pid); | 446 | binder_user_error("%d RLIMIT_NICE not set\n", current->pid); |
447 | } | 447 | } |
448 | 448 | ||
449 | static size_t binder_buffer_size(struct binder_proc *proc, | 449 | static size_t binder_buffer_size(struct binder_proc *proc, |
450 | struct binder_buffer *buffer) | 450 | struct binder_buffer *buffer) |
451 | { | 451 | { |
452 | if (list_is_last(&buffer->entry, &proc->buffers)) | 452 | if (list_is_last(&buffer->entry, &proc->buffers)) |
453 | return proc->buffer + proc->buffer_size - (void *)buffer->data; | 453 | return proc->buffer + proc->buffer_size - (void *)buffer->data; |
454 | else | 454 | else |
455 | return (size_t)list_entry(buffer->entry.next, | 455 | return (size_t)list_entry(buffer->entry.next, |
456 | struct binder_buffer, entry) - (size_t)buffer->data; | 456 | struct binder_buffer, entry) - (size_t)buffer->data; |
457 | } | 457 | } |
458 | 458 | ||
/*
 * Insert @new_buffer into @proc's free-buffer rb-tree.
 *
 * The tree is keyed by effective buffer size (as computed by
 * binder_buffer_size()), so binder_alloc_buf() can do a best-fit
 * search.  Duplicate sizes are allowed; ties go to the right subtree.
 * Caller must pass a buffer already marked free.
 */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		     proc->pid, new_buffer_size, new_buffer);

	/* Standard rb-tree descent to find the insertion point. */
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
491 | 491 | ||
/*
 * Insert @new_buffer into @proc's allocated-buffer rb-tree.
 *
 * Unlike the free tree, this tree is keyed by the buffer's kernel
 * address, which is unique, so finding an equal key is a fatal
 * inconsistency (BUG()).  Caller must pass a buffer marked in-use.
 */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();	/* same address inserted twice */
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}
516 | 516 | ||
/*
 * Translate a userspace data pointer back to the binder_buffer that
 * owns it.
 *
 * @user_ptr points at the data[] payload as seen by userspace; subtract
 * the per-process user/kernel mapping offset and the binder_buffer
 * header preceding data[] to recover the kernel-side buffer address,
 * then look it up in the allocated-buffer tree (keyed by address).
 * Returns NULL if no allocated buffer matches.
 */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  void __user *user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
540 | 540 | ||
/*
 * Allocate (@allocate != 0) or free (@allocate == 0) the physical pages
 * backing the kernel range [@start, @end) of @proc's binder buffer
 * area, and mirror each page into the process's userspace mapping.
 *
 * If @vma is NULL the vma is taken from @proc under mmap_sem of the
 * task's mm.  Returns 0 on success, -ENOMEM on failure; on a partial
 * allocation failure the error labels inside the free loop unwind
 * exactly the pages set up so far (note the intentional jumps into the
 * middle of the free_range loop body).
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	/* With a caller-supplied vma we are already under its mmap_sem. */
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		/* Map the page into the kernel side of the buffer area. */
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		/* And into the corresponding userspace address. */
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/* Tear down in reverse order; error labels join mid-iteration. */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
643 | 643 | ||
/*
 * Allocate a transaction buffer of @data_size payload plus
 * @offsets_size offset-array bytes from @proc's mmap'ed buffer area.
 *
 * Performs a best-fit search of the size-keyed free tree, backs the
 * needed page range with physical pages, and splits off any usable
 * remainder as a new free buffer.  Async transactions are additionally
 * limited by proc->free_async_space.  Returns the buffer, or NULL on
 * overflow, exhaustion, or missing vma.
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	/* Pointer-align both parts; detect wrap-around from huge sizes. */
	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	/* Best-fit: smallest free buffer that still fits @size. */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	/* n == NULL means no exact match; fall back to the best fit. */
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	/* Last page boundary already backed by this buffer's range. */
	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		/*
		 * Split the surplus into a new free buffer, but only if
		 * the remainder can hold a header plus a little payload.
		 */
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		/* Remainder becomes a new free buffer right after ours. */
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}
748 | 748 | ||
749 | static void *buffer_start_page(struct binder_buffer *buffer) | 749 | static void *buffer_start_page(struct binder_buffer *buffer) |
750 | { | 750 | { |
751 | return (void *)((uintptr_t)buffer & PAGE_MASK); | 751 | return (void *)((uintptr_t)buffer & PAGE_MASK); |
752 | } | 752 | } |
753 | 753 | ||
754 | static void *buffer_end_page(struct binder_buffer *buffer) | 754 | static void *buffer_end_page(struct binder_buffer *buffer) |
755 | { | 755 | { |
756 | return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); | 756 | return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); |
757 | } | 757 | } |
758 | 758 | ||
759 | static void binder_delete_free_buffer(struct binder_proc *proc, | 759 | static void binder_delete_free_buffer(struct binder_proc *proc, |
760 | struct binder_buffer *buffer) | 760 | struct binder_buffer *buffer) |
761 | { | 761 | { |
762 | struct binder_buffer *prev, *next = NULL; | 762 | struct binder_buffer *prev, *next = NULL; |
763 | int free_page_end = 1; | 763 | int free_page_end = 1; |
764 | int free_page_start = 1; | 764 | int free_page_start = 1; |
765 | 765 | ||
766 | BUG_ON(proc->buffers.next == &buffer->entry); | 766 | BUG_ON(proc->buffers.next == &buffer->entry); |
767 | prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); | 767 | prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); |
768 | BUG_ON(!prev->free); | 768 | BUG_ON(!prev->free); |
769 | if (buffer_end_page(prev) == buffer_start_page(buffer)) { | 769 | if (buffer_end_page(prev) == buffer_start_page(buffer)) { |
770 | free_page_start = 0; | 770 | free_page_start = 0; |
771 | if (buffer_end_page(prev) == buffer_end_page(buffer)) | 771 | if (buffer_end_page(prev) == buffer_end_page(buffer)) |
772 | free_page_end = 0; | 772 | free_page_end = 0; |
773 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, | 773 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, |
774 | "%d: merge free, buffer %p share page with %p\n", | 774 | "%d: merge free, buffer %p share page with %p\n", |
775 | proc->pid, buffer, prev); | 775 | proc->pid, buffer, prev); |
776 | } | 776 | } |
777 | 777 | ||
778 | if (!list_is_last(&buffer->entry, &proc->buffers)) { | 778 | if (!list_is_last(&buffer->entry, &proc->buffers)) { |
779 | next = list_entry(buffer->entry.next, | 779 | next = list_entry(buffer->entry.next, |
780 | struct binder_buffer, entry); | 780 | struct binder_buffer, entry); |
781 | if (buffer_start_page(next) == buffer_end_page(buffer)) { | 781 | if (buffer_start_page(next) == buffer_end_page(buffer)) { |
782 | free_page_end = 0; | 782 | free_page_end = 0; |
783 | if (buffer_start_page(next) == | 783 | if (buffer_start_page(next) == |
784 | buffer_start_page(buffer)) | 784 | buffer_start_page(buffer)) |
785 | free_page_start = 0; | 785 | free_page_start = 0; |
786 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, | 786 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, |
787 | "%d: merge free, buffer %p share page with %p\n", | 787 | "%d: merge free, buffer %p share page with %p\n", |
788 | proc->pid, buffer, prev); | 788 | proc->pid, buffer, prev); |
789 | } | 789 | } |
790 | } | 790 | } |
791 | list_del(&buffer->entry); | 791 | list_del(&buffer->entry); |
792 | if (free_page_start || free_page_end) { | 792 | if (free_page_start || free_page_end) { |
793 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, | 793 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, |
794 | "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", | 794 | "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", |
795 | proc->pid, buffer, free_page_start ? "" : " end", | 795 | proc->pid, buffer, free_page_start ? "" : " end", |
796 | free_page_end ? "" : " start", prev, next); | 796 | free_page_end ? "" : " start", prev, next); |
797 | binder_update_page_range(proc, 0, free_page_start ? | 797 | binder_update_page_range(proc, 0, free_page_start ? |
798 | buffer_start_page(buffer) : buffer_end_page(buffer), | 798 | buffer_start_page(buffer) : buffer_end_page(buffer), |
799 | (free_page_end ? buffer_end_page(buffer) : | 799 | (free_page_end ? buffer_end_page(buffer) : |
800 | buffer_start_page(buffer)) + PAGE_SIZE, NULL); | 800 | buffer_start_page(buffer)) + PAGE_SIZE, NULL); |
801 | } | 801 | } |
802 | } | 802 | } |
803 | 803 | ||
/*
 * Return @buffer to @proc's free pool.
 *
 * Releases the whole pages covered exclusively by the payload, restores
 * async space accounting if needed, coalesces with adjacent free
 * buffers in the address-ordered list, and re-inserts the (possibly
 * merged) buffer into the free tree.
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	/* Pointer-aligned payload size, mirroring binder_alloc_buf(). */
	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	/* Free only the full pages between the payload's page bounds. */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* Merge with the following buffer if it is also free. */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* Merge backwards: the merged region is represented by @prev. */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
857 | 857 | ||
858 | static struct binder_node *binder_get_node(struct binder_proc *proc, | 858 | static struct binder_node *binder_get_node(struct binder_proc *proc, |
859 | void __user *ptr) | 859 | void __user *ptr) |
860 | { | 860 | { |
861 | struct rb_node *n = proc->nodes.rb_node; | 861 | struct rb_node *n = proc->nodes.rb_node; |
862 | struct binder_node *node; | 862 | struct binder_node *node; |
863 | 863 | ||
864 | while (n) { | 864 | while (n) { |
865 | node = rb_entry(n, struct binder_node, rb_node); | 865 | node = rb_entry(n, struct binder_node, rb_node); |
866 | 866 | ||
867 | if (ptr < node->ptr) | 867 | if (ptr < node->ptr) |
868 | n = n->rb_left; | 868 | n = n->rb_left; |
869 | else if (ptr > node->ptr) | 869 | else if (ptr > node->ptr) |
870 | n = n->rb_right; | 870 | n = n->rb_right; |
871 | else | 871 | else |
872 | return node; | 872 | return node; |
873 | } | 873 | } |
874 | return NULL; | 874 | return NULL; |
875 | } | 875 | } |
876 | 876 | ||
/*
 * Create a binder_node for (@ptr, @cookie) and insert it into @proc's
 * node tree, keyed by the userspace @ptr.
 *
 * Returns the new node, or NULL if a node with the same @ptr already
 * exists or if the allocation fails.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   void __user *ptr,
					   void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	/* Locate the insertion point; duplicates are rejected. */
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,
		     node->ptr, node->cookie);
	return node;
}
916 | 916 | ||
/*
 * Take a strong (@strong != 0) or weak reference on @node.
 *
 * @internal distinguishes refs held by remote binder_refs from refs
 * held locally on behalf of the owning process.  When userspace must
 * be told about the new ref, the node's work item is (re)queued on
 * @target_list.  Returns 0 on success or -EINVAL for an invalid
 * increment request.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * A first internal strong ref with no target list
			 * is only legal for the context manager node that
			 * already holds a strong ref.
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* Requeue so userspace learns of the strong ref. */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
951 | 951 | ||
/*
 * binder_dec_node() - drop one strong or weak reference on @node.
 * @node:     node whose reference count is decremented
 * @strong:   non-zero to drop a strong ref, zero for a weak ref
 * @internal: non-zero when the ref is held via a remote binder_ref
 *            (internal_strong_refs) rather than the local process
 *
 * When the last reference of the given kind goes away, the node is
 * either queued on its owning process' todo list (so userspace can be
 * told to release its object) or, if nothing references it at all any
 * more, unlinked and freed.  Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		/* still strongly referenced: nothing more to do */
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		/* still weakly referenced, locally or by remote refs */
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* userspace still holds the object: queue work so the
		 * owning process is asked to drop its strong/weak ref */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			/* completely unreferenced: unlink and free */
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* proc is gone: node lives on the dead-node list */
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
994 | 994 | ||
995 | 995 | ||
996 | static struct binder_ref *binder_get_ref(struct binder_proc *proc, | 996 | static struct binder_ref *binder_get_ref(struct binder_proc *proc, |
997 | uint32_t desc) | 997 | uint32_t desc) |
998 | { | 998 | { |
999 | struct rb_node *n = proc->refs_by_desc.rb_node; | 999 | struct rb_node *n = proc->refs_by_desc.rb_node; |
1000 | struct binder_ref *ref; | 1000 | struct binder_ref *ref; |
1001 | 1001 | ||
1002 | while (n) { | 1002 | while (n) { |
1003 | ref = rb_entry(n, struct binder_ref, rb_node_desc); | 1003 | ref = rb_entry(n, struct binder_ref, rb_node_desc); |
1004 | 1004 | ||
1005 | if (desc < ref->desc) | 1005 | if (desc < ref->desc) |
1006 | n = n->rb_left; | 1006 | n = n->rb_left; |
1007 | else if (desc > ref->desc) | 1007 | else if (desc > ref->desc) |
1008 | n = n->rb_right; | 1008 | n = n->rb_right; |
1009 | else | 1009 | else |
1010 | return ref; | 1010 | return ref; |
1011 | } | 1011 | } |
1012 | return NULL; | 1012 | return NULL; |
1013 | } | 1013 | } |
1014 | 1014 | ||
/*
 * binder_get_ref_for_node() - find or create @proc's reference to @node.
 *
 * Looks @node up in @proc's refs_by_node tree; if no ref exists yet a
 * new one is allocated, assigned the lowest free descriptor (0 is
 * reserved for the context manager node) and inserted into both the
 * by-node and by-desc trees.  Returns the ref, or NULL on allocation
 * failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	/* fast path: return an existing ref for this node */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* pick the smallest unused descriptor: desc 0 is reserved for
	 * the context manager, every other ref starts the scan at 1 */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;	/* found a gap in the descriptor space */
		new_ref->desc = ref->desc + 1;
	}

	/* insert into the by-descriptor tree; the desc chosen above is
	 * unique by construction, so an equal key is a bug */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			     proc->pid, new_ref->debug_id, new_ref->desc,
			     node->debug_id);
	} else {
		/* node may be NULL when referencing a dead binder */
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			     proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
1080 | 1080 | ||
/*
 * binder_delete_ref() - tear down @ref and drop its counts on the node.
 *
 * Removes @ref from both of its process' rb-trees and from the node's
 * ref list, releases the strong ref it may still hold, cancels any
 * pending death notification, and frees the ref.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	/* every ref implicitly holds one weak ref on its node */
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
1105 | 1105 | ||
1106 | static int binder_inc_ref(struct binder_ref *ref, int strong, | 1106 | static int binder_inc_ref(struct binder_ref *ref, int strong, |
1107 | struct list_head *target_list) | 1107 | struct list_head *target_list) |
1108 | { | 1108 | { |
1109 | int ret; | 1109 | int ret; |
1110 | if (strong) { | 1110 | if (strong) { |
1111 | if (ref->strong == 0) { | 1111 | if (ref->strong == 0) { |
1112 | ret = binder_inc_node(ref->node, 1, 1, target_list); | 1112 | ret = binder_inc_node(ref->node, 1, 1, target_list); |
1113 | if (ret) | 1113 | if (ret) |
1114 | return ret; | 1114 | return ret; |
1115 | } | 1115 | } |
1116 | ref->strong++; | 1116 | ref->strong++; |
1117 | } else { | 1117 | } else { |
1118 | if (ref->weak == 0) { | 1118 | if (ref->weak == 0) { |
1119 | ret = binder_inc_node(ref->node, 0, 1, target_list); | 1119 | ret = binder_inc_node(ref->node, 0, 1, target_list); |
1120 | if (ret) | 1120 | if (ret) |
1121 | return ret; | 1121 | return ret; |
1122 | } | 1122 | } |
1123 | ref->weak++; | 1123 | ref->weak++; |
1124 | } | 1124 | } |
1125 | return 0; | 1125 | return 0; |
1126 | } | 1126 | } |
1127 | 1127 | ||
1128 | 1128 | ||
1129 | static int binder_dec_ref(struct binder_ref *ref, int strong) | 1129 | static int binder_dec_ref(struct binder_ref *ref, int strong) |
1130 | { | 1130 | { |
1131 | if (strong) { | 1131 | if (strong) { |
1132 | if (ref->strong == 0) { | 1132 | if (ref->strong == 0) { |
1133 | binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", | 1133 | binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", |
1134 | ref->proc->pid, ref->debug_id, | 1134 | ref->proc->pid, ref->debug_id, |
1135 | ref->desc, ref->strong, ref->weak); | 1135 | ref->desc, ref->strong, ref->weak); |
1136 | return -EINVAL; | 1136 | return -EINVAL; |
1137 | } | 1137 | } |
1138 | ref->strong--; | 1138 | ref->strong--; |
1139 | if (ref->strong == 0) { | 1139 | if (ref->strong == 0) { |
1140 | int ret; | 1140 | int ret; |
1141 | ret = binder_dec_node(ref->node, strong, 1); | 1141 | ret = binder_dec_node(ref->node, strong, 1); |
1142 | if (ret) | 1142 | if (ret) |
1143 | return ret; | 1143 | return ret; |
1144 | } | 1144 | } |
1145 | } else { | 1145 | } else { |
1146 | if (ref->weak == 0) { | 1146 | if (ref->weak == 0) { |
1147 | binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", | 1147 | binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", |
1148 | ref->proc->pid, ref->debug_id, | 1148 | ref->proc->pid, ref->debug_id, |
1149 | ref->desc, ref->strong, ref->weak); | 1149 | ref->desc, ref->strong, ref->weak); |
1150 | return -EINVAL; | 1150 | return -EINVAL; |
1151 | } | 1151 | } |
1152 | ref->weak--; | 1152 | ref->weak--; |
1153 | } | 1153 | } |
1154 | if (ref->strong == 0 && ref->weak == 0) | 1154 | if (ref->strong == 0 && ref->weak == 0) |
1155 | binder_delete_ref(ref); | 1155 | binder_delete_ref(ref); |
1156 | return 0; | 1156 | return 0; |
1157 | } | 1157 | } |
1158 | 1158 | ||
/*
 * binder_pop_transaction() - unlink @t from @target_thread's stack and
 * free it.
 * @target_thread: thread with @t on top of its transaction stack, or
 *                 NULL if the originating thread is already gone.
 *
 * The buffer attached to @t (if any) is only detached here, not
 * released; its owner frees it separately.
 */
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		/* @t must be the stack top and originate from this thread */
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;	/* detach, don't free, the buffer */
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
1175 | 1175 | ||
/*
 * binder_send_failed_reply() - deliver @error_code in place of a reply
 * for the synchronous transaction @t.
 *
 * Walks up the transaction stack: the first sender thread that is
 * still alive receives @error_code in its return_error slot and is
 * woken; dead links are popped and freed along the way.  Must only be
 * called for two-way transactions (BUG on TF_ONE_WAY).
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			   target_thread->return_error2 == BR_OK) {
				/* primary slot busy: park the pending error
				 * in the secondary slot so ours fits */
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					     t->debug_id, target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				/* both error slots occupied; report is dropped */
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			/* sender thread is gone: free this link and retry
			 * with the next transaction up the stack */
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "reply failed, no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread -- retry %d\n",
				     t->debug_id);
		}
	}
}
1226 | 1226 | ||
1227 | static void binder_transaction_buffer_release(struct binder_proc *proc, | 1227 | static void binder_transaction_buffer_release(struct binder_proc *proc, |
1228 | struct binder_buffer *buffer, | 1228 | struct binder_buffer *buffer, |
1229 | size_t *failed_at) | 1229 | size_t *failed_at) |
1230 | { | 1230 | { |
1231 | size_t *offp, *off_end; | 1231 | size_t *offp, *off_end; |
1232 | int debug_id = buffer->debug_id; | 1232 | int debug_id = buffer->debug_id; |
1233 | 1233 | ||
1234 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1234 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1235 | "%d buffer release %d, size %zd-%zd, failed at %p\n", | 1235 | "%d buffer release %d, size %zd-%zd, failed at %p\n", |
1236 | proc->pid, buffer->debug_id, | 1236 | proc->pid, buffer->debug_id, |
1237 | buffer->data_size, buffer->offsets_size, failed_at); | 1237 | buffer->data_size, buffer->offsets_size, failed_at); |
1238 | 1238 | ||
1239 | if (buffer->target_node) | 1239 | if (buffer->target_node) |
1240 | binder_dec_node(buffer->target_node, 1, 0); | 1240 | binder_dec_node(buffer->target_node, 1, 0); |
1241 | 1241 | ||
1242 | offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); | 1242 | offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); |
1243 | if (failed_at) | 1243 | if (failed_at) |
1244 | off_end = failed_at; | 1244 | off_end = failed_at; |
1245 | else | 1245 | else |
1246 | off_end = (void *)offp + buffer->offsets_size; | 1246 | off_end = (void *)offp + buffer->offsets_size; |
1247 | for (; offp < off_end; offp++) { | 1247 | for (; offp < off_end; offp++) { |
1248 | struct flat_binder_object *fp; | 1248 | struct flat_binder_object *fp; |
1249 | if (*offp > buffer->data_size - sizeof(*fp) || | 1249 | if (*offp > buffer->data_size - sizeof(*fp) || |
1250 | buffer->data_size < sizeof(*fp) || | 1250 | buffer->data_size < sizeof(*fp) || |
1251 | !IS_ALIGNED(*offp, sizeof(u32))) { | 1251 | !IS_ALIGNED(*offp, sizeof(u32))) { |
1252 | pr_err("transaction release %d bad offset %zd, size %zd\n", | 1252 | pr_err("transaction release %d bad offset %zd, size %zd\n", |
1253 | debug_id, *offp, buffer->data_size); | 1253 | debug_id, *offp, buffer->data_size); |
1254 | continue; | 1254 | continue; |
1255 | } | 1255 | } |
1256 | fp = (struct flat_binder_object *)(buffer->data + *offp); | 1256 | fp = (struct flat_binder_object *)(buffer->data + *offp); |
1257 | switch (fp->type) { | 1257 | switch (fp->type) { |
1258 | case BINDER_TYPE_BINDER: | 1258 | case BINDER_TYPE_BINDER: |
1259 | case BINDER_TYPE_WEAK_BINDER: { | 1259 | case BINDER_TYPE_WEAK_BINDER: { |
1260 | struct binder_node *node = binder_get_node(proc, fp->binder); | 1260 | struct binder_node *node = binder_get_node(proc, fp->binder); |
1261 | if (node == NULL) { | 1261 | if (node == NULL) { |
1262 | pr_err("transaction release %d bad node %p\n", | 1262 | pr_err("transaction release %d bad node %p\n", |
1263 | debug_id, fp->binder); | 1263 | debug_id, fp->binder); |
1264 | break; | 1264 | break; |
1265 | } | 1265 | } |
1266 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1266 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1267 | " node %d u%p\n", | 1267 | " node %d u%p\n", |
1268 | node->debug_id, node->ptr); | 1268 | node->debug_id, node->ptr); |
1269 | binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); | 1269 | binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); |
1270 | } break; | 1270 | } break; |
1271 | case BINDER_TYPE_HANDLE: | 1271 | case BINDER_TYPE_HANDLE: |
1272 | case BINDER_TYPE_WEAK_HANDLE: { | 1272 | case BINDER_TYPE_WEAK_HANDLE: { |
1273 | struct binder_ref *ref = binder_get_ref(proc, fp->handle); | 1273 | struct binder_ref *ref = binder_get_ref(proc, fp->handle); |
1274 | if (ref == NULL) { | 1274 | if (ref == NULL) { |
1275 | pr_err("transaction release %d bad handle %ld\n", | 1275 | pr_err("transaction release %d bad handle %d\n", |
1276 | debug_id, fp->handle); | 1276 | debug_id, fp->handle); |
1277 | break; | 1277 | break; |
1278 | } | 1278 | } |
1279 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1279 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1280 | " ref %d desc %d (node %d)\n", | 1280 | " ref %d desc %d (node %d)\n", |
1281 | ref->debug_id, ref->desc, ref->node->debug_id); | 1281 | ref->debug_id, ref->desc, ref->node->debug_id); |
1282 | binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); | 1282 | binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); |
1283 | } break; | 1283 | } break; |
1284 | 1284 | ||
1285 | case BINDER_TYPE_FD: | 1285 | case BINDER_TYPE_FD: |
1286 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1286 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1287 | " fd %ld\n", fp->handle); | 1287 | " fd %d\n", fp->handle); |
1288 | if (failed_at) | 1288 | if (failed_at) |
1289 | task_close_fd(proc, fp->handle); | 1289 | task_close_fd(proc, fp->handle); |
1290 | break; | 1290 | break; |
1291 | 1291 | ||
1292 | default: | 1292 | default: |
1293 | pr_err("transaction release %d bad object type %lx\n", | 1293 | pr_err("transaction release %d bad object type %x\n", |
1294 | debug_id, fp->type); | 1294 | debug_id, fp->type); |
1295 | break; | 1295 | break; |
1296 | } | 1296 | } |
1297 | } | 1297 | } |
1298 | } | 1298 | } |
1299 | 1299 | ||
1300 | static void binder_transaction(struct binder_proc *proc, | 1300 | static void binder_transaction(struct binder_proc *proc, |
1301 | struct binder_thread *thread, | 1301 | struct binder_thread *thread, |
1302 | struct binder_transaction_data *tr, int reply) | 1302 | struct binder_transaction_data *tr, int reply) |
1303 | { | 1303 | { |
1304 | struct binder_transaction *t; | 1304 | struct binder_transaction *t; |
1305 | struct binder_work *tcomplete; | 1305 | struct binder_work *tcomplete; |
1306 | size_t *offp, *off_end; | 1306 | size_t *offp, *off_end; |
1307 | struct binder_proc *target_proc; | 1307 | struct binder_proc *target_proc; |
1308 | struct binder_thread *target_thread = NULL; | 1308 | struct binder_thread *target_thread = NULL; |
1309 | struct binder_node *target_node = NULL; | 1309 | struct binder_node *target_node = NULL; |
1310 | struct list_head *target_list; | 1310 | struct list_head *target_list; |
1311 | wait_queue_head_t *target_wait; | 1311 | wait_queue_head_t *target_wait; |
1312 | struct binder_transaction *in_reply_to = NULL; | 1312 | struct binder_transaction *in_reply_to = NULL; |
1313 | struct binder_transaction_log_entry *e; | 1313 | struct binder_transaction_log_entry *e; |
1314 | uint32_t return_error; | 1314 | uint32_t return_error; |
1315 | 1315 | ||
1316 | e = binder_transaction_log_add(&binder_transaction_log); | 1316 | e = binder_transaction_log_add(&binder_transaction_log); |
1317 | e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); | 1317 | e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); |
1318 | e->from_proc = proc->pid; | 1318 | e->from_proc = proc->pid; |
1319 | e->from_thread = thread->pid; | 1319 | e->from_thread = thread->pid; |
1320 | e->target_handle = tr->target.handle; | 1320 | e->target_handle = tr->target.handle; |
1321 | e->data_size = tr->data_size; | 1321 | e->data_size = tr->data_size; |
1322 | e->offsets_size = tr->offsets_size; | 1322 | e->offsets_size = tr->offsets_size; |
1323 | 1323 | ||
1324 | if (reply) { | 1324 | if (reply) { |
1325 | in_reply_to = thread->transaction_stack; | 1325 | in_reply_to = thread->transaction_stack; |
1326 | if (in_reply_to == NULL) { | 1326 | if (in_reply_to == NULL) { |
1327 | binder_user_error("%d:%d got reply transaction with no transaction stack\n", | 1327 | binder_user_error("%d:%d got reply transaction with no transaction stack\n", |
1328 | proc->pid, thread->pid); | 1328 | proc->pid, thread->pid); |
1329 | return_error = BR_FAILED_REPLY; | 1329 | return_error = BR_FAILED_REPLY; |
1330 | goto err_empty_call_stack; | 1330 | goto err_empty_call_stack; |
1331 | } | 1331 | } |
1332 | binder_set_nice(in_reply_to->saved_priority); | 1332 | binder_set_nice(in_reply_to->saved_priority); |
1333 | if (in_reply_to->to_thread != thread) { | 1333 | if (in_reply_to->to_thread != thread) { |
1334 | binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", | 1334 | binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", |
1335 | proc->pid, thread->pid, in_reply_to->debug_id, | 1335 | proc->pid, thread->pid, in_reply_to->debug_id, |
1336 | in_reply_to->to_proc ? | 1336 | in_reply_to->to_proc ? |
1337 | in_reply_to->to_proc->pid : 0, | 1337 | in_reply_to->to_proc->pid : 0, |
1338 | in_reply_to->to_thread ? | 1338 | in_reply_to->to_thread ? |
1339 | in_reply_to->to_thread->pid : 0); | 1339 | in_reply_to->to_thread->pid : 0); |
1340 | return_error = BR_FAILED_REPLY; | 1340 | return_error = BR_FAILED_REPLY; |
1341 | in_reply_to = NULL; | 1341 | in_reply_to = NULL; |
1342 | goto err_bad_call_stack; | 1342 | goto err_bad_call_stack; |
1343 | } | 1343 | } |
1344 | thread->transaction_stack = in_reply_to->to_parent; | 1344 | thread->transaction_stack = in_reply_to->to_parent; |
1345 | target_thread = in_reply_to->from; | 1345 | target_thread = in_reply_to->from; |
1346 | if (target_thread == NULL) { | 1346 | if (target_thread == NULL) { |
1347 | return_error = BR_DEAD_REPLY; | 1347 | return_error = BR_DEAD_REPLY; |
1348 | goto err_dead_binder; | 1348 | goto err_dead_binder; |
1349 | } | 1349 | } |
1350 | if (target_thread->transaction_stack != in_reply_to) { | 1350 | if (target_thread->transaction_stack != in_reply_to) { |
1351 | binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", | 1351 | binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", |
1352 | proc->pid, thread->pid, | 1352 | proc->pid, thread->pid, |
1353 | target_thread->transaction_stack ? | 1353 | target_thread->transaction_stack ? |
1354 | target_thread->transaction_stack->debug_id : 0, | 1354 | target_thread->transaction_stack->debug_id : 0, |
1355 | in_reply_to->debug_id); | 1355 | in_reply_to->debug_id); |
1356 | return_error = BR_FAILED_REPLY; | 1356 | return_error = BR_FAILED_REPLY; |
1357 | in_reply_to = NULL; | 1357 | in_reply_to = NULL; |
1358 | target_thread = NULL; | 1358 | target_thread = NULL; |
1359 | goto err_dead_binder; | 1359 | goto err_dead_binder; |
1360 | } | 1360 | } |
1361 | target_proc = target_thread->proc; | 1361 | target_proc = target_thread->proc; |
1362 | } else { | 1362 | } else { |
1363 | if (tr->target.handle) { | 1363 | if (tr->target.handle) { |
1364 | struct binder_ref *ref; | 1364 | struct binder_ref *ref; |
1365 | ref = binder_get_ref(proc, tr->target.handle); | 1365 | ref = binder_get_ref(proc, tr->target.handle); |
1366 | if (ref == NULL) { | 1366 | if (ref == NULL) { |
1367 | binder_user_error("%d:%d got transaction to invalid handle\n", | 1367 | binder_user_error("%d:%d got transaction to invalid handle\n", |
1368 | proc->pid, thread->pid); | 1368 | proc->pid, thread->pid); |
1369 | return_error = BR_FAILED_REPLY; | 1369 | return_error = BR_FAILED_REPLY; |
1370 | goto err_invalid_target_handle; | 1370 | goto err_invalid_target_handle; |
1371 | } | 1371 | } |
1372 | target_node = ref->node; | 1372 | target_node = ref->node; |
1373 | } else { | 1373 | } else { |
1374 | target_node = binder_context_mgr_node; | 1374 | target_node = binder_context_mgr_node; |
1375 | if (target_node == NULL) { | 1375 | if (target_node == NULL) { |
1376 | return_error = BR_DEAD_REPLY; | 1376 | return_error = BR_DEAD_REPLY; |
1377 | goto err_no_context_mgr_node; | 1377 | goto err_no_context_mgr_node; |
1378 | } | 1378 | } |
1379 | } | 1379 | } |
1380 | e->to_node = target_node->debug_id; | 1380 | e->to_node = target_node->debug_id; |
1381 | target_proc = target_node->proc; | 1381 | target_proc = target_node->proc; |
1382 | if (target_proc == NULL) { | 1382 | if (target_proc == NULL) { |
1383 | return_error = BR_DEAD_REPLY; | 1383 | return_error = BR_DEAD_REPLY; |
1384 | goto err_dead_binder; | 1384 | goto err_dead_binder; |
1385 | } | 1385 | } |
1386 | if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { | 1386 | if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { |
1387 | struct binder_transaction *tmp; | 1387 | struct binder_transaction *tmp; |
1388 | tmp = thread->transaction_stack; | 1388 | tmp = thread->transaction_stack; |
1389 | if (tmp->to_thread != thread) { | 1389 | if (tmp->to_thread != thread) { |
1390 | binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", | 1390 | binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", |
1391 | proc->pid, thread->pid, tmp->debug_id, | 1391 | proc->pid, thread->pid, tmp->debug_id, |
1392 | tmp->to_proc ? tmp->to_proc->pid : 0, | 1392 | tmp->to_proc ? tmp->to_proc->pid : 0, |
1393 | tmp->to_thread ? | 1393 | tmp->to_thread ? |
1394 | tmp->to_thread->pid : 0); | 1394 | tmp->to_thread->pid : 0); |
1395 | return_error = BR_FAILED_REPLY; | 1395 | return_error = BR_FAILED_REPLY; |
1396 | goto err_bad_call_stack; | 1396 | goto err_bad_call_stack; |
1397 | } | 1397 | } |
1398 | while (tmp) { | 1398 | while (tmp) { |
1399 | if (tmp->from && tmp->from->proc == target_proc) | 1399 | if (tmp->from && tmp->from->proc == target_proc) |
1400 | target_thread = tmp->from; | 1400 | target_thread = tmp->from; |
1401 | tmp = tmp->from_parent; | 1401 | tmp = tmp->from_parent; |
1402 | } | 1402 | } |
1403 | } | 1403 | } |
1404 | } | 1404 | } |
1405 | if (target_thread) { | 1405 | if (target_thread) { |
1406 | e->to_thread = target_thread->pid; | 1406 | e->to_thread = target_thread->pid; |
1407 | target_list = &target_thread->todo; | 1407 | target_list = &target_thread->todo; |
1408 | target_wait = &target_thread->wait; | 1408 | target_wait = &target_thread->wait; |
1409 | } else { | 1409 | } else { |
1410 | target_list = &target_proc->todo; | 1410 | target_list = &target_proc->todo; |
1411 | target_wait = &target_proc->wait; | 1411 | target_wait = &target_proc->wait; |
1412 | } | 1412 | } |
1413 | e->to_proc = target_proc->pid; | 1413 | e->to_proc = target_proc->pid; |
1414 | 1414 | ||
1415 | /* TODO: reuse incoming transaction for reply */ | 1415 | /* TODO: reuse incoming transaction for reply */ |
1416 | t = kzalloc(sizeof(*t), GFP_KERNEL); | 1416 | t = kzalloc(sizeof(*t), GFP_KERNEL); |
1417 | if (t == NULL) { | 1417 | if (t == NULL) { |
1418 | return_error = BR_FAILED_REPLY; | 1418 | return_error = BR_FAILED_REPLY; |
1419 | goto err_alloc_t_failed; | 1419 | goto err_alloc_t_failed; |
1420 | } | 1420 | } |
1421 | binder_stats_created(BINDER_STAT_TRANSACTION); | 1421 | binder_stats_created(BINDER_STAT_TRANSACTION); |
1422 | 1422 | ||
1423 | tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); | 1423 | tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); |
1424 | if (tcomplete == NULL) { | 1424 | if (tcomplete == NULL) { |
1425 | return_error = BR_FAILED_REPLY; | 1425 | return_error = BR_FAILED_REPLY; |
1426 | goto err_alloc_tcomplete_failed; | 1426 | goto err_alloc_tcomplete_failed; |
1427 | } | 1427 | } |
1428 | binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); | 1428 | binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); |
1429 | 1429 | ||
1430 | t->debug_id = ++binder_last_id; | 1430 | t->debug_id = ++binder_last_id; |
1431 | e->debug_id = t->debug_id; | 1431 | e->debug_id = t->debug_id; |
1432 | 1432 | ||
1433 | if (reply) | 1433 | if (reply) |
1434 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1434 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1435 | "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n", | 1435 | "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n", |
1436 | proc->pid, thread->pid, t->debug_id, | 1436 | proc->pid, thread->pid, t->debug_id, |
1437 | target_proc->pid, target_thread->pid, | 1437 | target_proc->pid, target_thread->pid, |
1438 | tr->data.ptr.buffer, tr->data.ptr.offsets, | 1438 | tr->data.ptr.buffer, tr->data.ptr.offsets, |
1439 | tr->data_size, tr->offsets_size); | 1439 | tr->data_size, tr->offsets_size); |
1440 | else | 1440 | else |
1441 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1441 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1442 | "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n", | 1442 | "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n", |
1443 | proc->pid, thread->pid, t->debug_id, | 1443 | proc->pid, thread->pid, t->debug_id, |
1444 | target_proc->pid, target_node->debug_id, | 1444 | target_proc->pid, target_node->debug_id, |
1445 | tr->data.ptr.buffer, tr->data.ptr.offsets, | 1445 | tr->data.ptr.buffer, tr->data.ptr.offsets, |
1446 | tr->data_size, tr->offsets_size); | 1446 | tr->data_size, tr->offsets_size); |
1447 | 1447 | ||
1448 | if (!reply && !(tr->flags & TF_ONE_WAY)) | 1448 | if (!reply && !(tr->flags & TF_ONE_WAY)) |
1449 | t->from = thread; | 1449 | t->from = thread; |
1450 | else | 1450 | else |
1451 | t->from = NULL; | 1451 | t->from = NULL; |
1452 | t->sender_euid = proc->tsk->cred->euid; | 1452 | t->sender_euid = proc->tsk->cred->euid; |
1453 | t->to_proc = target_proc; | 1453 | t->to_proc = target_proc; |
1454 | t->to_thread = target_thread; | 1454 | t->to_thread = target_thread; |
1455 | t->code = tr->code; | 1455 | t->code = tr->code; |
1456 | t->flags = tr->flags; | 1456 | t->flags = tr->flags; |
1457 | t->priority = task_nice(current); | 1457 | t->priority = task_nice(current); |
1458 | 1458 | ||
1459 | trace_binder_transaction(reply, t, target_node); | 1459 | trace_binder_transaction(reply, t, target_node); |
1460 | 1460 | ||
1461 | t->buffer = binder_alloc_buf(target_proc, tr->data_size, | 1461 | t->buffer = binder_alloc_buf(target_proc, tr->data_size, |
1462 | tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); | 1462 | tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); |
1463 | if (t->buffer == NULL) { | 1463 | if (t->buffer == NULL) { |
1464 | return_error = BR_FAILED_REPLY; | 1464 | return_error = BR_FAILED_REPLY; |
1465 | goto err_binder_alloc_buf_failed; | 1465 | goto err_binder_alloc_buf_failed; |
1466 | } | 1466 | } |
1467 | t->buffer->allow_user_free = 0; | 1467 | t->buffer->allow_user_free = 0; |
1468 | t->buffer->debug_id = t->debug_id; | 1468 | t->buffer->debug_id = t->debug_id; |
1469 | t->buffer->transaction = t; | 1469 | t->buffer->transaction = t; |
1470 | t->buffer->target_node = target_node; | 1470 | t->buffer->target_node = target_node; |
1471 | trace_binder_transaction_alloc_buf(t->buffer); | 1471 | trace_binder_transaction_alloc_buf(t->buffer); |
1472 | if (target_node) | 1472 | if (target_node) |
1473 | binder_inc_node(target_node, 1, 0, NULL); | 1473 | binder_inc_node(target_node, 1, 0, NULL); |
1474 | 1474 | ||
1475 | offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); | 1475 | offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); |
1476 | 1476 | ||
1477 | if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { | 1477 | if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { |
1478 | binder_user_error("%d:%d got transaction with invalid data ptr\n", | 1478 | binder_user_error("%d:%d got transaction with invalid data ptr\n", |
1479 | proc->pid, thread->pid); | 1479 | proc->pid, thread->pid); |
1480 | return_error = BR_FAILED_REPLY; | 1480 | return_error = BR_FAILED_REPLY; |
1481 | goto err_copy_data_failed; | 1481 | goto err_copy_data_failed; |
1482 | } | 1482 | } |
1483 | if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { | 1483 | if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { |
1484 | binder_user_error("%d:%d got transaction with invalid offsets ptr\n", | 1484 | binder_user_error("%d:%d got transaction with invalid offsets ptr\n", |
1485 | proc->pid, thread->pid); | 1485 | proc->pid, thread->pid); |
1486 | return_error = BR_FAILED_REPLY; | 1486 | return_error = BR_FAILED_REPLY; |
1487 | goto err_copy_data_failed; | 1487 | goto err_copy_data_failed; |
1488 | } | 1488 | } |
1489 | if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { | 1489 | if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { |
1490 | binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n", | 1490 | binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n", |
1491 | proc->pid, thread->pid, tr->offsets_size); | 1491 | proc->pid, thread->pid, tr->offsets_size); |
1492 | return_error = BR_FAILED_REPLY; | 1492 | return_error = BR_FAILED_REPLY; |
1493 | goto err_bad_offset; | 1493 | goto err_bad_offset; |
1494 | } | 1494 | } |
1495 | off_end = (void *)offp + tr->offsets_size; | 1495 | off_end = (void *)offp + tr->offsets_size; |
1496 | for (; offp < off_end; offp++) { | 1496 | for (; offp < off_end; offp++) { |
1497 | struct flat_binder_object *fp; | 1497 | struct flat_binder_object *fp; |
1498 | if (*offp > t->buffer->data_size - sizeof(*fp) || | 1498 | if (*offp > t->buffer->data_size - sizeof(*fp) || |
1499 | t->buffer->data_size < sizeof(*fp) || | 1499 | t->buffer->data_size < sizeof(*fp) || |
1500 | !IS_ALIGNED(*offp, sizeof(u32))) { | 1500 | !IS_ALIGNED(*offp, sizeof(u32))) { |
1501 | binder_user_error("%d:%d got transaction with invalid offset, %zd\n", | 1501 | binder_user_error("%d:%d got transaction with invalid offset, %zd\n", |
1502 | proc->pid, thread->pid, *offp); | 1502 | proc->pid, thread->pid, *offp); |
1503 | return_error = BR_FAILED_REPLY; | 1503 | return_error = BR_FAILED_REPLY; |
1504 | goto err_bad_offset; | 1504 | goto err_bad_offset; |
1505 | } | 1505 | } |
1506 | fp = (struct flat_binder_object *)(t->buffer->data + *offp); | 1506 | fp = (struct flat_binder_object *)(t->buffer->data + *offp); |
1507 | switch (fp->type) { | 1507 | switch (fp->type) { |
1508 | case BINDER_TYPE_BINDER: | 1508 | case BINDER_TYPE_BINDER: |
1509 | case BINDER_TYPE_WEAK_BINDER: { | 1509 | case BINDER_TYPE_WEAK_BINDER: { |
1510 | struct binder_ref *ref; | 1510 | struct binder_ref *ref; |
1511 | struct binder_node *node = binder_get_node(proc, fp->binder); | 1511 | struct binder_node *node = binder_get_node(proc, fp->binder); |
1512 | if (node == NULL) { | 1512 | if (node == NULL) { |
1513 | node = binder_new_node(proc, fp->binder, fp->cookie); | 1513 | node = binder_new_node(proc, fp->binder, fp->cookie); |
1514 | if (node == NULL) { | 1514 | if (node == NULL) { |
1515 | return_error = BR_FAILED_REPLY; | 1515 | return_error = BR_FAILED_REPLY; |
1516 | goto err_binder_new_node_failed; | 1516 | goto err_binder_new_node_failed; |
1517 | } | 1517 | } |
1518 | node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; | 1518 | node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; |
1519 | node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); | 1519 | node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); |
1520 | } | 1520 | } |
1521 | if (fp->cookie != node->cookie) { | 1521 | if (fp->cookie != node->cookie) { |
1522 | binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n", | 1522 | binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n", |
1523 | proc->pid, thread->pid, | 1523 | proc->pid, thread->pid, |
1524 | fp->binder, node->debug_id, | 1524 | fp->binder, node->debug_id, |
1525 | fp->cookie, node->cookie); | 1525 | fp->cookie, node->cookie); |
1526 | goto err_binder_get_ref_for_node_failed; | 1526 | goto err_binder_get_ref_for_node_failed; |
1527 | } | 1527 | } |
1528 | ref = binder_get_ref_for_node(target_proc, node); | 1528 | ref = binder_get_ref_for_node(target_proc, node); |
1529 | if (ref == NULL) { | 1529 | if (ref == NULL) { |
1530 | return_error = BR_FAILED_REPLY; | 1530 | return_error = BR_FAILED_REPLY; |
1531 | goto err_binder_get_ref_for_node_failed; | 1531 | goto err_binder_get_ref_for_node_failed; |
1532 | } | 1532 | } |
1533 | if (fp->type == BINDER_TYPE_BINDER) | 1533 | if (fp->type == BINDER_TYPE_BINDER) |
1534 | fp->type = BINDER_TYPE_HANDLE; | 1534 | fp->type = BINDER_TYPE_HANDLE; |
1535 | else | 1535 | else |
1536 | fp->type = BINDER_TYPE_WEAK_HANDLE; | 1536 | fp->type = BINDER_TYPE_WEAK_HANDLE; |
1537 | fp->handle = ref->desc; | 1537 | fp->handle = ref->desc; |
1538 | binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, | 1538 | binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, |
1539 | &thread->todo); | 1539 | &thread->todo); |
1540 | 1540 | ||
1541 | trace_binder_transaction_node_to_ref(t, node, ref); | 1541 | trace_binder_transaction_node_to_ref(t, node, ref); |
1542 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1542 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1543 | " node %d u%p -> ref %d desc %d\n", | 1543 | " node %d u%p -> ref %d desc %d\n", |
1544 | node->debug_id, node->ptr, ref->debug_id, | 1544 | node->debug_id, node->ptr, ref->debug_id, |
1545 | ref->desc); | 1545 | ref->desc); |
1546 | } break; | 1546 | } break; |
1547 | case BINDER_TYPE_HANDLE: | 1547 | case BINDER_TYPE_HANDLE: |
1548 | case BINDER_TYPE_WEAK_HANDLE: { | 1548 | case BINDER_TYPE_WEAK_HANDLE: { |
1549 | struct binder_ref *ref = binder_get_ref(proc, fp->handle); | 1549 | struct binder_ref *ref = binder_get_ref(proc, fp->handle); |
1550 | if (ref == NULL) { | 1550 | if (ref == NULL) { |
1551 | binder_user_error("%d:%d got transaction with invalid handle, %ld\n", | 1551 | binder_user_error("%d:%d got transaction with invalid handle, %d\n", |
1552 | proc->pid, | 1552 | proc->pid, |
1553 | thread->pid, fp->handle); | 1553 | thread->pid, fp->handle); |
1554 | return_error = BR_FAILED_REPLY; | 1554 | return_error = BR_FAILED_REPLY; |
1555 | goto err_binder_get_ref_failed; | 1555 | goto err_binder_get_ref_failed; |
1556 | } | 1556 | } |
1557 | if (ref->node->proc == target_proc) { | 1557 | if (ref->node->proc == target_proc) { |
1558 | if (fp->type == BINDER_TYPE_HANDLE) | 1558 | if (fp->type == BINDER_TYPE_HANDLE) |
1559 | fp->type = BINDER_TYPE_BINDER; | 1559 | fp->type = BINDER_TYPE_BINDER; |
1560 | else | 1560 | else |
1561 | fp->type = BINDER_TYPE_WEAK_BINDER; | 1561 | fp->type = BINDER_TYPE_WEAK_BINDER; |
1562 | fp->binder = ref->node->ptr; | 1562 | fp->binder = ref->node->ptr; |
1563 | fp->cookie = ref->node->cookie; | 1563 | fp->cookie = ref->node->cookie; |
1564 | binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); | 1564 | binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); |
1565 | trace_binder_transaction_ref_to_node(t, ref); | 1565 | trace_binder_transaction_ref_to_node(t, ref); |
1566 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1566 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1567 | " ref %d desc %d -> node %d u%p\n", | 1567 | " ref %d desc %d -> node %d u%p\n", |
1568 | ref->debug_id, ref->desc, ref->node->debug_id, | 1568 | ref->debug_id, ref->desc, ref->node->debug_id, |
1569 | ref->node->ptr); | 1569 | ref->node->ptr); |
1570 | } else { | 1570 | } else { |
1571 | struct binder_ref *new_ref; | 1571 | struct binder_ref *new_ref; |
1572 | new_ref = binder_get_ref_for_node(target_proc, ref->node); | 1572 | new_ref = binder_get_ref_for_node(target_proc, ref->node); |
1573 | if (new_ref == NULL) { | 1573 | if (new_ref == NULL) { |
1574 | return_error = BR_FAILED_REPLY; | 1574 | return_error = BR_FAILED_REPLY; |
1575 | goto err_binder_get_ref_for_node_failed; | 1575 | goto err_binder_get_ref_for_node_failed; |
1576 | } | 1576 | } |
1577 | fp->handle = new_ref->desc; | 1577 | fp->handle = new_ref->desc; |
1578 | binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); | 1578 | binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); |
1579 | trace_binder_transaction_ref_to_ref(t, ref, | 1579 | trace_binder_transaction_ref_to_ref(t, ref, |
1580 | new_ref); | 1580 | new_ref); |
1581 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1581 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1582 | " ref %d desc %d -> ref %d desc %d (node %d)\n", | 1582 | " ref %d desc %d -> ref %d desc %d (node %d)\n", |
1583 | ref->debug_id, ref->desc, new_ref->debug_id, | 1583 | ref->debug_id, ref->desc, new_ref->debug_id, |
1584 | new_ref->desc, ref->node->debug_id); | 1584 | new_ref->desc, ref->node->debug_id); |
1585 | } | 1585 | } |
1586 | } break; | 1586 | } break; |
1587 | 1587 | ||
1588 | case BINDER_TYPE_FD: { | 1588 | case BINDER_TYPE_FD: { |
1589 | int target_fd; | 1589 | int target_fd; |
1590 | struct file *file; | 1590 | struct file *file; |
1591 | 1591 | ||
1592 | if (reply) { | 1592 | if (reply) { |
1593 | if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { | 1593 | if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { |
1594 | binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n", | 1594 | binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n", |
1595 | proc->pid, thread->pid, fp->handle); | 1595 | proc->pid, thread->pid, fp->handle); |
1596 | return_error = BR_FAILED_REPLY; | 1596 | return_error = BR_FAILED_REPLY; |
1597 | goto err_fd_not_allowed; | 1597 | goto err_fd_not_allowed; |
1598 | } | 1598 | } |
1599 | } else if (!target_node->accept_fds) { | 1599 | } else if (!target_node->accept_fds) { |
1600 | binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n", | 1600 | binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n", |
1601 | proc->pid, thread->pid, fp->handle); | 1601 | proc->pid, thread->pid, fp->handle); |
1602 | return_error = BR_FAILED_REPLY; | 1602 | return_error = BR_FAILED_REPLY; |
1603 | goto err_fd_not_allowed; | 1603 | goto err_fd_not_allowed; |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | file = fget(fp->handle); | 1606 | file = fget(fp->handle); |
1607 | if (file == NULL) { | 1607 | if (file == NULL) { |
1608 | binder_user_error("%d:%d got transaction with invalid fd, %ld\n", | 1608 | binder_user_error("%d:%d got transaction with invalid fd, %d\n", |
1609 | proc->pid, thread->pid, fp->handle); | 1609 | proc->pid, thread->pid, fp->handle); |
1610 | return_error = BR_FAILED_REPLY; | 1610 | return_error = BR_FAILED_REPLY; |
1611 | goto err_fget_failed; | 1611 | goto err_fget_failed; |
1612 | } | 1612 | } |
1613 | target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); | 1613 | target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); |
1614 | if (target_fd < 0) { | 1614 | if (target_fd < 0) { |
1615 | fput(file); | 1615 | fput(file); |
1616 | return_error = BR_FAILED_REPLY; | 1616 | return_error = BR_FAILED_REPLY; |
1617 | goto err_get_unused_fd_failed; | 1617 | goto err_get_unused_fd_failed; |
1618 | } | 1618 | } |
1619 | task_fd_install(target_proc, target_fd, file); | 1619 | task_fd_install(target_proc, target_fd, file); |
1620 | trace_binder_transaction_fd(t, fp->handle, target_fd); | 1620 | trace_binder_transaction_fd(t, fp->handle, target_fd); |
1621 | binder_debug(BINDER_DEBUG_TRANSACTION, | 1621 | binder_debug(BINDER_DEBUG_TRANSACTION, |
1622 | " fd %ld -> %d\n", fp->handle, target_fd); | 1622 | " fd %d -> %d\n", fp->handle, target_fd); |
1623 | /* TODO: fput? */ | 1623 | /* TODO: fput? */ |
1624 | fp->handle = target_fd; | 1624 | fp->handle = target_fd; |
1625 | } break; | 1625 | } break; |
1626 | 1626 | ||
1627 | default: | 1627 | default: |
1628 | binder_user_error("%d:%d got transaction with invalid object type, %lx\n", | 1628 | binder_user_error("%d:%d got transaction with invalid object type, %x\n", |
1629 | proc->pid, thread->pid, fp->type); | 1629 | proc->pid, thread->pid, fp->type); |
1630 | return_error = BR_FAILED_REPLY; | 1630 | return_error = BR_FAILED_REPLY; |
1631 | goto err_bad_object_type; | 1631 | goto err_bad_object_type; |
1632 | } | 1632 | } |
1633 | } | 1633 | } |
1634 | if (reply) { | 1634 | if (reply) { |
1635 | BUG_ON(t->buffer->async_transaction != 0); | 1635 | BUG_ON(t->buffer->async_transaction != 0); |
1636 | binder_pop_transaction(target_thread, in_reply_to); | 1636 | binder_pop_transaction(target_thread, in_reply_to); |
1637 | } else if (!(t->flags & TF_ONE_WAY)) { | 1637 | } else if (!(t->flags & TF_ONE_WAY)) { |
1638 | BUG_ON(t->buffer->async_transaction != 0); | 1638 | BUG_ON(t->buffer->async_transaction != 0); |
1639 | t->need_reply = 1; | 1639 | t->need_reply = 1; |
1640 | t->from_parent = thread->transaction_stack; | 1640 | t->from_parent = thread->transaction_stack; |
1641 | thread->transaction_stack = t; | 1641 | thread->transaction_stack = t; |
1642 | } else { | 1642 | } else { |
1643 | BUG_ON(target_node == NULL); | 1643 | BUG_ON(target_node == NULL); |
1644 | BUG_ON(t->buffer->async_transaction != 1); | 1644 | BUG_ON(t->buffer->async_transaction != 1); |
1645 | if (target_node->has_async_transaction) { | 1645 | if (target_node->has_async_transaction) { |
1646 | target_list = &target_node->async_todo; | 1646 | target_list = &target_node->async_todo; |
1647 | target_wait = NULL; | 1647 | target_wait = NULL; |
1648 | } else | 1648 | } else |
1649 | target_node->has_async_transaction = 1; | 1649 | target_node->has_async_transaction = 1; |
1650 | } | 1650 | } |
1651 | t->work.type = BINDER_WORK_TRANSACTION; | 1651 | t->work.type = BINDER_WORK_TRANSACTION; |
1652 | list_add_tail(&t->work.entry, target_list); | 1652 | list_add_tail(&t->work.entry, target_list); |
1653 | tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; | 1653 | tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; |
1654 | list_add_tail(&tcomplete->entry, &thread->todo); | 1654 | list_add_tail(&tcomplete->entry, &thread->todo); |
1655 | if (target_wait) | 1655 | if (target_wait) |
1656 | wake_up_interruptible(target_wait); | 1656 | wake_up_interruptible(target_wait); |
1657 | return; | 1657 | return; |
1658 | 1658 | ||
1659 | err_get_unused_fd_failed: | 1659 | err_get_unused_fd_failed: |
1660 | err_fget_failed: | 1660 | err_fget_failed: |
1661 | err_fd_not_allowed: | 1661 | err_fd_not_allowed: |
1662 | err_binder_get_ref_for_node_failed: | 1662 | err_binder_get_ref_for_node_failed: |
1663 | err_binder_get_ref_failed: | 1663 | err_binder_get_ref_failed: |
1664 | err_binder_new_node_failed: | 1664 | err_binder_new_node_failed: |
1665 | err_bad_object_type: | 1665 | err_bad_object_type: |
1666 | err_bad_offset: | 1666 | err_bad_offset: |
1667 | err_copy_data_failed: | 1667 | err_copy_data_failed: |
1668 | trace_binder_transaction_failed_buffer_release(t->buffer); | 1668 | trace_binder_transaction_failed_buffer_release(t->buffer); |
1669 | binder_transaction_buffer_release(target_proc, t->buffer, offp); | 1669 | binder_transaction_buffer_release(target_proc, t->buffer, offp); |
1670 | t->buffer->transaction = NULL; | 1670 | t->buffer->transaction = NULL; |
1671 | binder_free_buf(target_proc, t->buffer); | 1671 | binder_free_buf(target_proc, t->buffer); |
1672 | err_binder_alloc_buf_failed: | 1672 | err_binder_alloc_buf_failed: |
1673 | kfree(tcomplete); | 1673 | kfree(tcomplete); |
1674 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); | 1674 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); |
1675 | err_alloc_tcomplete_failed: | 1675 | err_alloc_tcomplete_failed: |
1676 | kfree(t); | 1676 | kfree(t); |
1677 | binder_stats_deleted(BINDER_STAT_TRANSACTION); | 1677 | binder_stats_deleted(BINDER_STAT_TRANSACTION); |
1678 | err_alloc_t_failed: | 1678 | err_alloc_t_failed: |
1679 | err_bad_call_stack: | 1679 | err_bad_call_stack: |
1680 | err_empty_call_stack: | 1680 | err_empty_call_stack: |
1681 | err_dead_binder: | 1681 | err_dead_binder: |
1682 | err_invalid_target_handle: | 1682 | err_invalid_target_handle: |
1683 | err_no_context_mgr_node: | 1683 | err_no_context_mgr_node: |
1684 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, | 1684 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, |
1685 | "%d:%d transaction failed %d, size %zd-%zd\n", | 1685 | "%d:%d transaction failed %d, size %zd-%zd\n", |
1686 | proc->pid, thread->pid, return_error, | 1686 | proc->pid, thread->pid, return_error, |
1687 | tr->data_size, tr->offsets_size); | 1687 | tr->data_size, tr->offsets_size); |
1688 | 1688 | ||
1689 | { | 1689 | { |
1690 | struct binder_transaction_log_entry *fe; | 1690 | struct binder_transaction_log_entry *fe; |
1691 | fe = binder_transaction_log_add(&binder_transaction_log_failed); | 1691 | fe = binder_transaction_log_add(&binder_transaction_log_failed); |
1692 | *fe = *e; | 1692 | *fe = *e; |
1693 | } | 1693 | } |
1694 | 1694 | ||
1695 | BUG_ON(thread->return_error != BR_OK); | 1695 | BUG_ON(thread->return_error != BR_OK); |
1696 | if (in_reply_to) { | 1696 | if (in_reply_to) { |
1697 | thread->return_error = BR_TRANSACTION_COMPLETE; | 1697 | thread->return_error = BR_TRANSACTION_COMPLETE; |
1698 | binder_send_failed_reply(in_reply_to, return_error); | 1698 | binder_send_failed_reply(in_reply_to, return_error); |
1699 | } else | 1699 | } else |
1700 | thread->return_error = return_error; | 1700 | thread->return_error = return_error; |
1701 | } | 1701 | } |
1702 | 1702 | ||
1703 | int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, | 1703 | int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, |
1704 | void __user *buffer, size_t size, size_t *consumed) | 1704 | void __user *buffer, size_t size, size_t *consumed) |
1705 | { | 1705 | { |
1706 | uint32_t cmd; | 1706 | uint32_t cmd; |
1707 | void __user *ptr = buffer + *consumed; | 1707 | void __user *ptr = buffer + *consumed; |
1708 | void __user *end = buffer + size; | 1708 | void __user *end = buffer + size; |
1709 | 1709 | ||
1710 | while (ptr < end && thread->return_error == BR_OK) { | 1710 | while (ptr < end && thread->return_error == BR_OK) { |
1711 | if (get_user(cmd, (uint32_t __user *)ptr)) | 1711 | if (get_user(cmd, (uint32_t __user *)ptr)) |
1712 | return -EFAULT; | 1712 | return -EFAULT; |
1713 | ptr += sizeof(uint32_t); | 1713 | ptr += sizeof(uint32_t); |
1714 | trace_binder_command(cmd); | 1714 | trace_binder_command(cmd); |
1715 | if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { | 1715 | if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { |
1716 | binder_stats.bc[_IOC_NR(cmd)]++; | 1716 | binder_stats.bc[_IOC_NR(cmd)]++; |
1717 | proc->stats.bc[_IOC_NR(cmd)]++; | 1717 | proc->stats.bc[_IOC_NR(cmd)]++; |
1718 | thread->stats.bc[_IOC_NR(cmd)]++; | 1718 | thread->stats.bc[_IOC_NR(cmd)]++; |
1719 | } | 1719 | } |
1720 | switch (cmd) { | 1720 | switch (cmd) { |
1721 | case BC_INCREFS: | 1721 | case BC_INCREFS: |
1722 | case BC_ACQUIRE: | 1722 | case BC_ACQUIRE: |
1723 | case BC_RELEASE: | 1723 | case BC_RELEASE: |
1724 | case BC_DECREFS: { | 1724 | case BC_DECREFS: { |
1725 | uint32_t target; | 1725 | uint32_t target; |
1726 | struct binder_ref *ref; | 1726 | struct binder_ref *ref; |
1727 | const char *debug_string; | 1727 | const char *debug_string; |
1728 | 1728 | ||
1729 | if (get_user(target, (uint32_t __user *)ptr)) | 1729 | if (get_user(target, (uint32_t __user *)ptr)) |
1730 | return -EFAULT; | 1730 | return -EFAULT; |
1731 | ptr += sizeof(uint32_t); | 1731 | ptr += sizeof(uint32_t); |
1732 | if (target == 0 && binder_context_mgr_node && | 1732 | if (target == 0 && binder_context_mgr_node && |
1733 | (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { | 1733 | (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { |
1734 | ref = binder_get_ref_for_node(proc, | 1734 | ref = binder_get_ref_for_node(proc, |
1735 | binder_context_mgr_node); | 1735 | binder_context_mgr_node); |
1736 | if (ref->desc != target) { | 1736 | if (ref->desc != target) { |
1737 | binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", | 1737 | binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", |
1738 | proc->pid, thread->pid, | 1738 | proc->pid, thread->pid, |
1739 | ref->desc); | 1739 | ref->desc); |
1740 | } | 1740 | } |
1741 | } else | 1741 | } else |
1742 | ref = binder_get_ref(proc, target); | 1742 | ref = binder_get_ref(proc, target); |
1743 | if (ref == NULL) { | 1743 | if (ref == NULL) { |
1744 | binder_user_error("%d:%d refcount change on invalid ref %d\n", | 1744 | binder_user_error("%d:%d refcount change on invalid ref %d\n", |
1745 | proc->pid, thread->pid, target); | 1745 | proc->pid, thread->pid, target); |
1746 | break; | 1746 | break; |
1747 | } | 1747 | } |
1748 | switch (cmd) { | 1748 | switch (cmd) { |
1749 | case BC_INCREFS: | 1749 | case BC_INCREFS: |
1750 | debug_string = "IncRefs"; | 1750 | debug_string = "IncRefs"; |
1751 | binder_inc_ref(ref, 0, NULL); | 1751 | binder_inc_ref(ref, 0, NULL); |
1752 | break; | 1752 | break; |
1753 | case BC_ACQUIRE: | 1753 | case BC_ACQUIRE: |
1754 | debug_string = "Acquire"; | 1754 | debug_string = "Acquire"; |
1755 | binder_inc_ref(ref, 1, NULL); | 1755 | binder_inc_ref(ref, 1, NULL); |
1756 | break; | 1756 | break; |
1757 | case BC_RELEASE: | 1757 | case BC_RELEASE: |
1758 | debug_string = "Release"; | 1758 | debug_string = "Release"; |
1759 | binder_dec_ref(ref, 1); | 1759 | binder_dec_ref(ref, 1); |
1760 | break; | 1760 | break; |
1761 | case BC_DECREFS: | 1761 | case BC_DECREFS: |
1762 | default: | 1762 | default: |
1763 | debug_string = "DecRefs"; | 1763 | debug_string = "DecRefs"; |
1764 | binder_dec_ref(ref, 0); | 1764 | binder_dec_ref(ref, 0); |
1765 | break; | 1765 | break; |
1766 | } | 1766 | } |
1767 | binder_debug(BINDER_DEBUG_USER_REFS, | 1767 | binder_debug(BINDER_DEBUG_USER_REFS, |
1768 | "%d:%d %s ref %d desc %d s %d w %d for node %d\n", | 1768 | "%d:%d %s ref %d desc %d s %d w %d for node %d\n", |
1769 | proc->pid, thread->pid, debug_string, ref->debug_id, | 1769 | proc->pid, thread->pid, debug_string, ref->debug_id, |
1770 | ref->desc, ref->strong, ref->weak, ref->node->debug_id); | 1770 | ref->desc, ref->strong, ref->weak, ref->node->debug_id); |
1771 | break; | 1771 | break; |
1772 | } | 1772 | } |
1773 | case BC_INCREFS_DONE: | 1773 | case BC_INCREFS_DONE: |
1774 | case BC_ACQUIRE_DONE: { | 1774 | case BC_ACQUIRE_DONE: { |
1775 | void __user *node_ptr; | 1775 | void __user *node_ptr; |
1776 | void *cookie; | 1776 | void *cookie; |
1777 | struct binder_node *node; | 1777 | struct binder_node *node; |
1778 | 1778 | ||
1779 | if (get_user(node_ptr, (void * __user *)ptr)) | 1779 | if (get_user(node_ptr, (void * __user *)ptr)) |
1780 | return -EFAULT; | 1780 | return -EFAULT; |
1781 | ptr += sizeof(void *); | 1781 | ptr += sizeof(void *); |
1782 | if (get_user(cookie, (void * __user *)ptr)) | 1782 | if (get_user(cookie, (void * __user *)ptr)) |
1783 | return -EFAULT; | 1783 | return -EFAULT; |
1784 | ptr += sizeof(void *); | 1784 | ptr += sizeof(void *); |
1785 | node = binder_get_node(proc, node_ptr); | 1785 | node = binder_get_node(proc, node_ptr); |
1786 | if (node == NULL) { | 1786 | if (node == NULL) { |
1787 | binder_user_error("%d:%d %s u%p no match\n", | 1787 | binder_user_error("%d:%d %s u%p no match\n", |
1788 | proc->pid, thread->pid, | 1788 | proc->pid, thread->pid, |
1789 | cmd == BC_INCREFS_DONE ? | 1789 | cmd == BC_INCREFS_DONE ? |
1790 | "BC_INCREFS_DONE" : | 1790 | "BC_INCREFS_DONE" : |
1791 | "BC_ACQUIRE_DONE", | 1791 | "BC_ACQUIRE_DONE", |
1792 | node_ptr); | 1792 | node_ptr); |
1793 | break; | 1793 | break; |
1794 | } | 1794 | } |
1795 | if (cookie != node->cookie) { | 1795 | if (cookie != node->cookie) { |
1796 | binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n", | 1796 | binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n", |
1797 | proc->pid, thread->pid, | 1797 | proc->pid, thread->pid, |
1798 | cmd == BC_INCREFS_DONE ? | 1798 | cmd == BC_INCREFS_DONE ? |
1799 | "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", | 1799 | "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", |
1800 | node_ptr, node->debug_id, | 1800 | node_ptr, node->debug_id, |
1801 | cookie, node->cookie); | 1801 | cookie, node->cookie); |
1802 | break; | 1802 | break; |
1803 | } | 1803 | } |
1804 | if (cmd == BC_ACQUIRE_DONE) { | 1804 | if (cmd == BC_ACQUIRE_DONE) { |
1805 | if (node->pending_strong_ref == 0) { | 1805 | if (node->pending_strong_ref == 0) { |
1806 | binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", | 1806 | binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", |
1807 | proc->pid, thread->pid, | 1807 | proc->pid, thread->pid, |
1808 | node->debug_id); | 1808 | node->debug_id); |
1809 | break; | 1809 | break; |
1810 | } | 1810 | } |
1811 | node->pending_strong_ref = 0; | 1811 | node->pending_strong_ref = 0; |
1812 | } else { | 1812 | } else { |
1813 | if (node->pending_weak_ref == 0) { | 1813 | if (node->pending_weak_ref == 0) { |
1814 | binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", | 1814 | binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", |
1815 | proc->pid, thread->pid, | 1815 | proc->pid, thread->pid, |
1816 | node->debug_id); | 1816 | node->debug_id); |
1817 | break; | 1817 | break; |
1818 | } | 1818 | } |
1819 | node->pending_weak_ref = 0; | 1819 | node->pending_weak_ref = 0; |
1820 | } | 1820 | } |
1821 | binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); | 1821 | binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); |
1822 | binder_debug(BINDER_DEBUG_USER_REFS, | 1822 | binder_debug(BINDER_DEBUG_USER_REFS, |
1823 | "%d:%d %s node %d ls %d lw %d\n", | 1823 | "%d:%d %s node %d ls %d lw %d\n", |
1824 | proc->pid, thread->pid, | 1824 | proc->pid, thread->pid, |
1825 | cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", | 1825 | cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", |
1826 | node->debug_id, node->local_strong_refs, node->local_weak_refs); | 1826 | node->debug_id, node->local_strong_refs, node->local_weak_refs); |
1827 | break; | 1827 | break; |
1828 | } | 1828 | } |
1829 | case BC_ATTEMPT_ACQUIRE: | 1829 | case BC_ATTEMPT_ACQUIRE: |
1830 | pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); | 1830 | pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); |
1831 | return -EINVAL; | 1831 | return -EINVAL; |
1832 | case BC_ACQUIRE_RESULT: | 1832 | case BC_ACQUIRE_RESULT: |
1833 | pr_err("BC_ACQUIRE_RESULT not supported\n"); | 1833 | pr_err("BC_ACQUIRE_RESULT not supported\n"); |
1834 | return -EINVAL; | 1834 | return -EINVAL; |
1835 | 1835 | ||
1836 | case BC_FREE_BUFFER: { | 1836 | case BC_FREE_BUFFER: { |
1837 | void __user *data_ptr; | 1837 | void __user *data_ptr; |
1838 | struct binder_buffer *buffer; | 1838 | struct binder_buffer *buffer; |
1839 | 1839 | ||
1840 | if (get_user(data_ptr, (void * __user *)ptr)) | 1840 | if (get_user(data_ptr, (void * __user *)ptr)) |
1841 | return -EFAULT; | 1841 | return -EFAULT; |
1842 | ptr += sizeof(void *); | 1842 | ptr += sizeof(void *); |
1843 | 1843 | ||
1844 | buffer = binder_buffer_lookup(proc, data_ptr); | 1844 | buffer = binder_buffer_lookup(proc, data_ptr); |
1845 | if (buffer == NULL) { | 1845 | if (buffer == NULL) { |
1846 | binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n", | 1846 | binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n", |
1847 | proc->pid, thread->pid, data_ptr); | 1847 | proc->pid, thread->pid, data_ptr); |
1848 | break; | 1848 | break; |
1849 | } | 1849 | } |
1850 | if (!buffer->allow_user_free) { | 1850 | if (!buffer->allow_user_free) { |
1851 | binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n", | 1851 | binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n", |
1852 | proc->pid, thread->pid, data_ptr); | 1852 | proc->pid, thread->pid, data_ptr); |
1853 | break; | 1853 | break; |
1854 | } | 1854 | } |
1855 | binder_debug(BINDER_DEBUG_FREE_BUFFER, | 1855 | binder_debug(BINDER_DEBUG_FREE_BUFFER, |
1856 | "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", | 1856 | "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", |
1857 | proc->pid, thread->pid, data_ptr, buffer->debug_id, | 1857 | proc->pid, thread->pid, data_ptr, buffer->debug_id, |
1858 | buffer->transaction ? "active" : "finished"); | 1858 | buffer->transaction ? "active" : "finished"); |
1859 | 1859 | ||
1860 | if (buffer->transaction) { | 1860 | if (buffer->transaction) { |
1861 | buffer->transaction->buffer = NULL; | 1861 | buffer->transaction->buffer = NULL; |
1862 | buffer->transaction = NULL; | 1862 | buffer->transaction = NULL; |
1863 | } | 1863 | } |
1864 | if (buffer->async_transaction && buffer->target_node) { | 1864 | if (buffer->async_transaction && buffer->target_node) { |
1865 | BUG_ON(!buffer->target_node->has_async_transaction); | 1865 | BUG_ON(!buffer->target_node->has_async_transaction); |
1866 | if (list_empty(&buffer->target_node->async_todo)) | 1866 | if (list_empty(&buffer->target_node->async_todo)) |
1867 | buffer->target_node->has_async_transaction = 0; | 1867 | buffer->target_node->has_async_transaction = 0; |
1868 | else | 1868 | else |
1869 | list_move_tail(buffer->target_node->async_todo.next, &thread->todo); | 1869 | list_move_tail(buffer->target_node->async_todo.next, &thread->todo); |
1870 | } | 1870 | } |
1871 | trace_binder_transaction_buffer_release(buffer); | 1871 | trace_binder_transaction_buffer_release(buffer); |
1872 | binder_transaction_buffer_release(proc, buffer, NULL); | 1872 | binder_transaction_buffer_release(proc, buffer, NULL); |
1873 | binder_free_buf(proc, buffer); | 1873 | binder_free_buf(proc, buffer); |
1874 | break; | 1874 | break; |
1875 | } | 1875 | } |
1876 | 1876 | ||
1877 | case BC_TRANSACTION: | 1877 | case BC_TRANSACTION: |
1878 | case BC_REPLY: { | 1878 | case BC_REPLY: { |
1879 | struct binder_transaction_data tr; | 1879 | struct binder_transaction_data tr; |
1880 | 1880 | ||
1881 | if (copy_from_user(&tr, ptr, sizeof(tr))) | 1881 | if (copy_from_user(&tr, ptr, sizeof(tr))) |
1882 | return -EFAULT; | 1882 | return -EFAULT; |
1883 | ptr += sizeof(tr); | 1883 | ptr += sizeof(tr); |
1884 | binder_transaction(proc, thread, &tr, cmd == BC_REPLY); | 1884 | binder_transaction(proc, thread, &tr, cmd == BC_REPLY); |
1885 | break; | 1885 | break; |
1886 | } | 1886 | } |
1887 | 1887 | ||
1888 | case BC_REGISTER_LOOPER: | 1888 | case BC_REGISTER_LOOPER: |
1889 | binder_debug(BINDER_DEBUG_THREADS, | 1889 | binder_debug(BINDER_DEBUG_THREADS, |
1890 | "%d:%d BC_REGISTER_LOOPER\n", | 1890 | "%d:%d BC_REGISTER_LOOPER\n", |
1891 | proc->pid, thread->pid); | 1891 | proc->pid, thread->pid); |
1892 | if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { | 1892 | if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { |
1893 | thread->looper |= BINDER_LOOPER_STATE_INVALID; | 1893 | thread->looper |= BINDER_LOOPER_STATE_INVALID; |
1894 | binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", | 1894 | binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", |
1895 | proc->pid, thread->pid); | 1895 | proc->pid, thread->pid); |
1896 | } else if (proc->requested_threads == 0) { | 1896 | } else if (proc->requested_threads == 0) { |
1897 | thread->looper |= BINDER_LOOPER_STATE_INVALID; | 1897 | thread->looper |= BINDER_LOOPER_STATE_INVALID; |
1898 | binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", | 1898 | binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", |
1899 | proc->pid, thread->pid); | 1899 | proc->pid, thread->pid); |
1900 | } else { | 1900 | } else { |
1901 | proc->requested_threads--; | 1901 | proc->requested_threads--; |
1902 | proc->requested_threads_started++; | 1902 | proc->requested_threads_started++; |
1903 | } | 1903 | } |
1904 | thread->looper |= BINDER_LOOPER_STATE_REGISTERED; | 1904 | thread->looper |= BINDER_LOOPER_STATE_REGISTERED; |
1905 | break; | 1905 | break; |
1906 | case BC_ENTER_LOOPER: | 1906 | case BC_ENTER_LOOPER: |
1907 | binder_debug(BINDER_DEBUG_THREADS, | 1907 | binder_debug(BINDER_DEBUG_THREADS, |
1908 | "%d:%d BC_ENTER_LOOPER\n", | 1908 | "%d:%d BC_ENTER_LOOPER\n", |
1909 | proc->pid, thread->pid); | 1909 | proc->pid, thread->pid); |
1910 | if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { | 1910 | if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { |
1911 | thread->looper |= BINDER_LOOPER_STATE_INVALID; | 1911 | thread->looper |= BINDER_LOOPER_STATE_INVALID; |
1912 | binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", | 1912 | binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", |
1913 | proc->pid, thread->pid); | 1913 | proc->pid, thread->pid); |
1914 | } | 1914 | } |
1915 | thread->looper |= BINDER_LOOPER_STATE_ENTERED; | 1915 | thread->looper |= BINDER_LOOPER_STATE_ENTERED; |
1916 | break; | 1916 | break; |
1917 | case BC_EXIT_LOOPER: | 1917 | case BC_EXIT_LOOPER: |
1918 | binder_debug(BINDER_DEBUG_THREADS, | 1918 | binder_debug(BINDER_DEBUG_THREADS, |
1919 | "%d:%d BC_EXIT_LOOPER\n", | 1919 | "%d:%d BC_EXIT_LOOPER\n", |
1920 | proc->pid, thread->pid); | 1920 | proc->pid, thread->pid); |
1921 | thread->looper |= BINDER_LOOPER_STATE_EXITED; | 1921 | thread->looper |= BINDER_LOOPER_STATE_EXITED; |
1922 | break; | 1922 | break; |
1923 | 1923 | ||
1924 | case BC_REQUEST_DEATH_NOTIFICATION: | 1924 | case BC_REQUEST_DEATH_NOTIFICATION: |
1925 | case BC_CLEAR_DEATH_NOTIFICATION: { | 1925 | case BC_CLEAR_DEATH_NOTIFICATION: { |
1926 | uint32_t target; | 1926 | uint32_t target; |
1927 | void __user *cookie; | 1927 | void __user *cookie; |
1928 | struct binder_ref *ref; | 1928 | struct binder_ref *ref; |
1929 | struct binder_ref_death *death; | 1929 | struct binder_ref_death *death; |
1930 | 1930 | ||
1931 | if (get_user(target, (uint32_t __user *)ptr)) | 1931 | if (get_user(target, (uint32_t __user *)ptr)) |
1932 | return -EFAULT; | 1932 | return -EFAULT; |
1933 | ptr += sizeof(uint32_t); | 1933 | ptr += sizeof(uint32_t); |
1934 | if (get_user(cookie, (void __user * __user *)ptr)) | 1934 | if (get_user(cookie, (void __user * __user *)ptr)) |
1935 | return -EFAULT; | 1935 | return -EFAULT; |
1936 | ptr += sizeof(void *); | 1936 | ptr += sizeof(void *); |
1937 | ref = binder_get_ref(proc, target); | 1937 | ref = binder_get_ref(proc, target); |
1938 | if (ref == NULL) { | 1938 | if (ref == NULL) { |
1939 | binder_user_error("%d:%d %s invalid ref %d\n", | 1939 | binder_user_error("%d:%d %s invalid ref %d\n", |
1940 | proc->pid, thread->pid, | 1940 | proc->pid, thread->pid, |
1941 | cmd == BC_REQUEST_DEATH_NOTIFICATION ? | 1941 | cmd == BC_REQUEST_DEATH_NOTIFICATION ? |
1942 | "BC_REQUEST_DEATH_NOTIFICATION" : | 1942 | "BC_REQUEST_DEATH_NOTIFICATION" : |
1943 | "BC_CLEAR_DEATH_NOTIFICATION", | 1943 | "BC_CLEAR_DEATH_NOTIFICATION", |
1944 | target); | 1944 | target); |
1945 | break; | 1945 | break; |
1946 | } | 1946 | } |
1947 | 1947 | ||
1948 | binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, | 1948 | binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, |
1949 | "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n", | 1949 | "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n", |
1950 | proc->pid, thread->pid, | 1950 | proc->pid, thread->pid, |
1951 | cmd == BC_REQUEST_DEATH_NOTIFICATION ? | 1951 | cmd == BC_REQUEST_DEATH_NOTIFICATION ? |
1952 | "BC_REQUEST_DEATH_NOTIFICATION" : | 1952 | "BC_REQUEST_DEATH_NOTIFICATION" : |
1953 | "BC_CLEAR_DEATH_NOTIFICATION", | 1953 | "BC_CLEAR_DEATH_NOTIFICATION", |
1954 | cookie, ref->debug_id, ref->desc, | 1954 | cookie, ref->debug_id, ref->desc, |
1955 | ref->strong, ref->weak, ref->node->debug_id); | 1955 | ref->strong, ref->weak, ref->node->debug_id); |
1956 | 1956 | ||
1957 | if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { | 1957 | if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { |
1958 | if (ref->death) { | 1958 | if (ref->death) { |
1959 | binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", | 1959 | binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", |
1960 | proc->pid, thread->pid); | 1960 | proc->pid, thread->pid); |
1961 | break; | 1961 | break; |
1962 | } | 1962 | } |
1963 | death = kzalloc(sizeof(*death), GFP_KERNEL); | 1963 | death = kzalloc(sizeof(*death), GFP_KERNEL); |
1964 | if (death == NULL) { | 1964 | if (death == NULL) { |
1965 | thread->return_error = BR_ERROR; | 1965 | thread->return_error = BR_ERROR; |
1966 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, | 1966 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, |
1967 | "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", | 1967 | "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", |
1968 | proc->pid, thread->pid); | 1968 | proc->pid, thread->pid); |
1969 | break; | 1969 | break; |
1970 | } | 1970 | } |
1971 | binder_stats_created(BINDER_STAT_DEATH); | 1971 | binder_stats_created(BINDER_STAT_DEATH); |
1972 | INIT_LIST_HEAD(&death->work.entry); | 1972 | INIT_LIST_HEAD(&death->work.entry); |
1973 | death->cookie = cookie; | 1973 | death->cookie = cookie; |
1974 | ref->death = death; | 1974 | ref->death = death; |
1975 | if (ref->node->proc == NULL) { | 1975 | if (ref->node->proc == NULL) { |
1976 | ref->death->work.type = BINDER_WORK_DEAD_BINDER; | 1976 | ref->death->work.type = BINDER_WORK_DEAD_BINDER; |
1977 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { | 1977 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { |
1978 | list_add_tail(&ref->death->work.entry, &thread->todo); | 1978 | list_add_tail(&ref->death->work.entry, &thread->todo); |
1979 | } else { | 1979 | } else { |
1980 | list_add_tail(&ref->death->work.entry, &proc->todo); | 1980 | list_add_tail(&ref->death->work.entry, &proc->todo); |
1981 | wake_up_interruptible(&proc->wait); | 1981 | wake_up_interruptible(&proc->wait); |
1982 | } | 1982 | } |
1983 | } | 1983 | } |
1984 | } else { | 1984 | } else { |
1985 | if (ref->death == NULL) { | 1985 | if (ref->death == NULL) { |
1986 | binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", | 1986 | binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", |
1987 | proc->pid, thread->pid); | 1987 | proc->pid, thread->pid); |
1988 | break; | 1988 | break; |
1989 | } | 1989 | } |
1990 | death = ref->death; | 1990 | death = ref->death; |
1991 | if (death->cookie != cookie) { | 1991 | if (death->cookie != cookie) { |
1992 | binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n", | 1992 | binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n", |
1993 | proc->pid, thread->pid, | 1993 | proc->pid, thread->pid, |
1994 | death->cookie, cookie); | 1994 | death->cookie, cookie); |
1995 | break; | 1995 | break; |
1996 | } | 1996 | } |
1997 | ref->death = NULL; | 1997 | ref->death = NULL; |
1998 | if (list_empty(&death->work.entry)) { | 1998 | if (list_empty(&death->work.entry)) { |
1999 | death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; | 1999 | death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; |
2000 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { | 2000 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { |
2001 | list_add_tail(&death->work.entry, &thread->todo); | 2001 | list_add_tail(&death->work.entry, &thread->todo); |
2002 | } else { | 2002 | } else { |
2003 | list_add_tail(&death->work.entry, &proc->todo); | 2003 | list_add_tail(&death->work.entry, &proc->todo); |
2004 | wake_up_interruptible(&proc->wait); | 2004 | wake_up_interruptible(&proc->wait); |
2005 | } | 2005 | } |
2006 | } else { | 2006 | } else { |
2007 | BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); | 2007 | BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); |
2008 | death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; | 2008 | death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; |
2009 | } | 2009 | } |
2010 | } | 2010 | } |
2011 | } break; | 2011 | } break; |
2012 | case BC_DEAD_BINDER_DONE: { | 2012 | case BC_DEAD_BINDER_DONE: { |
2013 | struct binder_work *w; | 2013 | struct binder_work *w; |
2014 | void __user *cookie; | 2014 | void __user *cookie; |
2015 | struct binder_ref_death *death = NULL; | 2015 | struct binder_ref_death *death = NULL; |
2016 | if (get_user(cookie, (void __user * __user *)ptr)) | 2016 | if (get_user(cookie, (void __user * __user *)ptr)) |
2017 | return -EFAULT; | 2017 | return -EFAULT; |
2018 | 2018 | ||
2019 | ptr += sizeof(void *); | 2019 | ptr += sizeof(void *); |
2020 | list_for_each_entry(w, &proc->delivered_death, entry) { | 2020 | list_for_each_entry(w, &proc->delivered_death, entry) { |
2021 | struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); | 2021 | struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); |
2022 | if (tmp_death->cookie == cookie) { | 2022 | if (tmp_death->cookie == cookie) { |
2023 | death = tmp_death; | 2023 | death = tmp_death; |
2024 | break; | 2024 | break; |
2025 | } | 2025 | } |
2026 | } | 2026 | } |
2027 | binder_debug(BINDER_DEBUG_DEAD_BINDER, | 2027 | binder_debug(BINDER_DEBUG_DEAD_BINDER, |
2028 | "%d:%d BC_DEAD_BINDER_DONE %p found %p\n", | 2028 | "%d:%d BC_DEAD_BINDER_DONE %p found %p\n", |
2029 | proc->pid, thread->pid, cookie, death); | 2029 | proc->pid, thread->pid, cookie, death); |
2030 | if (death == NULL) { | 2030 | if (death == NULL) { |
2031 | binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n", | 2031 | binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n", |
2032 | proc->pid, thread->pid, cookie); | 2032 | proc->pid, thread->pid, cookie); |
2033 | break; | 2033 | break; |
2034 | } | 2034 | } |
2035 | 2035 | ||
2036 | list_del_init(&death->work.entry); | 2036 | list_del_init(&death->work.entry); |
2037 | if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { | 2037 | if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { |
2038 | death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; | 2038 | death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; |
2039 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { | 2039 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { |
2040 | list_add_tail(&death->work.entry, &thread->todo); | 2040 | list_add_tail(&death->work.entry, &thread->todo); |
2041 | } else { | 2041 | } else { |
2042 | list_add_tail(&death->work.entry, &proc->todo); | 2042 | list_add_tail(&death->work.entry, &proc->todo); |
2043 | wake_up_interruptible(&proc->wait); | 2043 | wake_up_interruptible(&proc->wait); |
2044 | } | 2044 | } |
2045 | } | 2045 | } |
2046 | } break; | 2046 | } break; |
2047 | 2047 | ||
2048 | default: | 2048 | default: |
2049 | pr_err("%d:%d unknown command %d\n", | 2049 | pr_err("%d:%d unknown command %d\n", |
2050 | proc->pid, thread->pid, cmd); | 2050 | proc->pid, thread->pid, cmd); |
2051 | return -EINVAL; | 2051 | return -EINVAL; |
2052 | } | 2052 | } |
2053 | *consumed = ptr - buffer; | 2053 | *consumed = ptr - buffer; |
2054 | } | 2054 | } |
2055 | return 0; | 2055 | return 0; |
2056 | } | 2056 | } |
2057 | 2057 | ||
2058 | void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, | 2058 | void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, |
2059 | uint32_t cmd) | 2059 | uint32_t cmd) |
2060 | { | 2060 | { |
2061 | trace_binder_return(cmd); | 2061 | trace_binder_return(cmd); |
2062 | if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { | 2062 | if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { |
2063 | binder_stats.br[_IOC_NR(cmd)]++; | 2063 | binder_stats.br[_IOC_NR(cmd)]++; |
2064 | proc->stats.br[_IOC_NR(cmd)]++; | 2064 | proc->stats.br[_IOC_NR(cmd)]++; |
2065 | thread->stats.br[_IOC_NR(cmd)]++; | 2065 | thread->stats.br[_IOC_NR(cmd)]++; |
2066 | } | 2066 | } |
2067 | } | 2067 | } |
2068 | 2068 | ||
2069 | static int binder_has_proc_work(struct binder_proc *proc, | 2069 | static int binder_has_proc_work(struct binder_proc *proc, |
2070 | struct binder_thread *thread) | 2070 | struct binder_thread *thread) |
2071 | { | 2071 | { |
2072 | return !list_empty(&proc->todo) || | 2072 | return !list_empty(&proc->todo) || |
2073 | (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); | 2073 | (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); |
2074 | } | 2074 | } |
2075 | 2075 | ||
2076 | static int binder_has_thread_work(struct binder_thread *thread) | 2076 | static int binder_has_thread_work(struct binder_thread *thread) |
2077 | { | 2077 | { |
2078 | return !list_empty(&thread->todo) || thread->return_error != BR_OK || | 2078 | return !list_empty(&thread->todo) || thread->return_error != BR_OK || |
2079 | (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); | 2079 | (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); |
2080 | } | 2080 | } |
2081 | 2081 | ||
2082 | static int binder_thread_read(struct binder_proc *proc, | 2082 | static int binder_thread_read(struct binder_proc *proc, |
2083 | struct binder_thread *thread, | 2083 | struct binder_thread *thread, |
2084 | void __user *buffer, size_t size, | 2084 | void __user *buffer, size_t size, |
2085 | size_t *consumed, int non_block) | 2085 | size_t *consumed, int non_block) |
2086 | { | 2086 | { |
2087 | void __user *ptr = buffer + *consumed; | 2087 | void __user *ptr = buffer + *consumed; |
2088 | void __user *end = buffer + size; | 2088 | void __user *end = buffer + size; |
2089 | 2089 | ||
2090 | int ret = 0; | 2090 | int ret = 0; |
2091 | int wait_for_proc_work; | 2091 | int wait_for_proc_work; |
2092 | 2092 | ||
2093 | if (*consumed == 0) { | 2093 | if (*consumed == 0) { |
2094 | if (put_user(BR_NOOP, (uint32_t __user *)ptr)) | 2094 | if (put_user(BR_NOOP, (uint32_t __user *)ptr)) |
2095 | return -EFAULT; | 2095 | return -EFAULT; |
2096 | ptr += sizeof(uint32_t); | 2096 | ptr += sizeof(uint32_t); |
2097 | } | 2097 | } |
2098 | 2098 | ||
2099 | retry: | 2099 | retry: |
2100 | wait_for_proc_work = thread->transaction_stack == NULL && | 2100 | wait_for_proc_work = thread->transaction_stack == NULL && |
2101 | list_empty(&thread->todo); | 2101 | list_empty(&thread->todo); |
2102 | 2102 | ||
2103 | if (thread->return_error != BR_OK && ptr < end) { | 2103 | if (thread->return_error != BR_OK && ptr < end) { |
2104 | if (thread->return_error2 != BR_OK) { | 2104 | if (thread->return_error2 != BR_OK) { |
2105 | if (put_user(thread->return_error2, (uint32_t __user *)ptr)) | 2105 | if (put_user(thread->return_error2, (uint32_t __user *)ptr)) |
2106 | return -EFAULT; | 2106 | return -EFAULT; |
2107 | ptr += sizeof(uint32_t); | 2107 | ptr += sizeof(uint32_t); |
2108 | binder_stat_br(proc, thread, thread->return_error2); | 2108 | binder_stat_br(proc, thread, thread->return_error2); |
2109 | if (ptr == end) | 2109 | if (ptr == end) |
2110 | goto done; | 2110 | goto done; |
2111 | thread->return_error2 = BR_OK; | 2111 | thread->return_error2 = BR_OK; |
2112 | } | 2112 | } |
2113 | if (put_user(thread->return_error, (uint32_t __user *)ptr)) | 2113 | if (put_user(thread->return_error, (uint32_t __user *)ptr)) |
2114 | return -EFAULT; | 2114 | return -EFAULT; |
2115 | ptr += sizeof(uint32_t); | 2115 | ptr += sizeof(uint32_t); |
2116 | binder_stat_br(proc, thread, thread->return_error); | 2116 | binder_stat_br(proc, thread, thread->return_error); |
2117 | thread->return_error = BR_OK; | 2117 | thread->return_error = BR_OK; |
2118 | goto done; | 2118 | goto done; |
2119 | } | 2119 | } |
2120 | 2120 | ||
2121 | 2121 | ||
2122 | thread->looper |= BINDER_LOOPER_STATE_WAITING; | 2122 | thread->looper |= BINDER_LOOPER_STATE_WAITING; |
2123 | if (wait_for_proc_work) | 2123 | if (wait_for_proc_work) |
2124 | proc->ready_threads++; | 2124 | proc->ready_threads++; |
2125 | 2125 | ||
2126 | binder_unlock(__func__); | 2126 | binder_unlock(__func__); |
2127 | 2127 | ||
2128 | trace_binder_wait_for_work(wait_for_proc_work, | 2128 | trace_binder_wait_for_work(wait_for_proc_work, |
2129 | !!thread->transaction_stack, | 2129 | !!thread->transaction_stack, |
2130 | !list_empty(&thread->todo)); | 2130 | !list_empty(&thread->todo)); |
2131 | if (wait_for_proc_work) { | 2131 | if (wait_for_proc_work) { |
2132 | if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | | 2132 | if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | |
2133 | BINDER_LOOPER_STATE_ENTERED))) { | 2133 | BINDER_LOOPER_STATE_ENTERED))) { |
2134 | binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", | 2134 | binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", |
2135 | proc->pid, thread->pid, thread->looper); | 2135 | proc->pid, thread->pid, thread->looper); |
2136 | wait_event_interruptible(binder_user_error_wait, | 2136 | wait_event_interruptible(binder_user_error_wait, |
2137 | binder_stop_on_user_error < 2); | 2137 | binder_stop_on_user_error < 2); |
2138 | } | 2138 | } |
2139 | binder_set_nice(proc->default_priority); | 2139 | binder_set_nice(proc->default_priority); |
2140 | if (non_block) { | 2140 | if (non_block) { |
2141 | if (!binder_has_proc_work(proc, thread)) | 2141 | if (!binder_has_proc_work(proc, thread)) |
2142 | ret = -EAGAIN; | 2142 | ret = -EAGAIN; |
2143 | } else | 2143 | } else |
2144 | ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); | 2144 | ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); |
2145 | } else { | 2145 | } else { |
2146 | if (non_block) { | 2146 | if (non_block) { |
2147 | if (!binder_has_thread_work(thread)) | 2147 | if (!binder_has_thread_work(thread)) |
2148 | ret = -EAGAIN; | 2148 | ret = -EAGAIN; |
2149 | } else | 2149 | } else |
2150 | ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); | 2150 | ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); |
2151 | } | 2151 | } |
2152 | 2152 | ||
2153 | binder_lock(__func__); | 2153 | binder_lock(__func__); |
2154 | 2154 | ||
2155 | if (wait_for_proc_work) | 2155 | if (wait_for_proc_work) |
2156 | proc->ready_threads--; | 2156 | proc->ready_threads--; |
2157 | thread->looper &= ~BINDER_LOOPER_STATE_WAITING; | 2157 | thread->looper &= ~BINDER_LOOPER_STATE_WAITING; |
2158 | 2158 | ||
2159 | if (ret) | 2159 | if (ret) |
2160 | return ret; | 2160 | return ret; |
2161 | 2161 | ||
2162 | while (1) { | 2162 | while (1) { |
2163 | uint32_t cmd; | 2163 | uint32_t cmd; |
2164 | struct binder_transaction_data tr; | 2164 | struct binder_transaction_data tr; |
2165 | struct binder_work *w; | 2165 | struct binder_work *w; |
2166 | struct binder_transaction *t = NULL; | 2166 | struct binder_transaction *t = NULL; |
2167 | 2167 | ||
2168 | if (!list_empty(&thread->todo)) | 2168 | if (!list_empty(&thread->todo)) |
2169 | w = list_first_entry(&thread->todo, struct binder_work, entry); | 2169 | w = list_first_entry(&thread->todo, struct binder_work, entry); |
2170 | else if (!list_empty(&proc->todo) && wait_for_proc_work) | 2170 | else if (!list_empty(&proc->todo) && wait_for_proc_work) |
2171 | w = list_first_entry(&proc->todo, struct binder_work, entry); | 2171 | w = list_first_entry(&proc->todo, struct binder_work, entry); |
2172 | else { | 2172 | else { |
2173 | if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ | 2173 | if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ |
2174 | goto retry; | 2174 | goto retry; |
2175 | break; | 2175 | break; |
2176 | } | 2176 | } |
2177 | 2177 | ||
2178 | if (end - ptr < sizeof(tr) + 4) | 2178 | if (end - ptr < sizeof(tr) + 4) |
2179 | break; | 2179 | break; |
2180 | 2180 | ||
2181 | switch (w->type) { | 2181 | switch (w->type) { |
2182 | case BINDER_WORK_TRANSACTION: { | 2182 | case BINDER_WORK_TRANSACTION: { |
2183 | t = container_of(w, struct binder_transaction, work); | 2183 | t = container_of(w, struct binder_transaction, work); |
2184 | } break; | 2184 | } break; |
2185 | case BINDER_WORK_TRANSACTION_COMPLETE: { | 2185 | case BINDER_WORK_TRANSACTION_COMPLETE: { |
2186 | cmd = BR_TRANSACTION_COMPLETE; | 2186 | cmd = BR_TRANSACTION_COMPLETE; |
2187 | if (put_user(cmd, (uint32_t __user *)ptr)) | 2187 | if (put_user(cmd, (uint32_t __user *)ptr)) |
2188 | return -EFAULT; | 2188 | return -EFAULT; |
2189 | ptr += sizeof(uint32_t); | 2189 | ptr += sizeof(uint32_t); |
2190 | 2190 | ||
2191 | binder_stat_br(proc, thread, cmd); | 2191 | binder_stat_br(proc, thread, cmd); |
2192 | binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, | 2192 | binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, |
2193 | "%d:%d BR_TRANSACTION_COMPLETE\n", | 2193 | "%d:%d BR_TRANSACTION_COMPLETE\n", |
2194 | proc->pid, thread->pid); | 2194 | proc->pid, thread->pid); |
2195 | 2195 | ||
2196 | list_del(&w->entry); | 2196 | list_del(&w->entry); |
2197 | kfree(w); | 2197 | kfree(w); |
2198 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); | 2198 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); |
2199 | } break; | 2199 | } break; |
2200 | case BINDER_WORK_NODE: { | 2200 | case BINDER_WORK_NODE: { |
2201 | struct binder_node *node = container_of(w, struct binder_node, work); | 2201 | struct binder_node *node = container_of(w, struct binder_node, work); |
2202 | uint32_t cmd = BR_NOOP; | 2202 | uint32_t cmd = BR_NOOP; |
2203 | const char *cmd_name; | 2203 | const char *cmd_name; |
2204 | int strong = node->internal_strong_refs || node->local_strong_refs; | 2204 | int strong = node->internal_strong_refs || node->local_strong_refs; |
2205 | int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; | 2205 | int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; |
2206 | if (weak && !node->has_weak_ref) { | 2206 | if (weak && !node->has_weak_ref) { |
2207 | cmd = BR_INCREFS; | 2207 | cmd = BR_INCREFS; |
2208 | cmd_name = "BR_INCREFS"; | 2208 | cmd_name = "BR_INCREFS"; |
2209 | node->has_weak_ref = 1; | 2209 | node->has_weak_ref = 1; |
2210 | node->pending_weak_ref = 1; | 2210 | node->pending_weak_ref = 1; |
2211 | node->local_weak_refs++; | 2211 | node->local_weak_refs++; |
2212 | } else if (strong && !node->has_strong_ref) { | 2212 | } else if (strong && !node->has_strong_ref) { |
2213 | cmd = BR_ACQUIRE; | 2213 | cmd = BR_ACQUIRE; |
2214 | cmd_name = "BR_ACQUIRE"; | 2214 | cmd_name = "BR_ACQUIRE"; |
2215 | node->has_strong_ref = 1; | 2215 | node->has_strong_ref = 1; |
2216 | node->pending_strong_ref = 1; | 2216 | node->pending_strong_ref = 1; |
2217 | node->local_strong_refs++; | 2217 | node->local_strong_refs++; |
2218 | } else if (!strong && node->has_strong_ref) { | 2218 | } else if (!strong && node->has_strong_ref) { |
2219 | cmd = BR_RELEASE; | 2219 | cmd = BR_RELEASE; |
2220 | cmd_name = "BR_RELEASE"; | 2220 | cmd_name = "BR_RELEASE"; |
2221 | node->has_strong_ref = 0; | 2221 | node->has_strong_ref = 0; |
2222 | } else if (!weak && node->has_weak_ref) { | 2222 | } else if (!weak && node->has_weak_ref) { |
2223 | cmd = BR_DECREFS; | 2223 | cmd = BR_DECREFS; |
2224 | cmd_name = "BR_DECREFS"; | 2224 | cmd_name = "BR_DECREFS"; |
2225 | node->has_weak_ref = 0; | 2225 | node->has_weak_ref = 0; |
2226 | } | 2226 | } |
2227 | if (cmd != BR_NOOP) { | 2227 | if (cmd != BR_NOOP) { |
2228 | if (put_user(cmd, (uint32_t __user *)ptr)) | 2228 | if (put_user(cmd, (uint32_t __user *)ptr)) |
2229 | return -EFAULT; | 2229 | return -EFAULT; |
2230 | ptr += sizeof(uint32_t); | 2230 | ptr += sizeof(uint32_t); |
2231 | if (put_user(node->ptr, (void * __user *)ptr)) | 2231 | if (put_user(node->ptr, (void * __user *)ptr)) |
2232 | return -EFAULT; | 2232 | return -EFAULT; |
2233 | ptr += sizeof(void *); | 2233 | ptr += sizeof(void *); |
2234 | if (put_user(node->cookie, (void * __user *)ptr)) | 2234 | if (put_user(node->cookie, (void * __user *)ptr)) |
2235 | return -EFAULT; | 2235 | return -EFAULT; |
2236 | ptr += sizeof(void *); | 2236 | ptr += sizeof(void *); |
2237 | 2237 | ||
2238 | binder_stat_br(proc, thread, cmd); | 2238 | binder_stat_br(proc, thread, cmd); |
2239 | binder_debug(BINDER_DEBUG_USER_REFS, | 2239 | binder_debug(BINDER_DEBUG_USER_REFS, |
2240 | "%d:%d %s %d u%p c%p\n", | 2240 | "%d:%d %s %d u%p c%p\n", |
2241 | proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); | 2241 | proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); |
2242 | } else { | 2242 | } else { |
2243 | list_del_init(&w->entry); | 2243 | list_del_init(&w->entry); |
2244 | if (!weak && !strong) { | 2244 | if (!weak && !strong) { |
2245 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, | 2245 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, |
2246 | "%d:%d node %d u%p c%p deleted\n", | 2246 | "%d:%d node %d u%p c%p deleted\n", |
2247 | proc->pid, thread->pid, node->debug_id, | 2247 | proc->pid, thread->pid, node->debug_id, |
2248 | node->ptr, node->cookie); | 2248 | node->ptr, node->cookie); |
2249 | rb_erase(&node->rb_node, &proc->nodes); | 2249 | rb_erase(&node->rb_node, &proc->nodes); |
2250 | kfree(node); | 2250 | kfree(node); |
2251 | binder_stats_deleted(BINDER_STAT_NODE); | 2251 | binder_stats_deleted(BINDER_STAT_NODE); |
2252 | } else { | 2252 | } else { |
2253 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, | 2253 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, |
2254 | "%d:%d node %d u%p c%p state unchanged\n", | 2254 | "%d:%d node %d u%p c%p state unchanged\n", |
2255 | proc->pid, thread->pid, node->debug_id, node->ptr, | 2255 | proc->pid, thread->pid, node->debug_id, node->ptr, |
2256 | node->cookie); | 2256 | node->cookie); |
2257 | } | 2257 | } |
2258 | } | 2258 | } |
2259 | } break; | 2259 | } break; |
2260 | case BINDER_WORK_DEAD_BINDER: | 2260 | case BINDER_WORK_DEAD_BINDER: |
2261 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: | 2261 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: |
2262 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { | 2262 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { |
2263 | struct binder_ref_death *death; | 2263 | struct binder_ref_death *death; |
2264 | uint32_t cmd; | 2264 | uint32_t cmd; |
2265 | 2265 | ||
2266 | death = container_of(w, struct binder_ref_death, work); | 2266 | death = container_of(w, struct binder_ref_death, work); |
2267 | if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) | 2267 | if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) |
2268 | cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; | 2268 | cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; |
2269 | else | 2269 | else |
2270 | cmd = BR_DEAD_BINDER; | 2270 | cmd = BR_DEAD_BINDER; |
2271 | if (put_user(cmd, (uint32_t __user *)ptr)) | 2271 | if (put_user(cmd, (uint32_t __user *)ptr)) |
2272 | return -EFAULT; | 2272 | return -EFAULT; |
2273 | ptr += sizeof(uint32_t); | 2273 | ptr += sizeof(uint32_t); |
2274 | if (put_user(death->cookie, (void * __user *)ptr)) | 2274 | if (put_user(death->cookie, (void * __user *)ptr)) |
2275 | return -EFAULT; | 2275 | return -EFAULT; |
2276 | ptr += sizeof(void *); | 2276 | ptr += sizeof(void *); |
2277 | binder_stat_br(proc, thread, cmd); | 2277 | binder_stat_br(proc, thread, cmd); |
2278 | binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, | 2278 | binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, |
2279 | "%d:%d %s %p\n", | 2279 | "%d:%d %s %p\n", |
2280 | proc->pid, thread->pid, | 2280 | proc->pid, thread->pid, |
2281 | cmd == BR_DEAD_BINDER ? | 2281 | cmd == BR_DEAD_BINDER ? |
2282 | "BR_DEAD_BINDER" : | 2282 | "BR_DEAD_BINDER" : |
2283 | "BR_CLEAR_DEATH_NOTIFICATION_DONE", | 2283 | "BR_CLEAR_DEATH_NOTIFICATION_DONE", |
2284 | death->cookie); | 2284 | death->cookie); |
2285 | 2285 | ||
2286 | if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { | 2286 | if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { |
2287 | list_del(&w->entry); | 2287 | list_del(&w->entry); |
2288 | kfree(death); | 2288 | kfree(death); |
2289 | binder_stats_deleted(BINDER_STAT_DEATH); | 2289 | binder_stats_deleted(BINDER_STAT_DEATH); |
2290 | } else | 2290 | } else |
2291 | list_move(&w->entry, &proc->delivered_death); | 2291 | list_move(&w->entry, &proc->delivered_death); |
2292 | if (cmd == BR_DEAD_BINDER) | 2292 | if (cmd == BR_DEAD_BINDER) |
2293 | goto done; /* DEAD_BINDER notifications can cause transactions */ | 2293 | goto done; /* DEAD_BINDER notifications can cause transactions */ |
2294 | } break; | 2294 | } break; |
2295 | } | 2295 | } |
2296 | 2296 | ||
2297 | if (!t) | 2297 | if (!t) |
2298 | continue; | 2298 | continue; |
2299 | 2299 | ||
2300 | BUG_ON(t->buffer == NULL); | 2300 | BUG_ON(t->buffer == NULL); |
2301 | if (t->buffer->target_node) { | 2301 | if (t->buffer->target_node) { |
2302 | struct binder_node *target_node = t->buffer->target_node; | 2302 | struct binder_node *target_node = t->buffer->target_node; |
2303 | tr.target.ptr = target_node->ptr; | 2303 | tr.target.ptr = target_node->ptr; |
2304 | tr.cookie = target_node->cookie; | 2304 | tr.cookie = target_node->cookie; |
2305 | t->saved_priority = task_nice(current); | 2305 | t->saved_priority = task_nice(current); |
2306 | if (t->priority < target_node->min_priority && | 2306 | if (t->priority < target_node->min_priority && |
2307 | !(t->flags & TF_ONE_WAY)) | 2307 | !(t->flags & TF_ONE_WAY)) |
2308 | binder_set_nice(t->priority); | 2308 | binder_set_nice(t->priority); |
2309 | else if (!(t->flags & TF_ONE_WAY) || | 2309 | else if (!(t->flags & TF_ONE_WAY) || |
2310 | t->saved_priority > target_node->min_priority) | 2310 | t->saved_priority > target_node->min_priority) |
2311 | binder_set_nice(target_node->min_priority); | 2311 | binder_set_nice(target_node->min_priority); |
2312 | cmd = BR_TRANSACTION; | 2312 | cmd = BR_TRANSACTION; |
2313 | } else { | 2313 | } else { |
2314 | tr.target.ptr = NULL; | 2314 | tr.target.ptr = NULL; |
2315 | tr.cookie = NULL; | 2315 | tr.cookie = NULL; |
2316 | cmd = BR_REPLY; | 2316 | cmd = BR_REPLY; |
2317 | } | 2317 | } |
2318 | tr.code = t->code; | 2318 | tr.code = t->code; |
2319 | tr.flags = t->flags; | 2319 | tr.flags = t->flags; |
2320 | tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); | 2320 | tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); |
2321 | 2321 | ||
2322 | if (t->from) { | 2322 | if (t->from) { |
2323 | struct task_struct *sender = t->from->proc->tsk; | 2323 | struct task_struct *sender = t->from->proc->tsk; |
2324 | tr.sender_pid = task_tgid_nr_ns(sender, | 2324 | tr.sender_pid = task_tgid_nr_ns(sender, |
2325 | task_active_pid_ns(current)); | 2325 | task_active_pid_ns(current)); |
2326 | } else { | 2326 | } else { |
2327 | tr.sender_pid = 0; | 2327 | tr.sender_pid = 0; |
2328 | } | 2328 | } |
2329 | 2329 | ||
2330 | tr.data_size = t->buffer->data_size; | 2330 | tr.data_size = t->buffer->data_size; |
2331 | tr.offsets_size = t->buffer->offsets_size; | 2331 | tr.offsets_size = t->buffer->offsets_size; |
2332 | tr.data.ptr.buffer = (void *)t->buffer->data + | 2332 | tr.data.ptr.buffer = (void *)t->buffer->data + |
2333 | proc->user_buffer_offset; | 2333 | proc->user_buffer_offset; |
2334 | tr.data.ptr.offsets = tr.data.ptr.buffer + | 2334 | tr.data.ptr.offsets = tr.data.ptr.buffer + |
2335 | ALIGN(t->buffer->data_size, | 2335 | ALIGN(t->buffer->data_size, |
2336 | sizeof(void *)); | 2336 | sizeof(void *)); |
2337 | 2337 | ||
2338 | if (put_user(cmd, (uint32_t __user *)ptr)) | 2338 | if (put_user(cmd, (uint32_t __user *)ptr)) |
2339 | return -EFAULT; | 2339 | return -EFAULT; |
2340 | ptr += sizeof(uint32_t); | 2340 | ptr += sizeof(uint32_t); |
2341 | if (copy_to_user(ptr, &tr, sizeof(tr))) | 2341 | if (copy_to_user(ptr, &tr, sizeof(tr))) |
2342 | return -EFAULT; | 2342 | return -EFAULT; |
2343 | ptr += sizeof(tr); | 2343 | ptr += sizeof(tr); |
2344 | 2344 | ||
2345 | trace_binder_transaction_received(t); | 2345 | trace_binder_transaction_received(t); |
2346 | binder_stat_br(proc, thread, cmd); | 2346 | binder_stat_br(proc, thread, cmd); |
2347 | binder_debug(BINDER_DEBUG_TRANSACTION, | 2347 | binder_debug(BINDER_DEBUG_TRANSACTION, |
2348 | "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n", | 2348 | "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n", |
2349 | proc->pid, thread->pid, | 2349 | proc->pid, thread->pid, |
2350 | (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : | 2350 | (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : |
2351 | "BR_REPLY", | 2351 | "BR_REPLY", |
2352 | t->debug_id, t->from ? t->from->proc->pid : 0, | 2352 | t->debug_id, t->from ? t->from->proc->pid : 0, |
2353 | t->from ? t->from->pid : 0, cmd, | 2353 | t->from ? t->from->pid : 0, cmd, |
2354 | t->buffer->data_size, t->buffer->offsets_size, | 2354 | t->buffer->data_size, t->buffer->offsets_size, |
2355 | tr.data.ptr.buffer, tr.data.ptr.offsets); | 2355 | tr.data.ptr.buffer, tr.data.ptr.offsets); |
2356 | 2356 | ||
2357 | list_del(&t->work.entry); | 2357 | list_del(&t->work.entry); |
2358 | t->buffer->allow_user_free = 1; | 2358 | t->buffer->allow_user_free = 1; |
2359 | if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { | 2359 | if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { |
2360 | t->to_parent = thread->transaction_stack; | 2360 | t->to_parent = thread->transaction_stack; |
2361 | t->to_thread = thread; | 2361 | t->to_thread = thread; |
2362 | thread->transaction_stack = t; | 2362 | thread->transaction_stack = t; |
2363 | } else { | 2363 | } else { |
2364 | t->buffer->transaction = NULL; | 2364 | t->buffer->transaction = NULL; |
2365 | kfree(t); | 2365 | kfree(t); |
2366 | binder_stats_deleted(BINDER_STAT_TRANSACTION); | 2366 | binder_stats_deleted(BINDER_STAT_TRANSACTION); |
2367 | } | 2367 | } |
2368 | break; | 2368 | break; |
2369 | } | 2369 | } |
2370 | 2370 | ||
2371 | done: | 2371 | done: |
2372 | 2372 | ||
2373 | *consumed = ptr - buffer; | 2373 | *consumed = ptr - buffer; |
2374 | if (proc->requested_threads + proc->ready_threads == 0 && | 2374 | if (proc->requested_threads + proc->ready_threads == 0 && |
2375 | proc->requested_threads_started < proc->max_threads && | 2375 | proc->requested_threads_started < proc->max_threads && |
2376 | (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | | 2376 | (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | |
2377 | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ | 2377 | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ |
2378 | /*spawn a new thread if we leave this out */) { | 2378 | /*spawn a new thread if we leave this out */) { |
2379 | proc->requested_threads++; | 2379 | proc->requested_threads++; |
2380 | binder_debug(BINDER_DEBUG_THREADS, | 2380 | binder_debug(BINDER_DEBUG_THREADS, |
2381 | "%d:%d BR_SPAWN_LOOPER\n", | 2381 | "%d:%d BR_SPAWN_LOOPER\n", |
2382 | proc->pid, thread->pid); | 2382 | proc->pid, thread->pid); |
2383 | if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) | 2383 | if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) |
2384 | return -EFAULT; | 2384 | return -EFAULT; |
2385 | binder_stat_br(proc, thread, BR_SPAWN_LOOPER); | 2385 | binder_stat_br(proc, thread, BR_SPAWN_LOOPER); |
2386 | } | 2386 | } |
2387 | return 0; | 2387 | return 0; |
2388 | } | 2388 | } |
2389 | 2389 | ||
2390 | static void binder_release_work(struct list_head *list) | 2390 | static void binder_release_work(struct list_head *list) |
2391 | { | 2391 | { |
2392 | struct binder_work *w; | 2392 | struct binder_work *w; |
2393 | while (!list_empty(list)) { | 2393 | while (!list_empty(list)) { |
2394 | w = list_first_entry(list, struct binder_work, entry); | 2394 | w = list_first_entry(list, struct binder_work, entry); |
2395 | list_del_init(&w->entry); | 2395 | list_del_init(&w->entry); |
2396 | switch (w->type) { | 2396 | switch (w->type) { |
2397 | case BINDER_WORK_TRANSACTION: { | 2397 | case BINDER_WORK_TRANSACTION: { |
2398 | struct binder_transaction *t; | 2398 | struct binder_transaction *t; |
2399 | 2399 | ||
2400 | t = container_of(w, struct binder_transaction, work); | 2400 | t = container_of(w, struct binder_transaction, work); |
2401 | if (t->buffer->target_node && | 2401 | if (t->buffer->target_node && |
2402 | !(t->flags & TF_ONE_WAY)) { | 2402 | !(t->flags & TF_ONE_WAY)) { |
2403 | binder_send_failed_reply(t, BR_DEAD_REPLY); | 2403 | binder_send_failed_reply(t, BR_DEAD_REPLY); |
2404 | } else { | 2404 | } else { |
2405 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, | 2405 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, |
2406 | "undelivered transaction %d\n", | 2406 | "undelivered transaction %d\n", |
2407 | t->debug_id); | 2407 | t->debug_id); |
2408 | t->buffer->transaction = NULL; | 2408 | t->buffer->transaction = NULL; |
2409 | kfree(t); | 2409 | kfree(t); |
2410 | binder_stats_deleted(BINDER_STAT_TRANSACTION); | 2410 | binder_stats_deleted(BINDER_STAT_TRANSACTION); |
2411 | } | 2411 | } |
2412 | } break; | 2412 | } break; |
2413 | case BINDER_WORK_TRANSACTION_COMPLETE: { | 2413 | case BINDER_WORK_TRANSACTION_COMPLETE: { |
2414 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, | 2414 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, |
2415 | "undelivered TRANSACTION_COMPLETE\n"); | 2415 | "undelivered TRANSACTION_COMPLETE\n"); |
2416 | kfree(w); | 2416 | kfree(w); |
2417 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); | 2417 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); |
2418 | } break; | 2418 | } break; |
2419 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: | 2419 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: |
2420 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { | 2420 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { |
2421 | struct binder_ref_death *death; | 2421 | struct binder_ref_death *death; |
2422 | 2422 | ||
2423 | death = container_of(w, struct binder_ref_death, work); | 2423 | death = container_of(w, struct binder_ref_death, work); |
2424 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, | 2424 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, |
2425 | "undelivered death notification, %p\n", | 2425 | "undelivered death notification, %p\n", |
2426 | death->cookie); | 2426 | death->cookie); |
2427 | kfree(death); | 2427 | kfree(death); |
2428 | binder_stats_deleted(BINDER_STAT_DEATH); | 2428 | binder_stats_deleted(BINDER_STAT_DEATH); |
2429 | } break; | 2429 | } break; |
2430 | default: | 2430 | default: |
2431 | pr_err("unexpected work type, %d, not freed\n", | 2431 | pr_err("unexpected work type, %d, not freed\n", |
2432 | w->type); | 2432 | w->type); |
2433 | break; | 2433 | break; |
2434 | } | 2434 | } |
2435 | } | 2435 | } |
2436 | 2436 | ||
2437 | } | 2437 | } |
2438 | 2438 | ||
2439 | static struct binder_thread *binder_get_thread(struct binder_proc *proc) | 2439 | static struct binder_thread *binder_get_thread(struct binder_proc *proc) |
2440 | { | 2440 | { |
2441 | struct binder_thread *thread = NULL; | 2441 | struct binder_thread *thread = NULL; |
2442 | struct rb_node *parent = NULL; | 2442 | struct rb_node *parent = NULL; |
2443 | struct rb_node **p = &proc->threads.rb_node; | 2443 | struct rb_node **p = &proc->threads.rb_node; |
2444 | 2444 | ||
2445 | while (*p) { | 2445 | while (*p) { |
2446 | parent = *p; | 2446 | parent = *p; |
2447 | thread = rb_entry(parent, struct binder_thread, rb_node); | 2447 | thread = rb_entry(parent, struct binder_thread, rb_node); |
2448 | 2448 | ||
2449 | if (current->pid < thread->pid) | 2449 | if (current->pid < thread->pid) |
2450 | p = &(*p)->rb_left; | 2450 | p = &(*p)->rb_left; |
2451 | else if (current->pid > thread->pid) | 2451 | else if (current->pid > thread->pid) |
2452 | p = &(*p)->rb_right; | 2452 | p = &(*p)->rb_right; |
2453 | else | 2453 | else |
2454 | break; | 2454 | break; |
2455 | } | 2455 | } |
2456 | if (*p == NULL) { | 2456 | if (*p == NULL) { |
2457 | thread = kzalloc(sizeof(*thread), GFP_KERNEL); | 2457 | thread = kzalloc(sizeof(*thread), GFP_KERNEL); |
2458 | if (thread == NULL) | 2458 | if (thread == NULL) |
2459 | return NULL; | 2459 | return NULL; |
2460 | binder_stats_created(BINDER_STAT_THREAD); | 2460 | binder_stats_created(BINDER_STAT_THREAD); |
2461 | thread->proc = proc; | 2461 | thread->proc = proc; |
2462 | thread->pid = current->pid; | 2462 | thread->pid = current->pid; |
2463 | init_waitqueue_head(&thread->wait); | 2463 | init_waitqueue_head(&thread->wait); |
2464 | INIT_LIST_HEAD(&thread->todo); | 2464 | INIT_LIST_HEAD(&thread->todo); |
2465 | rb_link_node(&thread->rb_node, parent, p); | 2465 | rb_link_node(&thread->rb_node, parent, p); |
2466 | rb_insert_color(&thread->rb_node, &proc->threads); | 2466 | rb_insert_color(&thread->rb_node, &proc->threads); |
2467 | thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; | 2467 | thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; |
2468 | thread->return_error = BR_OK; | 2468 | thread->return_error = BR_OK; |
2469 | thread->return_error2 = BR_OK; | 2469 | thread->return_error2 = BR_OK; |
2470 | } | 2470 | } |
2471 | return thread; | 2471 | return thread; |
2472 | } | 2472 | } |
2473 | 2473 | ||
/*
 * binder_free_thread() - detach @thread from @proc and free it.
 *
 * Unlinks the thread from the process rbtree, walks its transaction
 * stack severing this thread from every in-flight transaction (in both
 * the sender and target roles), sends a BR_DEAD_REPLY for a transaction
 * that was targeting this thread, and releases any queued work.
 *
 * Return: the number of transactions that were still active on the
 * thread's transaction stack.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* Only the top-of-stack transaction targeting us gets a reply. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Incoming transaction: drop the target-side links
			 * and the buffer back-reference, then continue up
			 * the target-side chain. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing transaction: drop the sender link and
			 * continue up the sender-side chain. */
			t->from = NULL;
			t = t->from_parent;
		} else
			/* Stack entry not linked to this thread at all:
			 * the stack is corrupt. */
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
2514 | 2514 | ||
2515 | static unsigned int binder_poll(struct file *filp, | 2515 | static unsigned int binder_poll(struct file *filp, |
2516 | struct poll_table_struct *wait) | 2516 | struct poll_table_struct *wait) |
2517 | { | 2517 | { |
2518 | struct binder_proc *proc = filp->private_data; | 2518 | struct binder_proc *proc = filp->private_data; |
2519 | struct binder_thread *thread = NULL; | 2519 | struct binder_thread *thread = NULL; |
2520 | int wait_for_proc_work; | 2520 | int wait_for_proc_work; |
2521 | 2521 | ||
2522 | binder_lock(__func__); | 2522 | binder_lock(__func__); |
2523 | 2523 | ||
2524 | thread = binder_get_thread(proc); | 2524 | thread = binder_get_thread(proc); |
2525 | 2525 | ||
2526 | wait_for_proc_work = thread->transaction_stack == NULL && | 2526 | wait_for_proc_work = thread->transaction_stack == NULL && |
2527 | list_empty(&thread->todo) && thread->return_error == BR_OK; | 2527 | list_empty(&thread->todo) && thread->return_error == BR_OK; |
2528 | 2528 | ||
2529 | binder_unlock(__func__); | 2529 | binder_unlock(__func__); |
2530 | 2530 | ||
2531 | if (wait_for_proc_work) { | 2531 | if (wait_for_proc_work) { |
2532 | if (binder_has_proc_work(proc, thread)) | 2532 | if (binder_has_proc_work(proc, thread)) |
2533 | return POLLIN; | 2533 | return POLLIN; |
2534 | poll_wait(filp, &proc->wait, wait); | 2534 | poll_wait(filp, &proc->wait, wait); |
2535 | if (binder_has_proc_work(proc, thread)) | 2535 | if (binder_has_proc_work(proc, thread)) |
2536 | return POLLIN; | 2536 | return POLLIN; |
2537 | } else { | 2537 | } else { |
2538 | if (binder_has_thread_work(thread)) | 2538 | if (binder_has_thread_work(thread)) |
2539 | return POLLIN; | 2539 | return POLLIN; |
2540 | poll_wait(filp, &thread->wait, wait); | 2540 | poll_wait(filp, &thread->wait, wait); |
2541 | if (binder_has_thread_work(thread)) | 2541 | if (binder_has_thread_work(thread)) |
2542 | return POLLIN; | 2542 | return POLLIN; |
2543 | } | 2543 | } |
2544 | return 0; | 2544 | return 0; |
2545 | } | 2545 | } |
2546 | 2546 | ||
2547 | static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 2547 | static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
2548 | { | 2548 | { |
2549 | int ret; | 2549 | int ret; |
2550 | struct binder_proc *proc = filp->private_data; | 2550 | struct binder_proc *proc = filp->private_data; |
2551 | struct binder_thread *thread; | 2551 | struct binder_thread *thread; |
2552 | unsigned int size = _IOC_SIZE(cmd); | 2552 | unsigned int size = _IOC_SIZE(cmd); |
2553 | void __user *ubuf = (void __user *)arg; | 2553 | void __user *ubuf = (void __user *)arg; |
2554 | 2554 | ||
2555 | /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ | 2555 | /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ |
2556 | 2556 | ||
2557 | trace_binder_ioctl(cmd, arg); | 2557 | trace_binder_ioctl(cmd, arg); |
2558 | 2558 | ||
2559 | ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); | 2559 | ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); |
2560 | if (ret) | 2560 | if (ret) |
2561 | goto err_unlocked; | 2561 | goto err_unlocked; |
2562 | 2562 | ||
2563 | binder_lock(__func__); | 2563 | binder_lock(__func__); |
2564 | thread = binder_get_thread(proc); | 2564 | thread = binder_get_thread(proc); |
2565 | if (thread == NULL) { | 2565 | if (thread == NULL) { |
2566 | ret = -ENOMEM; | 2566 | ret = -ENOMEM; |
2567 | goto err; | 2567 | goto err; |
2568 | } | 2568 | } |
2569 | 2569 | ||
2570 | switch (cmd) { | 2570 | switch (cmd) { |
2571 | case BINDER_WRITE_READ: { | 2571 | case BINDER_WRITE_READ: { |
2572 | struct binder_write_read bwr; | 2572 | struct binder_write_read bwr; |
2573 | if (size != sizeof(struct binder_write_read)) { | 2573 | if (size != sizeof(struct binder_write_read)) { |
2574 | ret = -EINVAL; | 2574 | ret = -EINVAL; |
2575 | goto err; | 2575 | goto err; |
2576 | } | 2576 | } |
2577 | if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { | 2577 | if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { |
2578 | ret = -EFAULT; | 2578 | ret = -EFAULT; |
2579 | goto err; | 2579 | goto err; |
2580 | } | 2580 | } |
2581 | binder_debug(BINDER_DEBUG_READ_WRITE, | 2581 | binder_debug(BINDER_DEBUG_READ_WRITE, |
2582 | "%d:%d write %zd at %08lx, read %zd at %08lx\n", | 2582 | "%d:%d write %zd at %016lx, read %zd at %016lx\n", |
2583 | proc->pid, thread->pid, bwr.write_size, | 2583 | proc->pid, thread->pid, bwr.write_size, |
2584 | bwr.write_buffer, bwr.read_size, bwr.read_buffer); | 2584 | bwr.write_buffer, bwr.read_size, bwr.read_buffer); |
2585 | 2585 | ||
2586 | if (bwr.write_size > 0) { | 2586 | if (bwr.write_size > 0) { |
2587 | ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); | 2587 | ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); |
2588 | trace_binder_write_done(ret); | 2588 | trace_binder_write_done(ret); |
2589 | if (ret < 0) { | 2589 | if (ret < 0) { |
2590 | bwr.read_consumed = 0; | 2590 | bwr.read_consumed = 0; |
2591 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) | 2591 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) |
2592 | ret = -EFAULT; | 2592 | ret = -EFAULT; |
2593 | goto err; | 2593 | goto err; |
2594 | } | 2594 | } |
2595 | } | 2595 | } |
2596 | if (bwr.read_size > 0) { | 2596 | if (bwr.read_size > 0) { |
2597 | ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); | 2597 | ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); |
2598 | trace_binder_read_done(ret); | 2598 | trace_binder_read_done(ret); |
2599 | if (!list_empty(&proc->todo)) | 2599 | if (!list_empty(&proc->todo)) |
2600 | wake_up_interruptible(&proc->wait); | 2600 | wake_up_interruptible(&proc->wait); |
2601 | if (ret < 0) { | 2601 | if (ret < 0) { |
2602 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) | 2602 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) |
2603 | ret = -EFAULT; | 2603 | ret = -EFAULT; |
2604 | goto err; | 2604 | goto err; |
2605 | } | 2605 | } |
2606 | } | 2606 | } |
2607 | binder_debug(BINDER_DEBUG_READ_WRITE, | 2607 | binder_debug(BINDER_DEBUG_READ_WRITE, |
2608 | "%d:%d wrote %zd of %zd, read return %zd of %zd\n", | 2608 | "%d:%d wrote %zd of %zd, read return %zd of %zd\n", |
2609 | proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, | 2609 | proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, |
2610 | bwr.read_consumed, bwr.read_size); | 2610 | bwr.read_consumed, bwr.read_size); |
2611 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { | 2611 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { |
2612 | ret = -EFAULT; | 2612 | ret = -EFAULT; |
2613 | goto err; | 2613 | goto err; |
2614 | } | 2614 | } |
2615 | break; | 2615 | break; |
2616 | } | 2616 | } |
2617 | case BINDER_SET_MAX_THREADS: | 2617 | case BINDER_SET_MAX_THREADS: |
2618 | if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { | 2618 | if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { |
2619 | ret = -EINVAL; | 2619 | ret = -EINVAL; |
2620 | goto err; | 2620 | goto err; |
2621 | } | 2621 | } |
2622 | break; | 2622 | break; |
2623 | case BINDER_SET_CONTEXT_MGR: | 2623 | case BINDER_SET_CONTEXT_MGR: |
2624 | if (binder_context_mgr_node != NULL) { | 2624 | if (binder_context_mgr_node != NULL) { |
2625 | pr_err("BINDER_SET_CONTEXT_MGR already set\n"); | 2625 | pr_err("BINDER_SET_CONTEXT_MGR already set\n"); |
2626 | ret = -EBUSY; | 2626 | ret = -EBUSY; |
2627 | goto err; | 2627 | goto err; |
2628 | } | 2628 | } |
2629 | if (uid_valid(binder_context_mgr_uid)) { | 2629 | if (uid_valid(binder_context_mgr_uid)) { |
2630 | if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) { | 2630 | if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) { |
2631 | pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", | 2631 | pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", |
2632 | from_kuid(&init_user_ns, current->cred->euid), | 2632 | from_kuid(&init_user_ns, current->cred->euid), |
2633 | from_kuid(&init_user_ns, binder_context_mgr_uid)); | 2633 | from_kuid(&init_user_ns, binder_context_mgr_uid)); |
2634 | ret = -EPERM; | 2634 | ret = -EPERM; |
2635 | goto err; | 2635 | goto err; |
2636 | } | 2636 | } |
2637 | } else | 2637 | } else |
2638 | binder_context_mgr_uid = current->cred->euid; | 2638 | binder_context_mgr_uid = current->cred->euid; |
2639 | binder_context_mgr_node = binder_new_node(proc, NULL, NULL); | 2639 | binder_context_mgr_node = binder_new_node(proc, NULL, NULL); |
2640 | if (binder_context_mgr_node == NULL) { | 2640 | if (binder_context_mgr_node == NULL) { |
2641 | ret = -ENOMEM; | 2641 | ret = -ENOMEM; |
2642 | goto err; | 2642 | goto err; |
2643 | } | 2643 | } |
2644 | binder_context_mgr_node->local_weak_refs++; | 2644 | binder_context_mgr_node->local_weak_refs++; |
2645 | binder_context_mgr_node->local_strong_refs++; | 2645 | binder_context_mgr_node->local_strong_refs++; |
2646 | binder_context_mgr_node->has_strong_ref = 1; | 2646 | binder_context_mgr_node->has_strong_ref = 1; |
2647 | binder_context_mgr_node->has_weak_ref = 1; | 2647 | binder_context_mgr_node->has_weak_ref = 1; |
2648 | break; | 2648 | break; |
2649 | case BINDER_THREAD_EXIT: | 2649 | case BINDER_THREAD_EXIT: |
2650 | binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", | 2650 | binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", |
2651 | proc->pid, thread->pid); | 2651 | proc->pid, thread->pid); |
2652 | binder_free_thread(proc, thread); | 2652 | binder_free_thread(proc, thread); |
2653 | thread = NULL; | 2653 | thread = NULL; |
2654 | break; | 2654 | break; |
2655 | case BINDER_VERSION: | 2655 | case BINDER_VERSION: |
2656 | if (size != sizeof(struct binder_version)) { | 2656 | if (size != sizeof(struct binder_version)) { |
2657 | ret = -EINVAL; | 2657 | ret = -EINVAL; |
2658 | goto err; | 2658 | goto err; |
2659 | } | 2659 | } |
2660 | if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { | 2660 | if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { |
2661 | ret = -EINVAL; | 2661 | ret = -EINVAL; |
2662 | goto err; | 2662 | goto err; |
2663 | } | 2663 | } |
2664 | break; | 2664 | break; |
2665 | default: | 2665 | default: |
2666 | ret = -EINVAL; | 2666 | ret = -EINVAL; |
2667 | goto err; | 2667 | goto err; |
2668 | } | 2668 | } |
2669 | ret = 0; | 2669 | ret = 0; |
2670 | err: | 2670 | err: |
2671 | if (thread) | 2671 | if (thread) |
2672 | thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; | 2672 | thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; |
2673 | binder_unlock(__func__); | 2673 | binder_unlock(__func__); |
2674 | wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); | 2674 | wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); |
2675 | if (ret && ret != -ERESTARTSYS) | 2675 | if (ret && ret != -ERESTARTSYS) |
2676 | pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); | 2676 | pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); |
2677 | err_unlocked: | 2677 | err_unlocked: |
2678 | trace_binder_ioctl_done(ret); | 2678 | trace_binder_ioctl_done(ret); |
2679 | return ret; | 2679 | return ret; |
2680 | } | 2680 | } |
2681 | 2681 | ||
2682 | static void binder_vma_open(struct vm_area_struct *vma) | 2682 | static void binder_vma_open(struct vm_area_struct *vma) |
2683 | { | 2683 | { |
2684 | struct binder_proc *proc = vma->vm_private_data; | 2684 | struct binder_proc *proc = vma->vm_private_data; |
2685 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, | 2685 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, |
2686 | "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", | 2686 | "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", |
2687 | proc->pid, vma->vm_start, vma->vm_end, | 2687 | proc->pid, vma->vm_start, vma->vm_end, |
2688 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, | 2688 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
2689 | (unsigned long)pgprot_val(vma->vm_page_prot)); | 2689 | (unsigned long)pgprot_val(vma->vm_page_prot)); |
2690 | } | 2690 | } |
2691 | 2691 | ||
2692 | static void binder_vma_close(struct vm_area_struct *vma) | 2692 | static void binder_vma_close(struct vm_area_struct *vma) |
2693 | { | 2693 | { |
2694 | struct binder_proc *proc = vma->vm_private_data; | 2694 | struct binder_proc *proc = vma->vm_private_data; |
2695 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, | 2695 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, |
2696 | "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", | 2696 | "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", |
2697 | proc->pid, vma->vm_start, vma->vm_end, | 2697 | proc->pid, vma->vm_start, vma->vm_end, |
2698 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, | 2698 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
2699 | (unsigned long)pgprot_val(vma->vm_page_prot)); | 2699 | (unsigned long)pgprot_val(vma->vm_page_prot)); |
2700 | proc->vma = NULL; | 2700 | proc->vma = NULL; |
2701 | proc->vma_vm_mm = NULL; | 2701 | proc->vma_vm_mm = NULL; |
2702 | binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); | 2702 | binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); |
2703 | } | 2703 | } |
2704 | 2704 | ||
2705 | static struct vm_operations_struct binder_vm_ops = { | 2705 | static struct vm_operations_struct binder_vm_ops = { |
2706 | .open = binder_vma_open, | 2706 | .open = binder_vma_open, |
2707 | .close = binder_vma_close, | 2707 | .close = binder_vma_close, |
2708 | }; | 2708 | }; |
2709 | 2709 | ||
/*
 * binder_mmap() - establish the single shared buffer mapping for a proc.
 *
 * Reserves a kernel virtual range (get_vm_area) matching the userspace
 * VMA, capped at 4MB, and records the constant delta between the two
 * (user_buffer_offset) so buffers can be addressed from both sides.
 * Pages are added on demand by binder_update_page_range(); only the
 * first page is populated here to seed the free-buffer list.
 *
 * Returns 0 on success or a negative errno:
 *   -EINVAL  caller is not the task that opened the fd
 *   -EPERM   forbidden vm_flags requested
 *   -EBUSY   proc already has a mapping
 *   -ENOMEM  vm area / page array / first page allocation failed
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	/* Only the task that opened this binder fd may mmap it. */
	if (proc->tsk != current)
		return -EINVAL;

	/* Cap the mapping at 4MB by shrinking the VMA in place. */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	/* Userspace must never (re)gain write access to the buffers. */
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	/* Exactly one mapping per binder_proc, ever. */
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	/* Reserve a matching kernel virtual range; no pages yet. */
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* Fixed delta used to translate kernel <-> user buffer addresses. */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	/*
	 * On aliasing VIPT caches the user and kernel addresses must share
	 * a cache colour; advance vm_start one page at a time until the
	 * colours match.
	 */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	/* One struct page pointer per page of the mapping (all NULL now). */
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Back only the first page so the initial free buffer exists. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Async transactions may consume at most half of the space. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

	/* Unwind in reverse order of acquisition. */
err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
2808 | 2808 | ||
2809 | static int binder_open(struct inode *nodp, struct file *filp) | 2809 | static int binder_open(struct inode *nodp, struct file *filp) |
2810 | { | 2810 | { |
2811 | struct binder_proc *proc; | 2811 | struct binder_proc *proc; |
2812 | 2812 | ||
2813 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", | 2813 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", |
2814 | current->group_leader->pid, current->pid); | 2814 | current->group_leader->pid, current->pid); |
2815 | 2815 | ||
2816 | proc = kzalloc(sizeof(*proc), GFP_KERNEL); | 2816 | proc = kzalloc(sizeof(*proc), GFP_KERNEL); |
2817 | if (proc == NULL) | 2817 | if (proc == NULL) |
2818 | return -ENOMEM; | 2818 | return -ENOMEM; |
2819 | get_task_struct(current); | 2819 | get_task_struct(current); |
2820 | proc->tsk = current; | 2820 | proc->tsk = current; |
2821 | INIT_LIST_HEAD(&proc->todo); | 2821 | INIT_LIST_HEAD(&proc->todo); |
2822 | init_waitqueue_head(&proc->wait); | 2822 | init_waitqueue_head(&proc->wait); |
2823 | proc->default_priority = task_nice(current); | 2823 | proc->default_priority = task_nice(current); |
2824 | 2824 | ||
2825 | binder_lock(__func__); | 2825 | binder_lock(__func__); |
2826 | 2826 | ||
2827 | binder_stats_created(BINDER_STAT_PROC); | 2827 | binder_stats_created(BINDER_STAT_PROC); |
2828 | hlist_add_head(&proc->proc_node, &binder_procs); | 2828 | hlist_add_head(&proc->proc_node, &binder_procs); |
2829 | proc->pid = current->group_leader->pid; | 2829 | proc->pid = current->group_leader->pid; |
2830 | INIT_LIST_HEAD(&proc->delivered_death); | 2830 | INIT_LIST_HEAD(&proc->delivered_death); |
2831 | filp->private_data = proc; | 2831 | filp->private_data = proc; |
2832 | 2832 | ||
2833 | binder_unlock(__func__); | 2833 | binder_unlock(__func__); |
2834 | 2834 | ||
2835 | if (binder_debugfs_dir_entry_proc) { | 2835 | if (binder_debugfs_dir_entry_proc) { |
2836 | char strbuf[11]; | 2836 | char strbuf[11]; |
2837 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); | 2837 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); |
2838 | proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, | 2838 | proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, |
2839 | binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); | 2839 | binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); |
2840 | } | 2840 | } |
2841 | 2841 | ||
2842 | return 0; | 2842 | return 0; |
2843 | } | 2843 | } |
2844 | 2844 | ||
2845 | static int binder_flush(struct file *filp, fl_owner_t id) | 2845 | static int binder_flush(struct file *filp, fl_owner_t id) |
2846 | { | 2846 | { |
2847 | struct binder_proc *proc = filp->private_data; | 2847 | struct binder_proc *proc = filp->private_data; |
2848 | 2848 | ||
2849 | binder_defer_work(proc, BINDER_DEFERRED_FLUSH); | 2849 | binder_defer_work(proc, BINDER_DEFERRED_FLUSH); |
2850 | 2850 | ||
2851 | return 0; | 2851 | return 0; |
2852 | } | 2852 | } |
2853 | 2853 | ||
2854 | static void binder_deferred_flush(struct binder_proc *proc) | 2854 | static void binder_deferred_flush(struct binder_proc *proc) |
2855 | { | 2855 | { |
2856 | struct rb_node *n; | 2856 | struct rb_node *n; |
2857 | int wake_count = 0; | 2857 | int wake_count = 0; |
2858 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { | 2858 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { |
2859 | struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); | 2859 | struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); |
2860 | thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; | 2860 | thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; |
2861 | if (thread->looper & BINDER_LOOPER_STATE_WAITING) { | 2861 | if (thread->looper & BINDER_LOOPER_STATE_WAITING) { |
2862 | wake_up_interruptible(&thread->wait); | 2862 | wake_up_interruptible(&thread->wait); |
2863 | wake_count++; | 2863 | wake_count++; |
2864 | } | 2864 | } |
2865 | } | 2865 | } |
2866 | wake_up_interruptible_all(&proc->wait); | 2866 | wake_up_interruptible_all(&proc->wait); |
2867 | 2867 | ||
2868 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, | 2868 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, |
2869 | "binder_flush: %d woke %d threads\n", proc->pid, | 2869 | "binder_flush: %d woke %d threads\n", proc->pid, |
2870 | wake_count); | 2870 | wake_count); |
2871 | } | 2871 | } |
2872 | 2872 | ||
2873 | static int binder_release(struct inode *nodp, struct file *filp) | 2873 | static int binder_release(struct inode *nodp, struct file *filp) |
2874 | { | 2874 | { |
2875 | struct binder_proc *proc = filp->private_data; | 2875 | struct binder_proc *proc = filp->private_data; |
2876 | debugfs_remove(proc->debugfs_entry); | 2876 | debugfs_remove(proc->debugfs_entry); |
2877 | binder_defer_work(proc, BINDER_DEFERRED_RELEASE); | 2877 | binder_defer_work(proc, BINDER_DEFERRED_RELEASE); |
2878 | 2878 | ||
2879 | return 0; | 2879 | return 0; |
2880 | } | 2880 | } |
2881 | 2881 | ||
2882 | static int binder_node_release(struct binder_node *node, int refs) | 2882 | static int binder_node_release(struct binder_node *node, int refs) |
2883 | { | 2883 | { |
2884 | struct binder_ref *ref; | 2884 | struct binder_ref *ref; |
2885 | int death = 0; | 2885 | int death = 0; |
2886 | 2886 | ||
2887 | list_del_init(&node->work.entry); | 2887 | list_del_init(&node->work.entry); |
2888 | binder_release_work(&node->async_todo); | 2888 | binder_release_work(&node->async_todo); |
2889 | 2889 | ||
2890 | if (hlist_empty(&node->refs)) { | 2890 | if (hlist_empty(&node->refs)) { |
2891 | kfree(node); | 2891 | kfree(node); |
2892 | binder_stats_deleted(BINDER_STAT_NODE); | 2892 | binder_stats_deleted(BINDER_STAT_NODE); |
2893 | 2893 | ||
2894 | return refs; | 2894 | return refs; |
2895 | } | 2895 | } |
2896 | 2896 | ||
2897 | node->proc = NULL; | 2897 | node->proc = NULL; |
2898 | node->local_strong_refs = 0; | 2898 | node->local_strong_refs = 0; |
2899 | node->local_weak_refs = 0; | 2899 | node->local_weak_refs = 0; |
2900 | hlist_add_head(&node->dead_node, &binder_dead_nodes); | 2900 | hlist_add_head(&node->dead_node, &binder_dead_nodes); |
2901 | 2901 | ||
2902 | hlist_for_each_entry(ref, &node->refs, node_entry) { | 2902 | hlist_for_each_entry(ref, &node->refs, node_entry) { |
2903 | refs++; | 2903 | refs++; |
2904 | 2904 | ||
2905 | if (!ref->death) | 2905 | if (!ref->death) |
2906 | goto out; | 2906 | goto out; |
2907 | 2907 | ||
2908 | death++; | 2908 | death++; |
2909 | 2909 | ||
2910 | if (list_empty(&ref->death->work.entry)) { | 2910 | if (list_empty(&ref->death->work.entry)) { |
2911 | ref->death->work.type = BINDER_WORK_DEAD_BINDER; | 2911 | ref->death->work.type = BINDER_WORK_DEAD_BINDER; |
2912 | list_add_tail(&ref->death->work.entry, | 2912 | list_add_tail(&ref->death->work.entry, |
2913 | &ref->proc->todo); | 2913 | &ref->proc->todo); |
2914 | wake_up_interruptible(&ref->proc->wait); | 2914 | wake_up_interruptible(&ref->proc->wait); |
2915 | } else | 2915 | } else |
2916 | BUG(); | 2916 | BUG(); |
2917 | } | 2917 | } |
2918 | 2918 | ||
2919 | out: | 2919 | out: |
2920 | binder_debug(BINDER_DEBUG_DEAD_BINDER, | 2920 | binder_debug(BINDER_DEBUG_DEAD_BINDER, |
2921 | "node %d now dead, refs %d, death %d\n", | 2921 | "node %d now dead, refs %d, death %d\n", |
2922 | node->debug_id, refs, death); | 2922 | node->debug_id, refs, death); |
2923 | 2923 | ||
2924 | return refs; | 2924 | return refs; |
2925 | } | 2925 | } |
2926 | 2926 | ||
/*
 * binder_deferred_release() - final teardown of a binder_proc.
 *
 * Runs from the deferred work path after the last file reference is
 * gone (proc->vma and proc->files must already be NULL).  Frees, in
 * order: threads (and their transactions), nodes (dispatching death
 * notifications via binder_node_release()), outgoing refs, pending
 * work, allocated buffers, the page array and vmalloc region, and
 * finally the task reference and the proc itself.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers,
		active_transactions, page_count;

	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	/* If this proc was the context manager, the slot is now vacant. */
	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		binder_context_mgr_node = NULL;
	}

	/* Free every thread; each may tear down in-flight transactions. */
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	/* Release all nodes, queueing death notifications to holders. */
	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	/* Drop every reference this proc held on other procs' nodes. */
	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	/* Free buffers still allocated; detach any owning transaction. */
	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		t = buffer->transaction;
		if (t) {
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	/* Unmap and free any pages still backing the buffer area. */
	page_count = 0;
	if (proc->pages) {
		int i;

		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %p not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	/* Drop the task reference taken in binder_open(). */
	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}
3031 | 3031 | ||
/*
 * binder_deferred_func() - workqueue handler draining the deferred list.
 *
 * Pops one proc at a time off binder_deferred_list (under
 * binder_deferred_lock), atomically claiming its pending work bits,
 * and services them under the global binder lock.  put_files_struct()
 * is deliberately called after dropping the binder lock.  Loops until
 * the list is empty.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;
	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			/* Claim all pending work bits for this proc. */
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		/* Detach the files pointer; released after unlocking. */
		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3072 | 3072 | ||
/*
 * binder_defer_work() - queue deferred work bits for @proc.
 *
 * ORs @defer into the proc's pending mask and, if the proc is not
 * already on binder_deferred_list, links it and schedules
 * binder_deferred_work.  All under binder_deferred_lock so bits set by
 * concurrent callers are coalesced into a single list entry.
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
3085 | 3085 | ||
3086 | static void print_binder_transaction(struct seq_file *m, const char *prefix, | 3086 | static void print_binder_transaction(struct seq_file *m, const char *prefix, |
3087 | struct binder_transaction *t) | 3087 | struct binder_transaction *t) |
3088 | { | 3088 | { |
3089 | seq_printf(m, | 3089 | seq_printf(m, |
3090 | "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", | 3090 | "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", |
3091 | prefix, t->debug_id, t, | 3091 | prefix, t->debug_id, t, |
3092 | t->from ? t->from->proc->pid : 0, | 3092 | t->from ? t->from->proc->pid : 0, |
3093 | t->from ? t->from->pid : 0, | 3093 | t->from ? t->from->pid : 0, |
3094 | t->to_proc ? t->to_proc->pid : 0, | 3094 | t->to_proc ? t->to_proc->pid : 0, |
3095 | t->to_thread ? t->to_thread->pid : 0, | 3095 | t->to_thread ? t->to_thread->pid : 0, |
3096 | t->code, t->flags, t->priority, t->need_reply); | 3096 | t->code, t->flags, t->priority, t->need_reply); |
3097 | if (t->buffer == NULL) { | 3097 | if (t->buffer == NULL) { |
3098 | seq_puts(m, " buffer free\n"); | 3098 | seq_puts(m, " buffer free\n"); |
3099 | return; | 3099 | return; |
3100 | } | 3100 | } |
3101 | if (t->buffer->target_node) | 3101 | if (t->buffer->target_node) |
3102 | seq_printf(m, " node %d", | 3102 | seq_printf(m, " node %d", |
3103 | t->buffer->target_node->debug_id); | 3103 | t->buffer->target_node->debug_id); |
3104 | seq_printf(m, " size %zd:%zd data %p\n", | 3104 | seq_printf(m, " size %zd:%zd data %p\n", |
3105 | t->buffer->data_size, t->buffer->offsets_size, | 3105 | t->buffer->data_size, t->buffer->offsets_size, |
3106 | t->buffer->data); | 3106 | t->buffer->data); |
3107 | } | 3107 | } |
3108 | 3108 | ||
3109 | static void print_binder_buffer(struct seq_file *m, const char *prefix, | 3109 | static void print_binder_buffer(struct seq_file *m, const char *prefix, |
3110 | struct binder_buffer *buffer) | 3110 | struct binder_buffer *buffer) |
3111 | { | 3111 | { |
3112 | seq_printf(m, "%s %d: %p size %zd:%zd %s\n", | 3112 | seq_printf(m, "%s %d: %p size %zd:%zd %s\n", |
3113 | prefix, buffer->debug_id, buffer->data, | 3113 | prefix, buffer->debug_id, buffer->data, |
3114 | buffer->data_size, buffer->offsets_size, | 3114 | buffer->data_size, buffer->offsets_size, |
3115 | buffer->transaction ? "active" : "delivered"); | 3115 | buffer->transaction ? "active" : "delivered"); |
3116 | } | 3116 | } |
3117 | 3117 | ||
3118 | static void print_binder_work(struct seq_file *m, const char *prefix, | 3118 | static void print_binder_work(struct seq_file *m, const char *prefix, |
3119 | const char *transaction_prefix, | 3119 | const char *transaction_prefix, |
3120 | struct binder_work *w) | 3120 | struct binder_work *w) |
3121 | { | 3121 | { |
3122 | struct binder_node *node; | 3122 | struct binder_node *node; |
3123 | struct binder_transaction *t; | 3123 | struct binder_transaction *t; |
3124 | 3124 | ||
3125 | switch (w->type) { | 3125 | switch (w->type) { |
3126 | case BINDER_WORK_TRANSACTION: | 3126 | case BINDER_WORK_TRANSACTION: |
3127 | t = container_of(w, struct binder_transaction, work); | 3127 | t = container_of(w, struct binder_transaction, work); |
3128 | print_binder_transaction(m, transaction_prefix, t); | 3128 | print_binder_transaction(m, transaction_prefix, t); |
3129 | break; | 3129 | break; |
3130 | case BINDER_WORK_TRANSACTION_COMPLETE: | 3130 | case BINDER_WORK_TRANSACTION_COMPLETE: |
3131 | seq_printf(m, "%stransaction complete\n", prefix); | 3131 | seq_printf(m, "%stransaction complete\n", prefix); |
3132 | break; | 3132 | break; |
3133 | case BINDER_WORK_NODE: | 3133 | case BINDER_WORK_NODE: |
3134 | node = container_of(w, struct binder_node, work); | 3134 | node = container_of(w, struct binder_node, work); |
3135 | seq_printf(m, "%snode work %d: u%p c%p\n", | 3135 | seq_printf(m, "%snode work %d: u%p c%p\n", |
3136 | prefix, node->debug_id, node->ptr, node->cookie); | 3136 | prefix, node->debug_id, node->ptr, node->cookie); |
3137 | break; | 3137 | break; |
3138 | case BINDER_WORK_DEAD_BINDER: | 3138 | case BINDER_WORK_DEAD_BINDER: |
3139 | seq_printf(m, "%shas dead binder\n", prefix); | 3139 | seq_printf(m, "%shas dead binder\n", prefix); |
3140 | break; | 3140 | break; |
3141 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: | 3141 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: |
3142 | seq_printf(m, "%shas cleared dead binder\n", prefix); | 3142 | seq_printf(m, "%shas cleared dead binder\n", prefix); |
3143 | break; | 3143 | break; |
3144 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: | 3144 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: |
3145 | seq_printf(m, "%shas cleared death notification\n", prefix); | 3145 | seq_printf(m, "%shas cleared death notification\n", prefix); |
3146 | break; | 3146 | break; |
3147 | default: | 3147 | default: |
3148 | seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); | 3148 | seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); |
3149 | break; | 3149 | break; |
3150 | } | 3150 | } |
3151 | } | 3151 | } |
3152 | 3152 | ||
3153 | static void print_binder_thread(struct seq_file *m, | 3153 | static void print_binder_thread(struct seq_file *m, |
3154 | struct binder_thread *thread, | 3154 | struct binder_thread *thread, |
3155 | int print_always) | 3155 | int print_always) |
3156 | { | 3156 | { |
3157 | struct binder_transaction *t; | 3157 | struct binder_transaction *t; |
3158 | struct binder_work *w; | 3158 | struct binder_work *w; |
3159 | size_t start_pos = m->count; | 3159 | size_t start_pos = m->count; |
3160 | size_t header_pos; | 3160 | size_t header_pos; |
3161 | 3161 | ||
3162 | seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); | 3162 | seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); |
3163 | header_pos = m->count; | 3163 | header_pos = m->count; |
3164 | t = thread->transaction_stack; | 3164 | t = thread->transaction_stack; |
3165 | while (t) { | 3165 | while (t) { |
3166 | if (t->from == thread) { | 3166 | if (t->from == thread) { |
3167 | print_binder_transaction(m, | 3167 | print_binder_transaction(m, |
3168 | " outgoing transaction", t); | 3168 | " outgoing transaction", t); |
3169 | t = t->from_parent; | 3169 | t = t->from_parent; |
3170 | } else if (t->to_thread == thread) { | 3170 | } else if (t->to_thread == thread) { |
3171 | print_binder_transaction(m, | 3171 | print_binder_transaction(m, |
3172 | " incoming transaction", t); | 3172 | " incoming transaction", t); |
3173 | t = t->to_parent; | 3173 | t = t->to_parent; |
3174 | } else { | 3174 | } else { |
3175 | print_binder_transaction(m, " bad transaction", t); | 3175 | print_binder_transaction(m, " bad transaction", t); |
3176 | t = NULL; | 3176 | t = NULL; |
3177 | } | 3177 | } |
3178 | } | 3178 | } |
3179 | list_for_each_entry(w, &thread->todo, entry) { | 3179 | list_for_each_entry(w, &thread->todo, entry) { |
3180 | print_binder_work(m, " ", " pending transaction", w); | 3180 | print_binder_work(m, " ", " pending transaction", w); |
3181 | } | 3181 | } |
3182 | if (!print_always && m->count == header_pos) | 3182 | if (!print_always && m->count == header_pos) |
3183 | m->count = start_pos; | 3183 | m->count = start_pos; |
3184 | } | 3184 | } |
3185 | 3185 | ||
3186 | static void print_binder_node(struct seq_file *m, struct binder_node *node) | 3186 | static void print_binder_node(struct seq_file *m, struct binder_node *node) |
3187 | { | 3187 | { |
3188 | struct binder_ref *ref; | 3188 | struct binder_ref *ref; |
3189 | struct binder_work *w; | 3189 | struct binder_work *w; |
3190 | int count; | 3190 | int count; |
3191 | 3191 | ||
3192 | count = 0; | 3192 | count = 0; |
3193 | hlist_for_each_entry(ref, &node->refs, node_entry) | 3193 | hlist_for_each_entry(ref, &node->refs, node_entry) |
3194 | count++; | 3194 | count++; |
3195 | 3195 | ||
3196 | seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", | 3196 | seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", |
3197 | node->debug_id, node->ptr, node->cookie, | 3197 | node->debug_id, node->ptr, node->cookie, |
3198 | node->has_strong_ref, node->has_weak_ref, | 3198 | node->has_strong_ref, node->has_weak_ref, |
3199 | node->local_strong_refs, node->local_weak_refs, | 3199 | node->local_strong_refs, node->local_weak_refs, |
3200 | node->internal_strong_refs, count); | 3200 | node->internal_strong_refs, count); |
3201 | if (count) { | 3201 | if (count) { |
3202 | seq_puts(m, " proc"); | 3202 | seq_puts(m, " proc"); |
3203 | hlist_for_each_entry(ref, &node->refs, node_entry) | 3203 | hlist_for_each_entry(ref, &node->refs, node_entry) |
3204 | seq_printf(m, " %d", ref->proc->pid); | 3204 | seq_printf(m, " %d", ref->proc->pid); |
3205 | } | 3205 | } |
3206 | seq_puts(m, "\n"); | 3206 | seq_puts(m, "\n"); |
3207 | list_for_each_entry(w, &node->async_todo, entry) | 3207 | list_for_each_entry(w, &node->async_todo, entry) |
3208 | print_binder_work(m, " ", | 3208 | print_binder_work(m, " ", |
3209 | " pending async transaction", w); | 3209 | " pending async transaction", w); |
3210 | } | 3210 | } |
3211 | 3211 | ||
3212 | static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) | 3212 | static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) |
3213 | { | 3213 | { |
3214 | seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", | 3214 | seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", |
3215 | ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", | 3215 | ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", |
3216 | ref->node->debug_id, ref->strong, ref->weak, ref->death); | 3216 | ref->node->debug_id, ref->strong, ref->weak, ref->death); |
3217 | } | 3217 | } |
3218 | 3218 | ||
3219 | static void print_binder_proc(struct seq_file *m, | 3219 | static void print_binder_proc(struct seq_file *m, |
3220 | struct binder_proc *proc, int print_all) | 3220 | struct binder_proc *proc, int print_all) |
3221 | { | 3221 | { |
3222 | struct binder_work *w; | 3222 | struct binder_work *w; |
3223 | struct rb_node *n; | 3223 | struct rb_node *n; |
3224 | size_t start_pos = m->count; | 3224 | size_t start_pos = m->count; |
3225 | size_t header_pos; | 3225 | size_t header_pos; |
3226 | 3226 | ||
3227 | seq_printf(m, "proc %d\n", proc->pid); | 3227 | seq_printf(m, "proc %d\n", proc->pid); |
3228 | header_pos = m->count; | 3228 | header_pos = m->count; |
3229 | 3229 | ||
3230 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) | 3230 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) |
3231 | print_binder_thread(m, rb_entry(n, struct binder_thread, | 3231 | print_binder_thread(m, rb_entry(n, struct binder_thread, |
3232 | rb_node), print_all); | 3232 | rb_node), print_all); |
3233 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { | 3233 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { |
3234 | struct binder_node *node = rb_entry(n, struct binder_node, | 3234 | struct binder_node *node = rb_entry(n, struct binder_node, |
3235 | rb_node); | 3235 | rb_node); |
3236 | if (print_all || node->has_async_transaction) | 3236 | if (print_all || node->has_async_transaction) |
3237 | print_binder_node(m, node); | 3237 | print_binder_node(m, node); |
3238 | } | 3238 | } |
3239 | if (print_all) { | 3239 | if (print_all) { |
3240 | for (n = rb_first(&proc->refs_by_desc); | 3240 | for (n = rb_first(&proc->refs_by_desc); |
3241 | n != NULL; | 3241 | n != NULL; |
3242 | n = rb_next(n)) | 3242 | n = rb_next(n)) |
3243 | print_binder_ref(m, rb_entry(n, struct binder_ref, | 3243 | print_binder_ref(m, rb_entry(n, struct binder_ref, |
3244 | rb_node_desc)); | 3244 | rb_node_desc)); |
3245 | } | 3245 | } |
3246 | for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) | 3246 | for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) |
3247 | print_binder_buffer(m, " buffer", | 3247 | print_binder_buffer(m, " buffer", |
3248 | rb_entry(n, struct binder_buffer, rb_node)); | 3248 | rb_entry(n, struct binder_buffer, rb_node)); |
3249 | list_for_each_entry(w, &proc->todo, entry) | 3249 | list_for_each_entry(w, &proc->todo, entry) |
3250 | print_binder_work(m, " ", " pending transaction", w); | 3250 | print_binder_work(m, " ", " pending transaction", w); |
3251 | list_for_each_entry(w, &proc->delivered_death, entry) { | 3251 | list_for_each_entry(w, &proc->delivered_death, entry) { |
3252 | seq_puts(m, " has delivered dead binder\n"); | 3252 | seq_puts(m, " has delivered dead binder\n"); |
3253 | break; | 3253 | break; |
3254 | } | 3254 | } |
3255 | if (!print_all && m->count == header_pos) | 3255 | if (!print_all && m->count == header_pos) |
3256 | m->count = start_pos; | 3256 | m->count = start_pos; |
3257 | } | 3257 | } |
3258 | 3258 | ||
3259 | static const char * const binder_return_strings[] = { | 3259 | static const char * const binder_return_strings[] = { |
3260 | "BR_ERROR", | 3260 | "BR_ERROR", |
3261 | "BR_OK", | 3261 | "BR_OK", |
3262 | "BR_TRANSACTION", | 3262 | "BR_TRANSACTION", |
3263 | "BR_REPLY", | 3263 | "BR_REPLY", |
3264 | "BR_ACQUIRE_RESULT", | 3264 | "BR_ACQUIRE_RESULT", |
3265 | "BR_DEAD_REPLY", | 3265 | "BR_DEAD_REPLY", |
3266 | "BR_TRANSACTION_COMPLETE", | 3266 | "BR_TRANSACTION_COMPLETE", |
3267 | "BR_INCREFS", | 3267 | "BR_INCREFS", |
3268 | "BR_ACQUIRE", | 3268 | "BR_ACQUIRE", |
3269 | "BR_RELEASE", | 3269 | "BR_RELEASE", |
3270 | "BR_DECREFS", | 3270 | "BR_DECREFS", |
3271 | "BR_ATTEMPT_ACQUIRE", | 3271 | "BR_ATTEMPT_ACQUIRE", |
3272 | "BR_NOOP", | 3272 | "BR_NOOP", |
3273 | "BR_SPAWN_LOOPER", | 3273 | "BR_SPAWN_LOOPER", |
3274 | "BR_FINISHED", | 3274 | "BR_FINISHED", |
3275 | "BR_DEAD_BINDER", | 3275 | "BR_DEAD_BINDER", |
3276 | "BR_CLEAR_DEATH_NOTIFICATION_DONE", | 3276 | "BR_CLEAR_DEATH_NOTIFICATION_DONE", |
3277 | "BR_FAILED_REPLY" | 3277 | "BR_FAILED_REPLY" |
3278 | }; | 3278 | }; |
3279 | 3279 | ||
3280 | static const char * const binder_command_strings[] = { | 3280 | static const char * const binder_command_strings[] = { |
3281 | "BC_TRANSACTION", | 3281 | "BC_TRANSACTION", |
3282 | "BC_REPLY", | 3282 | "BC_REPLY", |
3283 | "BC_ACQUIRE_RESULT", | 3283 | "BC_ACQUIRE_RESULT", |
3284 | "BC_FREE_BUFFER", | 3284 | "BC_FREE_BUFFER", |
3285 | "BC_INCREFS", | 3285 | "BC_INCREFS", |
3286 | "BC_ACQUIRE", | 3286 | "BC_ACQUIRE", |
3287 | "BC_RELEASE", | 3287 | "BC_RELEASE", |
3288 | "BC_DECREFS", | 3288 | "BC_DECREFS", |
3289 | "BC_INCREFS_DONE", | 3289 | "BC_INCREFS_DONE", |
3290 | "BC_ACQUIRE_DONE", | 3290 | "BC_ACQUIRE_DONE", |
3291 | "BC_ATTEMPT_ACQUIRE", | 3291 | "BC_ATTEMPT_ACQUIRE", |
3292 | "BC_REGISTER_LOOPER", | 3292 | "BC_REGISTER_LOOPER", |
3293 | "BC_ENTER_LOOPER", | 3293 | "BC_ENTER_LOOPER", |
3294 | "BC_EXIT_LOOPER", | 3294 | "BC_EXIT_LOOPER", |
3295 | "BC_REQUEST_DEATH_NOTIFICATION", | 3295 | "BC_REQUEST_DEATH_NOTIFICATION", |
3296 | "BC_CLEAR_DEATH_NOTIFICATION", | 3296 | "BC_CLEAR_DEATH_NOTIFICATION", |
3297 | "BC_DEAD_BINDER_DONE" | 3297 | "BC_DEAD_BINDER_DONE" |
3298 | }; | 3298 | }; |
3299 | 3299 | ||
3300 | static const char * const binder_objstat_strings[] = { | 3300 | static const char * const binder_objstat_strings[] = { |
3301 | "proc", | 3301 | "proc", |
3302 | "thread", | 3302 | "thread", |
3303 | "node", | 3303 | "node", |
3304 | "ref", | 3304 | "ref", |
3305 | "death", | 3305 | "death", |
3306 | "transaction", | 3306 | "transaction", |
3307 | "transaction_complete" | 3307 | "transaction_complete" |
3308 | }; | 3308 | }; |
3309 | 3309 | ||
3310 | static void print_binder_stats(struct seq_file *m, const char *prefix, | 3310 | static void print_binder_stats(struct seq_file *m, const char *prefix, |
3311 | struct binder_stats *stats) | 3311 | struct binder_stats *stats) |
3312 | { | 3312 | { |
3313 | int i; | 3313 | int i; |
3314 | 3314 | ||
3315 | BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != | 3315 | BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != |
3316 | ARRAY_SIZE(binder_command_strings)); | 3316 | ARRAY_SIZE(binder_command_strings)); |
3317 | for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { | 3317 | for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { |
3318 | if (stats->bc[i]) | 3318 | if (stats->bc[i]) |
3319 | seq_printf(m, "%s%s: %d\n", prefix, | 3319 | seq_printf(m, "%s%s: %d\n", prefix, |
3320 | binder_command_strings[i], stats->bc[i]); | 3320 | binder_command_strings[i], stats->bc[i]); |
3321 | } | 3321 | } |
3322 | 3322 | ||
3323 | BUILD_BUG_ON(ARRAY_SIZE(stats->br) != | 3323 | BUILD_BUG_ON(ARRAY_SIZE(stats->br) != |
3324 | ARRAY_SIZE(binder_return_strings)); | 3324 | ARRAY_SIZE(binder_return_strings)); |
3325 | for (i = 0; i < ARRAY_SIZE(stats->br); i++) { | 3325 | for (i = 0; i < ARRAY_SIZE(stats->br); i++) { |
3326 | if (stats->br[i]) | 3326 | if (stats->br[i]) |
3327 | seq_printf(m, "%s%s: %d\n", prefix, | 3327 | seq_printf(m, "%s%s: %d\n", prefix, |
3328 | binder_return_strings[i], stats->br[i]); | 3328 | binder_return_strings[i], stats->br[i]); |
3329 | } | 3329 | } |
3330 | 3330 | ||
3331 | BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != | 3331 | BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != |
3332 | ARRAY_SIZE(binder_objstat_strings)); | 3332 | ARRAY_SIZE(binder_objstat_strings)); |
3333 | BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != | 3333 | BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != |
3334 | ARRAY_SIZE(stats->obj_deleted)); | 3334 | ARRAY_SIZE(stats->obj_deleted)); |
3335 | for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { | 3335 | for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { |
3336 | if (stats->obj_created[i] || stats->obj_deleted[i]) | 3336 | if (stats->obj_created[i] || stats->obj_deleted[i]) |
3337 | seq_printf(m, "%s%s: active %d total %d\n", prefix, | 3337 | seq_printf(m, "%s%s: active %d total %d\n", prefix, |
3338 | binder_objstat_strings[i], | 3338 | binder_objstat_strings[i], |
3339 | stats->obj_created[i] - stats->obj_deleted[i], | 3339 | stats->obj_created[i] - stats->obj_deleted[i], |
3340 | stats->obj_created[i]); | 3340 | stats->obj_created[i]); |
3341 | } | 3341 | } |
3342 | } | 3342 | } |
3343 | 3343 | ||
3344 | static void print_binder_proc_stats(struct seq_file *m, | 3344 | static void print_binder_proc_stats(struct seq_file *m, |
3345 | struct binder_proc *proc) | 3345 | struct binder_proc *proc) |
3346 | { | 3346 | { |
3347 | struct binder_work *w; | 3347 | struct binder_work *w; |
3348 | struct rb_node *n; | 3348 | struct rb_node *n; |
3349 | int count, strong, weak; | 3349 | int count, strong, weak; |
3350 | 3350 | ||
3351 | seq_printf(m, "proc %d\n", proc->pid); | 3351 | seq_printf(m, "proc %d\n", proc->pid); |
3352 | count = 0; | 3352 | count = 0; |
3353 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) | 3353 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) |
3354 | count++; | 3354 | count++; |
3355 | seq_printf(m, " threads: %d\n", count); | 3355 | seq_printf(m, " threads: %d\n", count); |
3356 | seq_printf(m, " requested threads: %d+%d/%d\n" | 3356 | seq_printf(m, " requested threads: %d+%d/%d\n" |
3357 | " ready threads %d\n" | 3357 | " ready threads %d\n" |
3358 | " free async space %zd\n", proc->requested_threads, | 3358 | " free async space %zd\n", proc->requested_threads, |
3359 | proc->requested_threads_started, proc->max_threads, | 3359 | proc->requested_threads_started, proc->max_threads, |
3360 | proc->ready_threads, proc->free_async_space); | 3360 | proc->ready_threads, proc->free_async_space); |
3361 | count = 0; | 3361 | count = 0; |
3362 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) | 3362 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) |
3363 | count++; | 3363 | count++; |
3364 | seq_printf(m, " nodes: %d\n", count); | 3364 | seq_printf(m, " nodes: %d\n", count); |
3365 | count = 0; | 3365 | count = 0; |
3366 | strong = 0; | 3366 | strong = 0; |
3367 | weak = 0; | 3367 | weak = 0; |
3368 | for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { | 3368 | for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { |
3369 | struct binder_ref *ref = rb_entry(n, struct binder_ref, | 3369 | struct binder_ref *ref = rb_entry(n, struct binder_ref, |
3370 | rb_node_desc); | 3370 | rb_node_desc); |
3371 | count++; | 3371 | count++; |
3372 | strong += ref->strong; | 3372 | strong += ref->strong; |
3373 | weak += ref->weak; | 3373 | weak += ref->weak; |
3374 | } | 3374 | } |
3375 | seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); | 3375 | seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); |
3376 | 3376 | ||
3377 | count = 0; | 3377 | count = 0; |
3378 | for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) | 3378 | for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) |
3379 | count++; | 3379 | count++; |
3380 | seq_printf(m, " buffers: %d\n", count); | 3380 | seq_printf(m, " buffers: %d\n", count); |
3381 | 3381 | ||
3382 | count = 0; | 3382 | count = 0; |
3383 | list_for_each_entry(w, &proc->todo, entry) { | 3383 | list_for_each_entry(w, &proc->todo, entry) { |
3384 | switch (w->type) { | 3384 | switch (w->type) { |
3385 | case BINDER_WORK_TRANSACTION: | 3385 | case BINDER_WORK_TRANSACTION: |
3386 | count++; | 3386 | count++; |
3387 | break; | 3387 | break; |
3388 | default: | 3388 | default: |
3389 | break; | 3389 | break; |
3390 | } | 3390 | } |
3391 | } | 3391 | } |
3392 | seq_printf(m, " pending transactions: %d\n", count); | 3392 | seq_printf(m, " pending transactions: %d\n", count); |
3393 | 3393 | ||
3394 | print_binder_stats(m, " ", &proc->stats); | 3394 | print_binder_stats(m, " ", &proc->stats); |
3395 | } | 3395 | } |
3396 | 3396 | ||
3397 | 3397 | ||
3398 | static int binder_state_show(struct seq_file *m, void *unused) | 3398 | static int binder_state_show(struct seq_file *m, void *unused) |
3399 | { | 3399 | { |
3400 | struct binder_proc *proc; | 3400 | struct binder_proc *proc; |
3401 | struct binder_node *node; | 3401 | struct binder_node *node; |
3402 | int do_lock = !binder_debug_no_lock; | 3402 | int do_lock = !binder_debug_no_lock; |
3403 | 3403 | ||
3404 | if (do_lock) | 3404 | if (do_lock) |
3405 | binder_lock(__func__); | 3405 | binder_lock(__func__); |
3406 | 3406 | ||
3407 | seq_puts(m, "binder state:\n"); | 3407 | seq_puts(m, "binder state:\n"); |
3408 | 3408 | ||
3409 | if (!hlist_empty(&binder_dead_nodes)) | 3409 | if (!hlist_empty(&binder_dead_nodes)) |
3410 | seq_puts(m, "dead nodes:\n"); | 3410 | seq_puts(m, "dead nodes:\n"); |
3411 | hlist_for_each_entry(node, &binder_dead_nodes, dead_node) | 3411 | hlist_for_each_entry(node, &binder_dead_nodes, dead_node) |
3412 | print_binder_node(m, node); | 3412 | print_binder_node(m, node); |
3413 | 3413 | ||
3414 | hlist_for_each_entry(proc, &binder_procs, proc_node) | 3414 | hlist_for_each_entry(proc, &binder_procs, proc_node) |
3415 | print_binder_proc(m, proc, 1); | 3415 | print_binder_proc(m, proc, 1); |
3416 | if (do_lock) | 3416 | if (do_lock) |
3417 | binder_unlock(__func__); | 3417 | binder_unlock(__func__); |
3418 | return 0; | 3418 | return 0; |
3419 | } | 3419 | } |
3420 | 3420 | ||
3421 | static int binder_stats_show(struct seq_file *m, void *unused) | 3421 | static int binder_stats_show(struct seq_file *m, void *unused) |
3422 | { | 3422 | { |
3423 | struct binder_proc *proc; | 3423 | struct binder_proc *proc; |
3424 | int do_lock = !binder_debug_no_lock; | 3424 | int do_lock = !binder_debug_no_lock; |
3425 | 3425 | ||
3426 | if (do_lock) | 3426 | if (do_lock) |
3427 | binder_lock(__func__); | 3427 | binder_lock(__func__); |
3428 | 3428 | ||
3429 | seq_puts(m, "binder stats:\n"); | 3429 | seq_puts(m, "binder stats:\n"); |
3430 | 3430 | ||
3431 | print_binder_stats(m, "", &binder_stats); | 3431 | print_binder_stats(m, "", &binder_stats); |
3432 | 3432 | ||
3433 | hlist_for_each_entry(proc, &binder_procs, proc_node) | 3433 | hlist_for_each_entry(proc, &binder_procs, proc_node) |
3434 | print_binder_proc_stats(m, proc); | 3434 | print_binder_proc_stats(m, proc); |
3435 | if (do_lock) | 3435 | if (do_lock) |
3436 | binder_unlock(__func__); | 3436 | binder_unlock(__func__); |
3437 | return 0; | 3437 | return 0; |
3438 | } | 3438 | } |
3439 | 3439 | ||
3440 | static int binder_transactions_show(struct seq_file *m, void *unused) | 3440 | static int binder_transactions_show(struct seq_file *m, void *unused) |
3441 | { | 3441 | { |
3442 | struct binder_proc *proc; | 3442 | struct binder_proc *proc; |
3443 | int do_lock = !binder_debug_no_lock; | 3443 | int do_lock = !binder_debug_no_lock; |
3444 | 3444 | ||
3445 | if (do_lock) | 3445 | if (do_lock) |
3446 | binder_lock(__func__); | 3446 | binder_lock(__func__); |
3447 | 3447 | ||
3448 | seq_puts(m, "binder transactions:\n"); | 3448 | seq_puts(m, "binder transactions:\n"); |
3449 | hlist_for_each_entry(proc, &binder_procs, proc_node) | 3449 | hlist_for_each_entry(proc, &binder_procs, proc_node) |
3450 | print_binder_proc(m, proc, 0); | 3450 | print_binder_proc(m, proc, 0); |
3451 | if (do_lock) | 3451 | if (do_lock) |
3452 | binder_unlock(__func__); | 3452 | binder_unlock(__func__); |
3453 | return 0; | 3453 | return 0; |
3454 | } | 3454 | } |
3455 | 3455 | ||
3456 | static int binder_proc_show(struct seq_file *m, void *unused) | 3456 | static int binder_proc_show(struct seq_file *m, void *unused) |
3457 | { | 3457 | { |
3458 | struct binder_proc *proc = m->private; | 3458 | struct binder_proc *proc = m->private; |
3459 | int do_lock = !binder_debug_no_lock; | 3459 | int do_lock = !binder_debug_no_lock; |
3460 | 3460 | ||
3461 | if (do_lock) | 3461 | if (do_lock) |
3462 | binder_lock(__func__); | 3462 | binder_lock(__func__); |
3463 | seq_puts(m, "binder proc state:\n"); | 3463 | seq_puts(m, "binder proc state:\n"); |
3464 | print_binder_proc(m, proc, 1); | 3464 | print_binder_proc(m, proc, 1); |
3465 | if (do_lock) | 3465 | if (do_lock) |
3466 | binder_unlock(__func__); | 3466 | binder_unlock(__func__); |
3467 | return 0; | 3467 | return 0; |
3468 | } | 3468 | } |
3469 | 3469 | ||
3470 | static void print_binder_transaction_log_entry(struct seq_file *m, | 3470 | static void print_binder_transaction_log_entry(struct seq_file *m, |
3471 | struct binder_transaction_log_entry *e) | 3471 | struct binder_transaction_log_entry *e) |
3472 | { | 3472 | { |
3473 | seq_printf(m, | 3473 | seq_printf(m, |
3474 | "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", | 3474 | "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", |
3475 | e->debug_id, (e->call_type == 2) ? "reply" : | 3475 | e->debug_id, (e->call_type == 2) ? "reply" : |
3476 | ((e->call_type == 1) ? "async" : "call "), e->from_proc, | 3476 | ((e->call_type == 1) ? "async" : "call "), e->from_proc, |
3477 | e->from_thread, e->to_proc, e->to_thread, e->to_node, | 3477 | e->from_thread, e->to_proc, e->to_thread, e->to_node, |
3478 | e->target_handle, e->data_size, e->offsets_size); | 3478 | e->target_handle, e->data_size, e->offsets_size); |
3479 | } | 3479 | } |
3480 | 3480 | ||
3481 | static int binder_transaction_log_show(struct seq_file *m, void *unused) | 3481 | static int binder_transaction_log_show(struct seq_file *m, void *unused) |
3482 | { | 3482 | { |
3483 | struct binder_transaction_log *log = m->private; | 3483 | struct binder_transaction_log *log = m->private; |
3484 | int i; | 3484 | int i; |
3485 | 3485 | ||
3486 | if (log->full) { | 3486 | if (log->full) { |
3487 | for (i = log->next; i < ARRAY_SIZE(log->entry); i++) | 3487 | for (i = log->next; i < ARRAY_SIZE(log->entry); i++) |
3488 | print_binder_transaction_log_entry(m, &log->entry[i]); | 3488 | print_binder_transaction_log_entry(m, &log->entry[i]); |
3489 | } | 3489 | } |
3490 | for (i = 0; i < log->next; i++) | 3490 | for (i = 0; i < log->next; i++) |
3491 | print_binder_transaction_log_entry(m, &log->entry[i]); | 3491 | print_binder_transaction_log_entry(m, &log->entry[i]); |
3492 | return 0; | 3492 | return 0; |
3493 | } | 3493 | } |
3494 | 3494 | ||
3495 | static const struct file_operations binder_fops = { | 3495 | static const struct file_operations binder_fops = { |
3496 | .owner = THIS_MODULE, | 3496 | .owner = THIS_MODULE, |
3497 | .poll = binder_poll, | 3497 | .poll = binder_poll, |
3498 | .unlocked_ioctl = binder_ioctl, | 3498 | .unlocked_ioctl = binder_ioctl, |
3499 | .mmap = binder_mmap, | 3499 | .mmap = binder_mmap, |
3500 | .open = binder_open, | 3500 | .open = binder_open, |
3501 | .flush = binder_flush, | 3501 | .flush = binder_flush, |
3502 | .release = binder_release, | 3502 | .release = binder_release, |
3503 | }; | 3503 | }; |
3504 | 3504 | ||
3505 | static struct miscdevice binder_miscdev = { | 3505 | static struct miscdevice binder_miscdev = { |
3506 | .minor = MISC_DYNAMIC_MINOR, | 3506 | .minor = MISC_DYNAMIC_MINOR, |
3507 | .name = "binder", | 3507 | .name = "binder", |
3508 | .fops = &binder_fops | 3508 | .fops = &binder_fops |
3509 | }; | 3509 | }; |
3510 | 3510 | ||
3511 | BINDER_DEBUG_ENTRY(state); | 3511 | BINDER_DEBUG_ENTRY(state); |
3512 | BINDER_DEBUG_ENTRY(stats); | 3512 | BINDER_DEBUG_ENTRY(stats); |
3513 | BINDER_DEBUG_ENTRY(transactions); | 3513 | BINDER_DEBUG_ENTRY(transactions); |
3514 | BINDER_DEBUG_ENTRY(transaction_log); | 3514 | BINDER_DEBUG_ENTRY(transaction_log); |
3515 | 3515 | ||
3516 | static int __init binder_init(void) | 3516 | static int __init binder_init(void) |
3517 | { | 3517 | { |
3518 | int ret; | 3518 | int ret; |
3519 | 3519 | ||
3520 | binder_deferred_workqueue = create_singlethread_workqueue("binder"); | 3520 | binder_deferred_workqueue = create_singlethread_workqueue("binder"); |
3521 | if (!binder_deferred_workqueue) | 3521 | if (!binder_deferred_workqueue) |
3522 | return -ENOMEM; | 3522 | return -ENOMEM; |
3523 | 3523 | ||
3524 | binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); | 3524 | binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); |
3525 | if (binder_debugfs_dir_entry_root) | 3525 | if (binder_debugfs_dir_entry_root) |
3526 | binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", | 3526 | binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", |
3527 | binder_debugfs_dir_entry_root); | 3527 | binder_debugfs_dir_entry_root); |
3528 | ret = misc_register(&binder_miscdev); | 3528 | ret = misc_register(&binder_miscdev); |
3529 | if (binder_debugfs_dir_entry_root) { | 3529 | if (binder_debugfs_dir_entry_root) { |
3530 | debugfs_create_file("state", | 3530 | debugfs_create_file("state", |
3531 | S_IRUGO, | 3531 | S_IRUGO, |
3532 | binder_debugfs_dir_entry_root, | 3532 | binder_debugfs_dir_entry_root, |
3533 | NULL, | 3533 | NULL, |
3534 | &binder_state_fops); | 3534 | &binder_state_fops); |
3535 | debugfs_create_file("stats", | 3535 | debugfs_create_file("stats", |
3536 | S_IRUGO, | 3536 | S_IRUGO, |
3537 | binder_debugfs_dir_entry_root, | 3537 | binder_debugfs_dir_entry_root, |
3538 | NULL, | 3538 | NULL, |
3539 | &binder_stats_fops); | 3539 | &binder_stats_fops); |
3540 | debugfs_create_file("transactions", | 3540 | debugfs_create_file("transactions", |
3541 | S_IRUGO, | 3541 | S_IRUGO, |
3542 | binder_debugfs_dir_entry_root, | 3542 | binder_debugfs_dir_entry_root, |
3543 | NULL, | 3543 | NULL, |
3544 | &binder_transactions_fops); | 3544 | &binder_transactions_fops); |
3545 | debugfs_create_file("transaction_log", | 3545 | debugfs_create_file("transaction_log", |
3546 | S_IRUGO, | 3546 | S_IRUGO, |
3547 | binder_debugfs_dir_entry_root, | 3547 | binder_debugfs_dir_entry_root, |
3548 | &binder_transaction_log, | 3548 | &binder_transaction_log, |
3549 | &binder_transaction_log_fops); | 3549 | &binder_transaction_log_fops); |
3550 | debugfs_create_file("failed_transaction_log", | 3550 | debugfs_create_file("failed_transaction_log", |
3551 | S_IRUGO, | 3551 | S_IRUGO, |
3552 | binder_debugfs_dir_entry_root, | 3552 | binder_debugfs_dir_entry_root, |
3553 | &binder_transaction_log_failed, | 3553 | &binder_transaction_log_failed, |
3554 | &binder_transaction_log_fops); | 3554 | &binder_transaction_log_fops); |
3555 | } | 3555 | } |
3556 | return ret; | 3556 | return ret; |
3557 | } | 3557 | } |
3558 | 3558 | ||
3559 | device_initcall(binder_init); | 3559 | device_initcall(binder_init); |
3560 | 3560 | ||
3561 | #define CREATE_TRACE_POINTS | 3561 | #define CREATE_TRACE_POINTS |
3562 | #include "binder_trace.h" | 3562 | #include "binder_trace.h" |
3563 | 3563 | ||
3564 | MODULE_LICENSE("GPL v2"); | 3564 | MODULE_LICENSE("GPL v2"); |
3565 | 3565 |
drivers/staging/android/binder.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Google, Inc. | 2 | * Copyright (C) 2008 Google, Inc. |
3 | * | 3 | * |
4 | * Based on, but no longer compatible with, the original | 4 | * Based on, but no longer compatible with, the original |
5 | * OpenBinder.org binder driver interface, which is: | 5 | * OpenBinder.org binder driver interface, which is: |
6 | * | 6 | * |
7 | * Copyright (c) 2005 Palmsource, Inc. | 7 | * Copyright (c) 2005 Palmsource, Inc. |
8 | * | 8 | * |
9 | * This software is licensed under the terms of the GNU General Public | 9 | * This software is licensed under the terms of the GNU General Public |
10 | * License version 2, as published by the Free Software Foundation, and | 10 | * License version 2, as published by the Free Software Foundation, and |
11 | * may be copied, distributed, and modified under those terms. | 11 | * may be copied, distributed, and modified under those terms. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifndef _LINUX_BINDER_H | 20 | #ifndef _LINUX_BINDER_H |
21 | #define _LINUX_BINDER_H | 21 | #define _LINUX_BINDER_H |
22 | 22 | ||
23 | #include <linux/ioctl.h> | 23 | #include <linux/ioctl.h> |
24 | 24 | ||
25 | #define B_PACK_CHARS(c1, c2, c3, c4) \ | 25 | #define B_PACK_CHARS(c1, c2, c3, c4) \ |
26 | ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) | 26 | ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) |
27 | #define B_TYPE_LARGE 0x85 | 27 | #define B_TYPE_LARGE 0x85 |
28 | 28 | ||
29 | enum { | 29 | enum { |
30 | BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), | 30 | BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), |
31 | BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), | 31 | BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), |
32 | BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), | 32 | BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), |
33 | BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), | 33 | BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), |
34 | BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), | 34 | BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), |
35 | }; | 35 | }; |
36 | 36 | ||
37 | enum { | 37 | enum { |
38 | FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, | 38 | FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, |
39 | FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, | 39 | FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, |
40 | }; | 40 | }; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * This is the flattened representation of a Binder object for transfer | 43 | * This is the flattened representation of a Binder object for transfer |
44 | * between processes. The 'offsets' supplied as part of a binder transaction | 44 | * between processes. The 'offsets' supplied as part of a binder transaction |
45 | * contains offsets into the data where these structures occur. The Binder | 45 | * contains offsets into the data where these structures occur. The Binder |
46 | * driver takes care of re-writing the structure type and data as it moves | 46 | * driver takes care of re-writing the structure type and data as it moves |
47 | * between processes. | 47 | * between processes. |
48 | */ | 48 | */ |
49 | struct flat_binder_object { | 49 | struct flat_binder_object { |
50 | /* 8 bytes for large_flat_header. */ | 50 | /* 8 bytes for large_flat_header. */ |
51 | unsigned long type; | 51 | __u32 type; |
52 | unsigned long flags; | 52 | __u32 flags; |
53 | 53 | ||
54 | /* 8 bytes of data. */ | 54 | /* 8 bytes of data. */ |
55 | union { | 55 | union { |
56 | void __user *binder; /* local object */ | 56 | void __user *binder; /* local object */ |
57 | signed long handle; /* remote object */ | 57 | __u32 handle; /* remote object */ |
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* extra data associated with local object */ | 60 | /* extra data associated with local object */ |
61 | void __user *cookie; | 61 | void __user *cookie; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | /* | 64 | /* |
65 | * On 64-bit platforms where user code may run in 32-bits the driver must | 65 | * On 64-bit platforms where user code may run in 32-bits the driver must |
66 | * translate the buffer (and local binder) addresses appropriately. | 66 | * translate the buffer (and local binder) addresses appropriately. |
67 | */ | 67 | */ |
68 | 68 | ||
69 | struct binder_write_read { | 69 | struct binder_write_read { |
70 | size_t write_size; /* bytes to write */ | 70 | size_t write_size; /* bytes to write */ |
71 | size_t write_consumed; /* bytes consumed by driver */ | 71 | size_t write_consumed; /* bytes consumed by driver */ |
72 | unsigned long write_buffer; | 72 | unsigned long write_buffer; |
73 | size_t read_size; /* bytes to read */ | 73 | size_t read_size; /* bytes to read */ |
74 | size_t read_consumed; /* bytes consumed by driver */ | 74 | size_t read_consumed; /* bytes consumed by driver */ |
75 | unsigned long read_buffer; | 75 | unsigned long read_buffer; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | /* Use with BINDER_VERSION, driver fills in fields. */ | 78 | /* Use with BINDER_VERSION, driver fills in fields. */ |
79 | struct binder_version { | 79 | struct binder_version { |
80 | /* driver protocol version -- increment with incompatible change */ | 80 | /* driver protocol version -- increment with incompatible change */ |
81 | signed long protocol_version; | 81 | __s32 protocol_version; |
82 | }; | 82 | }; |
83 | 83 | ||
84 | /* This is the current protocol version. */ | 84 | /* This is the current protocol version. */ |
85 | #define BINDER_CURRENT_PROTOCOL_VERSION 7 | 85 | #define BINDER_CURRENT_PROTOCOL_VERSION 7 |
86 | 86 | ||
87 | #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) | 87 | #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) |
88 | #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) | 88 | #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) |
89 | #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) | 89 | #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) |
90 | #define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) | 90 | #define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) |
91 | #define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) | 91 | #define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) |
92 | #define BINDER_THREAD_EXIT _IOW('b', 8, __s32) | 92 | #define BINDER_THREAD_EXIT _IOW('b', 8, __s32) |
93 | #define BINDER_VERSION _IOWR('b', 9, struct binder_version) | 93 | #define BINDER_VERSION _IOWR('b', 9, struct binder_version) |
94 | 94 | ||
95 | /* | 95 | /* |
96 | * NOTE: Two special error codes you should check for when calling | 96 | * NOTE: Two special error codes you should check for when calling |
97 | * in to the driver are: | 97 | * in to the driver are: |
98 | * | 98 | * |
99 | * EINTR -- The operation has been interupted. This should be | 99 | * EINTR -- The operation has been interupted. This should be |
100 | * handled by retrying the ioctl() until a different error code | 100 | * handled by retrying the ioctl() until a different error code |
101 | * is returned. | 101 | * is returned. |
102 | * | 102 | * |
103 | * ECONNREFUSED -- The driver is no longer accepting operations | 103 | * ECONNREFUSED -- The driver is no longer accepting operations |
104 | * from your process. That is, the process is being destroyed. | 104 | * from your process. That is, the process is being destroyed. |
105 | * You should handle this by exiting from your process. Note | 105 | * You should handle this by exiting from your process. Note |
106 | * that once this error code is returned, all further calls to | 106 | * that once this error code is returned, all further calls to |
107 | * the driver from any thread will return this same code. | 107 | * the driver from any thread will return this same code. |
108 | */ | 108 | */ |
109 | 109 | ||
/* Flags carried in binder_transaction_data.flags. */
enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
};
116 | 116 | ||
117 | struct binder_transaction_data { | 117 | struct binder_transaction_data { |
118 | /* The first two are only used for bcTRANSACTION and brTRANSACTION, | 118 | /* The first two are only used for bcTRANSACTION and brTRANSACTION, |
119 | * identifying the target and contents of the transaction. | 119 | * identifying the target and contents of the transaction. |
120 | */ | 120 | */ |
121 | union { | 121 | union { |
122 | size_t handle; /* target descriptor of command transaction */ | 122 | __u32 handle; /* target descriptor of command transaction */ |
123 | void *ptr; /* target descriptor of return transaction */ | 123 | void *ptr; /* target descriptor of return transaction */ |
124 | } target; | 124 | } target; |
125 | void *cookie; /* target object cookie */ | 125 | void *cookie; /* target object cookie */ |
126 | __u32 code; /* transaction command */ | 126 | __u32 code; /* transaction command */ |
127 | 127 | ||
128 | /* General information about the transaction. */ | 128 | /* General information about the transaction. */ |
129 | __u32 flags; | 129 | __u32 flags; |
130 | pid_t sender_pid; | 130 | pid_t sender_pid; |
131 | uid_t sender_euid; | 131 | uid_t sender_euid; |
132 | size_t data_size; /* number of bytes of data */ | 132 | size_t data_size; /* number of bytes of data */ |
133 | size_t offsets_size; /* number of bytes of offsets */ | 133 | size_t offsets_size; /* number of bytes of offsets */ |
134 | 134 | ||
135 | /* If this transaction is inline, the data immediately | 135 | /* If this transaction is inline, the data immediately |
136 | * follows here; otherwise, it ends with a pointer to | 136 | * follows here; otherwise, it ends with a pointer to |
137 | * the data buffer. | 137 | * the data buffer. |
138 | */ | 138 | */ |
139 | union { | 139 | union { |
140 | struct { | 140 | struct { |
141 | /* transaction data */ | 141 | /* transaction data */ |
142 | const void __user *buffer; | 142 | const void __user *buffer; |
143 | /* offsets from buffer to flat_binder_object structs */ | 143 | /* offsets from buffer to flat_binder_object structs */ |
144 | const void __user *offsets; | 144 | const void __user *offsets; |
145 | } ptr; | 145 | } ptr; |
146 | __u8 buf[8]; | 146 | __u8 buf[8]; |
147 | } data; | 147 | } data; |
148 | }; | 148 | }; |
149 | 149 | ||
/* A binder object pointer paired with its identifying cookie. */
struct binder_ptr_cookie {
	void *ptr;
	void *cookie;
};
154 | 154 | ||
/* A priority paired with an object descriptor (BC_ATTEMPT_ACQUIRE). */
struct binder_pri_desc {
	__s32 priority;
	__u32 desc;
};
159 | 159 | ||
/* A priority plus a binder object pointer/cookie pair (BR_ATTEMPT_ACQUIRE). */
struct binder_pri_ptr_cookie {
	__s32 priority;
	void *ptr;
	void *cookie;
};
165 | 165 | ||
166 | enum binder_driver_return_protocol { | 166 | enum binder_driver_return_protocol { |
167 | BR_ERROR = _IOR('r', 0, __s32), | 167 | BR_ERROR = _IOR('r', 0, __s32), |
168 | /* | 168 | /* |
169 | * int: error code | 169 | * int: error code |
170 | */ | 170 | */ |
171 | 171 | ||
172 | BR_OK = _IO('r', 1), | 172 | BR_OK = _IO('r', 1), |
173 | /* No parameters! */ | 173 | /* No parameters! */ |
174 | 174 | ||
175 | BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), | 175 | BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), |
176 | BR_REPLY = _IOR('r', 3, struct binder_transaction_data), | 176 | BR_REPLY = _IOR('r', 3, struct binder_transaction_data), |
177 | /* | 177 | /* |
178 | * binder_transaction_data: the received command. | 178 | * binder_transaction_data: the received command. |
179 | */ | 179 | */ |
180 | 180 | ||
181 | BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), | 181 | BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), |
182 | /* | 182 | /* |
183 | * not currently supported | 183 | * not currently supported |
184 | * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. | 184 | * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. |
185 | * Else the remote object has acquired a primary reference. | 185 | * Else the remote object has acquired a primary reference. |
186 | */ | 186 | */ |
187 | 187 | ||
188 | BR_DEAD_REPLY = _IO('r', 5), | 188 | BR_DEAD_REPLY = _IO('r', 5), |
189 | /* | 189 | /* |
190 | * The target of the last transaction (either a bcTRANSACTION or | 190 | * The target of the last transaction (either a bcTRANSACTION or |
191 | * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. | 191 | * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. |
192 | */ | 192 | */ |
193 | 193 | ||
194 | BR_TRANSACTION_COMPLETE = _IO('r', 6), | 194 | BR_TRANSACTION_COMPLETE = _IO('r', 6), |
195 | /* | 195 | /* |
196 | * No parameters... always refers to the last transaction requested | 196 | * No parameters... always refers to the last transaction requested |
197 | * (including replies). Note that this will be sent even for | 197 | * (including replies). Note that this will be sent even for |
198 | * asynchronous transactions. | 198 | * asynchronous transactions. |
199 | */ | 199 | */ |
200 | 200 | ||
201 | BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), | 201 | BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), |
202 | BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), | 202 | BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), |
203 | BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), | 203 | BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), |
204 | BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), | 204 | BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), |
205 | /* | 205 | /* |
206 | * void *: ptr to binder | 206 | * void *: ptr to binder |
207 | * void *: cookie for binder | 207 | * void *: cookie for binder |
208 | */ | 208 | */ |
209 | 209 | ||
210 | BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), | 210 | BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), |
211 | /* | 211 | /* |
212 | * not currently supported | 212 | * not currently supported |
213 | * int: priority | 213 | * int: priority |
214 | * void *: ptr to binder | 214 | * void *: ptr to binder |
215 | * void *: cookie for binder | 215 | * void *: cookie for binder |
216 | */ | 216 | */ |
217 | 217 | ||
218 | BR_NOOP = _IO('r', 12), | 218 | BR_NOOP = _IO('r', 12), |
219 | /* | 219 | /* |
220 | * No parameters. Do nothing and examine the next command. It exists | 220 | * No parameters. Do nothing and examine the next command. It exists |
221 | * primarily so that we can replace it with a BR_SPAWN_LOOPER command. | 221 | * primarily so that we can replace it with a BR_SPAWN_LOOPER command. |
222 | */ | 222 | */ |
223 | 223 | ||
224 | BR_SPAWN_LOOPER = _IO('r', 13), | 224 | BR_SPAWN_LOOPER = _IO('r', 13), |
225 | /* | 225 | /* |
226 | * No parameters. The driver has determined that a process has no | 226 | * No parameters. The driver has determined that a process has no |
227 | * threads waiting to service incoming transactions. When a process | 227 | * threads waiting to service incoming transactions. When a process |
228 | * receives this command, it must spawn a new service thread and | 228 | * receives this command, it must spawn a new service thread and |
229 | * register it via bcENTER_LOOPER. | 229 | * register it via bcENTER_LOOPER. |
230 | */ | 230 | */ |
231 | 231 | ||
232 | BR_FINISHED = _IO('r', 14), | 232 | BR_FINISHED = _IO('r', 14), |
233 | /* | 233 | /* |
234 | * not currently supported | 234 | * not currently supported |
235 | * stop threadpool thread | 235 | * stop threadpool thread |
236 | */ | 236 | */ |
237 | 237 | ||
238 | BR_DEAD_BINDER = _IOR('r', 15, void *), | 238 | BR_DEAD_BINDER = _IOR('r', 15, void *), |
239 | /* | 239 | /* |
240 | * void *: cookie | 240 | * void *: cookie |
241 | */ | 241 | */ |
242 | BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *), | 242 | BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *), |
243 | /* | 243 | /* |
244 | * void *: cookie | 244 | * void *: cookie |
245 | */ | 245 | */ |
246 | 246 | ||
247 | BR_FAILED_REPLY = _IO('r', 17), | 247 | BR_FAILED_REPLY = _IO('r', 17), |
248 | /* | 248 | /* |
249 | * The the last transaction (either a bcTRANSACTION or | 249 | * The the last transaction (either a bcTRANSACTION or |
250 | * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. | 250 | * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. |
251 | */ | 251 | */ |
252 | }; | 252 | }; |
253 | 253 | ||
254 | enum binder_driver_command_protocol { | 254 | enum binder_driver_command_protocol { |
255 | BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), | 255 | BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), |
256 | BC_REPLY = _IOW('c', 1, struct binder_transaction_data), | 256 | BC_REPLY = _IOW('c', 1, struct binder_transaction_data), |
257 | /* | 257 | /* |
258 | * binder_transaction_data: the sent command. | 258 | * binder_transaction_data: the sent command. |
259 | */ | 259 | */ |
260 | 260 | ||
261 | BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), | 261 | BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), |
262 | /* | 262 | /* |
263 | * not currently supported | 263 | * not currently supported |
264 | * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. | 264 | * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. |
265 | * Else you have acquired a primary reference on the object. | 265 | * Else you have acquired a primary reference on the object. |
266 | */ | 266 | */ |
267 | 267 | ||
268 | BC_FREE_BUFFER = _IOW('c', 3, void *), | 268 | BC_FREE_BUFFER = _IOW('c', 3, void *), |
269 | /* | 269 | /* |
270 | * void *: ptr to transaction data received on a read | 270 | * void *: ptr to transaction data received on a read |
271 | */ | 271 | */ |
272 | 272 | ||
273 | BC_INCREFS = _IOW('c', 4, __u32), | 273 | BC_INCREFS = _IOW('c', 4, __u32), |
274 | BC_ACQUIRE = _IOW('c', 5, __u32), | 274 | BC_ACQUIRE = _IOW('c', 5, __u32), |
275 | BC_RELEASE = _IOW('c', 6, __u32), | 275 | BC_RELEASE = _IOW('c', 6, __u32), |
276 | BC_DECREFS = _IOW('c', 7, __u32), | 276 | BC_DECREFS = _IOW('c', 7, __u32), |
277 | /* | 277 | /* |
278 | * int: descriptor | 278 | * int: descriptor |
279 | */ | 279 | */ |
280 | 280 | ||
281 | BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), | 281 | BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), |
282 | BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), | 282 | BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), |
283 | /* | 283 | /* |
284 | * void *: ptr to binder | 284 | * void *: ptr to binder |
285 | * void *: cookie for binder | 285 | * void *: cookie for binder |
286 | */ | 286 | */ |
287 | 287 | ||
288 | BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), | 288 | BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), |
289 | /* | 289 | /* |
290 | * not currently supported | 290 | * not currently supported |
291 | * int: priority | 291 | * int: priority |
292 | * int: descriptor | 292 | * int: descriptor |
293 | */ | 293 | */ |
294 | 294 | ||
295 | BC_REGISTER_LOOPER = _IO('c', 11), | 295 | BC_REGISTER_LOOPER = _IO('c', 11), |
296 | /* | 296 | /* |
297 | * No parameters. | 297 | * No parameters. |
298 | * Register a spawned looper thread with the device. | 298 | * Register a spawned looper thread with the device. |
299 | */ | 299 | */ |
300 | 300 | ||
301 | BC_ENTER_LOOPER = _IO('c', 12), | 301 | BC_ENTER_LOOPER = _IO('c', 12), |
302 | BC_EXIT_LOOPER = _IO('c', 13), | 302 | BC_EXIT_LOOPER = _IO('c', 13), |
303 | /* | 303 | /* |
304 | * No parameters. | 304 | * No parameters. |
305 | * These two commands are sent as an application-level thread | 305 | * These two commands are sent as an application-level thread |
306 | * enters and exits the binder loop, respectively. They are | 306 | * enters and exits the binder loop, respectively. They are |
307 | * used so the binder can have an accurate count of the number | 307 | * used so the binder can have an accurate count of the number |
308 | * of looping threads it has available. | 308 | * of looping threads it has available. |
309 | */ | 309 | */ |
310 | 310 | ||
311 | BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie), | 311 | BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie), |
312 | /* | 312 | /* |
313 | * void *: ptr to binder | 313 | * void *: ptr to binder |
314 | * void *: cookie | 314 | * void *: cookie |
315 | */ | 315 | */ |
316 | 316 | ||
317 | BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie), | 317 | BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie), |
318 | /* | 318 | /* |
319 | * void *: ptr to binder | 319 | * void *: ptr to binder |
320 | * void *: cookie | 320 | * void *: cookie |
321 | */ | 321 | */ |
322 | 322 | ||
323 | BC_DEAD_BINDER_DONE = _IOW('c', 16, void *), | 323 | BC_DEAD_BINDER_DONE = _IOW('c', 16, void *), |
324 | /* | 324 | /* |
325 | * void *: cookie | 325 | * void *: cookie |
326 | */ | 326 | */ |
327 | }; | 327 | }; |
328 | 328 | ||
329 | #endif /* _LINUX_BINDER_H */ | 329 | #endif /* _LINUX_BINDER_H */ |
330 | 330 | ||
331 | 331 |