Commit 736288ba5016e255869c26296014eeff649971c2
1 parent
bdf8647c44
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
uprobes/perf: Teach trace_uprobe/perf code to track the active perf_event's
Introduce "struct trace_uprobe_filter" which records the "active" perf_events attached to ftrace_event_call. For the start we simply use list_head, we can optimize this later if needed. For example, we do not really need to record an event with ->parent != NULL, we can rely on parent->child_list. And we can certainly do some optimizations for the case when 2 events have the same ->tp_target or tp_target->mm. Change trace_uprobe_register() to process TRACE_REG_PERF_OPEN/CLOSE and add/del this perf_event to the list. We can probably avoid any locking, but let's start with the "obviously correct" trace_uprobe_filter->rwlock which protects everything. Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Showing 1 changed file with 55 additions and 0 deletions Inline Diff
kernel/trace/trace_uprobe.c
1 | /* | 1 | /* |
2 | * uprobes-based tracing events | 2 | * uprobes-based tracing events |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public License | 13 | * You should have received a copy of the GNU General Public License |
14 | * along with this program; if not, write to the Free Software | 14 | * along with this program; if not, write to the Free Software |
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
16 | * | 16 | * |
17 | * Copyright (C) IBM Corporation, 2010-2012 | 17 | * Copyright (C) IBM Corporation, 2010-2012 |
18 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> | 18 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
23 | #include <linux/uprobes.h> | 23 | #include <linux/uprobes.h> |
24 | #include <linux/namei.h> | 24 | #include <linux/namei.h> |
25 | #include <linux/string.h> | 25 | #include <linux/string.h> |
26 | 26 | ||
27 | #include "trace_probe.h" | 27 | #include "trace_probe.h" |
28 | 28 | ||
29 | #define UPROBE_EVENT_SYSTEM "uprobes" | 29 | #define UPROBE_EVENT_SYSTEM "uprobes" |
30 | 30 | ||
/*
 * Records the "active" perf_events attached to this probe's
 * ftrace_event_call.  A simple list for now; see the commit log for
 * possible optimizations (skipping ->parent != NULL events, etc.).
 */
struct trace_uprobe_filter {
	rwlock_t rwlock;		/* protects nr_systemwide and perf_events */
	int nr_systemwide;		/* presumably counts events with no task target — TODO confirm against TRACE_REG_PERF_OPEN handling */
	struct list_head perf_events;	/* attached perf_event list */
};
36 | |||
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head list;			/* link in the global uprobe_list */
	struct ftrace_event_class class;
	struct ftrace_event_call call;		/* call.class points back at class */
	struct trace_uprobe_filter filter;	/* active perf_events for this probe */
	struct uprobe_consumer consumer;	/* ->handler = uprobe_dispatcher */
	struct inode *inode;			/* probed file; ref held via igrab(), dropped by iput() in free_trace_uprobe() */
	char *filename;				/* user-supplied path (kstrdup'ed) */
	unsigned long offset;			/* probe offset within the file */
	unsigned long nhit;			/* hit counter shown in uprobe_profile */
	unsigned int flags; /* For TP_FLAG_* */
	ssize_t size; /* trace entry size */
	unsigned int nr_args;			/* number of valid entries in args[] */
	struct probe_arg args[];		/* flexible array; sized by SIZEOF_TRACE_UPROBE() */
};
48 | 55 | ||
/*
 * Allocation size for a trace_uprobe carrying @n probe arguments:
 * the fixed header plus the trailing flexible args[] array.
 */
#define SIZEOF_TRACE_UPROBE(n)			\
	(offsetof(struct trace_uprobe, args) +	\
	(sizeof(struct probe_arg) * (n)))
52 | 59 | ||
53 | static int register_uprobe_event(struct trace_uprobe *tu); | 60 | static int register_uprobe_event(struct trace_uprobe *tu); |
54 | static void unregister_uprobe_event(struct trace_uprobe *tu); | 61 | static void unregister_uprobe_event(struct trace_uprobe *tu); |
55 | 62 | ||
56 | static DEFINE_MUTEX(uprobe_lock); | 63 | static DEFINE_MUTEX(uprobe_lock); |
57 | static LIST_HEAD(uprobe_list); | 64 | static LIST_HEAD(uprobe_list); |
58 | 65 | ||
59 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); | 66 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); |
60 | 67 | ||
68 | static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) | ||
69 | { | ||
70 | rwlock_init(&filter->rwlock); | ||
71 | filter->nr_systemwide = 0; | ||
72 | INIT_LIST_HEAD(&filter->perf_events); | ||
73 | } | ||
74 | |||
75 | static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter) | ||
76 | { | ||
77 | return !filter->nr_systemwide && list_empty(&filter->perf_events); | ||
78 | } | ||
79 | |||
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 *
 * @group/@event must be non-NULL and pass is_good_name().  Returns the
 * new probe or an ERR_PTR(); the caller owns the result and releases it
 * with free_trace_uprobe().
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	/* One zeroed allocation covers the struct plus nargs probe_arg slots. */
	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->call.class = &tu->class;
	tu->call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->call.name)
		goto error;

	tu->class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	tu->consumer.handler = uprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	/* kfree(NULL) is a no-op, so reaching here with a failed kstrdup() is safe. */
	kfree(tu->call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}
98 | 118 | ||
/*
 * Release everything alloc_trace_uprobe()/create_trace_uprobe() acquired:
 * per-arg fetch state, the inode reference, the name strings and the
 * probe itself.
 */
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->nr_args; i++)
		traceprobe_free_probe_arg(&tu->args[i]);

	/* Drops the reference taken by igrab() in create_trace_uprobe(). */
	iput(tu->inode);
	kfree(tu->call.class->system);
	kfree(tu->call.name);
	kfree(tu->filename);
	kfree(tu);
}
112 | 132 | ||
113 | static struct trace_uprobe *find_probe_event(const char *event, const char *group) | 133 | static struct trace_uprobe *find_probe_event(const char *event, const char *group) |
114 | { | 134 | { |
115 | struct trace_uprobe *tu; | 135 | struct trace_uprobe *tu; |
116 | 136 | ||
117 | list_for_each_entry(tu, &uprobe_list, list) | 137 | list_for_each_entry(tu, &uprobe_list, list) |
118 | if (strcmp(tu->call.name, event) == 0 && | 138 | if (strcmp(tu->call.name, event) == 0 && |
119 | strcmp(tu->call.class->system, group) == 0) | 139 | strcmp(tu->call.class->system, group) == 0) |
120 | return tu; | 140 | return tu; |
121 | 141 | ||
122 | return NULL; | 142 | return NULL; |
123 | } | 143 | } |
124 | 144 | ||
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
static void unregister_trace_uprobe(struct trace_uprobe *tu)
{
	list_del(&tu->list);
	unregister_uprobe_event(tu);
	free_trace_uprobe(tu);	/* tu is gone after this; caller must not touch it */
}
132 | 152 | ||
/*
 * Register a trace_uprobe and probe_event.
 *
 * A probe with the same event/group name silently replaces the old one.
 * On failure the caller still owns @tu and must free it; on success @tu
 * is on uprobe_list and owned by this file.
 */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tp;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tp = find_probe_event(tu->call.name, tu->call.class->system);
	if (old_tp)
		/* delete old event */
		unregister_trace_uprobe(old_tp);

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}
160 | 180 | ||
161 | /* | 181 | /* |
162 | * Argument syntax: | 182 | * Argument syntax: |
163 | * - Add uprobe: p[:[GRP/]EVENT] PATH:SYMBOL[+offs] [FETCHARGS] | 183 | * - Add uprobe: p[:[GRP/]EVENT] PATH:SYMBOL[+offs] [FETCHARGS] |
164 | * | 184 | * |
165 | * - Remove uprobe: -:[GRP/]EVENT | 185 | * - Remove uprobe: -:[GRP/]EVENT |
166 | */ | 186 | */ |
167 | static int create_trace_uprobe(int argc, char **argv) | 187 | static int create_trace_uprobe(int argc, char **argv) |
168 | { | 188 | { |
169 | struct trace_uprobe *tu; | 189 | struct trace_uprobe *tu; |
170 | struct inode *inode; | 190 | struct inode *inode; |
171 | char *arg, *event, *group, *filename; | 191 | char *arg, *event, *group, *filename; |
172 | char buf[MAX_EVENT_NAME_LEN]; | 192 | char buf[MAX_EVENT_NAME_LEN]; |
173 | struct path path; | 193 | struct path path; |
174 | unsigned long offset; | 194 | unsigned long offset; |
175 | bool is_delete; | 195 | bool is_delete; |
176 | int i, ret; | 196 | int i, ret; |
177 | 197 | ||
178 | inode = NULL; | 198 | inode = NULL; |
179 | ret = 0; | 199 | ret = 0; |
180 | is_delete = false; | 200 | is_delete = false; |
181 | event = NULL; | 201 | event = NULL; |
182 | group = NULL; | 202 | group = NULL; |
183 | 203 | ||
184 | /* argc must be >= 1 */ | 204 | /* argc must be >= 1 */ |
185 | if (argv[0][0] == '-') | 205 | if (argv[0][0] == '-') |
186 | is_delete = true; | 206 | is_delete = true; |
187 | else if (argv[0][0] != 'p') { | 207 | else if (argv[0][0] != 'p') { |
188 | pr_info("Probe definition must be started with 'p' or '-'.\n"); | 208 | pr_info("Probe definition must be started with 'p' or '-'.\n"); |
189 | return -EINVAL; | 209 | return -EINVAL; |
190 | } | 210 | } |
191 | 211 | ||
192 | if (argv[0][1] == ':') { | 212 | if (argv[0][1] == ':') { |
193 | event = &argv[0][2]; | 213 | event = &argv[0][2]; |
194 | arg = strchr(event, '/'); | 214 | arg = strchr(event, '/'); |
195 | 215 | ||
196 | if (arg) { | 216 | if (arg) { |
197 | group = event; | 217 | group = event; |
198 | event = arg + 1; | 218 | event = arg + 1; |
199 | event[-1] = '\0'; | 219 | event[-1] = '\0'; |
200 | 220 | ||
201 | if (strlen(group) == 0) { | 221 | if (strlen(group) == 0) { |
202 | pr_info("Group name is not specified\n"); | 222 | pr_info("Group name is not specified\n"); |
203 | return -EINVAL; | 223 | return -EINVAL; |
204 | } | 224 | } |
205 | } | 225 | } |
206 | if (strlen(event) == 0) { | 226 | if (strlen(event) == 0) { |
207 | pr_info("Event name is not specified\n"); | 227 | pr_info("Event name is not specified\n"); |
208 | return -EINVAL; | 228 | return -EINVAL; |
209 | } | 229 | } |
210 | } | 230 | } |
211 | if (!group) | 231 | if (!group) |
212 | group = UPROBE_EVENT_SYSTEM; | 232 | group = UPROBE_EVENT_SYSTEM; |
213 | 233 | ||
214 | if (is_delete) { | 234 | if (is_delete) { |
215 | if (!event) { | 235 | if (!event) { |
216 | pr_info("Delete command needs an event name.\n"); | 236 | pr_info("Delete command needs an event name.\n"); |
217 | return -EINVAL; | 237 | return -EINVAL; |
218 | } | 238 | } |
219 | mutex_lock(&uprobe_lock); | 239 | mutex_lock(&uprobe_lock); |
220 | tu = find_probe_event(event, group); | 240 | tu = find_probe_event(event, group); |
221 | 241 | ||
222 | if (!tu) { | 242 | if (!tu) { |
223 | mutex_unlock(&uprobe_lock); | 243 | mutex_unlock(&uprobe_lock); |
224 | pr_info("Event %s/%s doesn't exist.\n", group, event); | 244 | pr_info("Event %s/%s doesn't exist.\n", group, event); |
225 | return -ENOENT; | 245 | return -ENOENT; |
226 | } | 246 | } |
227 | /* delete an event */ | 247 | /* delete an event */ |
228 | unregister_trace_uprobe(tu); | 248 | unregister_trace_uprobe(tu); |
229 | mutex_unlock(&uprobe_lock); | 249 | mutex_unlock(&uprobe_lock); |
230 | return 0; | 250 | return 0; |
231 | } | 251 | } |
232 | 252 | ||
233 | if (argc < 2) { | 253 | if (argc < 2) { |
234 | pr_info("Probe point is not specified.\n"); | 254 | pr_info("Probe point is not specified.\n"); |
235 | return -EINVAL; | 255 | return -EINVAL; |
236 | } | 256 | } |
237 | if (isdigit(argv[1][0])) { | 257 | if (isdigit(argv[1][0])) { |
238 | pr_info("probe point must be have a filename.\n"); | 258 | pr_info("probe point must be have a filename.\n"); |
239 | return -EINVAL; | 259 | return -EINVAL; |
240 | } | 260 | } |
241 | arg = strchr(argv[1], ':'); | 261 | arg = strchr(argv[1], ':'); |
242 | if (!arg) | 262 | if (!arg) |
243 | goto fail_address_parse; | 263 | goto fail_address_parse; |
244 | 264 | ||
245 | *arg++ = '\0'; | 265 | *arg++ = '\0'; |
246 | filename = argv[1]; | 266 | filename = argv[1]; |
247 | ret = kern_path(filename, LOOKUP_FOLLOW, &path); | 267 | ret = kern_path(filename, LOOKUP_FOLLOW, &path); |
248 | if (ret) | 268 | if (ret) |
249 | goto fail_address_parse; | 269 | goto fail_address_parse; |
250 | 270 | ||
251 | inode = igrab(path.dentry->d_inode); | 271 | inode = igrab(path.dentry->d_inode); |
252 | path_put(&path); | 272 | path_put(&path); |
253 | 273 | ||
254 | if (!inode || !S_ISREG(inode->i_mode)) { | 274 | if (!inode || !S_ISREG(inode->i_mode)) { |
255 | ret = -EINVAL; | 275 | ret = -EINVAL; |
256 | goto fail_address_parse; | 276 | goto fail_address_parse; |
257 | } | 277 | } |
258 | 278 | ||
259 | ret = kstrtoul(arg, 0, &offset); | 279 | ret = kstrtoul(arg, 0, &offset); |
260 | if (ret) | 280 | if (ret) |
261 | goto fail_address_parse; | 281 | goto fail_address_parse; |
262 | 282 | ||
263 | argc -= 2; | 283 | argc -= 2; |
264 | argv += 2; | 284 | argv += 2; |
265 | 285 | ||
266 | /* setup a probe */ | 286 | /* setup a probe */ |
267 | if (!event) { | 287 | if (!event) { |
268 | char *tail; | 288 | char *tail; |
269 | char *ptr; | 289 | char *ptr; |
270 | 290 | ||
271 | tail = kstrdup(kbasename(filename), GFP_KERNEL); | 291 | tail = kstrdup(kbasename(filename), GFP_KERNEL); |
272 | if (!tail) { | 292 | if (!tail) { |
273 | ret = -ENOMEM; | 293 | ret = -ENOMEM; |
274 | goto fail_address_parse; | 294 | goto fail_address_parse; |
275 | } | 295 | } |
276 | 296 | ||
277 | ptr = strpbrk(tail, ".-_"); | 297 | ptr = strpbrk(tail, ".-_"); |
278 | if (ptr) | 298 | if (ptr) |
279 | *ptr = '\0'; | 299 | *ptr = '\0'; |
280 | 300 | ||
281 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset); | 301 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset); |
282 | event = buf; | 302 | event = buf; |
283 | kfree(tail); | 303 | kfree(tail); |
284 | } | 304 | } |
285 | 305 | ||
286 | tu = alloc_trace_uprobe(group, event, argc); | 306 | tu = alloc_trace_uprobe(group, event, argc); |
287 | if (IS_ERR(tu)) { | 307 | if (IS_ERR(tu)) { |
288 | pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu)); | 308 | pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu)); |
289 | ret = PTR_ERR(tu); | 309 | ret = PTR_ERR(tu); |
290 | goto fail_address_parse; | 310 | goto fail_address_parse; |
291 | } | 311 | } |
292 | tu->offset = offset; | 312 | tu->offset = offset; |
293 | tu->inode = inode; | 313 | tu->inode = inode; |
294 | tu->filename = kstrdup(filename, GFP_KERNEL); | 314 | tu->filename = kstrdup(filename, GFP_KERNEL); |
295 | 315 | ||
296 | if (!tu->filename) { | 316 | if (!tu->filename) { |
297 | pr_info("Failed to allocate filename.\n"); | 317 | pr_info("Failed to allocate filename.\n"); |
298 | ret = -ENOMEM; | 318 | ret = -ENOMEM; |
299 | goto error; | 319 | goto error; |
300 | } | 320 | } |
301 | 321 | ||
302 | /* parse arguments */ | 322 | /* parse arguments */ |
303 | ret = 0; | 323 | ret = 0; |
304 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | 324 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
305 | /* Increment count for freeing args in error case */ | 325 | /* Increment count for freeing args in error case */ |
306 | tu->nr_args++; | 326 | tu->nr_args++; |
307 | 327 | ||
308 | /* Parse argument name */ | 328 | /* Parse argument name */ |
309 | arg = strchr(argv[i], '='); | 329 | arg = strchr(argv[i], '='); |
310 | if (arg) { | 330 | if (arg) { |
311 | *arg++ = '\0'; | 331 | *arg++ = '\0'; |
312 | tu->args[i].name = kstrdup(argv[i], GFP_KERNEL); | 332 | tu->args[i].name = kstrdup(argv[i], GFP_KERNEL); |
313 | } else { | 333 | } else { |
314 | arg = argv[i]; | 334 | arg = argv[i]; |
315 | /* If argument name is omitted, set "argN" */ | 335 | /* If argument name is omitted, set "argN" */ |
316 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); | 336 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); |
317 | tu->args[i].name = kstrdup(buf, GFP_KERNEL); | 337 | tu->args[i].name = kstrdup(buf, GFP_KERNEL); |
318 | } | 338 | } |
319 | 339 | ||
320 | if (!tu->args[i].name) { | 340 | if (!tu->args[i].name) { |
321 | pr_info("Failed to allocate argument[%d] name.\n", i); | 341 | pr_info("Failed to allocate argument[%d] name.\n", i); |
322 | ret = -ENOMEM; | 342 | ret = -ENOMEM; |
323 | goto error; | 343 | goto error; |
324 | } | 344 | } |
325 | 345 | ||
326 | if (!is_good_name(tu->args[i].name)) { | 346 | if (!is_good_name(tu->args[i].name)) { |
327 | pr_info("Invalid argument[%d] name: %s\n", i, tu->args[i].name); | 347 | pr_info("Invalid argument[%d] name: %s\n", i, tu->args[i].name); |
328 | ret = -EINVAL; | 348 | ret = -EINVAL; |
329 | goto error; | 349 | goto error; |
330 | } | 350 | } |
331 | 351 | ||
332 | if (traceprobe_conflict_field_name(tu->args[i].name, tu->args, i)) { | 352 | if (traceprobe_conflict_field_name(tu->args[i].name, tu->args, i)) { |
333 | pr_info("Argument[%d] name '%s' conflicts with " | 353 | pr_info("Argument[%d] name '%s' conflicts with " |
334 | "another field.\n", i, argv[i]); | 354 | "another field.\n", i, argv[i]); |
335 | ret = -EINVAL; | 355 | ret = -EINVAL; |
336 | goto error; | 356 | goto error; |
337 | } | 357 | } |
338 | 358 | ||
339 | /* Parse fetch argument */ | 359 | /* Parse fetch argument */ |
340 | ret = traceprobe_parse_probe_arg(arg, &tu->size, &tu->args[i], false, false); | 360 | ret = traceprobe_parse_probe_arg(arg, &tu->size, &tu->args[i], false, false); |
341 | if (ret) { | 361 | if (ret) { |
342 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); | 362 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
343 | goto error; | 363 | goto error; |
344 | } | 364 | } |
345 | } | 365 | } |
346 | 366 | ||
347 | ret = register_trace_uprobe(tu); | 367 | ret = register_trace_uprobe(tu); |
348 | if (ret) | 368 | if (ret) |
349 | goto error; | 369 | goto error; |
350 | return 0; | 370 | return 0; |
351 | 371 | ||
352 | error: | 372 | error: |
353 | free_trace_uprobe(tu); | 373 | free_trace_uprobe(tu); |
354 | return ret; | 374 | return ret; |
355 | 375 | ||
356 | fail_address_parse: | 376 | fail_address_parse: |
357 | if (inode) | 377 | if (inode) |
358 | iput(inode); | 378 | iput(inode); |
359 | 379 | ||
360 | pr_info("Failed to parse address or file.\n"); | 380 | pr_info("Failed to parse address or file.\n"); |
361 | 381 | ||
362 | return ret; | 382 | return ret; |
363 | } | 383 | } |
364 | 384 | ||
/*
 * Remove and free every registered probe.  Used when uprobe_events is
 * opened with O_TRUNC (see probes_open()).
 */
static void cleanup_all_probes(void)
{
	struct trace_uprobe *tu;

	mutex_lock(&uprobe_lock);
	/* Pop from the head until empty; unregister_trace_uprobe() unlinks. */
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		unregister_trace_uprobe(tu);
	}
	mutex_unlock(&uprobe_lock);
}
376 | 396 | ||
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	/* Held across the whole traversal; released in probes_seq_stop(). */
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

/* Print one probe in the same "p:GRP/EVENT PATH:OFFSET args..." syntax. */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	int i;

	seq_printf(m, "p:%s/%s", tu->call.class->system, tu->call.name);
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->nr_args; i++)
		seq_printf(m, " %s=%s", tu->args[i].name, tu->args[i].comm);

	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};
415 | 435 | ||
static int probes_open(struct inode *inode, struct file *file)
{
	/* Opening for write with O_TRUNC wipes every existing probe. */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}

/* Each written line is parsed as one command by create_trace_uprobe(). */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

/* File operations for the uprobe_events control file. */
static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
438 | 458 | ||
/* Probes profiling interfaces */
/* One line per probe: filename, event name and the ->nhit counter. */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename, tu->call.name, tu->nhit);
	return 0;
}

/* Reuses the listing iterators; only ->show differs. */
static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

/* Read-only file operations for the uprobe_profile file. */
static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
467 | 487 | ||
/* uprobe handler */
/*
 * Record one uprobe hit into the ftrace ring buffer: reserve an event
 * sized for the header plus all fetched args, fill it in, then commit
 * unless the event filter discards it.
 */
static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	u8 *data;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tu->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = sizeof(*entry) + tu->size;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;		/* buffer full or tracing off; drop the hit */

	entry = ring_buffer_event_data(event);
	/*
	 * NOTE(review): ip is read from task_pt_regs(current), not from the
	 * @regs argument — presumably equivalent at a uprobe hit; confirm.
	 */
	entry->ip = instruction_pointer(task_pt_regs(current));
	data = (u8 *)&entry[1];
	/* Fetch each argument into its pre-computed offset after the header. */
	for (i = 0; i < tu->nr_args; i++)
		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}
498 | 518 | ||
/* Event entry printers */
/*
 * Format a recorded uprobe entry as "EVENT: (ip) arg1=... arg2=...".
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills up mid-line.
 */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	field = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, call.event);

	if (!trace_seq_printf(s, "%s: (", tu->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Arguments were stored right after the header at their offsets. */
	data = (u8 *)&field[1];
	for (i = 0; i < tu->nr_args; i++) {
		if (!tu->args[i].type->print(s, tu->args[i].name,
					     data + tu->args[i].offset, field))
			goto partial;
	}

	/* trace_seq_puts() returns non-zero on success. */
	if (trace_seq_puts(s, "\n"))
		return TRACE_TYPE_HANDLED;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
534 | 554 | ||
535 | static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu) | 555 | static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu) |
536 | { | 556 | { |
537 | return tu->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE); | 557 | return tu->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE); |
538 | } | 558 | } |
539 | 559 | ||
/*
 * Arm the probe for one consumer type (@flag is TP_FLAG_TRACE or
 * TP_FLAG_PROFILE) and register the uprobe consumer.
 *
 * Returns 0 on success, -EINTR if the probe is already enabled for
 * either consumer, or the error from uprobe_register().
 */
static int probe_event_enable(struct trace_uprobe *tu, int flag)
{
	int ret = 0;

	if (is_trace_uprobe_enabled(tu))
		return -EINTR;

	/* No perf_event can be attached before the probe is enabled. */
	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	/* Set the flag before registering: the handler checks tu->flags. */
	tu->flags |= flag;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		tu->flags &= ~flag;

	return ret;
}
554 | 576 | ||
/*
 * Undo probe_event_enable(): unregister the uprobe consumer and clear
 * @flag.  No-op if the probe is not currently enabled.
 */
static void probe_event_disable(struct trace_uprobe *tu, int flag)
{
	if (!is_trace_uprobe_enabled(tu))
		return;

	/* Every attached perf_event should have been closed by now. */
	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->flags &= ~flag;
}
563 | 587 | ||
564 | static int uprobe_event_define_fields(struct ftrace_event_call *event_call) | 588 | static int uprobe_event_define_fields(struct ftrace_event_call *event_call) |
565 | { | 589 | { |
566 | int ret, i; | 590 | int ret, i; |
567 | struct uprobe_trace_entry_head field; | 591 | struct uprobe_trace_entry_head field; |
568 | struct trace_uprobe *tu = (struct trace_uprobe *)event_call->data; | 592 | struct trace_uprobe *tu = (struct trace_uprobe *)event_call->data; |
569 | 593 | ||
570 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 594 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
571 | /* Set argument names as fields */ | 595 | /* Set argument names as fields */ |
572 | for (i = 0; i < tu->nr_args; i++) { | 596 | for (i = 0; i < tu->nr_args; i++) { |
573 | ret = trace_define_field(event_call, tu->args[i].type->fmttype, | 597 | ret = trace_define_field(event_call, tu->args[i].type->fmttype, |
574 | tu->args[i].name, | 598 | tu->args[i].name, |
575 | sizeof(field) + tu->args[i].offset, | 599 | sizeof(field) + tu->args[i].offset, |
576 | tu->args[i].type->size, | 600 | tu->args[i].type->size, |
577 | tu->args[i].type->is_signed, | 601 | tu->args[i].type->is_signed, |
578 | FILTER_OTHER); | 602 | FILTER_OTHER); |
579 | 603 | ||
580 | if (ret) | 604 | if (ret) |
581 | return ret; | 605 | return ret; |
582 | } | 606 | } |
583 | return 0; | 607 | return 0; |
584 | } | 608 | } |
585 | 609 | ||
/*
 * Build the print_fmt string for this event into @buf (at most @len
 * bytes).  Called twice: first with len=0 to size the string, then
 * with the allocated buffer.  Returns the full length either way;
 * LEN_OR_ZERO keeps the sizing pass from writing through a NULL @buf.
 */
#define LEN_OR_ZERO (len ? len - pos : 0)
static int __set_print_fmt(struct trace_uprobe *tu, char *buf, int len)
{
	const char *fmt, *arg;
	int i;
	int pos = 0;

	fmt = "(%lx)";
	arg = "REC->" FIELD_STRING_IP;

	/* When len=0, we just calculate the needed length */

	/* First half: the quoted format, e.g. "\"(%lx) a=%d b=%s" */
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tu->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tu->args[i].name, tu->args[i].type->fmt);
	}

	/* Second half: the argument list, ", REC->ip, REC->a, ..." */
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tu->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
				tu->args[i].name);
	}

	return pos; /* return the length of print_fmt */
}
#undef LEN_OR_ZERO
615 | 639 | ||
616 | static int set_print_fmt(struct trace_uprobe *tu) | 640 | static int set_print_fmt(struct trace_uprobe *tu) |
617 | { | 641 | { |
618 | char *print_fmt; | 642 | char *print_fmt; |
619 | int len; | 643 | int len; |
620 | 644 | ||
621 | /* First: called with 0 length to calculate the needed length */ | 645 | /* First: called with 0 length to calculate the needed length */ |
622 | len = __set_print_fmt(tu, NULL, 0); | 646 | len = __set_print_fmt(tu, NULL, 0); |
623 | print_fmt = kmalloc(len + 1, GFP_KERNEL); | 647 | print_fmt = kmalloc(len + 1, GFP_KERNEL); |
624 | if (!print_fmt) | 648 | if (!print_fmt) |
625 | return -ENOMEM; | 649 | return -ENOMEM; |
626 | 650 | ||
627 | /* Second: actually write the @print_fmt */ | 651 | /* Second: actually write the @print_fmt */ |
628 | __set_print_fmt(tu, print_fmt, len + 1); | 652 | __set_print_fmt(tu, print_fmt, len + 1); |
629 | tu->call.print_fmt = print_fmt; | 653 | tu->call.print_fmt = print_fmt; |
630 | 654 | ||
631 | return 0; | 655 | return 0; |
632 | } | 656 | } |
633 | 657 | ||
634 | #ifdef CONFIG_PERF_EVENTS | 658 | #ifdef CONFIG_PERF_EVENTS |
659 | static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) | ||
660 | { | ||
661 | write_lock(&tu->filter.rwlock); | ||
662 | if (event->hw.tp_target) | ||
663 | list_add(&event->hw.tp_list, &tu->filter.perf_events); | ||
664 | else | ||
665 | tu->filter.nr_systemwide++; | ||
666 | write_unlock(&tu->filter.rwlock); | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) | ||
672 | { | ||
673 | write_lock(&tu->filter.rwlock); | ||
674 | if (event->hw.tp_target) | ||
675 | list_del(&event->hw.tp_list); | ||
676 | else | ||
677 | tu->filter.nr_systemwide--; | ||
678 | write_unlock(&tu->filter.rwlock); | ||
679 | |||
680 | return 0; | ||
681 | } | ||
682 | |||
/* uprobe profile handler */
static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tu->call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	u8 *data;
	int size, __size, i;
	int rctx;

	/*
	 * Record = fixed header + fetched argument values; round the total
	 * (including the u32 size word perf prepends) up to a u64 boundary.
	 */
	__size = sizeof(*entry) + tu->size;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	/* The perf trace buffer is per-cpu: stay on this cpu until submit. */
	preempt_disable();

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		goto out;

	/*
	 * IP is taken from the task's saved user registers rather than from
	 * @regs, which is still used for fetching the argument values.
	 */
	entry->ip = instruction_pointer(task_pt_regs(current));
	data = (u8 *)&entry[1];
	for (i = 0; i < tu->nr_args; i++)
		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);

 out:
	preempt_enable();
}
668 | #endif /* CONFIG_PERF_EVENTS */ | 716 | #endif /* CONFIG_PERF_EVENTS */ |
669 | 717 | ||
670 | static | 718 | static |
671 | int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) | 719 | int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) |
672 | { | 720 | { |
673 | struct trace_uprobe *tu = (struct trace_uprobe *)event->data; | 721 | struct trace_uprobe *tu = (struct trace_uprobe *)event->data; |
674 | 722 | ||
675 | switch (type) { | 723 | switch (type) { |
676 | case TRACE_REG_REGISTER: | 724 | case TRACE_REG_REGISTER: |
677 | return probe_event_enable(tu, TP_FLAG_TRACE); | 725 | return probe_event_enable(tu, TP_FLAG_TRACE); |
678 | 726 | ||
679 | case TRACE_REG_UNREGISTER: | 727 | case TRACE_REG_UNREGISTER: |
680 | probe_event_disable(tu, TP_FLAG_TRACE); | 728 | probe_event_disable(tu, TP_FLAG_TRACE); |
681 | return 0; | 729 | return 0; |
682 | 730 | ||
683 | #ifdef CONFIG_PERF_EVENTS | 731 | #ifdef CONFIG_PERF_EVENTS |
684 | case TRACE_REG_PERF_REGISTER: | 732 | case TRACE_REG_PERF_REGISTER: |
685 | return probe_event_enable(tu, TP_FLAG_PROFILE); | 733 | return probe_event_enable(tu, TP_FLAG_PROFILE); |
686 | 734 | ||
687 | case TRACE_REG_PERF_UNREGISTER: | 735 | case TRACE_REG_PERF_UNREGISTER: |
688 | probe_event_disable(tu, TP_FLAG_PROFILE); | 736 | probe_event_disable(tu, TP_FLAG_PROFILE); |
689 | return 0; | 737 | return 0; |
738 | |||
739 | case TRACE_REG_PERF_OPEN: | ||
740 | return uprobe_perf_open(tu, data); | ||
741 | |||
742 | case TRACE_REG_PERF_CLOSE: | ||
743 | return uprobe_perf_close(tu, data); | ||
744 | |||
690 | #endif | 745 | #endif |
691 | default: | 746 | default: |
692 | return 0; | 747 | return 0; |
693 | } | 748 | } |
694 | return 0; | 749 | return 0; |
695 | } | 750 | } |
696 | 751 | ||
697 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | 752 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) |
698 | { | 753 | { |
699 | struct trace_uprobe *tu; | 754 | struct trace_uprobe *tu; |
700 | 755 | ||
701 | tu = container_of(con, struct trace_uprobe, consumer); | 756 | tu = container_of(con, struct trace_uprobe, consumer); |
702 | tu->nhit++; | 757 | tu->nhit++; |
703 | 758 | ||
704 | if (tu->flags & TP_FLAG_TRACE) | 759 | if (tu->flags & TP_FLAG_TRACE) |
705 | uprobe_trace_func(tu, regs); | 760 | uprobe_trace_func(tu, regs); |
706 | 761 | ||
707 | #ifdef CONFIG_PERF_EVENTS | 762 | #ifdef CONFIG_PERF_EVENTS |
708 | if (tu->flags & TP_FLAG_PROFILE) | 763 | if (tu->flags & TP_FLAG_PROFILE) |
709 | uprobe_perf_func(tu, regs); | 764 | uprobe_perf_func(tu, regs); |
710 | #endif | 765 | #endif |
711 | return 0; | 766 | return 0; |
712 | } | 767 | } |
713 | 768 | ||
/* Output callbacks for registered uprobe events; only .trace is needed. */
static struct trace_event_functions uprobe_funcs = {
	.trace = print_uprobe_event
};
717 | 772 | ||
718 | static int register_uprobe_event(struct trace_uprobe *tu) | 773 | static int register_uprobe_event(struct trace_uprobe *tu) |
719 | { | 774 | { |
720 | struct ftrace_event_call *call = &tu->call; | 775 | struct ftrace_event_call *call = &tu->call; |
721 | int ret; | 776 | int ret; |
722 | 777 | ||
723 | /* Initialize ftrace_event_call */ | 778 | /* Initialize ftrace_event_call */ |
724 | INIT_LIST_HEAD(&call->class->fields); | 779 | INIT_LIST_HEAD(&call->class->fields); |
725 | call->event.funcs = &uprobe_funcs; | 780 | call->event.funcs = &uprobe_funcs; |
726 | call->class->define_fields = uprobe_event_define_fields; | 781 | call->class->define_fields = uprobe_event_define_fields; |
727 | 782 | ||
728 | if (set_print_fmt(tu) < 0) | 783 | if (set_print_fmt(tu) < 0) |
729 | return -ENOMEM; | 784 | return -ENOMEM; |
730 | 785 | ||
731 | ret = register_ftrace_event(&call->event); | 786 | ret = register_ftrace_event(&call->event); |
732 | if (!ret) { | 787 | if (!ret) { |
733 | kfree(call->print_fmt); | 788 | kfree(call->print_fmt); |
734 | return -ENODEV; | 789 | return -ENODEV; |
735 | } | 790 | } |
736 | call->flags = 0; | 791 | call->flags = 0; |
737 | call->class->reg = trace_uprobe_register; | 792 | call->class->reg = trace_uprobe_register; |
738 | call->data = tu; | 793 | call->data = tu; |
739 | ret = trace_add_event_call(call); | 794 | ret = trace_add_event_call(call); |
740 | 795 | ||
741 | if (ret) { | 796 | if (ret) { |
742 | pr_info("Failed to register uprobe event: %s\n", call->name); | 797 | pr_info("Failed to register uprobe event: %s\n", call->name); |
743 | kfree(call->print_fmt); | 798 | kfree(call->print_fmt); |
744 | unregister_ftrace_event(&call->event); | 799 | unregister_ftrace_event(&call->event); |
745 | } | 800 | } |
746 | 801 | ||
747 | return ret; | 802 | return ret; |
748 | } | 803 | } |
749 | 804 | ||
/* Detach @tu from ftrace and release its print format string. */
static void unregister_uprobe_event(struct trace_uprobe *tu)
{
	/* tu->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tu->call);
	kfree(tu->call.print_fmt);
	tu->call.print_fmt = NULL;
}
757 | 812 | ||
758 | /* Make a trace interface for controling probe points */ | 813 | /* Make a trace interface for controling probe points */ |
759 | static __init int init_uprobe_trace(void) | 814 | static __init int init_uprobe_trace(void) |
760 | { | 815 | { |
761 | struct dentry *d_tracer; | 816 | struct dentry *d_tracer; |
762 | 817 | ||
763 | d_tracer = tracing_init_dentry(); | 818 | d_tracer = tracing_init_dentry(); |
764 | if (!d_tracer) | 819 | if (!d_tracer) |
765 | return 0; | 820 | return 0; |
766 | 821 | ||
767 | trace_create_file("uprobe_events", 0644, d_tracer, | 822 | trace_create_file("uprobe_events", 0644, d_tracer, |
768 | NULL, &uprobe_events_ops); | 823 | NULL, &uprobe_events_ops); |
769 | /* Profile interface */ | 824 | /* Profile interface */ |
770 | trace_create_file("uprobe_profile", 0444, d_tracer, | 825 | trace_create_file("uprobe_profile", 0444, d_tracer, |
771 | NULL, &uprobe_profile_ops); | 826 | NULL, &uprobe_profile_ops); |
772 | return 0; | 827 | return 0; |
773 | } | 828 | } |
774 | 829 | ||
775 | fs_initcall(init_uprobe_trace); | 830 | fs_initcall(init_uprobe_trace); |
776 | 831 |