Commit 65e234ec2c4a0659ca22531dc1372a185f088517

Authored by Masami Hiramatsu
Committed by Frederic Weisbecker
1 parent 8222d718b3

kprobes: Prohibit to probe native_get_debugreg

Since do_debug() calls get_debugreg(), native_get_debugreg() will be
called while single-stepping. This can cause an int3 infinite loop.

We can't put it in the .kprobes.text section because it is inlined,
so we blacklist it by name instead.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <20090827172332.8246.34194.stgit@localhost.localdomain>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
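
For illustration only (not part of this commit): a minimal sketch of how the new blacklist entry is visible to a kprobes user. Once "native_get_debugreg" is in kprobe_blacklist, register_kprobe() on that symbol is rejected by the blacklist check in in_kprobes_functions() and returns -EINVAL. The module below is hypothetical (its name and messages are made up); only register_kprobe(), unregister_kprobe() and the symbol_name field of struct kprobe are taken from the kprobes API shown in the diff.

/* Hypothetical test module, assuming an x86 kernel with this patch applied. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static struct kprobe test_kp = {
	.symbol_name	= "native_get_debugreg",
};

static int __init blacklist_demo_init(void)
{
	int ret = register_kprobe(&test_kp);

	/* Expected with this patch: ret == -EINVAL (blacklisted symbol). */
	pr_info("register_kprobe(\"native_get_debugreg\") = %d\n", ret);
	if (ret == 0)
		unregister_kprobe(&test_kp);	/* only reachable without the patch */
	return 0;
}

static void __exit blacklist_demo_exit(void)
{
}

module_init(blacklist_demo_init);
module_exit(blacklist_demo_exit);
MODULE_LICENSE("GPL");

Before this patch the same registration could succeed, and single-stepping the probed code called from do_debug() would re-trigger the breakpoint, producing the int3 infinite loop described above.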

Showing 1 changed file, kernel/kprobes.c, with 1 addition and 0 deletions (inline diff below; the added line is marked with a leading +).

/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in, are marked
 * __kprobes. But, there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule)
 *
 * For such cases, we now have a blacklist
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
+	{"native_get_debugreg",},
	{NULL}	/* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n",p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise! No unused slots. Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect it and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space. Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}

/* Return 1 if all garbages are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is preepmted on the garbages */
	if (check_safety())
		return -EAGAIN;

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbages */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif

/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	INIT_HLIST_HEAD(&empty_rp);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert new probe at the same location that
		 * had a probe in the module vaddr area which already
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if fail to allocate new slot, don't need to
			 * free aggr_probe. It will be used next time, or
			 * freed by unregister_kprobe.
			 */
			return ret;

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if are we probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

991 int __kprobes register_kretprobe(struct kretprobe *rp) 992 int __kprobes register_kretprobe(struct kretprobe *rp)
992 { 993 {
993 int ret = 0; 994 int ret = 0;
994 struct kretprobe_instance *inst; 995 struct kretprobe_instance *inst;
995 int i; 996 int i;
996 void *addr; 997 void *addr;
997 998
998 if (kretprobe_blacklist_size) { 999 if (kretprobe_blacklist_size) {
999 addr = kprobe_addr(&rp->kp); 1000 addr = kprobe_addr(&rp->kp);
1000 if (!addr) 1001 if (!addr)
1001 return -EINVAL; 1002 return -EINVAL;
1002 1003
1003 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 1004 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1004 if (kretprobe_blacklist[i].addr == addr) 1005 if (kretprobe_blacklist[i].addr == addr)
1005 return -EINVAL; 1006 return -EINVAL;
1006 } 1007 }
1007 } 1008 }
1008 1009
1009 rp->kp.pre_handler = pre_handler_kretprobe; 1010 rp->kp.pre_handler = pre_handler_kretprobe;
1010 rp->kp.post_handler = NULL; 1011 rp->kp.post_handler = NULL;
1011 rp->kp.fault_handler = NULL; 1012 rp->kp.fault_handler = NULL;
1012 rp->kp.break_handler = NULL; 1013 rp->kp.break_handler = NULL;
1013 1014
1014 /* Pre-allocate memory for max kretprobe instances */ 1015 /* Pre-allocate memory for max kretprobe instances */
1015 if (rp->maxactive <= 0) { 1016 if (rp->maxactive <= 0) {
1016 #ifdef CONFIG_PREEMPT 1017 #ifdef CONFIG_PREEMPT
1017 rp->maxactive = max(10, 2 * NR_CPUS); 1018 rp->maxactive = max(10, 2 * NR_CPUS);
1018 #else 1019 #else
1019 rp->maxactive = NR_CPUS; 1020 rp->maxactive = NR_CPUS;
1020 #endif 1021 #endif
1021 } 1022 }
1022 spin_lock_init(&rp->lock); 1023 spin_lock_init(&rp->lock);
1023 INIT_HLIST_HEAD(&rp->free_instances); 1024 INIT_HLIST_HEAD(&rp->free_instances);
1024 for (i = 0; i < rp->maxactive; i++) { 1025 for (i = 0; i < rp->maxactive; i++) {
1025 inst = kmalloc(sizeof(struct kretprobe_instance) + 1026 inst = kmalloc(sizeof(struct kretprobe_instance) +
1026 rp->data_size, GFP_KERNEL); 1027 rp->data_size, GFP_KERNEL);
1027 if (inst == NULL) { 1028 if (inst == NULL) {
1028 free_rp_inst(rp); 1029 free_rp_inst(rp);
1029 return -ENOMEM; 1030 return -ENOMEM;
1030 } 1031 }
1031 INIT_HLIST_NODE(&inst->hlist); 1032 INIT_HLIST_NODE(&inst->hlist);
1032 hlist_add_head(&inst->hlist, &rp->free_instances); 1033 hlist_add_head(&inst->hlist, &rp->free_instances);
1033 } 1034 }
1034 1035
1035 rp->nmissed = 0; 1036 rp->nmissed = 0;
1036 /* Establish function entry probe point */ 1037 /* Establish function entry probe point */
1037 ret = register_kprobe(&rp->kp); 1038 ret = register_kprobe(&rp->kp);
1038 if (ret != 0) 1039 if (ret != 0)
1039 free_rp_inst(rp); 1040 free_rp_inst(rp);
1040 return ret; 1041 return ret;
1041 } 1042 }
1042 EXPORT_SYMBOL_GPL(register_kretprobe); 1043 EXPORT_SYMBOL_GPL(register_kretprobe);
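
/*
 * Illustrative sketch only, not part of kernel/kprobes.c: a minimal module
 * consuming the register_kretprobe() API above. The probed symbol
 * ("do_fork"), the handler names, and the module itself are hypothetical.
 */
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/kprobes.h>

/* per-instance private data, sized through kretprobe.data_size */
struct fork_data {
	ktime_t entry_stamp;
};

static int fork_entry_handler(struct kretprobe_instance *ri,
			      struct pt_regs *regs)
{
	struct fork_data *data = (struct fork_data *)ri->data;

	data->entry_stamp = ktime_get();
	return 0;	/* 0: go ahead and arm the return probe for this call */
}

static int fork_ret_handler(struct kretprobe_instance *ri,
			    struct pt_regs *regs)
{
	struct fork_data *data = (struct fork_data *)ri->data;
	s64 delta = ktime_to_ns(ktime_sub(ktime_get(), data->entry_stamp));

	printk(KERN_INFO "%s returned after %lld ns\n",
	       ri->rp->kp.symbol_name, (long long)delta);
	return 0;
}

static struct kretprobe fork_kretprobe = {
	.kp.symbol_name	= "do_fork",		/* hypothetical target */
	.entry_handler	= fork_entry_handler,
	.handler	= fork_ret_handler,
	.data_size	= sizeof(struct fork_data),
	.maxactive	= 20,			/* instances pre-allocated above */
};

static int __init fork_probe_init(void)
{
	return register_kretprobe(&fork_kretprobe);
}

static void __exit fork_probe_exit(void)
{
	unregister_kretprobe(&fork_kretprobe);
	printk(KERN_INFO "missed %d instances\n", fork_kretprobe.nmissed);
}

module_init(fork_probe_init);
module_exit(fork_probe_exit);
MODULE_LICENSE("GPL");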

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable any kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but is not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p %s %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}
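
/*
 * For illustration only (addresses are hypothetical): with the format above,
 * lines of /sys/kernel/debug/kprobes/list look roughly like
 *
 *   c0152d50 r do_fork+0x0
 *   c01a3b20 k vfs_read+0x0  [DISABLED]
 *
 * i.e. probe address, probe type (k: kprobe, r: kretprobe, j: jprobe),
 * symbol+offset, an optional module name, and optional [GONE]/[DISABLED]
 * state flags.
 */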

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open = kprobes_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe is gone; it can't be enabled. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
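
/*
 * Illustrative sketch only, not part of kernel/kprobes.c: pausing and
 * resuming an already-registered probe with the disable_kprobe()/
 * enable_kprobe() API above. The probed symbol and the helper names are
 * hypothetical; register_kprobe(&open_kprobe) is assumed to have
 * succeeded earlier.
 */
#include <linux/kprobes.h>

static int open_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* just single-step the original instruction */
}

static struct kprobe open_kprobe = {
	.symbol_name = "do_sys_open",	/* hypothetical target */
	.pre_handler = open_pre_handler,
};

static int pause_open_probe(void)
{
	/* keeps the probe registered but removes its breakpoint */
	return disable_kprobe(&open_kprobe);
}

static int resume_open_probe(void)
{
	/* re-arms the breakpoint; -EINVAL if the probe is invalid or gone */
	return enable_kprobe(&open_kprobe);
}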

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read = read_enabled_file_bool,
	.write = write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);