kernel/kprobes.c

  /*
   *  Kernel Probes (KProbes)
   *  kernel/kprobes.c
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
   *
   * Copyright (C) IBM Corporation, 2002, 2004
   *
   * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
   *		Probes initial implementation (includes suggestions from
   *		Rusty Russell).
   * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
   *		hlists and exceptions notifier as suggested by Andi Kleen.
   * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
   *		interface to access function arguments.
   * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
   *		exceptions notifier to be first on the priority list.
   * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
   *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
   *		<prasanna@in.ibm.com> added function-return probes.
   */
  #include <linux/kprobes.h>
  #include <linux/hash.h>
  #include <linux/init.h>
  #include <linux/slab.h>
  #include <linux/stddef.h>
  #include <linux/module.h>
  #include <linux/moduleloader.h>
  #include <linux/kallsyms.h>
  #include <linux/freezer.h>
  #include <linux/seq_file.h>
  #include <linux/debugfs.h>
  #include <linux/sysctl.h>
  #include <linux/kdebug.h>
  #include <linux/memory.h>
  #include <linux/ftrace.h>
  #include <linux/cpu.h>

  #include <asm-generic/sections.h>
  #include <asm/cacheflush.h>
  #include <asm/errno.h>
  #include <asm/uaccess.h>
  
  #define KPROBE_HASH_BITS 6
  #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
  
  /*
   * Some oddball architectures like 64bit powerpc have function descriptors
   * so this must be overridable.
   */
  #ifndef kprobe_lookup_name
  #define kprobe_lookup_name(name, addr) \
  	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
  #endif
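/*
 * Illustrative sketch (not part of the original source): with the default
 * definition above, looking up a probe address by name is just a kallsyms
 * lookup, e.g.
 *
 *	kprobe_opcode_t *addr;
 *	kprobe_lookup_name("do_fork", addr);	(the symbol name is only an example)
 *
 * Architectures with function descriptors, such as 64-bit powerpc, override
 * the macro so the descriptor is dereferenced to the real entry address.
 */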
  static int kprobes_initialized;
  static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
  static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

  /* NOTE: change this value only with kprobe_mutex held */
  static bool kprobes_all_disarmed;

  static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
  static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  static struct {
  	spinlock_t lock ____cacheline_aligned_in_smp;
  } kretprobe_table_locks[KPROBE_TABLE_SIZE];
  
  static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  {
  	return &(kretprobe_table_locks[hash].lock);
  }

  /*
   * Normally, functions that we'd want to prohibit kprobes in, are marked
   * __kprobes. But, there are cases where such functions already belong to
   * a different section (__sched for preempt_schedule)
   *
   * For such cases, we now have a blacklist
   */
  static struct kprobe_blackpoint kprobe_blacklist[] = {
  	{"preempt_schedule",},
  	{"native_get_debugreg",},
  	{"irq_entries_start",},
  	{"common_interrupt",},
  	{"mcount",},	/* mcount can be called from everywhere */
  	{NULL}    /* Terminator */
  };
  #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
  /*
   * kprobe->ainsn.insn points to the copy of the instruction to be
   * single-stepped. x86_64, POWER4 and above have no-exec support and
   * stepping on the instruction on a vmalloced/kmalloced/data page
   * is a recipe for disaster
   */
  struct kprobe_insn_page {
  	struct list_head list;
  	kprobe_opcode_t *insns;		/* Page of instruction slots */
  	int nused;
  	int ngarbage;
  	char slot_used[];
  };
  #define KPROBE_INSN_PAGE_SIZE(slots)			\
  	(offsetof(struct kprobe_insn_page, slot_used) +	\
  	 (sizeof(char) * (slots)))
  
  struct kprobe_insn_cache {
  	struct list_head pages;	/* list of kprobe_insn_page */
  	size_t insn_size;	/* size of instruction slot */
  	int nr_garbage;
  };
  
  static int slots_per_page(struct kprobe_insn_cache *c)
  {
  	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
  }
  enum kprobe_slot_state {
  	SLOT_CLEAN = 0,
  	SLOT_DIRTY = 1,
  	SLOT_USED = 2,
  };
  static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
  static struct kprobe_insn_cache kprobe_insn_slots = {
  	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
  	.insn_size = MAX_INSN_SIZE,
  	.nr_garbage = 0,
  };
  static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

  /**
   * __get_insn_slot() - Find a slot on an executable page for an instruction.
   * We allocate an executable page if there's no room on existing ones.
   */
  static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
  {
  	struct kprobe_insn_page *kip;

   retry:
  	list_for_each_entry(kip, &c->pages, list) {
  		if (kip->nused < slots_per_page(c)) {
  			int i;
  			for (i = 0; i < slots_per_page(c); i++) {
  				if (kip->slot_used[i] == SLOT_CLEAN) {
  					kip->slot_used[i] = SLOT_USED;
  					kip->nused++;
  					return kip->insns + (i * c->insn_size);
  				}
  			}
  			/* kip->nused is broken. Fix it. */
  			kip->nused = slots_per_page(c);
  			WARN_ON(1);
  		}
  	}
  	/* If there are any garbage slots, collect it and try again. */
  	if (c->nr_garbage && collect_garbage_slots(c) == 0)
  		goto retry;
  
  	/* All out of space.  Need to allocate a new page. */
  	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
  	if (!kip)
  		return NULL;
  
  	/*
  	 * Use module_alloc so this page is within +/- 2GB of where the
  	 * kernel image and loaded module images reside. This is required
  	 * so x86_64 can correctly handle the %rip-relative fixups.
  	 */
  	kip->insns = module_alloc(PAGE_SIZE);
  	if (!kip->insns) {
  		kfree(kip);
  		return NULL;
  	}
  	INIT_LIST_HEAD(&kip->list);
  	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
  	kip->slot_used[0] = SLOT_USED;
  	kip->nused = 1;
  	kip->ngarbage = 0;
  	list_add(&kip->list, &c->pages);
  	return kip->insns;
  }

  kprobe_opcode_t __kprobes *get_insn_slot(void)
  {
  	kprobe_opcode_t *ret = NULL;
  	mutex_lock(&kprobe_insn_mutex);
  	ret = __get_insn_slot(&kprobe_insn_slots);
  	mutex_unlock(&kprobe_insn_mutex);

  	return ret;
  }
  /* Return 1 if all garbages are collected, otherwise 0. */
  static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
  {
  	kip->slot_used[idx] = SLOT_CLEAN;
  	kip->nused--;
  	if (kip->nused == 0) {
  		/*
  		 * Page is no longer in use.  Free it unless
  		 * it's the last one.  We keep the last one
  		 * so as not to have to set it up again the
  		 * next time somebody inserts a probe.
  		 */
  		if (!list_is_singular(&kip->list)) {
  			list_del(&kip->list);
  			module_free(NULL, kip->insns);
  			kfree(kip);
  		}
  		return 1;
  	}
  	return 0;
  }
  static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
  {
  	struct kprobe_insn_page *kip, *next;

  	/* Ensure no-one is interrupted on the garbages */
  	synchronize_sched();

  	list_for_each_entry_safe(kip, next, &c->pages, list) {
  		int i;
  		if (kip->ngarbage == 0)
  			continue;
  		kip->ngarbage = 0;	/* we will collect all garbages */
  		for (i = 0; i < slots_per_page(c); i++) {
  			if (kip->slot_used[i] == SLOT_DIRTY &&
  			    collect_one_slot(kip, i))
  				break;
  		}
  	}
  	c->nr_garbage = 0;
  	return 0;
  }
  static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
  				       kprobe_opcode_t *slot, int dirty)
  {
  	struct kprobe_insn_page *kip;

  	list_for_each_entry(kip, &c->pages, list) {
  		long idx = ((long)slot - (long)kip->insns) /
  				(c->insn_size * sizeof(kprobe_opcode_t));
  		if (idx >= 0 && idx < slots_per_page(c)) {
  			WARN_ON(kip->slot_used[idx] != SLOT_USED);
  			if (dirty) {
  				kip->slot_used[idx] = SLOT_DIRTY;
  				kip->ngarbage++;
  				if (++c->nr_garbage > slots_per_page(c))
  					collect_garbage_slots(c);
  			} else
  				collect_one_slot(kip, idx);
  			return;
  		}
  	}
  	/* Could not free this slot. */
  	WARN_ON(1);
  }

  void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
  {
  	mutex_lock(&kprobe_insn_mutex);
  	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
  	mutex_unlock(&kprobe_insn_mutex);
  }
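/*
 * Usage sketch (illustrative, not part of the original source): architecture
 * code is the consumer of this slot cache. On x86, for instance,
 * arch_prepare_kprobe() takes a slot and copies the probed instruction into
 * it, roughly:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *
 * and arch_remove_kprobe() later returns the slot with free_insn_slot().
 */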
  #ifdef CONFIG_OPTPROBES
  /* For optimized_kprobe buffer */
  static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
  static struct kprobe_insn_cache kprobe_optinsn_slots = {
  	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
  	/* .insn_size is initialized later */
  	.nr_garbage = 0,
  };
  /* Get a slot for optimized_kprobe buffer */
  kprobe_opcode_t __kprobes *get_optinsn_slot(void)
  {
  	kprobe_opcode_t *ret = NULL;
  
  	mutex_lock(&kprobe_optinsn_mutex);
  	ret = __get_insn_slot(&kprobe_optinsn_slots);
  	mutex_unlock(&kprobe_optinsn_mutex);
  
  	return ret;
  }
  
  void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
  {
  	mutex_lock(&kprobe_optinsn_mutex);
  	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
  	mutex_unlock(&kprobe_optinsn_mutex);
  }
  #endif
  #endif

  /* We have preemption disabled.. so it is safe to use __ versions */
  static inline void set_kprobe_instance(struct kprobe *kp)
  {
  	__get_cpu_var(kprobe_instance) = kp;
  }
  
  static inline void reset_kprobe_instance(void)
  {
  	__get_cpu_var(kprobe_instance) = NULL;
  }
  /*
   * This routine is called either:
   * 	- under the kprobe_mutex - during kprobe_[un]register()
   * 				OR
   * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
   */
  struct kprobe __kprobes *get_kprobe(void *addr)
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  
  	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
  	hlist_for_each_entry_rcu(p, node, head, hlist) {
  		if (p->addr == addr)
  			return p;
  	}

  	return NULL;
  }
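/*
 * Caller sketch (illustrative): the breakpoint handler looks a probe up with
 * preemption already disabled, which is what makes the RCU-protected hash
 * walk above safe without taking kprobe_mutex:
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)addr);
 *	if (p)
 *		... dispatch to the probe's handlers ...
 *	preempt_enable();
 */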
  static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
  
  /* Return true if the kprobe is an aggregator */
  static inline int kprobe_aggrprobe(struct kprobe *p)
  {
  	return p->pre_handler == aggr_pre_handler;
  }
  
  /*
   * Keep all fields in the kprobe consistent
   */
  static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
  {
  	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
  	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
  }
  
  #ifdef CONFIG_OPTPROBES
  /* NOTE: change this value only with kprobe_mutex held */
  static bool kprobes_allow_optimization;
  /*
   * Call all pre_handler on the list, but ignores its return value.
   * This must be called from arch-dep optimized caller.
   */
  void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
  {
  	struct kprobe *kp;
  
  	list_for_each_entry_rcu(kp, &p->list, list) {
  		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
  			set_kprobe_instance(kp);
  			kp->pre_handler(kp, regs);
  		}
  		reset_kprobe_instance();
  	}
  }
  
  /* Return true(!0) if the kprobe is ready for optimization. */
  static inline int kprobe_optready(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	if (kprobe_aggrprobe(p)) {
  		op = container_of(p, struct optimized_kprobe, kp);
  		return arch_prepared_optinsn(&op->optinsn);
  	}
  
  	return 0;
  }
  
  /*
   * Return an optimized kprobe whose optimizing code replaces
   * instructions including addr (exclude breakpoint).
   */
  struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
  {
  	int i;
  	struct kprobe *p = NULL;
  	struct optimized_kprobe *op;
  
  	/* Don't check i == 0, since that is a breakpoint case. */
  	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
  		p = get_kprobe((void *)(addr - i));
  
  	if (p && kprobe_optready(p)) {
  		op = container_of(p, struct optimized_kprobe, kp);
  		if (arch_within_optimized_kprobe(op, addr))
  			return p;
  	}
  
  	return NULL;
  }
  
  /* Optimization staging list, protected by kprobe_mutex */
  static LIST_HEAD(optimizing_list);
  
  static void kprobe_optimizer(struct work_struct *work);
  static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  #define OPTIMIZE_DELAY 5
  
  /* Kprobe jump optimizer */
  static __kprobes void kprobe_optimizer(struct work_struct *work)
  {
  	struct optimized_kprobe *op, *tmp;
  
  	/* Lock modules while optimizing kprobes */
  	mutex_lock(&module_mutex);
  	mutex_lock(&kprobe_mutex);
  	if (kprobes_all_disarmed || !kprobes_allow_optimization)
  		goto end;
  
  	/*
  	 * Wait for quiesence period to ensure all running interrupts
  	 * are done. Because optprobe may modify multiple instructions
  	 * there is a chance that Nth instruction is interrupted. In that
  	 * case, running interrupt can return to 2nd-Nth byte of jump
  	 * instruction. This wait is for avoiding it.
  	 */
  	synchronize_sched();
  
  	/*
  	 * The optimization/unoptimization refers online_cpus via
  	 * stop_machine() and cpu-hotplug modifies online_cpus.
  	 * And same time, text_mutex will be held in cpu-hotplug and here.
  	 * This combination can cause a deadlock (cpu-hotplug try to lock
  	 * text_mutex but stop_machine can not be done because online_cpus
  	 * has been changed)
  	 * To avoid this deadlock, we need to call get_online_cpus()
  	 * for preventing cpu-hotplug outside of text_mutex locking.
  	 */
  	get_online_cpus();
  	mutex_lock(&text_mutex);
  	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
  		WARN_ON(kprobe_disabled(&op->kp));
  		if (arch_optimize_kprobe(op) < 0)
  			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  		list_del_init(&op->list);
  	}
  	mutex_unlock(&text_mutex);
  	put_online_cpus();
  end:
  	mutex_unlock(&kprobe_mutex);
  	mutex_unlock(&module_mutex);
  }
  
  /* Optimize kprobe if p is ready to be optimized */
  static __kprobes void optimize_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	/* Check if the kprobe is disabled or not ready for optimization. */
  	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
  	    (kprobe_disabled(p) || kprobes_all_disarmed))
  		return;
  
  	/* Both of break_handler and post_handler are not supported. */
  	if (p->break_handler || p->post_handler)
  		return;
  
  	op = container_of(p, struct optimized_kprobe, kp);
  
  	/* Check there is no other kprobes at the optimized instructions */
  	if (arch_check_optimized_kprobe(op) < 0)
  		return;
  
  	/* Check if it is already optimized. */
  	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
  		return;
  
  	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
  	list_add(&op->list, &optimizing_list);
  	if (!delayed_work_pending(&optimizing_work))
  		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
  }
  
  /* Unoptimize a kprobe if p is optimized */
  static __kprobes void unoptimize_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
  		op = container_of(p, struct optimized_kprobe, kp);
  		if (!list_empty(&op->list))
  			/* Dequeue from the optimization queue */
  			list_del_init(&op->list);
  		else
  			/* Replace jump with break */
  			arch_unoptimize_kprobe(op);
  		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  	}
  }
  
  /* Remove optimized instructions */
  static void __kprobes kill_optimized_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	op = container_of(p, struct optimized_kprobe, kp);
  	if (!list_empty(&op->list)) {
  		/* Dequeue from the optimization queue */
  		list_del_init(&op->list);
  		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  	}
  	/* Don't unoptimize, because the target code will be freed. */
  	arch_remove_optimized_kprobe(op);
  }
  
  /* Try to prepare optimized instructions */
  static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	op = container_of(p, struct optimized_kprobe, kp);
  	arch_prepare_optimized_kprobe(op);
  }
  
  /* Free optimized instructions and optimized_kprobe */
  static __kprobes void free_aggr_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	op = container_of(p, struct optimized_kprobe, kp);
  	arch_remove_optimized_kprobe(op);
  	kfree(op);
  }
  
  /* Allocate new optimized_kprobe and try to prepare optimized instructions */
  static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
  	if (!op)
  		return NULL;
  
  	INIT_LIST_HEAD(&op->list);
  	op->kp.addr = p->addr;
  	arch_prepare_optimized_kprobe(op);
  
  	return &op->kp;
  }
  
  static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
  
  /*
   * Prepare an optimized_kprobe and optimize it
   * NOTE: p must be a normal registered kprobe
   */
  static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
  {
  	struct kprobe *ap;
  	struct optimized_kprobe *op;
  
  	ap = alloc_aggr_kprobe(p);
  	if (!ap)
  		return;
  
  	op = container_of(ap, struct optimized_kprobe, kp);
  	if (!arch_prepared_optinsn(&op->optinsn)) {
  		/* If failed to setup optimizing, fallback to kprobe */
  		free_aggr_kprobe(ap);
  		return;
  	}
  
  	init_aggr_kprobe(ap, p);
  	optimize_kprobe(ap);
  }
  #ifdef CONFIG_SYSCTL
  static void __kprobes optimize_all_kprobes(void)
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  	unsigned int i;
  
  	/* If optimization is already allowed, just return */
  	if (kprobes_allow_optimization)
  		return;
  
  	kprobes_allow_optimization = true;
  	mutex_lock(&text_mutex);
  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  		head = &kprobe_table[i];
  		hlist_for_each_entry_rcu(p, node, head, hlist)
  			if (!kprobe_disabled(p))
  				optimize_kprobe(p);
  	}
  	mutex_unlock(&text_mutex);
	printk(KERN_INFO "Kprobes globally optimized\n");
  }
  
  static void __kprobes unoptimize_all_kprobes(void)
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  	unsigned int i;
  
  	/* If optimization is already prohibited, just return */
  	if (!kprobes_allow_optimization)
  		return;
  
  	kprobes_allow_optimization = false;
	printk(KERN_INFO "Kprobes globally unoptimized\n");
  	get_online_cpus();	/* For avoiding text_mutex deadlock */
  	mutex_lock(&text_mutex);
  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  		head = &kprobe_table[i];
  		hlist_for_each_entry_rcu(p, node, head, hlist) {
  			if (!kprobe_disabled(p))
  				unoptimize_kprobe(p);
  		}
  	}
  
  	mutex_unlock(&text_mutex);
  	put_online_cpus();
  	/* Allow all currently running kprobes to complete */
  	synchronize_sched();
  }
  
  int sysctl_kprobes_optimization;
  int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
  				      void __user *buffer, size_t *length,
  				      loff_t *ppos)
  {
  	int ret;
  
  	mutex_lock(&kprobe_mutex);
  	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
  	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  
  	if (sysctl_kprobes_optimization)
  		optimize_all_kprobes();
  	else
  		unoptimize_all_kprobes();
  	mutex_unlock(&kprobe_mutex);
  
  	return ret;
  }
  #endif /* CONFIG_SYSCTL */
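/*
 * Note (not part of the original source): this handler is assumed to back
 * the debug.kprobes-optimization sysctl, so jump optimization can be toggled
 * at run time, e.g.
 *
 *	echo 0 > /proc/sys/debug/kprobes-optimization	(disable, fall back to breakpoints)
 *	echo 1 > /proc/sys/debug/kprobes-optimization	(re-enable optimization)
 */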
  static void __kprobes __arm_kprobe(struct kprobe *p)
  {
  	struct kprobe *old_p;
  
  	/* Check collision with other optimized kprobes */
  	old_p = get_optimized_kprobe((unsigned long)p->addr);
  	if (unlikely(old_p))
  		unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */
  
  	arch_arm_kprobe(p);
  	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
  }
  
  static void __kprobes __disarm_kprobe(struct kprobe *p)
  {
  	struct kprobe *old_p;
  
  	unoptimize_kprobe(p);	/* Try to unoptimize */
  	arch_disarm_kprobe(p);
  
  	/* If another kprobe was blocked, optimize it. */
  	old_p = get_optimized_kprobe((unsigned long)p->addr);
  	if (unlikely(old_p))
  		optimize_kprobe(old_p);
  }
  
  #else /* !CONFIG_OPTPROBES */
  
  #define optimize_kprobe(p)			do {} while (0)
  #define unoptimize_kprobe(p)			do {} while (0)
  #define kill_optimized_kprobe(p)		do {} while (0)
  #define prepare_optimized_kprobe(p)		do {} while (0)
  #define try_to_optimize_kprobe(p)		do {} while (0)
  #define __arm_kprobe(p)				arch_arm_kprobe(p)
  #define __disarm_kprobe(p)			arch_disarm_kprobe(p)
  
  static __kprobes void free_aggr_kprobe(struct kprobe *p)
  {
  	kfree(p);
  }
  
  static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
  {
  	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
  }
  #endif /* CONFIG_OPTPROBES */
  /* Arm a kprobe with text_mutex */
  static void __kprobes arm_kprobe(struct kprobe *kp)
  {
  	/*
  	 * Here, since __arm_kprobe() doesn't use stop_machine(),
  	 * this doesn't cause deadlock on text_mutex. So, we don't
  	 * need get_online_cpus().
  	 */
  	mutex_lock(&text_mutex);
  	__arm_kprobe(kp);
  	mutex_unlock(&text_mutex);
  }
  
  /* Disarm a kprobe with text_mutex */
  static void __kprobes disarm_kprobe(struct kprobe *kp)
  {
  	get_online_cpus();	/* For avoiding text_mutex deadlock */
  	mutex_lock(&text_mutex);
  	__disarm_kprobe(kp);
  	mutex_unlock(&text_mutex);
  	put_online_cpus();
  }
  /*
   * Aggregate handlers for multiple kprobes support - these handlers
   * take care of invoking the individual kprobe handlers on p->list
   */
  static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
  {
  	struct kprobe *kp;
  	list_for_each_entry_rcu(kp, &p->list, list) {
  		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
  			set_kprobe_instance(kp);
  			if (kp->pre_handler(kp, regs))
  				return 1;
  		}
  		reset_kprobe_instance();
  	}
  	return 0;
  }
  static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
  					unsigned long flags)
  {
  	struct kprobe *kp;
  	list_for_each_entry_rcu(kp, &p->list, list) {
  		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
  			set_kprobe_instance(kp);
  			kp->post_handler(kp, regs, flags);
  			reset_kprobe_instance();
  		}
  	}
  }
  static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
  					int trapnr)
  {
  	struct kprobe *cur = __get_cpu_var(kprobe_instance);
  	/*
  	 * if we faulted "during" the execution of a user specified
  	 * probe handler, invoke just that probe's fault handler
  	 */
  	if (cur && cur->fault_handler) {
  		if (cur->fault_handler(cur, regs, trapnr))
  			return 1;
  	}
  	return 0;
  }
  static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
  {
  	struct kprobe *cur = __get_cpu_var(kprobe_instance);
  	int ret = 0;
  
  	if (cur && cur->break_handler) {
  		if (cur->break_handler(cur, regs))
  			ret = 1;
  	}
  	reset_kprobe_instance();
  	return ret;
  }
  /* Walks the list and increments nmissed count for multiprobe case */
  void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
  {
  	struct kprobe *kp;
  	if (!kprobe_aggrprobe(p)) {
  		p->nmissed++;
  	} else {
  		list_for_each_entry_rcu(kp, &p->list, list)
  			kp->nmissed++;
  	}
  	return;
  }
  void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
  				struct hlist_head *head)
  {
  	struct kretprobe *rp = ri->rp;
  	/* remove rp inst off the rprobe_inst_table */
  	hlist_del(&ri->hlist);
  	INIT_HLIST_NODE(&ri->hlist);
  	if (likely(rp)) {
  		spin_lock(&rp->lock);
  		hlist_add_head(&ri->hlist, &rp->free_instances);
  		spin_unlock(&rp->lock);
  	} else
  		/* Unregistering */
  		hlist_add_head(&ri->hlist, head);
  }
  void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
  			 struct hlist_head **head, unsigned long *flags)
  {
  	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
  	spinlock_t *hlist_lock;
  
  	*head = &kretprobe_inst_table[hash];
  	hlist_lock = kretprobe_table_lock_ptr(hash);
  	spin_lock_irqsave(hlist_lock, *flags);
  }
  static void __kprobes kretprobe_table_lock(unsigned long hash,
  	unsigned long *flags)
  {
  	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
  	spin_lock_irqsave(hlist_lock, *flags);
  }
  void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
  	unsigned long *flags)
  {
  	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
  	spinlock_t *hlist_lock;
  
  	hlist_lock = kretprobe_table_lock_ptr(hash);
  	spin_unlock_irqrestore(hlist_lock, *flags);
  }
  void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
  {
  	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
  	spin_unlock_irqrestore(hlist_lock, *flags);
  }
  /*
   * This function is called from finish_task_switch when task tk becomes dead,
   * so that we can recycle any function-return probe instances associated
   * with this task. These left over instances represent probed functions
   * that have been called but will never return.
   */
  void __kprobes kprobe_flush_task(struct task_struct *tk)
  {
  	struct kretprobe_instance *ri;
  	struct hlist_head *head, empty_rp;
  	struct hlist_node *node, *tmp;
  	unsigned long hash, flags = 0;

  	if (unlikely(!kprobes_initialized))
  		/* Early boot.  kretprobe_table_locks not yet initialized. */
  		return;
  
  	hash = hash_ptr(tk, KPROBE_HASH_BITS);
  	head = &kretprobe_inst_table[hash];
  	kretprobe_table_lock(hash, &flags);
  	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
  		if (ri->task == tk)
  			recycle_rp_inst(ri, &empty_rp);
  	}
  	kretprobe_table_unlock(hash, &flags);
  	INIT_HLIST_HEAD(&empty_rp);
  	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
  		hlist_del(&ri->hlist);
  		kfree(ri);
  	}
  }
  static inline void free_rp_inst(struct kretprobe *rp)
  {
  	struct kretprobe_instance *ri;
  	struct hlist_node *pos, *next;
  	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
  		hlist_del(&ri->hlist);
  		kfree(ri);
  	}
  }
  static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
  {
  	unsigned long flags, hash;
  	struct kretprobe_instance *ri;
  	struct hlist_node *pos, *next;
  	struct hlist_head *head;
  	/* No race here */
  	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
  		kretprobe_table_lock(hash, &flags);
  		head = &kretprobe_inst_table[hash];
  		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
  			if (ri->rp == rp)
  				ri->rp = NULL;
  		}
  		kretprobe_table_unlock(hash, &flags);
  	}
  	free_rp_inst(rp);
  }
  /*
  * Add the new probe to ap->list. Fail if this is the
  * second jprobe at the address - two jprobes can't coexist
  */
  static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  {
  	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
  
  	if (p->break_handler || p->post_handler)
  		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */
  	if (p->break_handler) {
  		if (ap->break_handler)
  			return -EEXIST;
  		list_add_tail_rcu(&p->list, &ap->list);
  		ap->break_handler = aggr_break_handler;
  	} else
  		list_add_rcu(&p->list, &ap->list);
  	if (p->post_handler && !ap->post_handler)
  		ap->post_handler = aggr_post_handler;
  
  	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
  		ap->flags &= ~KPROBE_FLAG_DISABLED;
  		if (!kprobes_all_disarmed)
  			/* Arm the breakpoint again. */
  			__arm_kprobe(ap);
  	}
  	return 0;
  }
  
  /*
   * Fill in the required fields of the "manager kprobe". Replace the
   * earlier kprobe in the hlist with the manager kprobe
   */
  static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
  {
  	/* Copy p's insn slot to ap */
  	copy_kprobe(p, ap);
  	flush_insn_slot(ap);
  	ap->addr = p->addr;
  	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
  	ap->pre_handler = aggr_pre_handler;
  	ap->fault_handler = aggr_fault_handler;
  	/* We don't care the kprobe which has gone. */
  	if (p->post_handler && !kprobe_gone(p))
  		ap->post_handler = aggr_post_handler;
  	if (p->break_handler && !kprobe_gone(p))
  		ap->break_handler = aggr_break_handler;
  
  	INIT_LIST_HEAD(&ap->list);
  	INIT_HLIST_NODE(&ap->hlist);

  	list_add_rcu(&p->list, &ap->list);
  	hlist_replace_rcu(&p->hlist, &ap->hlist);
  }
  
  /*
   * This is the second or subsequent kprobe at the address - handle
   * the intricacies
   */
  static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
  					  struct kprobe *p)
  {
  	int ret = 0;
  	struct kprobe *ap = old_p;

  	if (!kprobe_aggrprobe(old_p)) {
  		/* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
  		ap = alloc_aggr_kprobe(old_p);
  		if (!ap)
  			return -ENOMEM;
  		init_aggr_kprobe(ap, old_p);
  	}
  
  	if (kprobe_gone(ap)) {
  		/*
  		 * Attempting to insert new probe at the same location that
  		 * had a probe in the module vaddr area which already
  		 * freed. So, the instruction slot has already been
  		 * released. We need a new slot for the new probe.
  		 */
  		ret = arch_prepare_kprobe(ap);
  		if (ret)
  			/*
  			 * Even if fail to allocate new slot, don't need to
  			 * free aggr_probe. It will be used next time, or
  			 * freed by unregister_kprobe.
  			 */
  			return ret;

  		/* Prepare optimized instructions if possible. */
  		prepare_optimized_kprobe(ap);
  		/*
  		 * Clear gone flag to prevent allocating new slot again, and
  		 * set disabled flag because it is not armed yet.
  		 */
  		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
  			    | KPROBE_FLAG_DISABLED;
  	}

  	/* Copy ap's insn slot to p */
  	copy_kprobe(ap, p);
  	return add_new_kprobe(ap, p);
  }
  /* Try to disable aggr_kprobe, and return 1 if succeeded.*/
  static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
  {
  	struct kprobe *kp;
  
  	list_for_each_entry_rcu(kp, &p->list, list) {
  		if (!kprobe_disabled(kp))
  			/*
  			 * There is an active probe on the list.
  			 * We can't disable aggr_kprobe.
  			 */
  			return 0;
  	}
  	p->flags |= KPROBE_FLAG_DISABLED;
  	return 1;
  }
  static int __kprobes in_kprobes_functions(unsigned long addr)
  {
  	struct kprobe_blackpoint *kb;
  	if (addr >= (unsigned long)__kprobes_text_start &&
  	    addr < (unsigned long)__kprobes_text_end)
  		return -EINVAL;
  	/*
  	 * If there exists a kprobe_blacklist, verify and
  	 * fail any probe registration in the prohibited area
  	 */
  	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
  		if (kb->start_addr) {
  			if (addr >= kb->start_addr &&
  			    addr < (kb->start_addr + kb->range))
  				return -EINVAL;
  		}
  	}
  	return 0;
  }
  /*
   * If we have a symbol_name argument, look it up and add the offset field
   * to it. This way, we can specify a relative address to a symbol.
   */
  static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
  {
  	kprobe_opcode_t *addr = p->addr;
  	if (p->symbol_name) {
  		if (addr)
  			return NULL;
  		kprobe_lookup_name(p->symbol_name, addr);
  	}
  
  	if (!addr)
  		return NULL;
  	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
  }
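/*
 * Illustrative example (not part of the original source): a caller may name
 * the probe point symbolically instead of supplying a raw address, e.g.
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",	(hypothetical target symbol)
 *		.offset		= 0,
 *	};
 *
 * kprobe_addr() then resolves the symbol through kprobe_lookup_name() and
 * adds the byte offset; supplying both .addr and .symbol_name is rejected.
 */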
  /* Check passed kprobe is valid and return kprobe in kprobe_table. */
  static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
  {
  	struct kprobe *old_p, *list_p;
  
  	old_p = get_kprobe(p->addr);
  	if (unlikely(!old_p))
  		return NULL;
  
  	if (p != old_p) {
  		list_for_each_entry_rcu(list_p, &old_p->list, list)
  			if (list_p == p)
  			/* kprobe p is a valid probe */
  				goto valid;
  		return NULL;
  	}
  valid:
  	return old_p;
  }
  
  /* Return error if the kprobe is being re-registered */
  static inline int check_kprobe_rereg(struct kprobe *p)
  {
  	int ret = 0;
  	struct kprobe *old_p;
  
  	mutex_lock(&kprobe_mutex);
  	old_p = __get_valid_kprobe(p);
  	if (old_p)
  		ret = -EINVAL;
  	mutex_unlock(&kprobe_mutex);
  	return ret;
  }
  int __kprobes register_kprobe(struct kprobe *p)
  {
  	int ret = 0;
  	struct kprobe *old_p;
  	struct module *probed_mod;
  	kprobe_opcode_t *addr;

  	addr = kprobe_addr(p);
  	if (!addr)
  		return -EINVAL;
  	p->addr = addr;

  	ret = check_kprobe_rereg(p);
  	if (ret)
  		return ret;
  	preempt_disable();
  	if (!kernel_text_address((unsigned long) p->addr) ||
  	    in_kprobes_functions((unsigned long) p->addr) ||
  	    ftrace_text_reserved(p->addr, p->addr)) {
  		preempt_enable();
  		return -EINVAL;
  	}

  	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
  	p->flags &= KPROBE_FLAG_DISABLED;
  	/*
  	 * Check if are we probing a module.
  	 */
  	probed_mod = __module_text_address((unsigned long) p->addr);
  	if (probed_mod) {
  		/*
  		 * We must hold a refcount of the probed module while updating
  		 * its code to prohibit unexpected unloading.
  		 */
  		if (unlikely(!try_module_get(probed_mod))) {
  			preempt_enable();
  			return -EINVAL;
  		}
  		/*
  		 * If the module freed .init.text, we couldn't insert
  		 * kprobes in there.
  		 */
  		if (within_module_init((unsigned long)p->addr, probed_mod) &&
  		    probed_mod->state != MODULE_STATE_COMING) {
  			module_put(probed_mod);
  			preempt_enable();
  			return -EINVAL;
  		}
  	}
  	preempt_enable();

  	p->nmissed = 0;
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1133
  	INIT_LIST_HEAD(&p->list);
7a7d1cf95   Ingo Molnar   [PATCH] sem2mutex...
1134
  	mutex_lock(&kprobe_mutex);
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1135
1136
1137
  
  	get_online_cpus();	/* To avoid a text_mutex deadlock. */
  	mutex_lock(&text_mutex);
64f562c6d   Ananth N Mavinakayanahalli   [PATCH] kprobes: ...
1138
1139
  	old_p = get_kprobe(p->addr);
  	if (old_p) {
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1140
  		/* This may unoptimize old_p, hence text_mutex is held. */
64f562c6d   Ananth N Mavinakayanahalli   [PATCH] kprobes: ...
1141
  		ret = register_aggr_kprobe(old_p, p);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1142
1143
  		goto out;
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1144

6f716acd5   Christoph Hellwig   kprobes: codingst...
1145
1146
  	ret = arch_prepare_kprobe(p);
  	if (ret)
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1147
  		goto out;
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1148

64f562c6d   Ananth N Mavinakayanahalli   [PATCH] kprobes: ...
1149
  	INIT_HLIST_NODE(&p->hlist);
3516a4604   Ananth N Mavinakayanahalli   [PATCH] Kprobes: ...
1150
  	hlist_add_head_rcu(&p->hlist,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1151
  		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1152
  	if (!kprobes_all_disarmed && !kprobe_disabled(p))
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1153
1154
1155
1156
  		__arm_kprobe(p);
  
  	/* Try to optimize kprobe */
  	try_to_optimize_kprobe(p);
74a0b5762   Christoph Hellwig   x86: optimize pag...
1157

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1158
  out:
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1159
1160
  	mutex_unlock(&text_mutex);
  	put_online_cpus();
7a7d1cf95   Ingo Molnar   [PATCH] sem2mutex...
1161
  	mutex_unlock(&kprobe_mutex);
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1162

e8386a0cb   Masami Hiramatsu   kprobes: support ...
1163
  	if (probed_mod)
df019b1d8   Keshavamurthy Anil S   [PATCH] kprobes: ...
1164
  		module_put(probed_mod);
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1165

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1166
1167
  	return ret;
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1168
  EXPORT_SYMBOL_GPL(register_kprobe);
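  /*
   * Editor's illustrative sketch, not part of the original file: the usual
   * way a client module drives register_kprobe()/unregister_kprobe().  The
   * probed symbol "do_fork" and all "example_*" names are assumptions made
   * only for this example.
   */
  static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
  {
  	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
  	return 0;	/* 0: single-step the saved instruction as usual */
  }
  
  static struct kprobe example_kp = {
  	.symbol_name	= "do_fork",	/* resolved to an address via kprobe_addr() */
  	.pre_handler	= example_pre_handler,
  };
  
  static int __init example_kprobe_init(void)
  {
  	/* 0 on success; -EINVAL for unresolvable or blacklisted addresses. */
  	return register_kprobe(&example_kp);
  }
  
  static void __exit example_kprobe_exit(void)
  {
  	unregister_kprobe(&example_kp);
  }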
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1169

de5bd88d5   Masami Hiramatsu   kprobes: support ...
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
  /*
   * Unregister a kprobe without a scheduler synchronization.
   */
  static int __kprobes __unregister_kprobe_top(struct kprobe *p)
  {
  	struct kprobe *old_p, *list_p;
  
  	old_p = __get_valid_kprobe(p);
  	if (old_p == NULL)
  		return -EINVAL;
6f716acd5   Christoph Hellwig   kprobes: codingst...
1180
  	if (old_p == p ||
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1181
  	    (kprobe_aggrprobe(old_p) &&
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1182
  	     list_is_singular(&old_p->list))) {
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1183
1184
  		/*
  		 * Only probe on the hash list. Disarm only if kprobes are
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1185
1186
  		 * enabled and not gone - otherwise, the breakpoint would
  		 * already have been removed. We save on flushing icache.
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1187
  		 */
201517a7f   Masami Hiramatsu   kprobes: fix to u...
1188
  		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1189
  			disarm_kprobe(old_p);
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1190
  		hlist_del_rcu(&old_p->hlist);
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1191
  	} else {
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1192
  		if (p->break_handler && !kprobe_gone(p))
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1193
  			old_p->break_handler = NULL;
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1194
  		if (p->post_handler && !kprobe_gone(p)) {
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1195
1196
1197
1198
1199
1200
1201
  			list_for_each_entry_rcu(list_p, &old_p->list, list) {
  				if ((list_p != p) && (list_p->post_handler))
  					goto noclean;
  			}
  			old_p->post_handler = NULL;
  		}
  noclean:
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1202
  		list_del_rcu(&p->list);
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1203
1204
  		if (!kprobe_disabled(old_p)) {
  			try_to_disable_aggr_kprobe(old_p);
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1205
1206
1207
1208
1209
1210
1211
  			if (!kprobes_all_disarmed) {
  				if (kprobe_disabled(old_p))
  					disarm_kprobe(old_p);
  				else
  					/* Try to optimize this probe again */
  					optimize_kprobe(old_p);
  			}
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1212
  		}
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1213
  	}
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1214
1215
  	return 0;
  }
3516a4604   Ananth N Mavinakayanahalli   [PATCH] Kprobes: ...
1216

9861668f7   Masami Hiramatsu   kprobes: add (un)...
1217
1218
  static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
  {
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1219
  	struct kprobe *old_p;
b3e55c727   Mao, Bibo   [PATCH] Kprobes: ...
1220

e8386a0cb   Masami Hiramatsu   kprobes: support ...
1221
  	if (list_empty(&p->list))
0498b6350   Ananth N Mavinakayanahalli   [PATCH] kprobes: ...
1222
  		arch_remove_kprobe(p);
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1223
1224
1225
1226
1227
  	else if (list_is_singular(&p->list)) {
  		/* "p" is the last child of an aggr_kprobe */
  		old_p = list_entry(p->list.next, struct kprobe, list);
  		list_del(&p->list);
  		arch_remove_kprobe(old_p);
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1228
  		free_aggr_kprobe(old_p);
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1229
1230
  	}
  }
49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1231
  int __kprobes register_kprobes(struct kprobe **kps, int num)
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1232
1233
1234
1235
1236
1237
  {
  	int i, ret = 0;
  
  	if (num <= 0)
  		return -EINVAL;
  	for (i = 0; i < num; i++) {
49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1238
  		ret = register_kprobe(kps[i]);
67dddaad5   Masami Hiramatsu   kprobes: fix erro...
1239
1240
1241
  		if (ret < 0) {
  			if (i > 0)
  				unregister_kprobes(kps, i);
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1242
  			break;
367216567   mao, bibo   [PATCH] Kprobe: m...
1243
  		}
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1244
  	}
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1245
1246
  	return ret;
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1247
  EXPORT_SYMBOL_GPL(register_kprobes);
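  /*
   * Editor's illustrative sketch (assumed names): batch registration with
   * register_kprobes().  On the first failure it unregisters the probes it
   * already installed, so the caller has a single error path.
   * example_pre_handler is the handler from the sketch above.
   */
  static struct kprobe example_kp_fork = {
  	.symbol_name	= "do_fork",
  	.pre_handler	= example_pre_handler,
  };
  static struct kprobe example_kp_exit = {
  	.symbol_name	= "do_exit",
  	.pre_handler	= example_pre_handler,
  };
  static struct kprobe *example_kps[] = { &example_kp_fork, &example_kp_exit };
  
  static int __init example_batch_init(void)
  {
  	return register_kprobes(example_kps, ARRAY_SIZE(example_kps));
  }
  
  static void __exit example_batch_exit(void)
  {
  	/* Entries that are no longer registered are tolerated and skipped. */
  	unregister_kprobes(example_kps, ARRAY_SIZE(example_kps));
  }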
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1248

9861668f7   Masami Hiramatsu   kprobes: add (un)...
1249
1250
1251
1252
  void __kprobes unregister_kprobe(struct kprobe *p)
  {
  	unregister_kprobes(&p, 1);
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1253
  EXPORT_SYMBOL_GPL(unregister_kprobe);
9861668f7   Masami Hiramatsu   kprobes: add (un)...
1254

9861668f7   Masami Hiramatsu   kprobes: add (un)...
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
  void __kprobes unregister_kprobes(struct kprobe **kps, int num)
  {
  	int i;
  
  	if (num <= 0)
  		return;
  	mutex_lock(&kprobe_mutex);
  	for (i = 0; i < num; i++)
  		if (__unregister_kprobe_top(kps[i]) < 0)
  			kps[i]->addr = NULL;
  	mutex_unlock(&kprobe_mutex);
  
  	synchronize_sched();
  	for (i = 0; i < num; i++)
  		if (kps[i]->addr)
  			__unregister_kprobe_bottom(kps[i]);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1271
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1272
  EXPORT_SYMBOL_GPL(unregister_kprobes);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1273
1274
1275
  
  static struct notifier_block kprobe_exceptions_nb = {
  	.notifier_call = kprobe_exceptions_notify,
3d5631e06   Anil S Keshavamurthy   [PATCH] Kprobes r...
1276
1277
  	.priority = 0x7fffffff /* we need to be notified first */
  };
3d7e33825   Michael Ellerman   jprobes: make jpr...
1278
1279
1280
1281
  unsigned long __weak arch_deref_entry_point(void *entry)
  {
  	return (unsigned long)entry;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1282

49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1283
  int __kprobes register_jprobes(struct jprobe **jps, int num)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1284
  {
26b31c190   Masami Hiramatsu   kprobes: add (un)...
1285
1286
  	struct jprobe *jp;
  	int ret = 0, i;
3d7e33825   Michael Ellerman   jprobes: make jpr...
1287

26b31c190   Masami Hiramatsu   kprobes: add (un)...
1288
  	if (num <= 0)
3d7e33825   Michael Ellerman   jprobes: make jpr...
1289
  		return -EINVAL;
26b31c190   Masami Hiramatsu   kprobes: add (un)...
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
  	for (i = 0; i < num; i++) {
  		unsigned long addr;
  		jp = jps[i];
  		addr = arch_deref_entry_point(jp->entry);
  
  		if (!kernel_text_address(addr))
  			ret = -EINVAL;
  		else {
  			/* TODO: verify that the probe point is a function entry point */
  			jp->kp.pre_handler = setjmp_pre_handler;
  			jp->kp.break_handler = longjmp_break_handler;
49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1301
  			ret = register_kprobe(&jp->kp);
26b31c190   Masami Hiramatsu   kprobes: add (un)...
1302
  		}
67dddaad5   Masami Hiramatsu   kprobes: fix erro...
1303
1304
1305
  		if (ret < 0) {
  			if (i > 0)
  				unregister_jprobes(jps, i);
26b31c190   Masami Hiramatsu   kprobes: add (un)...
1306
1307
1308
1309
1310
  			break;
  		}
  	}
  	return ret;
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1311
  EXPORT_SYMBOL_GPL(register_jprobes);
3d7e33825   Michael Ellerman   jprobes: make jpr...
1312

26b31c190   Masami Hiramatsu   kprobes: add (un)...
1313
1314
  int __kprobes register_jprobe(struct jprobe *jp)
  {
49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1315
  	return register_jprobes(&jp, 1);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1316
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1317
  EXPORT_SYMBOL_GPL(register_jprobe);
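  /*
   * Editor's illustrative sketch (assumed names and prototype): a jprobe
   * handler shares the prototype of the probed function and must finish
   * with jprobe_return() so longjmp_break_handler can restore the original
   * stack and registers.  The do_fork() prototype below is an assumption
   * for this example only.
   */
  static long example_jdo_fork(unsigned long clone_flags, unsigned long stack_start,
  			     struct pt_regs *regs, unsigned long stack_size,
  			     int __user *parent_tidptr, int __user *child_tidptr)
  {
  	printk(KERN_INFO "jprobe: do_fork(clone_flags=0x%lx)\n", clone_flags);
  	jprobe_return();	/* mandatory: never return normally */
  	return 0;		/* not reached */
  }
  
  static struct jprobe example_jp = {
  	.entry	= example_jdo_fork,
  	.kp	= {
  		.symbol_name	= "do_fork",
  	},
  };
  
  /* register_jprobe(&example_jp) then installs the setjmp/longjmp handlers above. */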
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1318

d0aaff979   Prasanna S Panchamukhi   [PATCH] Kprobes: ...
1319
  void __kprobes unregister_jprobe(struct jprobe *jp)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1320
  {
26b31c190   Masami Hiramatsu   kprobes: add (un)...
1321
1322
  	unregister_jprobes(&jp, 1);
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1323
  EXPORT_SYMBOL_GPL(unregister_jprobe);
26b31c190   Masami Hiramatsu   kprobes: add (un)...
1324

26b31c190   Masami Hiramatsu   kprobes: add (un)...
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
  void __kprobes unregister_jprobes(struct jprobe **jps, int num)
  {
  	int i;
  
  	if (num <= 0)
  		return;
  	mutex_lock(&kprobe_mutex);
  	for (i = 0; i < num; i++)
  		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
  			jps[i]->kp.addr = NULL;
  	mutex_unlock(&kprobe_mutex);
  
  	synchronize_sched();
  	for (i = 0; i < num; i++) {
  		if (jps[i]->kp.addr)
  			__unregister_kprobe_bottom(&jps[i]->kp);
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1342
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1343
  EXPORT_SYMBOL_GPL(unregister_jprobes);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1344

9edddaa20   Ananth N Mavinakayanahalli   Kprobes: indicate...
1345
  #ifdef CONFIG_KRETPROBES
e65cefe87   Adrian Bunk   [PATCH] kernel/kp...
1346
1347
1348
1349
1350
1351
1352
1353
  /*
   * This kprobe pre_handler is registered with every kretprobe. When the
   * probe hits, it sets up the return probe.
   */
  static int __kprobes pre_handler_kretprobe(struct kprobe *p,
  					   struct pt_regs *regs)
  {
  	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1354
1355
  	unsigned long hash, flags = 0;
  	struct kretprobe_instance *ri;
e65cefe87   Adrian Bunk   [PATCH] kernel/kp...
1356
1357
  
  	/* TODO: consider swapping the RA only after the last pre_handler has fired */
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1358
1359
  	hash = hash_ptr(current, KPROBE_HASH_BITS);
  	spin_lock_irqsave(&rp->lock, flags);
4c4308cb9   Christoph Hellwig   kprobes: kretprob...
1360
  	if (!hlist_empty(&rp->free_instances)) {
4c4308cb9   Christoph Hellwig   kprobes: kretprob...
1361
  		ri = hlist_entry(rp->free_instances.first,
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1362
1363
1364
  				struct kretprobe_instance, hlist);
  		hlist_del(&ri->hlist);
  		spin_unlock_irqrestore(&rp->lock, flags);
4c4308cb9   Christoph Hellwig   kprobes: kretprob...
1365
1366
  		ri->rp = rp;
  		ri->task = current;
f47cd9b55   Abhishek Sagar   kprobes: kretprob...
1367

f02b8624f   Ananth N Mavinakayanahalli   kprobes: Fix lock...
1368
  		if (rp->entry_handler && rp->entry_handler(ri, regs))
f47cd9b55   Abhishek Sagar   kprobes: kretprob...
1369
  			return 0;
f47cd9b55   Abhishek Sagar   kprobes: kretprob...
1370

4c4308cb9   Christoph Hellwig   kprobes: kretprob...
1371
1372
1373
  		arch_prepare_kretprobe(ri, regs);
  
  		/* XXX(hch): why is there no hlist_move_head? */
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1374
1375
1376
1377
1378
  		INIT_HLIST_NODE(&ri->hlist);
  		kretprobe_table_lock(hash, &flags);
  		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
  		kretprobe_table_unlock(hash, &flags);
  	} else {
4c4308cb9   Christoph Hellwig   kprobes: kretprob...
1379
  		rp->nmissed++;
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1380
1381
  		spin_unlock_irqrestore(&rp->lock, flags);
  	}
e65cefe87   Adrian Bunk   [PATCH] kernel/kp...
1382
1383
  	return 0;
  }
49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1384
  int __kprobes register_kretprobe(struct kretprobe *rp)
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1385
1386
1387
1388
  {
  	int ret = 0;
  	struct kretprobe_instance *inst;
  	int i;
b2a5cd693   Masami Hiramatsu   kprobes: fix a nu...
1389
  	void *addr;
f438d914b   Masami Hiramatsu   kprobes: support ...
1390
1391
  
  	if (kretprobe_blacklist_size) {
b2a5cd693   Masami Hiramatsu   kprobes: fix a nu...
1392
1393
1394
  		addr = kprobe_addr(&rp->kp);
  		if (!addr)
  			return -EINVAL;
f438d914b   Masami Hiramatsu   kprobes: support ...
1395
1396
1397
1398
1399
1400
  
  		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
  			if (kretprobe_blacklist[i].addr == addr)
  				return -EINVAL;
  		}
  	}
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1401
1402
  
  	rp->kp.pre_handler = pre_handler_kretprobe;
7522a8423   Ananth N Mavinakayanahalli   [PATCH] kprobes: ...
1403
1404
1405
  	rp->kp.post_handler = NULL;
  	rp->kp.fault_handler = NULL;
  	rp->kp.break_handler = NULL;
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1406
1407
1408
1409
  
  	/* Pre-allocate memory for max kretprobe instances */
  	if (rp->maxactive <= 0) {
  #ifdef CONFIG_PREEMPT
c2ef6661c   Heiko Carstens   kprobes: Fix dist...
1410
  		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1411
  #else
4dae560f9   Ananth N Mavinakayanahalli   kprobes: Sanitize...
1412
  		rp->maxactive = num_possible_cpus();
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1413
1414
  #endif
  	}
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1415
  	spin_lock_init(&rp->lock);
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1416
1417
  	INIT_HLIST_HEAD(&rp->free_instances);
  	for (i = 0; i < rp->maxactive; i++) {
f47cd9b55   Abhishek Sagar   kprobes: kretprob...
1418
1419
  		inst = kmalloc(sizeof(struct kretprobe_instance) +
  			       rp->data_size, GFP_KERNEL);
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1420
1421
1422
1423
  		if (inst == NULL) {
  			free_rp_inst(rp);
  			return -ENOMEM;
  		}
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1424
1425
  		INIT_HLIST_NODE(&inst->hlist);
  		hlist_add_head(&inst->hlist, &rp->free_instances);
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1426
1427
1428
1429
  	}
  
  	rp->nmissed = 0;
  	/* Establish function entry probe point */
49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1430
  	ret = register_kprobe(&rp->kp);
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1431
  	if (ret != 0)
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1432
1433
1434
  		free_rp_inst(rp);
  	return ret;
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1435
  EXPORT_SYMBOL_GPL(register_kretprobe);
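  /*
   * Editor's illustrative sketch (assumed names): a return probe.  The
   * handler runs when the probed function returns; maxactive sizes the
   * pre-allocated instance pool set up by register_kretprobe() above.
   */
  static int example_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
  {
  	printk(KERN_INFO "kretprobe: %s returned\n", ri->rp->kp.symbol_name);
  	return 0;
  }
  
  static struct kretprobe example_rp = {
  	.handler	= example_ret_handler,
  	.maxactive	= 20,	/* <= 0 picks a CONFIG_PREEMPT-aware default */
  	.kp		= {
  		.symbol_name	= "do_fork",
  	},
  };
  
  /* register_kretprobe(&example_rp) hooks pre_handler_kretprobe on function entry. */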
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1436

49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1437
  int __kprobes register_kretprobes(struct kretprobe **rps, int num)
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1438
1439
1440
1441
1442
1443
  {
  	int ret = 0, i;
  
  	if (num <= 0)
  		return -EINVAL;
  	for (i = 0; i < num; i++) {
49ad2fd76   Masami Hiramatsu   kprobes: remove c...
1444
  		ret = register_kretprobe(rps[i]);
67dddaad5   Masami Hiramatsu   kprobes: fix erro...
1445
1446
1447
  		if (ret < 0) {
  			if (i > 0)
  				unregister_kretprobes(rps, i);
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1448
1449
1450
1451
1452
  			break;
  		}
  	}
  	return ret;
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1453
  EXPORT_SYMBOL_GPL(register_kretprobes);
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1454

4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1455
1456
1457
1458
  void __kprobes unregister_kretprobe(struct kretprobe *rp)
  {
  	unregister_kretprobes(&rp, 1);
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1459
  EXPORT_SYMBOL_GPL(unregister_kretprobe);
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1460

4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
  void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
  {
  	int i;
  
  	if (num <= 0)
  		return;
  	mutex_lock(&kprobe_mutex);
  	for (i = 0; i < num; i++)
  		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
  			rps[i]->kp.addr = NULL;
  	mutex_unlock(&kprobe_mutex);
  
  	synchronize_sched();
  	for (i = 0; i < num; i++) {
  		if (rps[i]->kp.addr) {
  			__unregister_kprobe_bottom(&rps[i]->kp);
  			cleanup_rp_inst(rps[i]);
  		}
  	}
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1481
  EXPORT_SYMBOL_GPL(unregister_kretprobes);
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1482

9edddaa20   Ananth N Mavinakayanahalli   Kprobes: indicate...
1483
  #else /* CONFIG_KRETPROBES */
d0aaff979   Prasanna S Panchamukhi   [PATCH] Kprobes: ...
1484
  int __kprobes register_kretprobe(struct kretprobe *rp)
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1485
1486
1487
  {
  	return -ENOSYS;
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1488
  EXPORT_SYMBOL_GPL(register_kretprobe);
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1489

4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1490
  int __kprobes register_kretprobes(struct kretprobe **rps, int num)
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1491
  {
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1492
  	return -ENOSYS;
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1493
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1494
  EXPORT_SYMBOL_GPL(register_kretprobes);
d0aaff979   Prasanna S Panchamukhi   [PATCH] Kprobes: ...
1495
  void __kprobes unregister_kretprobe(struct kretprobe *rp)
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1496
  {
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1497
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1498
  EXPORT_SYMBOL_GPL(unregister_kretprobe);
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1499

4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1500
1501
1502
  void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
  {
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1503
  EXPORT_SYMBOL_GPL(unregister_kretprobes);
4c4308cb9   Christoph Hellwig   kprobes: kretprob...
1504

4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1505
1506
1507
1508
  static int __kprobes pre_handler_kretprobe(struct kprobe *p,
  					   struct pt_regs *regs)
  {
  	return 0;
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1509
  }
4a296e07c   Masami Hiramatsu   kprobes: add (un)...
1510
  #endif /* CONFIG_KRETPROBES */
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1511
1512
1513
1514
  /* Set the kprobe gone and remove its instruction buffer. */
  static void __kprobes kill_kprobe(struct kprobe *p)
  {
  	struct kprobe *kp;
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1515

e8386a0cb   Masami Hiramatsu   kprobes: support ...
1516
  	p->flags |= KPROBE_FLAG_GONE;
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1517
  	if (kprobe_aggrprobe(p)) {
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1518
1519
1520
1521
1522
1523
1524
1525
  		/*
  		 * If this is an aggr_kprobe, we have to list all the
  		 * chained probes and mark them GONE.
  		 */
  		list_for_each_entry_rcu(kp, &p->list, list)
  			kp->flags |= KPROBE_FLAG_GONE;
  		p->post_handler = NULL;
  		p->break_handler = NULL;
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1526
  		kill_optimized_kprobe(p);
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1527
1528
1529
1530
1531
1532
1533
  	}
  	/*
  	 * Here, we can remove insn_slot safely, because no thread calls
  	 * the original probed function (which will be freed soon) any more.
  	 */
  	arch_remove_kprobe(p);
  }
24851d244   Frederic Weisbecker   tracing/kprobes: ...
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
  void __kprobes dump_kprobe(struct kprobe *kp)
  {
  	printk(KERN_WARNING "Dumping kprobe:
  ");
  	printk(KERN_WARNING "Name: %s
  Address: %p
  Offset: %x
  ",
  	       kp->symbol_name, kp->addr, kp->offset);
  }
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1544
1545
1546
1547
1548
1549
1550
1551
1552
  /* Module notifier call back, checking kprobes on the module */
  static int __kprobes kprobes_module_callback(struct notifier_block *nb,
  					     unsigned long val, void *data)
  {
  	struct module *mod = data;
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  	unsigned int i;
f24659d96   Masami Hiramatsu   kprobes: support ...
1553
  	int checkcore = (val == MODULE_STATE_GOING);
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1554

f24659d96   Masami Hiramatsu   kprobes: support ...
1555
  	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1556
1557
1558
  		return NOTIFY_DONE;
  
  	/*
f24659d96   Masami Hiramatsu   kprobes: support ...
1559
1560
1561
1562
  	 * When MODULE_STATE_GOING is notified, both the module's .text and
  	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
  	 * notified, only the .init.text section is freed. We need to
  	 * disable the kprobes that have been inserted in those sections.
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1563
1564
1565
1566
1567
  	 */
  	mutex_lock(&kprobe_mutex);
  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  		head = &kprobe_table[i];
  		hlist_for_each_entry_rcu(p, node, head, hlist)
f24659d96   Masami Hiramatsu   kprobes: support ...
1568
1569
1570
  			if (within_module_init((unsigned long)p->addr, mod) ||
  			    (checkcore &&
  			     within_module_core((unsigned long)p->addr, mod))) {
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
  				/*
  				 * The vaddr this probe is installed at will soon
  				 * be vfreed but not synced to disk. Hence,
  				 * disarming the breakpoint isn't needed.
  				 */
  				kill_kprobe(p);
  			}
  	}
  	mutex_unlock(&kprobe_mutex);
  	return NOTIFY_DONE;
  }
  
  static struct notifier_block kprobe_module_nb = {
  	.notifier_call = kprobes_module_callback,
  	.priority = 0
  };
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1587
1588
1589
  static int __init init_kprobes(void)
  {
  	int i, err = 0;
3d8d996e0   Srinivasa Ds   kprobes: prevent ...
1590
1591
1592
1593
1594
  	unsigned long offset = 0, size = 0;
  	char *modname, namebuf[128];
  	const char *symbol_name;
  	void *addr;
  	struct kprobe_blackpoint *kb;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1595
1596
1597
  
  	/* FIXME allocate the probe table, currently defined statically */
  	/* initialize all list heads */
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1598
  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1599
  		INIT_HLIST_HEAD(&kprobe_table[i]);
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1600
  		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1601
  		spin_lock_init(&(kretprobe_table_locks[i].lock));
b94cce926   Hien Nguyen   [PATCH] kprobes: ...
1602
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1603

3d8d996e0   Srinivasa Ds   kprobes: prevent ...
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
  	/*
  	 * Lookup and populate the kprobe_blacklist.
  	 *
  	 * Unlike the kretprobe blacklist, we'll need to determine
  	 * the range of addresses that belong to the said functions,
  	 * since a kprobe need not necessarily be at the beginning
  	 * of a function.
  	 */
  	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
  		kprobe_lookup_name(kb->name, addr);
  		if (!addr)
  			continue;
  
  		kb->start_addr = (unsigned long)addr;
  		symbol_name = kallsyms_lookup(kb->start_addr,
  				&size, &offset, &modname, namebuf);
  		if (!symbol_name)
  			kb->range = 0;
  		else
  			kb->range = size;
  	}
f438d914b   Masami Hiramatsu   kprobes: support ...
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
  	if (kretprobe_blacklist_size) {
  		/* lookup the function address from its name */
  		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
  			kprobe_lookup_name(kretprobe_blacklist[i].name,
  					   kretprobe_blacklist[i].addr);
  			if (!kretprobe_blacklist[i].addr)
  				printk("kretprobe: lookup failed: %s
  ",
  				       kretprobe_blacklist[i].name);
  		}
  	}
b2be84df9   Masami Hiramatsu   kprobes: Jump opt...
1636
1637
  #if defined(CONFIG_OPTPROBES)
  #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1638
1639
1640
  	/* Init kprobe_optinsn_slots */
  	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
  #endif
b2be84df9   Masami Hiramatsu   kprobes: Jump opt...
1641
1642
1643
  	/* By default, kprobes can be optimized */
  	kprobes_allow_optimization = true;
  #endif
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1644

e579abeb5   Masami Hiramatsu   kprobes: rename k...
1645
1646
  	/* By default, kprobes are armed */
  	kprobes_all_disarmed = false;
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1647

6772926be   Rusty Lynch   [PATCH] kprobes: ...
1648
  	err = arch_init_kprobes();
802eae7c8   Rusty Lynch   [PATCH] Return pr...
1649
1650
  	if (!err)
  		err = register_die_notifier(&kprobe_exceptions_nb);
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1651
1652
  	if (!err)
  		err = register_module_notifier(&kprobe_module_nb);
ef53d9c5e   Srinivasa D S   kprobes: improve ...
1653
  	kprobes_initialized = (err == 0);
802eae7c8   Rusty Lynch   [PATCH] Return pr...
1654

8c1c93564   Ananth N Mavinakayanahalli   x86: kprobes: add...
1655
1656
  	if (!err)
  		init_test_probes();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1657
1658
  	return err;
  }
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1659
1660
  #ifdef CONFIG_DEBUG_FS
  static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1661
  		const char *sym, int offset, char *modname, struct kprobe *pp)
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1662
1663
1664
1665
1666
1667
1668
1669
1670
  {
  	char *kprobe_type;
  
  	if (p->pre_handler == pre_handler_kretprobe)
  		kprobe_type = "r";
  	else if (p->pre_handler == setjmp_pre_handler)
  		kprobe_type = "j";
  	else
  		kprobe_type = "k";
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1671

346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1672
  	if (sym)
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1673
  		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1674
  			p->addr, kprobe_type, sym, offset,
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1675
  			(modname ? modname : " "));
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1676
  	else
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
  		seq_printf(pi, "%p  %s  %p ",
  			p->addr, kprobe_type, p->addr);
  
  	if (!pp)
  		pp = p;
  	seq_printf(pi, "%s%s%s
  ",
  		(kprobe_gone(p) ? "[GONE]" : ""),
  		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
  		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
  }
  
  static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
  {
  	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
  }
  
  static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
  {
  	(*pos)++;
  	if (*pos >= KPROBE_TABLE_SIZE)
  		return NULL;
  	return pos;
  }
  
  static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
  {
  	/* Nothing to do */
  }
  
  static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p, *kp;
  	const char *sym = NULL;
  	unsigned int i = *(loff_t *) v;
ffb451227   Alexey Dobriyan   Simplify kallsyms...
1714
  	unsigned long offset = 0;
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1715
1716
1717
1718
1719
  	char *modname, namebuf[128];
  
  	head = &kprobe_table[i];
  	preempt_disable();
  	hlist_for_each_entry_rcu(p, node, head, hlist) {
ffb451227   Alexey Dobriyan   Simplify kallsyms...
1720
  		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1721
  					&offset, &modname, namebuf);
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1722
  		if (kprobe_aggrprobe(p)) {
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1723
  			list_for_each_entry_rcu(kp, &p->list, list)
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1724
  				report_probe(pi, kp, sym, offset, modname, p);
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1725
  		} else
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1726
  			report_probe(pi, p, sym, offset, modname, NULL);
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1727
1728
1729
1730
  	}
  	preempt_enable();
  	return 0;
  }
88e9d34c7   James Morris   seq_file: constif...
1731
  static const struct seq_operations kprobes_seq_ops = {
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
  	.start = kprobe_seq_start,
  	.next  = kprobe_seq_next,
  	.stop  = kprobe_seq_stop,
  	.show  = show_kprobe_addr
  };
  
  static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
  {
  	return seq_open(filp, &kprobes_seq_ops);
  }
828c09509   Alexey Dobriyan   const: constify r...
1742
  static const struct file_operations debugfs_kprobes_operations = {
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1743
1744
1745
1746
1747
  	.open           = kprobes_open,
  	.read           = seq_read,
  	.llseek         = seq_lseek,
  	.release        = seq_release,
  };
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
  /* Disable one kprobe */
  int __kprobes disable_kprobe(struct kprobe *kp)
  {
  	int ret = 0;
  	struct kprobe *p;
  
  	mutex_lock(&kprobe_mutex);
  
  	/* Check whether specified probe is valid. */
  	p = __get_valid_kprobe(kp);
  	if (unlikely(p == NULL)) {
  		ret = -EINVAL;
  		goto out;
  	}
  
  	/* If the probe is already disabled (or gone), just return */
  	if (kprobe_disabled(kp))
  		goto out;
  
  	kp->flags |= KPROBE_FLAG_DISABLED;
  	if (p != kp)
  		/* When kp != p, p is always enabled. */
  		try_to_disable_aggr_kprobe(p);
  
  	if (!kprobes_all_disarmed && kprobe_disabled(p))
201517a7f   Masami Hiramatsu   kprobes: fix to u...
1773
  		disarm_kprobe(p);
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
  out:
  	mutex_unlock(&kprobe_mutex);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(disable_kprobe);
  
  /* Enable one kprobe */
  int __kprobes enable_kprobe(struct kprobe *kp)
  {
  	int ret = 0;
  	struct kprobe *p;
  
  	mutex_lock(&kprobe_mutex);
  
  	/* Check whether specified probe is valid. */
  	p = __get_valid_kprobe(kp);
  	if (unlikely(p == NULL)) {
  		ret = -EINVAL;
  		goto out;
  	}
  
  	if (kprobe_gone(kp)) {
  		/* This kprobe is gone; we can't enable it. */
  		ret = -EINVAL;
  		goto out;
  	}
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1800
1801
  	if (p != kp)
  		kp->flags &= ~KPROBE_FLAG_DISABLED;
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1802
1803
1804
1805
1806
  
  	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
  		p->flags &= ~KPROBE_FLAG_DISABLED;
  		arm_kprobe(p);
  	}
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1807
1808
1809
1810
1811
  out:
  	mutex_unlock(&kprobe_mutex);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(enable_kprobe);
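  /*
   * Editor's illustrative sketch (assumed names): muting and re-arming a
   * registered probe without tearing it down.  example_kp refers to the
   * kprobe from the earlier sketch.
   */
  static void example_pause_probe(void)
  {
  	if (disable_kprobe(&example_kp) < 0)
  		printk(KERN_WARNING "example: probe is not registered\n");
  }
  
  static void example_resume_probe(void)
  {
  	if (enable_kprobe(&example_kp) < 0)
  		printk(KERN_WARNING "example: probe is gone, cannot re-arm\n");
  }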
e579abeb5   Masami Hiramatsu   kprobes: rename k...
1812
  static void __kprobes arm_all_kprobes(void)
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1813
1814
1815
1816
1817
1818
1819
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  	unsigned int i;
  
  	mutex_lock(&kprobe_mutex);
e579abeb5   Masami Hiramatsu   kprobes: rename k...
1820
1821
  	/* If kprobes are armed, just return */
  	if (!kprobes_all_disarmed)
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1822
  		goto already_enabled;
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1823
  	/* Arming kprobes doesn't optimize kprobe itself */
4460fdad8   Mathieu Desnoyers   tracing, Text Edi...
1824
  	mutex_lock(&text_mutex);
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1825
1826
1827
  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  		head = &kprobe_table[i];
  		hlist_for_each_entry_rcu(p, node, head, hlist)
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1828
  			if (!kprobe_disabled(p))
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1829
  				__arm_kprobe(p);
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1830
  	}
4460fdad8   Mathieu Desnoyers   tracing, Text Edi...
1831
  	mutex_unlock(&text_mutex);
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1832

e579abeb5   Masami Hiramatsu   kprobes: rename k...
1833
  	kprobes_all_disarmed = false;
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1834
1835
1836
1837
1838
1839
1840
  	printk(KERN_INFO "Kprobes globally enabled
  ");
  
  already_enabled:
  	mutex_unlock(&kprobe_mutex);
  	return;
  }
e579abeb5   Masami Hiramatsu   kprobes: rename k...
1841
  static void __kprobes disarm_all_kprobes(void)
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1842
1843
1844
1845
1846
1847
1848
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  	unsigned int i;
  
  	mutex_lock(&kprobe_mutex);
e579abeb5   Masami Hiramatsu   kprobes: rename k...
1849
1850
  	/* If kprobes are already disarmed, just return */
  	if (kprobes_all_disarmed)
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1851
  		goto already_disabled;
e579abeb5   Masami Hiramatsu   kprobes: rename k...
1852
  	kprobes_all_disarmed = true;
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1853
1854
  	printk(KERN_INFO "Kprobes globally disabled
  ");
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1855
1856
1857
1858
1859
1860
  
  	/*
  	 * Here we call get_online_cpus() to avoid a text_mutex deadlock,
  	 * because disarming may also unoptimize kprobes.
  	 */
  	get_online_cpus();
4460fdad8   Mathieu Desnoyers   tracing, Text Edi...
1861
  	mutex_lock(&text_mutex);
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1862
1863
1864
  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  		head = &kprobe_table[i];
  		hlist_for_each_entry_rcu(p, node, head, hlist) {
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1865
  			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1866
  				__disarm_kprobe(p);
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1867
1868
  		}
  	}
4460fdad8   Mathieu Desnoyers   tracing, Text Edi...
1869
  	mutex_unlock(&text_mutex);
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1870
  	put_online_cpus();
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1871
1872
1873
  	mutex_unlock(&kprobe_mutex);
  	/* Allow all currently running kprobes to complete */
  	synchronize_sched();
74a0b5762   Christoph Hellwig   x86: optimize pag...
1874
  	return;
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
  
  already_disabled:
  	mutex_unlock(&kprobe_mutex);
  	return;
  }
  
  /*
   * XXX: The debugfs bool file interface doesn't allow for callbacks
   * when the bool state is switched. We can reuse that facility when
   * available
   */
  static ssize_t read_enabled_file_bool(struct file *file,
  	       char __user *user_buf, size_t count, loff_t *ppos)
  {
  	char buf[3];
e579abeb5   Masami Hiramatsu   kprobes: rename k...
1890
  	if (!kprobes_all_disarmed)
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
  		buf[0] = '1';
  	else
  		buf[0] = '0';
  	buf[1] = '\n';
  	buf[2] = 0x00;
  	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
  }
  
  static ssize_t write_enabled_file_bool(struct file *file,
  	       const char __user *user_buf, size_t count, loff_t *ppos)
  {
  	char buf[32];
  	int buf_size;
  
  	buf_size = min(count, (sizeof(buf)-1));
  	if (copy_from_user(buf, user_buf, buf_size))
  		return -EFAULT;
  
  	switch (buf[0]) {
  	case 'y':
  	case 'Y':
  	case '1':
e579abeb5   Masami Hiramatsu   kprobes: rename k...
1914
  		arm_all_kprobes();
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1915
1916
1917
1918
  		break;
  	case 'n':
  	case 'N':
  	case '0':
e579abeb5   Masami Hiramatsu   kprobes: rename k...
1919
  		disarm_all_kprobes();
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1920
1921
1922
1923
1924
  		break;
  	}
  
  	return count;
  }
828c09509   Alexey Dobriyan   const: constify r...
1925
  static const struct file_operations fops_kp = {
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1926
1927
1928
  	.read =         read_enabled_file_bool,
  	.write =        write_enabled_file_bool,
  };
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1929
1930
1931
  static int __kprobes debugfs_kprobe_init(void)
  {
  	struct dentry *dir, *file;
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1932
  	unsigned int value = 1;
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1933
1934
1935
1936
  
  	dir = debugfs_create_dir("kprobes", NULL);
  	if (!dir)
  		return -ENOMEM;
e38697929   Randy Dunlap   kprobes: fix spar...
1937
  	file = debugfs_create_file("list", 0444, dir, NULL,
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1938
1939
1940
1941
1942
  				&debugfs_kprobes_operations);
  	if (!file) {
  		debugfs_remove(dir);
  		return -ENOMEM;
  	}
bf8f6e5b3   Ananth N Mavinakayanahalli   Kprobes: The ON/O...
1943
1944
1945
1946
1947
1948
  	file = debugfs_create_file("enabled", 0600, dir,
  					&value, &fops_kp);
  	if (!file) {
  		debugfs_remove(dir);
  		return -ENOMEM;
  	}
346fd59ba   Srinivasa Ds   [PATCH] kprobes: ...
1949
1950
1951
1952
1953
1954
1955
  	return 0;
  }
  
  late_initcall(debugfs_kprobe_init);
  #endif /* CONFIG_DEBUG_FS */
  
  module_init(init_kprobes);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1956

99081ab55   Masami Hiramatsu   kprobes: move EXP...
1957
  /* defined in arch/.../kernel/kprobes.c */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1958
  EXPORT_SYMBOL_GPL(jprobe_return);