  /*
   *  Kernel Probes (KProbes)
   *  kernel/kprobes.c
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
   *
   * Copyright (C) IBM Corporation, 2002, 2004
   *
   * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
   *		Probes initial implementation (includes suggestions from
   *		Rusty Russell).
   * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
   *		hlists and exceptions notifier as suggested by Andi Kleen.
   * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
   *		interface to access function arguments.
   * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
   *		exceptions notifier to be first on the priority list.
   * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
   *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
   *		<prasanna@in.ibm.com> added function-return probes.
   */
  #include <linux/kprobes.h>
  #include <linux/hash.h>
  #include <linux/init.h>
  #include <linux/slab.h>
  #include <linux/stddef.h>
  #include <linux/module.h>
  #include <linux/moduleloader.h>
  #include <linux/kallsyms.h>
  #include <linux/freezer.h>
  #include <linux/seq_file.h>
  #include <linux/debugfs.h>
  #include <linux/sysctl.h>
  #include <linux/kdebug.h>
  #include <linux/memory.h>
  #include <linux/ftrace.h>
  #include <linux/cpu.h>

  #include <asm-generic/sections.h>
  #include <asm/cacheflush.h>
  #include <asm/errno.h>
  #include <asm/uaccess.h>
  
  #define KPROBE_HASH_BITS 6
  #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
  
  /*
   * Some oddball architectures like 64bit powerpc have function descriptors
   * so this must be overridable.
   */
  #ifndef kprobe_lookup_name
  #define kprobe_lookup_name(name, addr) \
  	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
  #endif
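
/*
 * Usage sketch (illustrative, not part of the original file): on most
 * architectures the default definition above is a thin wrapper around
 * kallsyms_lookup_name(), so
 *
 *	kprobe_opcode_t *addr;
 *	kprobe_lookup_name("do_fork", addr);
 *
 * leaves addr pointing at the first instruction of do_fork() (an example
 * symbol), or NULL if the symbol cannot be resolved.  kprobe_addr() below
 * relies on exactly this behaviour.
 */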
  static int kprobes_initialized;
  static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
  static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

  /* NOTE: change this value only with kprobe_mutex held */
  static bool kprobes_all_disarmed;

  static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
  static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  static struct {
  	spinlock_t lock ____cacheline_aligned_in_smp;
  } kretprobe_table_locks[KPROBE_TABLE_SIZE];
  
  static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  {
  	return &(kretprobe_table_locks[hash].lock);
  }
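
/*
 * Example (illustrative): callers derive the bucket index from the probed
 * task, e.g. in kretprobe_hash_lock() and kprobe_flush_task() below:
 *
 *	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
 *	spinlock_t *lock = kretprobe_table_lock_ptr(hash);
 *
 * With KPROBE_HASH_BITS == 6 this gives 64 buckets, each protected by its
 * own cacheline-aligned spinlock.
 */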

  /*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule)
   *
   * For such cases, we now have a blacklist
   */
  static struct kprobe_blackpoint kprobe_blacklist[] = {
  	{"preempt_schedule",},
  	{"native_get_debugreg",},
  	{"irq_entries_start",},
  	{"common_interrupt",},
  	{"mcount",},	/* mcount can be called from everywhere */
  	{NULL}    /* Terminator */
  };
  #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
  /*
   * kprobe->ainsn.insn points to the copy of the instruction to be
   * single-stepped. x86_64, POWER4 and above have no-exec support and
   * stepping on the instruction on a vmalloced/kmalloced/data page
   * is a recipe for disaster
   */
  struct kprobe_insn_page {
  	struct list_head list;
  	kprobe_opcode_t *insns;		/* Page of instruction slots */
  	int nused;
  	int ngarbage;
  	char slot_used[];
  };
  #define KPROBE_INSN_PAGE_SIZE(slots)			\
  	(offsetof(struct kprobe_insn_page, slot_used) +	\
  	 (sizeof(char) * (slots)))
  
  struct kprobe_insn_cache {
  	struct list_head pages;	/* list of kprobe_insn_page */
  	size_t insn_size;	/* size of instruction slot */
  	int nr_garbage;
  };
  
  static int slots_per_page(struct kprobe_insn_cache *c)
  {
  	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
  }
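
/*
 * Worked example (illustrative, assuming x86 values: PAGE_SIZE == 4096,
 * MAX_INSN_SIZE == 16 and a one-byte kprobe_opcode_t):
 *
 *	slots_per_page()           == 4096 / (16 * 1) == 256
 *	KPROBE_INSN_PAGE_SIZE(256) == offsetof(..., slot_used) + 256
 *
 * i.e. one kprobe_insn_page header of roughly 288 bytes on x86_64 manages
 * a full executable page holding 256 instruction slots.
 */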
  enum kprobe_slot_state {
  	SLOT_CLEAN = 0,
  	SLOT_DIRTY = 1,
  	SLOT_USED = 2,
  };
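
/*
 * Slot life cycle (summary of the code below): __get_insn_slot() hands out a
 * SLOT_CLEAN entry and marks it SLOT_USED; __free_insn_slot() with dirty != 0
 * marks it SLOT_DIRTY and bumps the garbage counters; collect_one_slot(),
 * called from collect_garbage_slots() after synchronize_sched(), finally
 * returns it to SLOT_CLEAN or frees the whole page.
 */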
  static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
  static struct kprobe_insn_cache kprobe_insn_slots = {
  	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
  	.insn_size = MAX_INSN_SIZE,
  	.nr_garbage = 0,
  };
  static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

  /**
   * __get_insn_slot() - Find a slot on an executable page for an instruction.
   * We allocate an executable page if there's no room on existing ones.
   */
  static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
  {
  	struct kprobe_insn_page *kip;

   retry:
  	list_for_each_entry(kip, &c->pages, list) {
  		if (kip->nused < slots_per_page(c)) {
  			int i;
  			for (i = 0; i < slots_per_page(c); i++) {
  				if (kip->slot_used[i] == SLOT_CLEAN) {
  					kip->slot_used[i] = SLOT_USED;
  					kip->nused++;
  					return kip->insns + (i * c->insn_size);
  				}
  			}
  			/* kip->nused is broken. Fix it. */
  			kip->nused = slots_per_page(c);
  			WARN_ON(1);
  		}
  	}
	/* If there are any garbage slots, collect them and try again. */
  	if (c->nr_garbage && collect_garbage_slots(c) == 0)
  		goto retry;
  
  	/* All out of space.  Need to allocate a new page. */
  	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
  	if (!kip)
  		return NULL;
  
  	/*
  	 * Use module_alloc so this page is within +/- 2GB of where the
  	 * kernel image and loaded module images reside. This is required
  	 * so x86_64 can correctly handle the %rip-relative fixups.
  	 */
  	kip->insns = module_alloc(PAGE_SIZE);
  	if (!kip->insns) {
  		kfree(kip);
  		return NULL;
  	}
  	INIT_LIST_HEAD(&kip->list);
  	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
  	kip->slot_used[0] = SLOT_USED;
  	kip->nused = 1;
  	kip->ngarbage = 0;
  	list_add(&kip->list, &c->pages);
  	return kip->insns;
  }

  kprobe_opcode_t __kprobes *get_insn_slot(void)
  {
  	kprobe_opcode_t *ret = NULL;
  	mutex_lock(&kprobe_insn_mutex);
  	ret = __get_insn_slot(&kprobe_insn_slots);
  	mutex_unlock(&kprobe_insn_mutex);

  	return ret;
  }
/* Return 1 if all garbage slots are collected, otherwise 0. */
  static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
  {
  	kip->slot_used[idx] = SLOT_CLEAN;
  	kip->nused--;
  	if (kip->nused == 0) {
  		/*
  		 * Page is no longer in use.  Free it unless
  		 * it's the last one.  We keep the last one
  		 * so as not to have to set it up again the
  		 * next time somebody inserts a probe.
  		 */
  		if (!list_is_singular(&kip->list)) {
  			list_del(&kip->list);
  			module_free(NULL, kip->insns);
  			kfree(kip);
  		}
  		return 1;
  	}
  	return 0;
  }
  static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
  {
  	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbage slots */
  	synchronize_sched();

  	list_for_each_entry_safe(kip, next, &c->pages, list) {
  		int i;
  		if (kip->ngarbage == 0)
  			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
  		for (i = 0; i < slots_per_page(c); i++) {
  			if (kip->slot_used[i] == SLOT_DIRTY &&
  			    collect_one_slot(kip, i))
  				break;
  		}
  	}
  	c->nr_garbage = 0;
  	return 0;
  }
  static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
  				       kprobe_opcode_t *slot, int dirty)
  {
  	struct kprobe_insn_page *kip;

  	list_for_each_entry(kip, &c->pages, list) {
  		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
  		if (idx >= 0 && idx < slots_per_page(c)) {
  			WARN_ON(kip->slot_used[idx] != SLOT_USED);
  			if (dirty) {
  				kip->slot_used[idx] = SLOT_DIRTY;
  				kip->ngarbage++;
  				if (++c->nr_garbage > slots_per_page(c))
  					collect_garbage_slots(c);
  			} else
  				collect_one_slot(kip, idx);
  			return;
  		}
  	}
  	/* Could not free this slot. */
  	WARN_ON(1);
  }

  void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
  {
  	mutex_lock(&kprobe_insn_mutex);
  	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
  	mutex_unlock(&kprobe_insn_mutex);
  }
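
/*
 * Usage sketch (illustrative, not part of the original file): arch code is
 * the only consumer of this cache.  On x86, for example, roughly:
 *
 *	p->ainsn.insn = get_insn_slot();	in arch_prepare_kprobe()
 *	...copy and fix up the probed instruction into p->ainsn.insn...
 *	free_insn_slot(p->ainsn.insn, dirty);	in arch_remove_kprobe()
 *
 * where dirty is non-zero when the slot may still be executing (e.g. a
 * boosted probe), so it is only reclaimed after the garbage collection
 * pass above has run synchronize_sched().
 */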
  #ifdef CONFIG_OPTPROBES
  /* For optimized_kprobe buffer */
  static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
  static struct kprobe_insn_cache kprobe_optinsn_slots = {
  	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
  	/* .insn_size is initialized later */
  	.nr_garbage = 0,
  };
  /* Get a slot for optimized_kprobe buffer */
  kprobe_opcode_t __kprobes *get_optinsn_slot(void)
  {
  	kprobe_opcode_t *ret = NULL;
  
  	mutex_lock(&kprobe_optinsn_mutex);
  	ret = __get_insn_slot(&kprobe_optinsn_slots);
  	mutex_unlock(&kprobe_optinsn_mutex);
  
  	return ret;
  }
  
  void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
  {
  	mutex_lock(&kprobe_optinsn_mutex);
  	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
  	mutex_unlock(&kprobe_optinsn_mutex);
  }
  #endif
  #endif

/* We have preemption disabled, so it is safe to use __ versions */
  static inline void set_kprobe_instance(struct kprobe *kp)
  {
  	__get_cpu_var(kprobe_instance) = kp;
  }
  
  static inline void reset_kprobe_instance(void)
  {
  	__get_cpu_var(kprobe_instance) = NULL;
  }
  /*
   * This routine is called either:
   * 	- under the kprobe_mutex - during kprobe_[un]register()
   * 				OR
   * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
   */
  struct kprobe __kprobes *get_kprobe(void *addr)
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  
  	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
  	hlist_for_each_entry_rcu(p, node, head, hlist) {
  		if (p->addr == addr)
  			return p;
  	}

  	return NULL;
  }
  static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
  
  /* Return true if the kprobe is an aggregator */
  static inline int kprobe_aggrprobe(struct kprobe *p)
  {
  	return p->pre_handler == aggr_pre_handler;
  }
  
  /*
   * Keep all fields in the kprobe consistent
   */
  static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
  {
  	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
  	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
  }
  
  #ifdef CONFIG_OPTPROBES
  /* NOTE: change this value only with kprobe_mutex held */
  static bool kprobes_allow_optimization;
  /*
 * Call all of the pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
   */
  void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
  {
  	struct kprobe *kp;
  
  	list_for_each_entry_rcu(kp, &p->list, list) {
  		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
  			set_kprobe_instance(kp);
  			kp->pre_handler(kp, regs);
  		}
  		reset_kprobe_instance();
  	}
  }
  
  /* Return true(!0) if the kprobe is ready for optimization. */
  static inline int kprobe_optready(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	if (kprobe_aggrprobe(p)) {
  		op = container_of(p, struct optimized_kprobe, kp);
  		return arch_prepared_optinsn(&op->optinsn);
  	}
  
  	return 0;
  }
  
  /*
   * Return an optimized kprobe whose optimizing code replaces
   * instructions including addr (exclude breakpoint).
   */
  struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
  {
  	int i;
  	struct kprobe *p = NULL;
  	struct optimized_kprobe *op;
  
  	/* Don't check i == 0, since that is a breakpoint case. */
  	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
  		p = get_kprobe((void *)(addr - i));
  
  	if (p && kprobe_optready(p)) {
  		op = container_of(p, struct optimized_kprobe, kp);
  		if (arch_within_optimized_kprobe(op, addr))
  			return p;
  	}
  
  	return NULL;
  }
  
  /* Optimization staging list, protected by kprobe_mutex */
  static LIST_HEAD(optimizing_list);
  
  static void kprobe_optimizer(struct work_struct *work);
  static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  #define OPTIMIZE_DELAY 5
  
  /* Kprobe jump optimizer */
  static __kprobes void kprobe_optimizer(struct work_struct *work)
  {
  	struct optimized_kprobe *op, *tmp;
  
  	/* Lock modules while optimizing kprobes */
  	mutex_lock(&module_mutex);
  	mutex_lock(&kprobe_mutex);
  	if (kprobes_all_disarmed || !kprobes_allow_optimization)
  		goto end;
  
  	/*
	 * Wait for a quiescence period to ensure all running interrupts
	 * are done. Because an optprobe may modify multiple instructions,
	 * there is a chance that the Nth instruction is interrupted. In that
	 * case, a running interrupt can return to the 2nd-Nth byte of the
	 * jump instruction. This wait avoids that.
  	 */
  	synchronize_sched();
  
  	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex is held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to
	 * lock text_mutex, but stop_machine() cannot proceed because
	 * online_cpus has been changed).
	 * To avoid this deadlock, we call get_online_cpus() to prevent
	 * cpu-hotplug from running outside of the text_mutex locking.
  	 */
  	get_online_cpus();
  	mutex_lock(&text_mutex);
  	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
  		WARN_ON(kprobe_disabled(&op->kp));
  		if (arch_optimize_kprobe(op) < 0)
  			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  		list_del_init(&op->list);
  	}
  	mutex_unlock(&text_mutex);
  	put_online_cpus();
  end:
  	mutex_unlock(&kprobe_mutex);
  	mutex_unlock(&module_mutex);
  }
  
  /* Optimize kprobe if p is ready to be optimized */
  static __kprobes void optimize_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	/* Check if the kprobe is disabled or not ready for optimization. */
  	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
  	    (kprobe_disabled(p) || kprobes_all_disarmed))
  		return;
  
  	/* Both of break_handler and post_handler are not supported. */
  	if (p->break_handler || p->post_handler)
  		return;
  
  	op = container_of(p, struct optimized_kprobe, kp);
  
  	/* Check there is no other kprobes at the optimized instructions */
  	if (arch_check_optimized_kprobe(op) < 0)
  		return;
  
  	/* Check if it is already optimized. */
  	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
  		return;
  
  	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
  	list_add(&op->list, &optimizing_list);
  	if (!delayed_work_pending(&optimizing_work))
  		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
  }
  
  /* Unoptimize a kprobe if p is optimized */
  static __kprobes void unoptimize_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
  		op = container_of(p, struct optimized_kprobe, kp);
  		if (!list_empty(&op->list))
  			/* Dequeue from the optimization queue */
  			list_del_init(&op->list);
  		else
  			/* Replace jump with break */
  			arch_unoptimize_kprobe(op);
  		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  	}
  }
  
  /* Remove optimized instructions */
  static void __kprobes kill_optimized_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	op = container_of(p, struct optimized_kprobe, kp);
  	if (!list_empty(&op->list)) {
  		/* Dequeue from the optimization queue */
  		list_del_init(&op->list);
  		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  	}
  	/* Don't unoptimize, because the target code will be freed. */
  	arch_remove_optimized_kprobe(op);
  }
  
  /* Try to prepare optimized instructions */
  static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	op = container_of(p, struct optimized_kprobe, kp);
  	arch_prepare_optimized_kprobe(op);
  }
  
  /* Free optimized instructions and optimized_kprobe */
  static __kprobes void free_aggr_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	op = container_of(p, struct optimized_kprobe, kp);
  	arch_remove_optimized_kprobe(op);
  	kfree(op);
  }
  
  /* Allocate new optimized_kprobe and try to prepare optimized instructions */
  static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
  {
  	struct optimized_kprobe *op;
  
  	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
  	if (!op)
  		return NULL;
  
  	INIT_LIST_HEAD(&op->list);
  	op->kp.addr = p->addr;
  	arch_prepare_optimized_kprobe(op);
  
  	return &op->kp;
  }
  
  static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
  
  /*
   * Prepare an optimized_kprobe and optimize it
   * NOTE: p must be a normal registered kprobe
   */
  static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
  {
  	struct kprobe *ap;
  	struct optimized_kprobe *op;
  
  	ap = alloc_aggr_kprobe(p);
  	if (!ap)
  		return;
  
  	op = container_of(ap, struct optimized_kprobe, kp);
  	if (!arch_prepared_optinsn(&op->optinsn)) {
  		/* If failed to setup optimizing, fallback to kprobe */
  		free_aggr_kprobe(ap);
  		return;
  	}
  
  	init_aggr_kprobe(ap, p);
  	optimize_kprobe(ap);
  }
  #ifdef CONFIG_SYSCTL
  static void __kprobes optimize_all_kprobes(void)
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  	unsigned int i;
  
  	/* If optimization is already allowed, just return */
  	if (kprobes_allow_optimization)
  		return;
  
  	kprobes_allow_optimization = true;
  	mutex_lock(&text_mutex);
  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  		head = &kprobe_table[i];
  		hlist_for_each_entry_rcu(p, node, head, hlist)
  			if (!kprobe_disabled(p))
  				optimize_kprobe(p);
  	}
  	mutex_unlock(&text_mutex);
	printk(KERN_INFO "Kprobes globally optimized\n");
  }
  
  static void __kprobes unoptimize_all_kprobes(void)
  {
  	struct hlist_head *head;
  	struct hlist_node *node;
  	struct kprobe *p;
  	unsigned int i;
  
  	/* If optimization is already prohibited, just return */
  	if (!kprobes_allow_optimization)
  		return;
  
  	kprobes_allow_optimization = false;
	printk(KERN_INFO "Kprobes globally unoptimized\n");
  	get_online_cpus();	/* For avoiding text_mutex deadlock */
  	mutex_lock(&text_mutex);
  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  		head = &kprobe_table[i];
  		hlist_for_each_entry_rcu(p, node, head, hlist) {
  			if (!kprobe_disabled(p))
  				unoptimize_kprobe(p);
  		}
  	}
  
  	mutex_unlock(&text_mutex);
  	put_online_cpus();
  	/* Allow all currently running kprobes to complete */
  	synchronize_sched();
  }
  
  int sysctl_kprobes_optimization;
  int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
  				      void __user *buffer, size_t *length,
  				      loff_t *ppos)
  {
  	int ret;
  
  	mutex_lock(&kprobe_mutex);
  	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
  	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  
  	if (sysctl_kprobes_optimization)
  		optimize_all_kprobes();
  	else
  		unoptimize_all_kprobes();
  	mutex_unlock(&kprobe_mutex);
  
  	return ret;
  }
  #endif /* CONFIG_SYSCTL */
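
/*
 * Illustrative note: kernel/sysctl.c wires sysctl_kprobes_optimization and
 * proc_kprobes_optimization_handler() up as a knob (typically exposed as
 * debug.kprobes-optimization), so jump optimization can be toggled at
 * runtime, e.g.:
 *
 *	sysctl -w debug.kprobes-optimization=0	(fall back to breakpoints)
 *	sysctl -w debug.kprobes-optimization=1	(re-enable optimization)
 */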
  static void __kprobes __arm_kprobe(struct kprobe *p)
  {
  	struct kprobe *old_p;
  
  	/* Check collision with other optimized kprobes */
  	old_p = get_optimized_kprobe((unsigned long)p->addr);
  	if (unlikely(old_p))
  		unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */
  
  	arch_arm_kprobe(p);
  	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
  }
  
  static void __kprobes __disarm_kprobe(struct kprobe *p)
  {
  	struct kprobe *old_p;
  
  	unoptimize_kprobe(p);	/* Try to unoptimize */
  	arch_disarm_kprobe(p);
  
  	/* If another kprobe was blocked, optimize it. */
  	old_p = get_optimized_kprobe((unsigned long)p->addr);
  	if (unlikely(old_p))
  		optimize_kprobe(old_p);
  }
  
  #else /* !CONFIG_OPTPROBES */
  
  #define optimize_kprobe(p)			do {} while (0)
  #define unoptimize_kprobe(p)			do {} while (0)
  #define kill_optimized_kprobe(p)		do {} while (0)
  #define prepare_optimized_kprobe(p)		do {} while (0)
  #define try_to_optimize_kprobe(p)		do {} while (0)
  #define __arm_kprobe(p)				arch_arm_kprobe(p)
  #define __disarm_kprobe(p)			arch_disarm_kprobe(p)
  
  static __kprobes void free_aggr_kprobe(struct kprobe *p)
  {
  	kfree(p);
  }
  
  static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
  {
  	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
  }
  #endif /* CONFIG_OPTPROBES */
  /* Arm a kprobe with text_mutex */
  static void __kprobes arm_kprobe(struct kprobe *kp)
  {
  	/*
  	 * Here, since __arm_kprobe() doesn't use stop_machine(),
  	 * this doesn't cause deadlock on text_mutex. So, we don't
  	 * need get_online_cpus().
  	 */
  	mutex_lock(&text_mutex);
  	__arm_kprobe(kp);
  	mutex_unlock(&text_mutex);
  }
  
  /* Disarm a kprobe with text_mutex */
  static void __kprobes disarm_kprobe(struct kprobe *kp)
  {
  	get_online_cpus();	/* For avoiding text_mutex deadlock */
  	mutex_lock(&text_mutex);
  	__disarm_kprobe(kp);
  	mutex_unlock(&text_mutex);
  	put_online_cpus();
  }
  /*
   * Aggregate handlers for multiple kprobes support - these handlers
   * take care of invoking the individual kprobe handlers on p->list
   */
  static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
  {
  	struct kprobe *kp;
  	list_for_each_entry_rcu(kp, &p->list, list) {
  		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
  			set_kprobe_instance(kp);
  			if (kp->pre_handler(kp, regs))
  				return 1;
  		}
  		reset_kprobe_instance();
  	}
  	return 0;
  }
  static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
  					unsigned long flags)
  {
  	struct kprobe *kp;
  	list_for_each_entry_rcu(kp, &p->list, list) {
  		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
  			set_kprobe_instance(kp);
  			kp->post_handler(kp, regs, flags);
  			reset_kprobe_instance();
  		}
  	}
  }
  static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
  					int trapnr)
  {
  	struct kprobe *cur = __get_cpu_var(kprobe_instance);
  	/*
  	 * if we faulted "during" the execution of a user specified
  	 * probe handler, invoke just that probe's fault handler
  	 */
  	if (cur && cur->fault_handler) {
  		if (cur->fault_handler(cur, regs, trapnr))
  			return 1;
  	}
  	return 0;
  }
  static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
  {
  	struct kprobe *cur = __get_cpu_var(kprobe_instance);
  	int ret = 0;
  
  	if (cur && cur->break_handler) {
  		if (cur->break_handler(cur, regs))
  			ret = 1;
  	}
  	reset_kprobe_instance();
  	return ret;
  }
  /* Walks the list and increments nmissed count for multiprobe case */
  void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
  {
  	struct kprobe *kp;
  	if (!kprobe_aggrprobe(p)) {
  		p->nmissed++;
  	} else {
  		list_for_each_entry_rcu(kp, &p->list, list)
  			kp->nmissed++;
  	}
  	return;
  }
  void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
  				struct hlist_head *head)
  {
  	struct kretprobe *rp = ri->rp;
	/* remove rp inst off the kretprobe_inst_table */
  	hlist_del(&ri->hlist);
  	INIT_HLIST_NODE(&ri->hlist);
  	if (likely(rp)) {
  		spin_lock(&rp->lock);
  		hlist_add_head(&ri->hlist, &rp->free_instances);
  		spin_unlock(&rp->lock);
  	} else
  		/* Unregistering */
  		hlist_add_head(&ri->hlist, head);
  }
  void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
  			 struct hlist_head **head, unsigned long *flags)
  {
  	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
  	spinlock_t *hlist_lock;
  
  	*head = &kretprobe_inst_table[hash];
  	hlist_lock = kretprobe_table_lock_ptr(hash);
  	spin_lock_irqsave(hlist_lock, *flags);
  }
  static void __kprobes kretprobe_table_lock(unsigned long hash,
  	unsigned long *flags)
  {
  	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
  	spin_lock_irqsave(hlist_lock, *flags);
  }
  void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
  	unsigned long *flags)
  {
  	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
  	spinlock_t *hlist_lock;
  
  	hlist_lock = kretprobe_table_lock_ptr(hash);
  	spin_unlock_irqrestore(hlist_lock, *flags);
  }
  void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
  {
  	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
  	spin_unlock_irqrestore(hlist_lock, *flags);
  }
  /*
   * This function is called from finish_task_switch when task tk becomes dead,
   * so that we can recycle any function-return probe instances associated
   * with this task. These left over instances represent probed functions
   * that have been called but will never return.
   */
  void __kprobes kprobe_flush_task(struct task_struct *tk)
  {
  	struct kretprobe_instance *ri;
  	struct hlist_head *head, empty_rp;
  	struct hlist_node *node, *tmp;
  	unsigned long hash, flags = 0;

  	if (unlikely(!kprobes_initialized))
  		/* Early boot.  kretprobe_table_locks not yet initialized. */
  		return;
  
  	hash = hash_ptr(tk, KPROBE_HASH_BITS);
  	head = &kretprobe_inst_table[hash];
  	kretprobe_table_lock(hash, &flags);
  	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
  		if (ri->task == tk)
  			recycle_rp_inst(ri, &empty_rp);
  	}
  	kretprobe_table_unlock(hash, &flags);
  	INIT_HLIST_HEAD(&empty_rp);
  	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
  		hlist_del(&ri->hlist);
  		kfree(ri);
  	}
  }
  static inline void free_rp_inst(struct kretprobe *rp)
  {
  	struct kretprobe_instance *ri;
  	struct hlist_node *pos, *next;
  	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
  		hlist_del(&ri->hlist);
  		kfree(ri);
  	}
  }
  static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
  {
  	unsigned long flags, hash;
  	struct kretprobe_instance *ri;
  	struct hlist_node *pos, *next;
  	struct hlist_head *head;
  	/* No race here */
  	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
  		kretprobe_table_lock(hash, &flags);
  		head = &kretprobe_inst_table[hash];
  		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
  			if (ri->rp == rp)
  				ri->rp = NULL;
  		}
  		kretprobe_table_unlock(hash, &flags);
  	}
  	free_rp_inst(rp);
  }
  /*
  * Add the new probe to ap->list. Fail if this is the
  * second jprobe at the address - two jprobes can't coexist
  */
  static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  {
  	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
  
  	if (p->break_handler || p->post_handler)
  		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */
  	if (p->break_handler) {
  		if (ap->break_handler)
  			return -EEXIST;
  		list_add_tail_rcu(&p->list, &ap->list);
  		ap->break_handler = aggr_break_handler;
  	} else
  		list_add_rcu(&p->list, &ap->list);
  	if (p->post_handler && !ap->post_handler)
  		ap->post_handler = aggr_post_handler;
  
  	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
  		ap->flags &= ~KPROBE_FLAG_DISABLED;
  		if (!kprobes_all_disarmed)
  			/* Arm the breakpoint again. */
  			__arm_kprobe(ap);
  	}
  	return 0;
  }
  
  /*
   * Fill in the required fields of the "manager kprobe". Replace the
   * earlier kprobe in the hlist with the manager kprobe
   */
  static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
  {
  	/* Copy p's insn slot to ap */
  	copy_kprobe(p, ap);
  	flush_insn_slot(ap);
  	ap->addr = p->addr;
  	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
  	ap->pre_handler = aggr_pre_handler;
  	ap->fault_handler = aggr_fault_handler;
  	/* We don't care the kprobe which has gone. */
  	if (p->post_handler && !kprobe_gone(p))
  		ap->post_handler = aggr_post_handler;
  	if (p->break_handler && !kprobe_gone(p))
  		ap->break_handler = aggr_break_handler;
  
  	INIT_LIST_HEAD(&ap->list);
  	INIT_HLIST_NODE(&ap->hlist);

  	list_add_rcu(&p->list, &ap->list);
  	hlist_replace_rcu(&p->hlist, &ap->hlist);
  }
  
  /*
   * This is the second or subsequent kprobe at the address - handle
   * the intricacies
   */
  static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
  					  struct kprobe *p)
  {
  	int ret = 0;
  	struct kprobe *ap = old_p;

  	if (!kprobe_aggrprobe(old_p)) {
  		/* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
  		ap = alloc_aggr_kprobe(old_p);
  		if (!ap)
  			return -ENOMEM;
  		init_aggr_kprobe(ap, old_p);
  	}
  
  	if (kprobe_gone(ap)) {
  		/*
		 * Attempting to insert a new probe at the same location that
		 * had a probe in the module vaddr area which has already been
		 * freed. So, the instruction slot has already been
  		 * released. We need a new slot for the new probe.
  		 */
  		ret = arch_prepare_kprobe(ap);
  		if (ret)
  			/*
  			 * Even if fail to allocate new slot, don't need to
  			 * free aggr_probe. It will be used next time, or
  			 * freed by unregister_kprobe.
  			 */
  			return ret;

  		/* Prepare optimized instructions if possible. */
  		prepare_optimized_kprobe(ap);
  		/*
  		 * Clear gone flag to prevent allocating new slot again, and
  		 * set disabled flag because it is not armed yet.
  		 */
  		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
  			    | KPROBE_FLAG_DISABLED;
  	}

  	/* Copy ap's insn slot to p */
  	copy_kprobe(ap, p);
  	return add_new_kprobe(ap, p);
  }
  /* Try to disable aggr_kprobe, and return 1 if succeeded.*/
  static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
  {
  	struct kprobe *kp;
  
  	list_for_each_entry_rcu(kp, &p->list, list) {
  		if (!kprobe_disabled(kp))
  			/*
  			 * There is an active probe on the list.
  			 * We can't disable aggr_kprobe.
  			 */
  			return 0;
  	}
  	p->flags |= KPROBE_FLAG_DISABLED;
  	return 1;
  }
  static int __kprobes in_kprobes_functions(unsigned long addr)
  {
  	struct kprobe_blackpoint *kb;
  	if (addr >= (unsigned long)__kprobes_text_start &&
  	    addr < (unsigned long)__kprobes_text_end)
  		return -EINVAL;
  	/*
  	 * If there exists a kprobe_blacklist, verify and
  	 * fail any probe registration in the prohibited area
  	 */
  	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
  		if (kb->start_addr) {
  			if (addr >= kb->start_addr &&
  			    addr < (kb->start_addr + kb->range))
  				return -EINVAL;
  		}
  	}
  	return 0;
  }
  /*
   * If we have a symbol_name argument, look it up and add the offset field
   * to it. This way, we can specify a relative address to a symbol.
   */
  static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
  {
  	kprobe_opcode_t *addr = p->addr;
  	if (p->symbol_name) {
  		if (addr)
  			return NULL;
  		kprobe_lookup_name(p->symbol_name, addr);
  	}
  
  	if (!addr)
  		return NULL;
  	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
  }
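
/*
 * Example (illustrative): a probe point may be specified either as a raw
 * address or as symbol_name plus offset, but never both:
 *
 *	struct kprobe kp1 = { .addr = (kprobe_opcode_t *)some_text_addr };
 *	struct kprobe kp2 = { .symbol_name = "do_fork", .offset = 0x10 };
 *
 * (some_text_addr and the symbol/offset pair are made-up values.)
 * kprobe_addr() returns NULL if both .addr and .symbol_name are set, or if
 * the symbol cannot be resolved.
 */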
  /* Check passed kprobe is valid and return kprobe in kprobe_table. */
  static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
  {
  	struct kprobe *old_p, *list_p;
  
  	old_p = get_kprobe(p->addr);
  	if (unlikely(!old_p))
  		return NULL;
  
  	if (p != old_p) {
  		list_for_each_entry_rcu(list_p, &old_p->list, list)
  			if (list_p == p)
  			/* kprobe p is a valid probe */
  				goto valid;
  		return NULL;
  	}
  valid:
  	return old_p;
  }
  
  /* Return error if the kprobe is being re-registered */
  static inline int check_kprobe_rereg(struct kprobe *p)
  {
  	int ret = 0;
  	struct kprobe *old_p;
  
  	mutex_lock(&kprobe_mutex);
  	old_p = __get_valid_kprobe(p);
  	if (old_p)
  		ret = -EINVAL;
  	mutex_unlock(&kprobe_mutex);
  	return ret;
  }
  int __kprobes register_kprobe(struct kprobe *p)
  {
  	int ret = 0;
  	struct kprobe *old_p;
  	struct module *probed_mod;
  	kprobe_opcode_t *addr;

  	addr = kprobe_addr(p);
  	if (!addr)
  		return -EINVAL;
  	p->addr = addr;

  	ret = check_kprobe_rereg(p);
  	if (ret)
  		return ret;
  	preempt_disable();
  	if (!kernel_text_address((unsigned long) p->addr) ||
  	    in_kprobes_functions((unsigned long) p->addr) ||
  	    ftrace_text_reserved(p->addr, p->addr)) {
  		preempt_enable();
  		return -EINVAL;
  	}

  	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
  	p->flags &= KPROBE_FLAG_DISABLED;
  	/*
	 * Check if we are probing a module.
  	 */
  	probed_mod = __module_text_address((unsigned long) p->addr);
  	if (probed_mod) {
  		/*
  		 * We must hold a refcount of the probed module while updating
  		 * its code to prohibit unexpected unloading.
  		 */
  		if (unlikely(!try_module_get(probed_mod))) {
  			preempt_enable();
  			return -EINVAL;
  		}
  		/*
  		 * If the module freed .init.text, we couldn't insert
  		 * kprobes in there.
  		 */
  		if (within_module_init((unsigned long)p->addr, probed_mod) &&
  		    probed_mod->state != MODULE_STATE_COMING) {
  			module_put(probed_mod);
  			preempt_enable();
  			return -EINVAL;
  		}
  	}
  	preempt_enable();

  	p->nmissed = 0;
  	INIT_LIST_HEAD(&p->list);
  	mutex_lock(&kprobe_mutex);
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1134
1135
1136
  
  	get_online_cpus();	/* For avoiding text_mutex deadlock. */
  	mutex_lock(&text_mutex);
64f562c6d   Ananth N Mavinakayanahalli   [PATCH] kprobes: ...
1137
1138
  	old_p = get_kprobe(p->addr);
  	if (old_p) {
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1139
  		/* Since this may unoptimize old_p, locking text_mutex. */
64f562c6d   Ananth N Mavinakayanahalli   [PATCH] kprobes: ...
1140
  		ret = register_aggr_kprobe(old_p, p);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1141
1142
  		goto out;
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1143

6f716acd5   Christoph Hellwig   kprobes: codingst...
1144
1145
  	ret = arch_prepare_kprobe(p);
  	if (ret)
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1146
  		goto out;
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1147

64f562c6d   Ananth N Mavinakayanahalli   [PATCH] kprobes: ...
1148
  	INIT_HLIST_NODE(&p->hlist);
3516a4604   Ananth N Mavinakayanahalli   [PATCH] Kprobes: ...
1149
  	hlist_add_head_rcu(&p->hlist,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1150
  		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
de5bd88d5   Masami Hiramatsu   kprobes: support ...
1151
  	if (!kprobes_all_disarmed && !kprobe_disabled(p))
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1152
1153
1154
1155
  		__arm_kprobe(p);
  
  	/* Try to optimize kprobe */
  	try_to_optimize_kprobe(p);
74a0b5762   Christoph Hellwig   x86: optimize pag...
1156

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1157
  out:
afd66255b   Masami Hiramatsu   kprobes: Introduc...
1158
1159
  	mutex_unlock(&text_mutex);
  	put_online_cpus();
7a7d1cf95   Ingo Molnar   [PATCH] sem2mutex...
1160
  	mutex_unlock(&kprobe_mutex);
49a2a1b83   Anil S Keshavamurthy   [PATCH] kprobes: ...
1161

e8386a0cb   Masami Hiramatsu   kprobes: support ...
1162
  	if (probed_mod)
df019b1d8   Keshavamurthy Anil S   [PATCH] kprobes: ...
1163
  		module_put(probed_mod);
e8386a0cb   Masami Hiramatsu   kprobes: support ...
1164

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1165
1166
  	return ret;
  }
99081ab55   Masami Hiramatsu   kprobes: move EXP...
1167
  EXPORT_SYMBOL_GPL(register_kprobe);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1168
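
/*
 * Illustrative usage sketch (kept under #if 0, not compiled here): how a
 * module might hook a function with register_kprobe(). The probed symbol
 * "do_fork" and the handler/function names are assumptions for the example
 * only; any probe-able text symbol outside the kprobe blacklist would do.
 */
#if 0
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* let the probed instruction execute */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}
#endif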

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;
	if (old_p == p ||
	    (kprobe_aggrprobe(old_p) &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(old_p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed) {
				if (kprobe_disabled(old_p))
					disarm_kprobe(old_p);
				else
					/* Try to optimize this probe again */
					optimize_kprobe(old_p);
			}
		}
	}
	return 0;
}
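
/*
 * Note: unregistration is split into two phases. __unregister_kprobe_top()
 * unhooks (and, if needed, disarms) the probe under kprobe_mutex; the
 * unregister_*() callers below then wait for in-flight handlers with
 * synchronize_sched() before __unregister_kprobe_bottom() frees the
 * per-probe resources.
 */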

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		free_aggr_kprobe(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);
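
/*
 * Illustrative batch-registration sketch (under #if 0, not compiled here).
 * register_kprobes() rolls back the probes it already registered when a
 * later one fails; unregister_kprobes() tears the whole set down with a
 * single synchronize_sched(). Symbols and names below are assumptions.
 */
#if 0
static int example_batch_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static struct kprobe example_kp1 = {
	.symbol_name	= "do_fork",
	.pre_handler	= example_batch_pre,
};
static struct kprobe example_kp2 = {
	.symbol_name	= "do_exit",
	.pre_handler	= example_batch_pre,
};
static struct kprobe *example_probes[] = { &example_kp1, &example_kp2 };

static int __init example_batch_init(void)
{
	return register_kprobes(example_probes, ARRAY_SIZE(example_probes));
}

static void __exit example_batch_exit(void)
{
	unregister_kprobes(example_probes, ARRAY_SIZE(example_probes));
}
#endif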

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
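
/*
 * Illustrative jprobe sketch (under #if 0, not compiled here). A jprobe
 * handler must mirror the probed function's prototype and must end with
 * jprobe_return(). The probed function "do_fork" and its prototype are
 * assumptions for the example only.
 */
#if 0
static long example_jdo_fork(unsigned long clone_flags,
			     unsigned long stack_start, struct pt_regs *regs,
			     unsigned long stack_size,
			     int __user *parent_tidptr,
			     int __user *child_tidptr)
{
	pr_info("jprobe: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* execution never continues past here */
	return 0;
}

static struct jprobe example_jp = {
	.entry		= example_jdo_fork,
	.kp.symbol_name	= "do_fork",
};

/* register_jprobe(&example_jp) / unregister_jprobe(&example_jp) as usual. */
#endif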

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the probe
 * hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
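
/*
 * Note: when free_instances is empty above, the return is simply not
 * tracked and rp->nmissed is incremented. Users who care about misses can
 * raise rp->maxactive before calling register_kretprobe() below.
 */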
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
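
/*
 * Illustrative kretprobe sketch (under #if 0, not compiled here). The probed
 * symbol "do_fork", the per-instance data layout and the handler names are
 * assumptions for the example only.
 */
#if 0
struct example_data {
	ktime_t entry_time;
};

static int example_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	((struct example_data *)ri->data)->entry_time = ktime_get();
	return 0;	/* 0 == track this instance */
}

static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct example_data *d = (struct example_data *)ri->data;

	pr_info("retval=%lu, took %lld ns\n",
		(unsigned long)regs_return_value(regs),
		(long long)ktime_to_ns(ktime_sub(ktime_get(), d->entry_time)));
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name	= "do_fork",
	.entry_handler	= example_entry,
	.handler	= example_ret,
	.data_size	= sizeof(struct example_data),
	.maxactive	= 20,
};

/* register_kretprobe(&example_rp) / unregister_kretprobe(&example_rp). */
#endif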

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}
void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
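
/*
 * Note: dump_kprobe() is a diagnostic helper intended for fault paths that
 * want to report which probe was active when something went wrong; the
 * symbol_name printed above may be NULL if the probe was registered by
 * address rather than by name.
 */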
/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable the kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
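
/*
 * Note: the seq_file code above backs the debugfs "kprobes/list" file. Each
 * line printed by report_probe() has the form
 *   <address>  <type>  <symbol+offset or address>  [module] [flags]
 * where <type> is k (kprobe), r (kretprobe) or j (jprobe), and the optional
 * flags are [GONE], [DISABLED] and [OPTIMIZED].
 */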
/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
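
/*
 * Illustrative sketch (under #if 0, not compiled here): temporarily muting a
 * registered probe without unregistering it. "example_kp" refers to the
 * hypothetical probe from the earlier register_kprobe() sketch.
 */
#if 0
static void example_toggle(bool on)
{
	if (on)
		enable_kprobe(&example_kp);	/* re-arm a disabled probe */
	else
		disable_kprobe(&example_kp);	/* mute it, keep it registered */
}
#endif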
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize kprobe itself */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				__arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	/*
	 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
	 * because disarming may also unoptimize kprobes.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				__disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
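
/*
 * Note: the two helpers above back the debugfs "kprobes/enabled" file.
 * Writing '0', 'n' or 'N' disarms every probe via disarm_all_kprobes();
 * writing '1', 'y' or 'Y' re-arms them. With debugfs mounted at the usual
 * location this is e.g. "echo 0 > /sys/kernel/debug/kprobes/enabled".
 */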
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);