Commit 129415607845d4daea11ddcba706005c69dcb942
Committed by
Linus Torvalds
1 parent
a06f6211ef
Exists in
master
and in
4 other branches
kprobes: add kprobe_insn_mutex and cleanup arch_remove_kprobe()
Add kprobe_insn_mutex for protecting kprobe_insn_pages hlist, and remove kprobe_mutex from architecture dependent code. This allows us to call arch_remove_kprobe() (and free_insn_slot) while holding kprobe_mutex. Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com> Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: Russell King <rmk@arm.linux.org.uk> Cc: "Luck, Tony" <tony.luck@intel.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 7 changed files with 38 additions and 19 deletions. Side-by-side Diff
arch/arm/kernel/kprobes.c
arch/ia64/kernel/kprobes.c
... | ... | @@ -670,9 +670,11 @@ |
670 | 670 | |
671 | 671 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
672 | 672 | { |
673 | - mutex_lock(&kprobe_mutex); | |
674 | - free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); | |
675 | - mutex_unlock(&kprobe_mutex); | |
673 | + if (p->ainsn.insn) { | |
674 | + free_insn_slot(p->ainsn.insn, | |
675 | + p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); | |
676 | + p->ainsn.insn = NULL; | |
677 | + } | |
676 | 678 | } |
677 | 679 | /* |
678 | 680 | * We are resuming execution after a single step fault, so the pt_regs |
arch/powerpc/kernel/kprobes.c
... | ... | @@ -96,9 +96,10 @@ |
96 | 96 | |
97 | 97 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
98 | 98 | { |
99 | - mutex_lock(&kprobe_mutex); | |
100 | - free_insn_slot(p->ainsn.insn, 0); | |
101 | - mutex_unlock(&kprobe_mutex); | |
99 | + if (p->ainsn.insn) { | |
100 | + free_insn_slot(p->ainsn.insn, 0); | |
101 | + p->ainsn.insn = NULL; | |
102 | + } | |
102 | 103 | } |
103 | 104 | |
104 | 105 | static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
arch/s390/kernel/kprobes.c
... | ... | @@ -218,9 +218,10 @@ |
218 | 218 | |
219 | 219 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
220 | 220 | { |
221 | - mutex_lock(&kprobe_mutex); | |
222 | - free_insn_slot(p->ainsn.insn, 0); | |
223 | - mutex_unlock(&kprobe_mutex); | |
221 | + if (p->ainsn.insn) { | |
222 | + free_insn_slot(p->ainsn.insn, 0); | |
223 | + p->ainsn.insn = NULL; | |
224 | + } | |
224 | 225 | } |
225 | 226 | |
226 | 227 | static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
arch/x86/kernel/kprobes.c
... | ... | @@ -376,9 +376,10 @@ |
376 | 376 | |
377 | 377 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
378 | 378 | { |
379 | - mutex_lock(&kprobe_mutex); | |
380 | - free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); | |
381 | - mutex_unlock(&kprobe_mutex); | |
379 | + if (p->ainsn.insn) { | |
380 | + free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); | |
381 | + p->ainsn.insn = NULL; | |
382 | + } | |
382 | 383 | } |
383 | 384 | |
384 | 385 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) |
include/linux/kprobes.h
... | ... | @@ -201,7 +201,6 @@ |
201 | 201 | } |
202 | 202 | #endif /* CONFIG_KPROBES_SANITY_TEST */ |
203 | 203 | |
204 | -extern struct mutex kprobe_mutex; | |
205 | 204 | extern int arch_prepare_kprobe(struct kprobe *p); |
206 | 205 | extern void arch_arm_kprobe(struct kprobe *p); |
207 | 206 | extern void arch_disarm_kprobe(struct kprobe *p); |
kernel/kprobes.c
... | ... | @@ -69,7 +69,7 @@ |
69 | 69 | /* NOTE: change this value only with kprobe_mutex held */ |
70 | 70 | static bool kprobe_enabled; |
71 | 71 | |
72 | -DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ | |
72 | +static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ | |
73 | 73 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; |
74 | 74 | static struct { |
75 | 75 | spinlock_t lock ____cacheline_aligned_in_smp; |
... | ... | @@ -115,6 +115,7 @@ |
115 | 115 | SLOT_USED = 2, |
116 | 116 | }; |
117 | 117 | |
118 | +static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ | |
118 | 119 | static struct hlist_head kprobe_insn_pages; |
119 | 120 | static int kprobe_garbage_slots; |
120 | 121 | static int collect_garbage_slots(void); |
121 | 122 | |
... | ... | @@ -144,10 +145,10 @@ |
144 | 145 | } |
145 | 146 | |
146 | 147 | /** |
147 | - * get_insn_slot() - Find a slot on an executable page for an instruction. | |
148 | + * __get_insn_slot() - Find a slot on an executable page for an instruction. | |
148 | 149 | * We allocate an executable page if there's no room on existing ones. |
149 | 150 | */ |
150 | -kprobe_opcode_t __kprobes *get_insn_slot(void) | |
151 | +static kprobe_opcode_t __kprobes *__get_insn_slot(void) | |
151 | 152 | { |
152 | 153 | struct kprobe_insn_page *kip; |
153 | 154 | struct hlist_node *pos; |
... | ... | @@ -196,6 +197,15 @@ |
196 | 197 | return kip->insns; |
197 | 198 | } |
198 | 199 | |
200 | +kprobe_opcode_t __kprobes *get_insn_slot(void) | |
201 | +{ | |
202 | + kprobe_opcode_t *ret; | |
203 | + mutex_lock(&kprobe_insn_mutex); | |
204 | + ret = __get_insn_slot(); | |
205 | + mutex_unlock(&kprobe_insn_mutex); | |
206 | + return ret; | |
207 | +} | |
208 | + | |
199 | 209 | /* Return 1 if all garbages are collected, otherwise 0. */ |
200 | 210 | static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) |
201 | 211 | { |
202 | 212 | |
... | ... | @@ -226,9 +236,13 @@ |
226 | 236 | { |
227 | 237 | struct kprobe_insn_page *kip; |
228 | 238 | struct hlist_node *pos, *next; |
239 | + int safety; | |
229 | 240 | |
230 | 241 | /* Ensure no-one is preempted on the garbage slots */ |
231 | - if (check_safety() != 0) | |
242 | + mutex_unlock(&kprobe_insn_mutex); | |
243 | + safety = check_safety(); | |
244 | + mutex_lock(&kprobe_insn_mutex); | |
245 | + if (safety != 0) | |
232 | 246 | return -EAGAIN; |
233 | 247 | |
234 | 248 | hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { |
... | ... | @@ -251,6 +265,7 @@ |
251 | 265 | struct kprobe_insn_page *kip; |
252 | 266 | struct hlist_node *pos; |
253 | 267 | |
268 | + mutex_lock(&kprobe_insn_mutex); | |
254 | 269 | hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { |
255 | 270 | if (kip->insns <= slot && |
256 | 271 | slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { |
... | ... | @@ -267,6 +282,8 @@ |
267 | 282 | |
268 | 283 | if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE) |
269 | 284 | collect_garbage_slots(); |
285 | + | |
286 | + mutex_unlock(&kprobe_insn_mutex); | |
270 | 287 | } |
271 | 288 | #endif |
272 | 289 |