Commit 64f562c6df3cfc5d1b2b4bdbcb7951457df9c237
Committed by: Linus Torvalds
Parent: 04dea5f932
[PATCH] kprobes: Allow multiple kprobes at the same address
Allow registration of multiple kprobes at an address in an architecture
agnostic way. Corresponding handlers will be invoked in a sequence. But,
a kprobe and a jprobe can't (yet) co-exist at the same address.

Signed-off-by: Ananth N Mavinakayanahalli <amavin@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
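For illustration only (not part of this commit), a minimal module sketch of what the change enables: two kprobes registered at the same address, with both pre-handlers invoked in sequence. The module layout, handler names, and the probe_addr parameter are assumptions for the example; the address would be supplied out of band, e.g. looked up in System.map.

        /* Hypothetical example module -- not code from this patch. */
        #include <linux/module.h>
        #include <linux/kprobes.h>

        static unsigned long probe_addr;	/* assumed: address of a probeable instruction */
        module_param(probe_addr, ulong, 0);

        static int pre1(struct kprobe *p, struct pt_regs *regs)
        {
                printk("kprobe 1 hit at %p\n", p->addr);
                return 0;
        }

        static int pre2(struct kprobe *p, struct pt_regs *regs)
        {
                printk("kprobe 2 hit at %p\n", p->addr);
                return 0;
        }

        static struct kprobe kp1 = { .pre_handler = pre1 };
        static struct kprobe kp2 = { .pre_handler = pre2 };

        static int __init multiprobe_init(void)
        {
                int ret;

                kp1.addr = kp2.addr = (kprobe_opcode_t *) probe_addr;
                if ((ret = register_kprobe(&kp1)) != 0)
                        return ret;
                /* Before this patch: -EEXIST.  After: both handlers run in sequence. */
                if ((ret = register_kprobe(&kp2)) != 0) {
                        unregister_kprobe(&kp1);
                        return ret;
                }
                return 0;
        }

        static void __exit multiprobe_exit(void)
        {
                unregister_kprobe(&kp2);
                unregister_kprobe(&kp1);
        }

        module_init(multiprobe_init);
        module_exit(multiprobe_exit);
        MODULE_LICENSE("GPL");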
2 files changed, 134 insertions(+), 13 deletions(-)
include/linux/kprobes.h
kernel/kprobes.c
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -44,6 +44,7 @@
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
+static struct kprobe *curr_kprobe;
 
 /* Locks kprobe: irqs must be disabled */
 void lock_kprobes(void)
@@ -73,22 +74,139 @@
         return NULL;
 }
 
+/*
+ * Aggregate handlers for multiple kprobes support - these handlers
+ * take care of invoking the individual kprobe handlers on p->list
+ */
+int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+        struct kprobe *kp;
+
+        list_for_each_entry(kp, &p->list, list) {
+                if (kp->pre_handler) {
+                        curr_kprobe = kp;
+                        kp->pre_handler(kp, regs);
+                        curr_kprobe = NULL;
+                }
+        }
+        return 0;
+}
+
+void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+                        unsigned long flags)
+{
+        struct kprobe *kp;
+
+        list_for_each_entry(kp, &p->list, list) {
+                if (kp->post_handler) {
+                        curr_kprobe = kp;
+                        kp->post_handler(kp, regs, flags);
+                        curr_kprobe = NULL;
+                }
+        }
+        return;
+}
+
+int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+{
+        /*
+         * if we faulted "during" the execution of a user specified
+         * probe handler, invoke just that probe's fault handler
+         */
+        if (curr_kprobe && curr_kprobe->fault_handler) {
+                if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+                        return 1;
+        }
+        return 0;
+}
+
+/*
+ * Fill in the required fields of the "manager kprobe". Replace the
+ * earlier kprobe in the hlist with the manager kprobe
+ */
+static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+{
+        ap->addr = p->addr;
+        ap->opcode = p->opcode;
+        memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));
+
+        ap->pre_handler = aggr_pre_handler;
+        ap->post_handler = aggr_post_handler;
+        ap->fault_handler = aggr_fault_handler;
+
+        INIT_LIST_HEAD(&ap->list);
+        list_add(&p->list, &ap->list);
+
+        INIT_HLIST_NODE(&ap->hlist);
+        hlist_del(&p->hlist);
+        hlist_add_head(&ap->hlist,
+                       &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+}
+
+/*
+ * This is the second or subsequent kprobe at the address - handle
+ * the intricacies
+ * TODO: Move kcalloc outside the spinlock
+ */
+static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+        int ret = 0;
+        struct kprobe *ap;
+
+        if (old_p->break_handler || p->break_handler) {
+                ret = -EEXIST;  /* kprobe and jprobe can't (yet) coexist */
+        } else if (old_p->pre_handler == aggr_pre_handler) {
+                list_add(&p->list, &old_p->list);
+        } else {
+                ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+                if (!ap)
+                        return -ENOMEM;
+                add_aggr_kprobe(ap, old_p);
+                list_add(&p->list, &ap->list);
+        }
+        return ret;
+}
+
+/* kprobe removal house-keeping routines */
+static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
+{
+        *p->addr = p->opcode;
+        hlist_del(&p->hlist);
+        flush_icache_range((unsigned long) p->addr,
+                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+        spin_unlock_irqrestore(&kprobe_lock, flags);
+        arch_remove_kprobe(p);
+}
+
+static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
+                                       struct kprobe *p, unsigned long flags)
+{
+        list_del(&p->list);
+        if (list_empty(&old_p->list)) {
+                cleanup_kprobe(old_p, flags);
+                kfree(old_p);
+        } else
+                spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
 int register_kprobe(struct kprobe *p)
 {
         int ret = 0;
         unsigned long flags = 0;
+        struct kprobe *old_p;
 
         if ((ret = arch_prepare_kprobe(p)) != 0) {
                 goto rm_kprobe;
         }
         spin_lock_irqsave(&kprobe_lock, flags);
-        INIT_HLIST_NODE(&p->hlist);
-        if (get_kprobe(p->addr)) {
-                ret = -EEXIST;
+        old_p = get_kprobe(p->addr);
+        if (old_p) {
+                ret = register_aggr_kprobe(old_p, p);
                 goto out;
         }
-        arch_copy_kprobe(p);
 
+        arch_copy_kprobe(p);
+        INIT_HLIST_NODE(&p->hlist);
         hlist_add_head(&p->hlist,
                        &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
@@ -107,17 +225,17 @@
 void unregister_kprobe(struct kprobe *p)
 {
         unsigned long flags;
+        struct kprobe *old_p;
+
         spin_lock_irqsave(&kprobe_lock, flags);
-        if (!get_kprobe(p->addr)) {
+        old_p = get_kprobe(p->addr);
+        if (old_p) {
+                if (old_p->pre_handler == aggr_pre_handler)
+                        cleanup_aggr_kprobe(old_p, p, flags);
+                else
+                        cleanup_kprobe(p, flags);
+        } else
                 spin_unlock_irqrestore(&kprobe_lock, flags);
-                return;
-        }
-        *p->addr = p->opcode;
-        hlist_del(&p->hlist);
-        flush_icache_range((unsigned long) p->addr,
-                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
-        spin_unlock_irqrestore(&kprobe_lock, flags);
-        arch_remove_kprobe(p);
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
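A note on the "(yet)" in the commit message: register_jprobe() installs its underlying kprobe with a break_handler set, so once either probe at an address has a break_handler, register_aggr_kprobe() bails out with -EEXIST. A hedged sketch of the resulting behavior, reusing the hypothetical probe_addr from the example above and assuming the era's JPROBE_ENTRY() helper:

        /* Hypothetical fragment -- not code from this patch. */
        #include <linux/kprobes.h>

        /* my_entry would mirror the probed function's prototype; illustrative only. */
        static void my_entry(void)
        {
                jprobe_return();        /* a jprobe handler ends with jprobe_return() */
        }

        static struct jprobe jp = {
                .entry = JPROBE_ENTRY(my_entry),
        };

        static int __init mixed_probe_init(void)
        {
                jp.kp.addr = (kprobe_opcode_t *) probe_addr;
                /*
                 * With a plain kprobe already registered at probe_addr, this
                 * returns -EEXIST: register_jprobe() sets jp.kp.break_handler,
                 * tripping the break_handler check in register_aggr_kprobe().
                 */
                return register_jprobe(&jp);
        }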