Commit 4610ee1d3638fa05ba8e87ccfa971db8e4033ae7

Authored by Masami Hiramatsu
Committed by Ingo Molnar
1 parent d498f76395

kprobes: Introduce generic insn_slot framework

Make insn_slot framework support various size slots.
Currently, the insn_slot framework supports only one fixed-size instruction
buffer slot. However, kprobes jump optimization needs larger-size
buffers.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
LKML-Reference: <20100225133358.6725.82430.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>

Showing 1 changed file with 65 additions and 39 deletions (side-by-side diff)

... ... @@ -105,57 +105,74 @@
105 105 * stepping on the instruction on a vmalloced/kmalloced/data page
106 106 * is a recipe for disaster
107 107 */
108   -#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
109   -
110 108 struct kprobe_insn_page {
111 109 struct list_head list;
112 110 kprobe_opcode_t *insns; /* Page of instruction slots */
113   - char slot_used[INSNS_PER_PAGE];
114 111 int nused;
115 112 int ngarbage;
  113 + char slot_used[];
116 114 };
117 115  
  116 +#define KPROBE_INSN_PAGE_SIZE(slots) \
  117 + (offsetof(struct kprobe_insn_page, slot_used) + \
  118 + (sizeof(char) * (slots)))
  119 +
  120 +struct kprobe_insn_cache {
  121 + struct list_head pages; /* list of kprobe_insn_page */
  122 + size_t insn_size; /* size of instruction slot */
  123 + int nr_garbage;
  124 +};
  125 +
  126 +static int slots_per_page(struct kprobe_insn_cache *c)
  127 +{
  128 + return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
  129 +}
  130 +
118 131 enum kprobe_slot_state {
119 132 SLOT_CLEAN = 0,
120 133 SLOT_DIRTY = 1,
121 134 SLOT_USED = 2,
122 135 };
123 136  
124   -static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
125   -static LIST_HEAD(kprobe_insn_pages);
126   -static int kprobe_garbage_slots;
127   -static int collect_garbage_slots(void);
  137 +static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
  138 +static struct kprobe_insn_cache kprobe_insn_slots = {
  139 + .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
  140 + .insn_size = MAX_INSN_SIZE,
  141 + .nr_garbage = 0,
  142 +};
  143 +static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
128 144  
129 145 /**
130 146 * __get_insn_slot() - Find a slot on an executable page for an instruction.
131 147 * We allocate an executable page if there's no room on existing ones.
132 148 */
133   -static kprobe_opcode_t __kprobes *__get_insn_slot(void)
  149 +static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
134 150 {
135 151 struct kprobe_insn_page *kip;
136 152  
137 153 retry:
138   - list_for_each_entry(kip, &kprobe_insn_pages, list) {
139   - if (kip->nused < INSNS_PER_PAGE) {
  154 + list_for_each_entry(kip, &c->pages, list) {
  155 + if (kip->nused < slots_per_page(c)) {
140 156 int i;
141   - for (i = 0; i < INSNS_PER_PAGE; i++) {
  157 + for (i = 0; i < slots_per_page(c); i++) {
142 158 if (kip->slot_used[i] == SLOT_CLEAN) {
143 159 kip->slot_used[i] = SLOT_USED;
144 160 kip->nused++;
145   - return kip->insns + (i * MAX_INSN_SIZE);
  161 + return kip->insns + (i * c->insn_size);
146 162 }
147 163 }
148   - /* Surprise! No unused slots. Fix kip->nused. */
149   - kip->nused = INSNS_PER_PAGE;
  164 + /* kip->nused is broken. Fix it. */
  165 + kip->nused = slots_per_page(c);
  166 + WARN_ON(1);
150 167 }
151 168 }
152 169  
153 170 /* If there are any garbage slots, collect it and try again. */
154   - if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
  171 + if (c->nr_garbage && collect_garbage_slots(c) == 0)
155 172 goto retry;
156   - }
157   - /* All out of space. Need to allocate a new page. Use slot 0. */
158   - kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
  173 +
  174 + /* All out of space. Need to allocate a new page. */
  175 + kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
159 176 if (!kip)
160 177 return NULL;
161 178  
162 179  
163 180  
164 181  
165 182  
166 183  
... ... @@ -170,20 +187,23 @@
170 187 return NULL;
171 188 }
172 189 INIT_LIST_HEAD(&kip->list);
173   - list_add(&kip->list, &kprobe_insn_pages);
174   - memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
  190 + memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
175 191 kip->slot_used[0] = SLOT_USED;
176 192 kip->nused = 1;
177 193 kip->ngarbage = 0;
  194 + list_add(&kip->list, &c->pages);
178 195 return kip->insns;
179 196 }
180 197  
  198 +
181 199 kprobe_opcode_t __kprobes *get_insn_slot(void)
182 200 {
183   - kprobe_opcode_t *ret;
  201 + kprobe_opcode_t *ret = NULL;
  202 +
184 203 mutex_lock(&kprobe_insn_mutex);
185   - ret = __get_insn_slot();
  204 + ret = __get_insn_slot(&kprobe_insn_slots);
186 205 mutex_unlock(&kprobe_insn_mutex);
  206 +
187 207 return ret;
188 208 }
189 209  
... ... @@ -199,7 +219,7 @@
199 219 * so as not to have to set it up again the
200 220 * next time somebody inserts a probe.
201 221 */
202   - if (!list_is_singular(&kprobe_insn_pages)) {
  222 + if (!list_is_singular(&kip->list)) {
203 223 list_del(&kip->list);
204 224 module_free(NULL, kip->insns);
205 225 kfree(kip);
206 226  
207 227  
208 228  
209 229  
210 230  
211 231  
212 232  
213 233  
214 234  
215 235  
... ... @@ -209,49 +229,55 @@
209 229 return 0;
210 230 }
211 231  
212   -static int __kprobes collect_garbage_slots(void)
  232 +static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
213 233 {
214 234 struct kprobe_insn_page *kip, *next;
215 235  
216 236 /* Ensure no-one is interrupted on the garbages */
217 237 synchronize_sched();
218 238  
219   - list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
  239 + list_for_each_entry_safe(kip, next, &c->pages, list) {
220 240 int i;
221 241 if (kip->ngarbage == 0)
222 242 continue;
223 243 kip->ngarbage = 0; /* we will collect all garbages */
224   - for (i = 0; i < INSNS_PER_PAGE; i++) {
  244 + for (i = 0; i < slots_per_page(c); i++) {
225 245 if (kip->slot_used[i] == SLOT_DIRTY &&
226 246 collect_one_slot(kip, i))
227 247 break;
228 248 }
229 249 }
230   - kprobe_garbage_slots = 0;
  250 + c->nr_garbage = 0;
231 251 return 0;
232 252 }
233 253  
234   -void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
  254 +static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
  255 + kprobe_opcode_t *slot, int dirty)
235 256 {
236 257 struct kprobe_insn_page *kip;
237 258  
238   - mutex_lock(&kprobe_insn_mutex);
239   - list_for_each_entry(kip, &kprobe_insn_pages, list) {
240   - if (kip->insns <= slot &&
241   - slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
242   - int i = (slot - kip->insns) / MAX_INSN_SIZE;
  259 + list_for_each_entry(kip, &c->pages, list) {
  260 + long idx = ((long)slot - (long)kip->insns) / c->insn_size;
  261 + if (idx >= 0 && idx < slots_per_page(c)) {
  262 + WARN_ON(kip->slot_used[idx] != SLOT_USED);
243 263 if (dirty) {
244   - kip->slot_used[i] = SLOT_DIRTY;
  264 + kip->slot_used[idx] = SLOT_DIRTY;
245 265 kip->ngarbage++;
  266 + if (++c->nr_garbage > slots_per_page(c))
  267 + collect_garbage_slots(c);
246 268 } else
247   - collect_one_slot(kip, i);
248   - break;
  269 + collect_one_slot(kip, idx);
  270 + return;
249 271 }
250 272 }
  273 + /* Could not free this slot. */
  274 + WARN_ON(1);
  275 +}
251 276  
252   - if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
253   - collect_garbage_slots();
254   -
  277 +void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
  278 +{
  279 + mutex_lock(&kprobe_insn_mutex);
  280 + __free_insn_slot(&kprobe_insn_slots, slot, dirty);
255 281 mutex_unlock(&kprobe_insn_mutex);
256 282 }
257 283 #endif