Commit 62c27be0dd8144e11bd3ed054a0fb890579925f8
Committed by Linus Torvalds
1 parent 09b18203d7
Exists in master and in 4 other branches
[PATCH] kprobe whitespace cleanup
Whitespace was being used for indentation; this patch cleans up those lines to follow the kernel coding style.

Signed-off-by: bibo, mao <bibo.mao@intel.com>
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
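For context, the kernel coding style (Documentation/CodingStyle) indents with hard tab characters, eight columns wide, rather than runs of spaces; the hunks below swap space indentation for tabs without changing any logic. A minimal sketch of the rule, using a hypothetical example() function that is not part of this patch:

/* Hypothetical illustration of the indentation rule only. */
int example(int x)
{
	if (x > 0)		/* one hard tab per indent level */
		return 1;	/* nested one level deeper: two tabs */
	return 0;		/* no leading spaces anywhere */
}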
Showing 5 changed files with 103 additions and 103 deletions (side-by-side diff)
arch/i386/kernel/kprobes.c
... | ... | @@ -230,20 +230,20 @@ |
230 | 230 | struct pt_regs *regs) |
231 | 231 | { |
232 | 232 | unsigned long *sara = (unsigned long *)&regs->esp; |
233 | - struct kretprobe_instance *ri; | |
234 | 233 | |
235 | - if ((ri = get_free_rp_inst(rp)) != NULL) { | |
236 | - ri->rp = rp; | |
237 | - ri->task = current; | |
234 | + struct kretprobe_instance *ri; | |
235 | + | |
236 | + if ((ri = get_free_rp_inst(rp)) != NULL) { | |
237 | + ri->rp = rp; | |
238 | + ri->task = current; | |
238 | 239 | ri->ret_addr = (kprobe_opcode_t *) *sara; |
239 | 240 | |
240 | 241 | /* Replace the return addr with trampoline addr */ |
241 | 242 | *sara = (unsigned long) &kretprobe_trampoline; |
242 | - | |
243 | - add_rp_inst(ri); | |
244 | - } else { | |
245 | - rp->nmissed++; | |
246 | - } | |
243 | + add_rp_inst(ri); | |
244 | + } else { | |
245 | + rp->nmissed++; | |
246 | + } | |
247 | 247 | } |
248 | 248 | |
249 | 249 | /* |
... | ... | @@ -359,7 +359,7 @@ |
359 | 359 | void __kprobes kretprobe_trampoline_holder(void) |
360 | 360 | { |
361 | 361 | asm volatile ( ".global kretprobe_trampoline\n" |
362 | - "kretprobe_trampoline: \n" | |
362 | + "kretprobe_trampoline: \n" | |
363 | 363 | " pushf\n" |
364 | 364 | /* skip cs, eip, orig_eax, es, ds */ |
365 | 365 | " subl $20, %esp\n" |
366 | 366 | |
... | ... | @@ -395,14 +395,14 @@ |
395 | 395 | */ |
396 | 396 | fastcall void *__kprobes trampoline_handler(struct pt_regs *regs) |
397 | 397 | { |
398 | - struct kretprobe_instance *ri = NULL; | |
399 | - struct hlist_head *head; | |
400 | - struct hlist_node *node, *tmp; | |
398 | + struct kretprobe_instance *ri = NULL; | |
399 | + struct hlist_head *head; | |
400 | + struct hlist_node *node, *tmp; | |
401 | 401 | unsigned long flags, orig_ret_address = 0; |
402 | 402 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; |
403 | 403 | |
404 | 404 | spin_lock_irqsave(&kretprobe_lock, flags); |
405 | - head = kretprobe_inst_table_head(current); | |
405 | + head = kretprobe_inst_table_head(current); | |
406 | 406 | |
407 | 407 | /* |
408 | 408 | * It is possible to have multiple instances associated with a given |
409 | 409 | |
410 | 410 | |
... | ... | @@ -413,14 +413,14 @@ |
413 | 413 | * We can handle this because: |
414 | 414 | * - instances are always inserted at the head of the list |
415 | 415 | * - when multiple return probes are registered for the same |
416 | - * function, the first instance's ret_addr will point to the | |
416 | + * function, the first instance's ret_addr will point to the | |
417 | 417 | * real return address, and all the rest will point to |
418 | 418 | * kretprobe_trampoline |
419 | 419 | */ |
420 | 420 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { |
421 | - if (ri->task != current) | |
421 | + if (ri->task != current) | |
422 | 422 | /* another task is sharing our hash bucket */ |
423 | - continue; | |
423 | + continue; | |
424 | 424 | |
425 | 425 | if (ri->rp && ri->rp->handler){ |
426 | 426 | __get_cpu_var(current_kprobe) = &ri->rp->kp; |
arch/ia64/kernel/kprobes.c
... | ... | @@ -90,7 +90,7 @@ |
90 | 90 | p->ainsn.target_br_reg = 0; |
91 | 91 | |
92 | 92 | /* Check for Break instruction |
93 | - * Bits 37:40 Major opcode to be zero | |
93 | + * Bits 37:40 Major opcode to be zero | |
94 | 94 | * Bits 27:32 X6 to be zero |
95 | 95 | * Bits 32:35 X3 to be zero |
96 | 96 | */ |
97 | 97 | |
98 | 98 | |
99 | 99 | |
... | ... | @@ -104,19 +104,19 @@ |
104 | 104 | switch (major_opcode) { |
105 | 105 | case INDIRECT_CALL_OPCODE: |
106 | 106 | p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; |
107 | - p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); | |
108 | - break; | |
107 | + p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); | |
108 | + break; | |
109 | 109 | case IP_RELATIVE_PREDICT_OPCODE: |
110 | 110 | case IP_RELATIVE_BRANCH_OPCODE: |
111 | 111 | p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; |
112 | - break; | |
112 | + break; | |
113 | 113 | case IP_RELATIVE_CALL_OPCODE: |
114 | - p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; | |
115 | - p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; | |
116 | - p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); | |
117 | - break; | |
114 | + p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; | |
115 | + p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; | |
116 | + p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); | |
117 | + break; | |
118 | 118 | } |
119 | - } else if (bundle_encoding[template][slot] == X) { | |
119 | + } else if (bundle_encoding[template][slot] == X) { | |
120 | 120 | switch (major_opcode) { |
121 | 121 | case LONG_CALL_OPCODE: |
122 | 122 | p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; |
123 | 123 | |
124 | 124 | |
... | ... | @@ -258,18 +258,18 @@ |
258 | 258 | |
259 | 259 | switch (slot) { |
260 | 260 | case 0: |
261 | - *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT); | |
262 | - *kprobe_inst = bundle->quad0.slot0; | |
263 | - break; | |
261 | + *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT); | |
262 | + *kprobe_inst = bundle->quad0.slot0; | |
263 | + break; | |
264 | 264 | case 1: |
265 | - *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT); | |
266 | - kprobe_inst_p0 = bundle->quad0.slot1_p0; | |
267 | - kprobe_inst_p1 = bundle->quad1.slot1_p1; | |
268 | - *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46)); | |
265 | + *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT); | |
266 | + kprobe_inst_p0 = bundle->quad0.slot1_p0; | |
267 | + kprobe_inst_p1 = bundle->quad1.slot1_p1; | |
268 | + *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46)); | |
269 | 269 | break; |
270 | 270 | case 2: |
271 | - *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT); | |
272 | - *kprobe_inst = bundle->quad1.slot2; | |
271 | + *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT); | |
272 | + *kprobe_inst = bundle->quad1.slot2; | |
273 | 273 | break; |
274 | 274 | } |
275 | 275 | } |
276 | 276 | |
... | ... | @@ -290,11 +290,11 @@ |
290 | 290 | return -EINVAL; |
291 | 291 | } |
292 | 292 | |
293 | - if (in_ivt_functions(addr)) { | |
294 | - printk(KERN_WARNING "Kprobes can't be inserted inside " | |
293 | + if (in_ivt_functions(addr)) { | |
294 | + printk(KERN_WARNING "Kprobes can't be inserted inside " | |
295 | 295 | "IVT functions at 0x%lx\n", addr); |
296 | - return -EINVAL; | |
297 | - } | |
296 | + return -EINVAL; | |
297 | + } | |
298 | 298 | |
299 | 299 | if (slot == 1 && bundle_encoding[template][1] != L) { |
300 | 300 | printk(KERN_WARNING "Inserting kprobes on slot #1 " |
301 | 301 | |
... | ... | @@ -424,14 +424,14 @@ |
424 | 424 | bundle_t *bundle; |
425 | 425 | |
426 | 426 | bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle; |
427 | - template = bundle->quad0.template; | |
427 | + template = bundle->quad0.template; | |
428 | 428 | |
429 | 429 | if(valid_kprobe_addr(template, slot, addr)) |
430 | 430 | return -EINVAL; |
431 | 431 | |
432 | 432 | /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ |
433 | - if (slot == 1 && bundle_encoding[template][1] == L) | |
434 | - slot++; | |
433 | + if (slot == 1 && bundle_encoding[template][1] == L) | |
434 | + slot++; | |
435 | 435 | |
436 | 436 | /* Get kprobe_inst and major_opcode from the bundle */ |
437 | 437 | get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); |
438 | 438 | |
439 | 439 | |
... | ... | @@ -489,21 +489,22 @@ |
489 | 489 | */ |
490 | 490 | static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) |
491 | 491 | { |
492 | - unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle); | |
493 | - unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; | |
494 | - unsigned long template; | |
495 | - int slot = ((unsigned long)p->addr & 0xf); | |
492 | + unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle); | |
493 | + unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; | |
494 | + unsigned long template; | |
495 | + int slot = ((unsigned long)p->addr & 0xf); | |
496 | 496 | |
497 | 497 | template = p->ainsn.insn->bundle.quad0.template; |
498 | 498 | |
499 | - if (slot == 1 && bundle_encoding[template][1] == L) | |
500 | - slot = 2; | |
499 | + if (slot == 1 && bundle_encoding[template][1] == L) | |
500 | + slot = 2; | |
501 | 501 | |
502 | 502 | if (p->ainsn.inst_flag) { |
503 | 503 | |
504 | 504 | if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) { |
505 | 505 | /* Fix relative IP address */ |
506 | - regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr; | |
506 | + regs->cr_iip = (regs->cr_iip - bundle_addr) + | |
507 | + resume_addr; | |
507 | 508 | } |
508 | 509 | |
509 | 510 | if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) { |
510 | 511 | |
... | ... | @@ -540,18 +541,18 @@ |
540 | 541 | } |
541 | 542 | |
542 | 543 | if (slot == 2) { |
543 | - if (regs->cr_iip == bundle_addr + 0x10) { | |
544 | - regs->cr_iip = resume_addr + 0x10; | |
545 | - } | |
546 | - } else { | |
547 | - if (regs->cr_iip == bundle_addr) { | |
548 | - regs->cr_iip = resume_addr; | |
549 | - } | |
544 | + if (regs->cr_iip == bundle_addr + 0x10) { | |
545 | + regs->cr_iip = resume_addr + 0x10; | |
546 | + } | |
547 | + } else { | |
548 | + if (regs->cr_iip == bundle_addr) { | |
549 | + regs->cr_iip = resume_addr; | |
550 | + } | |
550 | 551 | } |
551 | 552 | |
552 | 553 | turn_ss_off: |
553 | - /* Turn off Single Step bit */ | |
554 | - ia64_psr(regs)->ss = 0; | |
554 | + /* Turn off Single Step bit */ | |
555 | + ia64_psr(regs)->ss = 0; | |
555 | 556 | } |
556 | 557 | |
557 | 558 | static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) |
... | ... | @@ -587,7 +588,7 @@ |
587 | 588 | |
588 | 589 | /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ |
589 | 590 | if (slot == 1 && bundle_encoding[template][1] == L) |
590 | - slot++; | |
591 | + slot++; | |
591 | 592 | |
592 | 593 | /* Get Kprobe probe instruction at given slot*/ |
593 | 594 | get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode); |
... | ... | @@ -627,7 +628,7 @@ |
627 | 628 | if (p) { |
628 | 629 | if ((kcb->kprobe_status == KPROBE_HIT_SS) && |
629 | 630 | (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) { |
630 | - ia64_psr(regs)->ss = 0; | |
631 | + ia64_psr(regs)->ss = 0; | |
631 | 632 | goto no_kprobe; |
632 | 633 | } |
633 | 634 | /* We have reentered the pre_kprobe_handler(), since |
... | ... | @@ -887,7 +888,7 @@ |
887 | 888 | * fix the return address to our jprobe_inst_return() function |
888 | 889 | * in the jprobes.S file |
889 | 890 | */ |
890 | - regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip; | |
891 | + regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip; | |
891 | 892 | |
892 | 893 | return 1; |
893 | 894 | } |
arch/powerpc/kernel/kprobes.c
... | ... | @@ -259,14 +259,14 @@ |
259 | 259 | */ |
260 | 260 | int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) |
261 | 261 | { |
262 | - struct kretprobe_instance *ri = NULL; | |
263 | - struct hlist_head *head; | |
264 | - struct hlist_node *node, *tmp; | |
262 | + struct kretprobe_instance *ri = NULL; | |
263 | + struct hlist_head *head; | |
264 | + struct hlist_node *node, *tmp; | |
265 | 265 | unsigned long flags, orig_ret_address = 0; |
266 | 266 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; |
267 | 267 | |
268 | 268 | spin_lock_irqsave(&kretprobe_lock, flags); |
269 | - head = kretprobe_inst_table_head(current); | |
269 | + head = kretprobe_inst_table_head(current); | |
270 | 270 | |
271 | 271 | /* |
272 | 272 | * It is possible to have multiple instances associated with a given |
273 | 273 | |
274 | 274 | |
... | ... | @@ -277,14 +277,14 @@ |
277 | 277 | * We can handle this because: |
278 | 278 | * - instances are always inserted at the head of the list |
279 | 279 | * - when multiple return probes are registered for the same |
280 | - * function, the first instance's ret_addr will point to the | |
280 | + * function, the first instance's ret_addr will point to the | |
281 | 281 | * real return address, and all the rest will point to |
282 | 282 | * kretprobe_trampoline |
283 | 283 | */ |
284 | 284 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { |
285 | - if (ri->task != current) | |
285 | + if (ri->task != current) | |
286 | 286 | /* another task is sharing our hash bucket */ |
287 | - continue; | |
287 | + continue; | |
288 | 288 | |
289 | 289 | if (ri->rp && ri->rp->handler) |
290 | 290 | ri->rp->handler(ri, regs); |
... | ... | @@ -308,12 +308,12 @@ |
308 | 308 | spin_unlock_irqrestore(&kretprobe_lock, flags); |
309 | 309 | preempt_enable_no_resched(); |
310 | 310 | |
311 | - /* | |
312 | - * By returning a non-zero value, we are telling | |
313 | - * kprobe_handler() that we don't want the post_handler | |
314 | - * to run (and have re-enabled preemption) | |
315 | - */ | |
316 | - return 1; | |
311 | + /* | |
312 | + * By returning a non-zero value, we are telling | |
313 | + * kprobe_handler() that we don't want the post_handler | |
314 | + * to run (and have re-enabled preemption) | |
315 | + */ | |
316 | + return 1; | |
317 | 317 | } |
318 | 318 | |
319 | 319 | /* |
arch/x86_64/kernel/kprobes.c
... | ... | @@ -270,20 +270,19 @@ |
270 | 270 | struct pt_regs *regs) |
271 | 271 | { |
272 | 272 | unsigned long *sara = (unsigned long *)regs->rsp; |
273 | - struct kretprobe_instance *ri; | |
273 | + struct kretprobe_instance *ri; | |
274 | 274 | |
275 | - if ((ri = get_free_rp_inst(rp)) != NULL) { | |
276 | - ri->rp = rp; | |
277 | - ri->task = current; | |
275 | + if ((ri = get_free_rp_inst(rp)) != NULL) { | |
276 | + ri->rp = rp; | |
277 | + ri->task = current; | |
278 | 278 | ri->ret_addr = (kprobe_opcode_t *) *sara; |
279 | 279 | |
280 | 280 | /* Replace the return addr with trampoline addr */ |
281 | 281 | *sara = (unsigned long) &kretprobe_trampoline; |
282 | - | |
283 | - add_rp_inst(ri); | |
284 | - } else { | |
285 | - rp->nmissed++; | |
286 | - } | |
282 | + add_rp_inst(ri); | |
283 | + } else { | |
284 | + rp->nmissed++; | |
285 | + } | |
287 | 286 | } |
288 | 287 | |
289 | 288 | int __kprobes kprobe_handler(struct pt_regs *regs) |
290 | 289 | |
... | ... | @@ -405,14 +404,14 @@ |
405 | 404 | */ |
406 | 405 | int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) |
407 | 406 | { |
408 | - struct kretprobe_instance *ri = NULL; | |
409 | - struct hlist_head *head; | |
410 | - struct hlist_node *node, *tmp; | |
407 | + struct kretprobe_instance *ri = NULL; | |
408 | + struct hlist_head *head; | |
409 | + struct hlist_node *node, *tmp; | |
411 | 410 | unsigned long flags, orig_ret_address = 0; |
412 | 411 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; |
413 | 412 | |
414 | 413 | spin_lock_irqsave(&kretprobe_lock, flags); |
415 | - head = kretprobe_inst_table_head(current); | |
414 | + head = kretprobe_inst_table_head(current); | |
416 | 415 | |
417 | 416 | /* |
418 | 417 | * It is possible to have multiple instances associated with a given |
419 | 418 | |
420 | 419 | |
... | ... | @@ -423,14 +422,14 @@ |
423 | 422 | * We can handle this because: |
424 | 423 | * - instances are always inserted at the head of the list |
425 | 424 | * - when multiple return probes are registered for the same |
426 | - * function, the first instance's ret_addr will point to the | |
425 | + * function, the first instance's ret_addr will point to the | |
427 | 426 | * real return address, and all the rest will point to |
428 | 427 | * kretprobe_trampoline |
429 | 428 | */ |
430 | 429 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { |
431 | - if (ri->task != current) | |
430 | + if (ri->task != current) | |
432 | 431 | /* another task is sharing our hash bucket */ |
433 | - continue; | |
432 | + continue; | |
434 | 433 | |
435 | 434 | if (ri->rp && ri->rp->handler) |
436 | 435 | ri->rp->handler(ri, regs); |
437 | 436 | |
... | ... | @@ -454,12 +453,12 @@ |
454 | 453 | spin_unlock_irqrestore(&kretprobe_lock, flags); |
455 | 454 | preempt_enable_no_resched(); |
456 | 455 | |
457 | - /* | |
458 | - * By returning a non-zero value, we are telling | |
459 | - * kprobe_handler() that we don't want the post_handler | |
456 | + /* | |
457 | + * By returning a non-zero value, we are telling | |
458 | + * kprobe_handler() that we don't want the post_handler | |
460 | 459 | * to run (and have re-enabled preemption) |
461 | - */ | |
462 | - return 1; | |
460 | + */ | |
461 | + return 1; | |
463 | 462 | } |
464 | 463 | |
465 | 464 | /* |
kernel/kprobes.c
... | ... | @@ -347,17 +347,17 @@ |
347 | 347 | */ |
348 | 348 | void __kprobes kprobe_flush_task(struct task_struct *tk) |
349 | 349 | { |
350 | - struct kretprobe_instance *ri; | |
351 | - struct hlist_head *head; | |
350 | + struct kretprobe_instance *ri; | |
351 | + struct hlist_head *head; | |
352 | 352 | struct hlist_node *node, *tmp; |
353 | 353 | unsigned long flags = 0; |
354 | 354 | |
355 | 355 | spin_lock_irqsave(&kretprobe_lock, flags); |
356 | - head = kretprobe_inst_table_head(tk); | |
357 | - hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | |
358 | - if (ri->task == tk) | |
359 | - recycle_rp_inst(ri); | |
360 | - } | |
356 | + head = kretprobe_inst_table_head(tk); | |
357 | + hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | |
358 | + if (ri->task == tk) | |
359 | + recycle_rp_inst(ri); | |
360 | + } | |
361 | 361 | spin_unlock_irqrestore(&kretprobe_lock, flags); |
362 | 362 | } |
363 | 363 | |
... | ... | @@ -514,7 +514,7 @@ |
514 | 514 | (ARCH_INACTIVE_KPROBE_COUNT + 1)) |
515 | 515 | register_page_fault_notifier(&kprobe_page_fault_nb); |
516 | 516 | |
517 | - arch_arm_kprobe(p); | |
517 | + arch_arm_kprobe(p); | |
518 | 518 | |
519 | 519 | out: |
520 | 520 | mutex_unlock(&kprobe_mutex); |