Commit f070a4dba984975f6353c6f1d966da1a6ce4b86f

Authored by Ingo Molnar

Merge branch 'perf/urgent' into perf/core

Merge in two hw_breakpoint fixes, before applying another 5.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 2 changed files

arch/x86/kernel/kprobes/core.c

@@ -365,10 +365,14 @@
         return insn.length;
 }
 
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+        int ret;
+
         /* Copy an instruction with recovering if other optprobe modifies it.*/
-        __copy_instruction(p->ainsn.insn, p->addr);
+        ret = __copy_instruction(p->ainsn.insn, p->addr);
+        if (!ret)
+                return -EINVAL;
 
         /*
          * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@
 
         /* Also, displacement change doesn't affect the first byte */
         p->opcode = p->ainsn.insn[0];
+
+        return 0;
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@
         p->ainsn.insn = get_insn_slot();
         if (!p->ainsn.insn)
                 return -ENOMEM;
-        arch_copy_kprobe(p);
-        return 0;
+
+        return arch_copy_kprobe(p);
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
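
For context on the kprobes hunks above: __copy_instruction() returns the copied instruction length, or 0 when the probed instruction cannot be recovered, and the patch makes arch_copy_kprobe() report that case as -EINVAL through arch_prepare_kprobe() instead of silently registering a half-prepared probe. The small user-space program below is only a sketch of that error-propagation pattern, not kernel code; copy_insn() and prepare_probe() are hypothetical stand-ins for the kernel functions.

/*
 * Toy model of the error propagation added above (not kernel code).
 * copy_insn() stands in for __copy_instruction(), which returns the
 * copied length or 0 on failure; prepare_probe() stands in for
 * arch_prepare_kprobe(), which now forwards that failure as -EINVAL.
 */
#include <errno.h>
#include <stdio.h>

static int copy_insn(unsigned char *dst, const unsigned char *src)
{
        if (!src)               /* stand-in for "instruction could not be recovered" */
                return 0;       /* 0 length means failure, like __copy_instruction() */
        dst[0] = *src;
        return 1;               /* pretend we copied a 1-byte instruction */
}

static int prepare_probe(unsigned char *slot, const unsigned char *addr)
{
        /* After the patch: a zero-length copy becomes -EINVAL ... */
        if (!copy_insn(slot, addr))
                return -EINVAL;
        return 0;               /* ... instead of being ignored and returning 0. */
}

int main(void)
{
        unsigned char slot[16], insn = 0xcc;

        printf("good copy -> %d\n", prepare_probe(slot, &insn)); /* prints 0 */
        printf("bad copy  -> %d\n", prepare_probe(slot, NULL));  /* prints -22 (-EINVAL) */
        return 0;
}

The point of the kernel change is the same: a probe whose instruction cannot be copied now fails registration up front rather than being armed with a bogus copy.
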
kernel/events/hw_breakpoint.c

@@ -120,7 +120,7 @@
         list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
                 if (iter->hw.bp_target == tsk &&
                     find_slot_idx(iter) == type &&
-                    cpu == iter->cpu)
+                    (iter->cpu < 0 || cpu == iter->cpu))
                         count += hw_breakpoint_weight(iter);
         }
 
@@ -149,7 +149,7 @@
                 return;
         }
 
-        for_each_online_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 unsigned int nr;
 
                 nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@
         if (cpu >= 0) {
                 toggle_bp_task_slot(bp, cpu, enable, type, weight);
         } else {
-                for_each_online_cpu(cpu)
+                for_each_possible_cpu(cpu)
                         toggle_bp_task_slot(bp, cpu, enable, type, weight);
         }
 
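
For the hw_breakpoint hunks: a per-task breakpoint created without a CPU binding has cpu == -1 and occupies a slot on whichever CPU the task runs, so it must be counted for every CPU that is asked about, and the bookkeeping loops must cover all possible CPUs rather than only the currently online ones. The program below is a stand-alone toy model of that counting rule, not kernel code; struct bp, count_task_bps() and NR_POSSIBLE_CPUS are hypothetical names.

/*
 * Toy model of the per-task slot accounting fixed above (not kernel code).
 * A breakpoint with cpu == -1 is pinned for its task on every CPU, so the
 * fixed test is (bp->cpu < 0 || bp->cpu == cpu), and the walk covers all
 * possible CPUs, mirroring the for_each_possible_cpu() change.
 */
#include <stdio.h>

#define NR_POSSIBLE_CPUS 4      /* assume 4 possible CPUs for the example */

struct bp {
        int cpu;                /* -1 means "this task on any CPU" */
};

static int count_task_bps(int cpu, const struct bp *bps, int nr)
{
        int i, count = 0;

        for (i = 0; i < nr; i++)
                if (bps[i].cpu < 0 || bps[i].cpu == cpu)
                        count++;
        return count;
}

int main(void)
{
        struct bp bps[] = { { .cpu = -1 }, { .cpu = 2 } };
        int cpu;

        /* Walk every possible CPU, as the fixed loops do. */
        for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
                printf("cpu%d: %d pinned slot(s)\n",
                       cpu, count_task_bps(cpu, bps, 2));
        return 0;
}

The kernel fix closes the same two gaps visible in the diff: task-wide (cpu == -1) breakpoints were not counted against individual CPUs, and CPUs outside the online mask were skipped by the accounting and toggle loops.
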