Commit b21d55e98ac2bbcbbeec9a8cb091f717fd95b072

Authored by Rabin Vincent
Committed by Russell King
1 parent d82227cf8f

ARM: 7332/1: extract out code patch function from kprobes

Extract out the code patching code from kprobes so that it can be used
from the jump label code.  Additionally, the separated code:

 - Uses the IS_ENABLED() macros instead of the #ifdefs for THUMB2
   support

 - Unifies the two separate functions in kprobes, providing one function
   that uses stop_machine() internally, and one that can be called from
   stop_machine() directly

 - Patches the text on all CPUs only on processors requiring software
   broadcasting of cache operations

Acked-by: Jon Medhurst <tixy@yxit.co.uk>
Tested-by: Jon Medhurst <tixy@yxit.co.uk>
Signed-off-by: Rabin Vincent <rabin@rab.in>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Showing 4 changed files with 108 additions and 63 deletions (side-by-side diff)

arch/arm/kernel/Makefile
... ... @@ -8,6 +8,7 @@
8 8 ifdef CONFIG_FUNCTION_TRACER
9 9 CFLAGS_REMOVE_ftrace.o = -pg
10 10 CFLAGS_REMOVE_insn.o = -pg
  11 +CFLAGS_REMOVE_patch.o = -pg
11 12 endif
12 13  
13 14 CFLAGS_REMOVE_return_address.o = -pg
... ... @@ -38,7 +39,7 @@
38 39 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
39 40 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
40 41 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
41   -obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o
  42 +obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o patch.o
42 43 ifdef CONFIG_THUMB2_KERNEL
43 44 obj-$(CONFIG_KPROBES) += kprobes-thumb.o
44 45 else
arch/arm/kernel/kprobes.c
... ... @@ -29,6 +29,7 @@
29 29 #include <asm/cacheflush.h>
30 30  
31 31 #include "kprobes.h"
  32 +#include "patch.h"
32 33  
33 34 #define MIN_STACK_SIZE(addr) \
34 35 min((unsigned long)MAX_STACK_SIZE, \
35 36  
36 37  
37 38  
38 39  
39 40  
40 41  
... ... @@ -103,58 +104,34 @@
103 104 return 0;
104 105 }
105 106  
106   -#ifdef CONFIG_THUMB2_KERNEL
107   -
108   -/*
109   - * For a 32-bit Thumb breakpoint spanning two memory words we need to take
110   - * special precautions to insert the breakpoint atomically, especially on SMP
111   - * systems. This is achieved by calling this arming function using stop_machine.
112   - */
113   -static int __kprobes set_t32_breakpoint(void *addr)
114   -{
115   - ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
116   - ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
117   - flush_insns(addr, 2*sizeof(u16));
118   - return 0;
119   -}
120   -
121 107 void __kprobes arch_arm_kprobe(struct kprobe *p)
122 108 {
123   - uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */
  109 + unsigned int brkp;
  110 + void *addr;
124 111  
125   - if (!is_wide_instruction(p->opcode)) {
126   - *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
127   - flush_insns(addr, sizeof(u16));
128   - } else if (addr & 2) {
129   - /* A 32-bit instruction spanning two words needs special care */
130   - stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
  112 + if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
  113 + /* Remove any Thumb flag */
  114 + addr = (void *)((uintptr_t)p->addr & ~1);
  115 +
  116 + if (is_wide_instruction(p->opcode))
  117 + brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
  118 + else
  119 + brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
131 120 } else {
132   - /* Word aligned 32-bit instruction can be written atomically */
133   - u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
134   -#ifndef __ARMEB__ /* Swap halfwords for little-endian */
135   - bkp = (bkp >> 16) | (bkp << 16);
136   -#endif
137   - *(u32 *)addr = bkp;
138   - flush_insns(addr, sizeof(u32));
139   - }
140   -}
  121 + kprobe_opcode_t insn = p->opcode;
141 122  
142   -#else /* !CONFIG_THUMB2_KERNEL */
  123 + addr = p->addr;
  124 + brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
143 125  
144   -void __kprobes arch_arm_kprobe(struct kprobe *p)
145   -{
146   - kprobe_opcode_t insn = p->opcode;
147   - kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
148   - if (insn >= 0xe0000000)
149   - brkp |= 0xe0000000; /* Unconditional instruction */
150   - else
151   - brkp |= insn & 0xf0000000; /* Copy condition from insn */
152   - *p->addr = brkp;
153   - flush_insns(p->addr, sizeof(p->addr[0]));
  126 + if (insn >= 0xe0000000)
  127 + brkp |= 0xe0000000; /* Unconditional instruction */
  128 + else
  129 + brkp |= insn & 0xf0000000; /* Copy condition from insn */
  130 + }
  131 +
  132 + patch_text(addr, brkp);
154 133 }
155 134  
156   -#endif /* !CONFIG_THUMB2_KERNEL */
157   -
158 135 /*
159 136 * The actual disarming is done here on each CPU and synchronized using
160 137 * stop_machine. This synchronization is necessary on SMP to avoid removing
161 138  
162 139  
... ... @@ -166,25 +143,10 @@
166 143 int __kprobes __arch_disarm_kprobe(void *p)
167 144 {
168 145 struct kprobe *kp = p;
169   -#ifdef CONFIG_THUMB2_KERNEL
170   - u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
171   - kprobe_opcode_t insn = kp->opcode;
172   - unsigned int len;
  146 + void *addr = (void *)((uintptr_t)kp->addr & ~1);
173 147  
174   - if (is_wide_instruction(insn)) {
175   - ((u16 *)addr)[0] = insn>>16;
176   - ((u16 *)addr)[1] = insn;
177   - len = 2*sizeof(u16);
178   - } else {
179   - ((u16 *)addr)[0] = insn;
180   - len = sizeof(u16);
181   - }
182   - flush_insns(addr, len);
  148 + __patch_text(addr, kp->opcode);
183 149  
184   -#else /* !CONFIG_THUMB2_KERNEL */
185   - *kp->addr = kp->opcode;
186   - flush_insns(kp->addr, sizeof(kp->addr[0]));
187   -#endif
188 150 return 0;
189 151 }
190 152  
arch/arm/kernel/patch.c
  1 +#include <linux/kernel.h>
  2 +#include <linux/kprobes.h>
  3 +#include <linux/stop_machine.h>
  4 +
  5 +#include <asm/cacheflush.h>
  6 +#include <asm/smp_plat.h>
  7 +#include <asm/opcodes.h>
  8 +
  9 +#include "patch.h"
  10 +
/* Argument bundle passed to patch_text_stop_machine() via stop_machine(). */
struct patch {
	void *addr;		/* kernel text address to patch */
	unsigned int insn;	/* new instruction, in CPU (canonical) order */
};
  15 +
/*
 * Write one instruction into kernel text and flush the icache range so
 * the new instruction becomes visible for execution.
 *
 * NOTE(review): the word-straddling Thumb-2 case below is two separate
 * halfword stores, i.e. not atomic.  Callers must prevent other CPUs
 * from executing the patched address concurrently (patch_text() uses
 * stop_machine() for exactly this case); call this directly only from
 * such a context.
 */
void __kprobes __patch_text(void *addr, unsigned int insn)
{
	bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
	int size;

	if (thumb2 && __opcode_is_thumb16(insn)) {
		/* 16-bit Thumb: single halfword store. */
		*(u16 *)addr = __opcode_to_mem_thumb16(insn);
		size = sizeof(u16);
	} else if (thumb2 && ((uintptr_t)addr & 2)) {
		/*
		 * 32-bit Thumb instruction spanning a word boundary:
		 * store the two halfwords individually (non-atomic,
		 * see note above).
		 */
		u16 first = __opcode_thumb32_first(insn);
		u16 second = __opcode_thumb32_second(insn);
		u16 *addrh = addr;

		addrh[0] = __opcode_to_mem_thumb16(first);
		addrh[1] = __opcode_to_mem_thumb16(second);

		size = sizeof(u32);
	} else {
		/*
		 * ARM, or word-aligned 32-bit Thumb: convert to memory
		 * (endian) order and write atomically as one word.
		 */
		if (thumb2)
			insn = __opcode_to_mem_thumb32(insn);
		else
			insn = __opcode_to_mem_arm(insn);

		*(u32 *)addr = insn;
		size = sizeof(u32);
	}

	flush_icache_range((uintptr_t)(addr),
			   (uintptr_t)(addr) + size);
}
  46 +
  47 +static int __kprobes patch_text_stop_machine(void *data)
  48 +{
  49 + struct patch *patch = data;
  50 +
  51 + __patch_text(patch->addr, patch->insn);
  52 +
  53 + return 0;
  54 +}
  55 +
/*
 * Patch one instruction in kernel text, synchronising with other CPUs
 * as needed.  stop_machine() is used when either:
 *
 *  - cache maintenance operations must be broadcast to other CPUs in
 *    software (cache_ops_need_broadcast()) -- then the patch runs on
 *    every online CPU so each performs its own cache maintenance; or
 *
 *  - a 32-bit Thumb instruction straddles a word boundary, where the
 *    two halfword stores in __patch_text() are not atomic, so all
 *    other CPUs must be kept from executing meanwhile.
 *
 * Otherwise the text is patched directly on the calling CPU.
 */
void __kprobes patch_text(void *addr, unsigned int insn)
{
	struct patch patch = {
		.addr = addr,
		.insn = insn,
	};

	if (cache_ops_need_broadcast()) {
		/* Run the patch callback on all online CPUs. */
		stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
	} else {
		/* Non-atomic only for a word-straddling Thumb-2 insn. */
		bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
				      && __opcode_is_thumb32(insn)
				      && ((uintptr_t)addr & 2);

		if (straddles_word)
			stop_machine(patch_text_stop_machine, &patch, NULL);
		else
			__patch_text(addr, insn);
	}
}
arch/arm/kernel/patch.h
#ifndef _ARM_KERNEL_PATCH_H
#define _ARM_KERNEL_PATCH_H

/*
 * patch_text():   patch one instruction in kernel text; internally uses
 *                 stop_machine() when required (cache-op broadcast, or a
 *                 non-atomic word-straddling Thumb-2 write).
 * __patch_text(): raw variant with no synchronisation; the caller must
 *                 ensure no other CPU can execute the patched code
 *                 concurrently (e.g. call it under stop_machine()).
 */
void patch_text(void *addr, unsigned int insn);
void __patch_text(void *addr, unsigned int insn);

#endif