Blame view

kernel/livepatch/core.c 30.4 KB
1ccea77e2   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-or-later
b700e7f03   Seth Jennings   livepatch: kernel...
2
3
4
5
6
  /*
   * core.c - Kernel Live Patching Core
   *
   * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
   * Copyright (C) 2014 SUSE
b700e7f03   Seth Jennings   livepatch: kernel...
7
8
9
10
11
12
13
14
   */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>
b700e7f03   Seth Jennings   livepatch: kernel...
15
16
17
  #include <linux/list.h>
  #include <linux/kallsyms.h>
  #include <linux/livepatch.h>
425595a7f   Jessica Yu   livepatch: reuse ...
18
19
  #include <linux/elf.h>
  #include <linux/moduleloader.h>
3ec24776b   Josh Poimboeuf   livepatch: allow ...
20
  #include <linux/completion.h>
9f255b632   Josh Poimboeuf   module: Fix livep...
21
  #include <linux/memory.h>
b56b36ee6   Josh Poimboeuf   livepatch: Cleanu...
22
  #include <asm/cacheflush.h>
10517429b   Jiri Kosina   livepatch: make k...
23
  #include "core.h"
c349cdcab   Josh Poimboeuf   livepatch: move p...
24
  #include "patch.h"
92c9abf5e   Petr Mladek   livepatch: Allow ...
25
  #include "state.h"
d83a7cb37   Josh Poimboeuf   livepatch: change...
26
  #include "transition.h"
b700e7f03   Seth Jennings   livepatch: kernel...
27

3c33f5b99   Josh Poimboeuf   livepatch: suppor...
28
  /*
d83a7cb37   Josh Poimboeuf   livepatch: change...
29
30
31
32
33
34
   * klp_mutex is a coarse lock which serializes access to klp data.  All
   * accesses to klp-related variables and structures must have mutex protection,
   * except within the following functions which carefully avoid the need for it:
   *
   * - klp_ftrace_handler()
   * - klp_update_patch_state()
3c33f5b99   Josh Poimboeuf   livepatch: suppor...
35
   */
d83a7cb37   Josh Poimboeuf   livepatch: change...
36
  DEFINE_MUTEX(klp_mutex);
3c33f5b99   Josh Poimboeuf   livepatch: suppor...
37

958ef1e39   Petr Mladek   livepatch: Simpli...
38
39
40
41
42
  /*
   * Actively used patches: enabled or in transition. Note that replaced
   * or disabled patches are not listed even though the related kernel
   * module still can be loaded.
   */
68007289b   Petr Mladek   livepatch: Don't ...
43
  LIST_HEAD(klp_patches);
b700e7f03   Seth Jennings   livepatch: kernel...
44
45
46
47
48
49
50
  
  static struct kobject *klp_root_kobj;
  
  static bool klp_is_module(struct klp_object *obj)
  {
  	return obj->name;
  }
b700e7f03   Seth Jennings   livepatch: kernel...
51
52
53
  /* sets obj->mod if object is not vmlinux and module is found */
  static void klp_find_object_module(struct klp_object *obj)
  {
8cb2c2dc4   Petr Mladek   livepatch: Fix su...
54
  	struct module *mod;
b700e7f03   Seth Jennings   livepatch: kernel...
55
56
57
58
59
  	if (!klp_is_module(obj))
  		return;
  
  	mutex_lock(&module_mutex);
  	/*
8cb2c2dc4   Petr Mladek   livepatch: Fix su...
60
61
  	 * We do not want to block removal of patched modules and therefore
  	 * we do not take a reference here. The patches are removed by
7e545d6ec   Jessica Yu   livepatch/module:...
62
  	 * klp_module_going() instead.
8cb2c2dc4   Petr Mladek   livepatch: Fix su...
63
64
65
  	 */
  	mod = find_module(obj->name);
  	/*
7e545d6ec   Jessica Yu   livepatch/module:...
66
67
  	 * Do not mess work of klp_module_coming() and klp_module_going().
  	 * Note that the patch might still be needed before klp_module_going()
8cb2c2dc4   Petr Mladek   livepatch: Fix su...
68
69
70
  	 * is called. Module functions can be called even in the GOING state
  	 * until mod->exit() finishes. This is especially important for
  	 * patches that modify semantic of the functions.
b700e7f03   Seth Jennings   livepatch: kernel...
71
  	 */
8cb2c2dc4   Petr Mladek   livepatch: Fix su...
72
73
  	if (mod && mod->klp_alive)
  		obj->mod = mod;
b700e7f03   Seth Jennings   livepatch: kernel...
74
75
  	mutex_unlock(&module_mutex);
  }
b700e7f03   Seth Jennings   livepatch: kernel...
76
77
  static bool klp_initialized(void)
  {
e76ff06a9   Nicholas Mc Guire   livepatch: match ...
78
  	return !!klp_root_kobj;
b700e7f03   Seth Jennings   livepatch: kernel...
79
  }
e1452b607   Jason Baron   livepatch: Add at...
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
  static struct klp_func *klp_find_func(struct klp_object *obj,
  				      struct klp_func *old_func)
  {
  	struct klp_func *func;
  
  	klp_for_each_func(obj, func) {
  		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
  		    (old_func->old_sympos == func->old_sympos)) {
  			return func;
  		}
  	}
  
  	return NULL;
  }
  
  static struct klp_object *klp_find_object(struct klp_patch *patch,
  					  struct klp_object *old_obj)
  {
  	struct klp_object *obj;
  
  	klp_for_each_object(patch, obj) {
  		if (klp_is_module(old_obj)) {
  			if (klp_is_module(obj) &&
  			    strcmp(old_obj->name, obj->name) == 0) {
  				return obj;
  			}
  		} else if (!klp_is_module(obj)) {
  			return obj;
  		}
  	}
  
  	return NULL;
  }
b700e7f03   Seth Jennings   livepatch: kernel...
113
114
115
116
  struct klp_find_arg {
  	const char *objname;
  	const char *name;
  	unsigned long addr;
b700e7f03   Seth Jennings   livepatch: kernel...
117
  	unsigned long count;
b2b018ef4   Chris J Arges   livepatch: add ol...
118
  	unsigned long pos;
b700e7f03   Seth Jennings   livepatch: kernel...
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
  };
  
  static int klp_find_callback(void *data, const char *name,
  			     struct module *mod, unsigned long addr)
  {
  	struct klp_find_arg *args = data;
  
  	if ((mod && !args->objname) || (!mod && args->objname))
  		return 0;
  
  	if (strcmp(args->name, name))
  		return 0;
  
  	if (args->objname && strcmp(args->objname, mod->name))
  		return 0;
b700e7f03   Seth Jennings   livepatch: kernel...
134
135
  	args->addr = addr;
  	args->count++;
b2b018ef4   Chris J Arges   livepatch: add ol...
136
137
138
139
140
141
142
  	/*
  	 * Finish the search when the symbol is found for the desired position
  	 * or the position is not defined for a non-unique symbol.
  	 */
  	if ((args->pos && (args->count == args->pos)) ||
  	    (!args->pos && (args->count > 1)))
  		return 1;
b700e7f03   Seth Jennings   livepatch: kernel...
143
144
145
146
  	return 0;
  }
  
  static int klp_find_object_symbol(const char *objname, const char *name,
b2b018ef4   Chris J Arges   livepatch: add ol...
147
  				  unsigned long sympos, unsigned long *addr)
b700e7f03   Seth Jennings   livepatch: kernel...
148
149
150
151
152
  {
  	struct klp_find_arg args = {
  		.objname = objname,
  		.name = name,
  		.addr = 0,
b2b018ef4   Chris J Arges   livepatch: add ol...
153
154
  		.count = 0,
  		.pos = sympos,
b700e7f03   Seth Jennings   livepatch: kernel...
155
  	};
9a1bd63cd   Miroslav Benes   livepatch: add mo...
156
  	mutex_lock(&module_mutex);
72f04b50d   Zhou Chengming   livepatch: Reduce...
157
158
159
160
  	if (objname)
  		module_kallsyms_on_each_symbol(klp_find_callback, &args);
  	else
  		kallsyms_on_each_symbol(klp_find_callback, &args);
9a1bd63cd   Miroslav Benes   livepatch: add mo...
161
  	mutex_unlock(&module_mutex);
b700e7f03   Seth Jennings   livepatch: kernel...
162

b2b018ef4   Chris J Arges   livepatch: add ol...
163
164
165
166
167
  	/*
  	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
  	 * otherwise ensure the symbol position count matches sympos.
  	 */
  	if (args.addr == 0)
b700e7f03   Seth Jennings   livepatch: kernel...
168
169
  		pr_err("symbol '%s' not found in symbol table
  ", name);
b2b018ef4   Chris J Arges   livepatch: add ol...
170
  	else if (args.count > 1 && sympos == 0) {
f995b5f72   Petr Mladek   livepatch: Fix th...
171
172
173
  		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'
  ",
  		       name, objname);
b2b018ef4   Chris J Arges   livepatch: add ol...
174
175
176
177
178
  	} else if (sympos != args.count && sympos > 0) {
  		pr_err("symbol position %lu for symbol '%s' in object '%s' not found
  ",
  		       sympos, name, objname ? objname : "vmlinux");
  	} else {
b700e7f03   Seth Jennings   livepatch: kernel...
179
180
181
182
183
184
185
  		*addr = args.addr;
  		return 0;
  	}
  
  	*addr = 0;
  	return -EINVAL;
  }
7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
186
  static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
ca376a937   Josh Poimboeuf   livepatch: Preven...
187
188
  			       unsigned int symndx, Elf_Shdr *relasec,
  			       const char *sec_objname)
b700e7f03   Seth Jennings   livepatch: kernel...
189
  {
ca376a937   Josh Poimboeuf   livepatch: Preven...
190
191
192
  	int i, cnt, ret;
  	char sym_objname[MODULE_NAME_LEN];
  	char sym_name[KSYM_NAME_LEN];
425595a7f   Jessica Yu   livepatch: reuse ...
193
194
195
  	Elf_Rela *relas;
  	Elf_Sym *sym;
  	unsigned long sympos, addr;
ca376a937   Josh Poimboeuf   livepatch: Preven...
196
197
  	bool sym_vmlinux;
  	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
b700e7f03   Seth Jennings   livepatch: kernel...
198

b2b018ef4   Chris J Arges   livepatch: add ol...
199
  	/*
ca376a937   Josh Poimboeuf   livepatch: Preven...
200
  	 * Since the field widths for sym_objname and sym_name in the sscanf()
425595a7f   Jessica Yu   livepatch: reuse ...
201
202
203
204
205
206
207
  	 * call are hard-coded and correspond to MODULE_NAME_LEN and
  	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
  	 * and KSYM_NAME_LEN have the values we expect them to have.
  	 *
  	 * Because the value of MODULE_NAME_LEN can differ among architectures,
  	 * we use the smallest/strictest upper bound possible (56, based on
  	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
b2b018ef4   Chris J Arges   livepatch: add ol...
208
  	 */
425595a7f   Jessica Yu   livepatch: reuse ...
209
210
211
212
213
  	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
  
  	relas = (Elf_Rela *) relasec->sh_addr;
  	/* For each rela in this klp relocation section */
  	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
214
  		sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
425595a7f   Jessica Yu   livepatch: reuse ...
215
  		if (sym->st_shndx != SHN_LIVEPATCH) {
77f8f39a2   Josh Poimboeuf   livepatch: add mi...
216
217
  			pr_err("symbol %s is not marked as a livepatch symbol
  ",
425595a7f   Jessica Yu   livepatch: reuse ...
218
219
220
  			       strtab + sym->st_name);
  			return -EINVAL;
  		}
ca376a937   Josh Poimboeuf   livepatch: Preven...
221
  		/* Format: .klp.sym.sym_objname.sym_name,sympos */
425595a7f   Jessica Yu   livepatch: reuse ...
222
223
  		cnt = sscanf(strtab + sym->st_name,
  			     ".klp.sym.%55[^.].%127[^,],%lu",
ca376a937   Josh Poimboeuf   livepatch: Preven...
224
  			     sym_objname, sym_name, &sympos);
425595a7f   Jessica Yu   livepatch: reuse ...
225
  		if (cnt != 3) {
77f8f39a2   Josh Poimboeuf   livepatch: add mi...
226
227
  			pr_err("symbol %s has an incorrectly formatted name
  ",
425595a7f   Jessica Yu   livepatch: reuse ...
228
229
230
  			       strtab + sym->st_name);
  			return -EINVAL;
  		}
ca376a937   Josh Poimboeuf   livepatch: Preven...
231
232
233
234
235
236
237
238
239
240
241
242
243
  		sym_vmlinux = !strcmp(sym_objname, "vmlinux");
  
  		/*
  		 * Prevent module-specific KLP rela sections from referencing
  		 * vmlinux symbols.  This helps prevent ordering issues with
  		 * module special section initializations.  Presumably such
  		 * symbols are exported and normal relas can be used instead.
  		 */
  		if (!sec_vmlinux && sym_vmlinux) {
  			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
  			       sym_name);
  			return -EINVAL;
  		}
425595a7f   Jessica Yu   livepatch: reuse ...
244
  		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
ca376a937   Josh Poimboeuf   livepatch: Preven...
245
246
  		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
  					     sym_name, sympos, &addr);
425595a7f   Jessica Yu   livepatch: reuse ...
247
248
249
250
251
252
253
  		if (ret)
  			return ret;
  
  		sym->st_value = addr;
  	}
  
  	return 0;
b700e7f03   Seth Jennings   livepatch: kernel...
254
  }
7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
  /*
   * At a high-level, there are two types of klp relocation sections: those which
   * reference symbols which live in vmlinux; and those which reference symbols
   * which live in other modules.  This function is called for both types:
   *
   * 1) When a klp module itself loads, the module code calls this function to
   *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
   *    These relocations are written to the klp module text to allow the patched
   *    code/data to reference unexported vmlinux symbols.  They're written as
   *    early as possible to ensure that other module init code (.e.g.,
   *    jump_label_apply_nops) can access any unexported vmlinux symbols which
   *    might be referenced by the klp module's special sections.
   *
   * 2) When a to-be-patched module loads -- or is already loaded when a
   *    corresponding klp module loads -- klp code calls this function to write
   *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
   *    are written to the klp module text to allow the patched code/data to
   *    reference symbols which live in the to-be-patched module or one of its
   *    module dependencies.  Exported symbols are supported, in addition to
   *    unexported symbols, in order to enable late module patching, which allows
   *    the to-be-patched module to be loaded and patched sometime *after* the
   *    klp module is loaded.
   */
  int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
  			     const char *shstrtab, const char *strtab,
  			     unsigned int symndx, unsigned int secndx,
  			     const char *objname)
b700e7f03   Seth Jennings   livepatch: kernel...
282
  {
7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
283
  	int cnt, ret;
425595a7f   Jessica Yu   livepatch: reuse ...
284
  	char sec_objname[MODULE_NAME_LEN];
7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
285
  	Elf_Shdr *sec = sechdrs + secndx;
b700e7f03   Seth Jennings   livepatch: kernel...
286

7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
287
288
289
290
291
292
293
294
295
296
297
  	/*
  	 * Format: .klp.rela.sec_objname.section_name
  	 * See comment in klp_resolve_symbols() for an explanation
  	 * of the selected field width value.
  	 */
  	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
  		     sec_objname);
  	if (cnt != 1) {
  		pr_err("section %s has an incorrectly formatted name
  ",
  		       shstrtab + sec->sh_name);
b700e7f03   Seth Jennings   livepatch: kernel...
298
  		return -EINVAL;
7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
299
  	}
b700e7f03   Seth Jennings   livepatch: kernel...
300

7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
301
302
  	if (strcmp(objname ? objname : "vmlinux", sec_objname))
  		return 0;
064c89df6   Chris J Arges   livepatch: add sy...
303

ca376a937   Josh Poimboeuf   livepatch: Preven...
304
  	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
305
306
  	if (ret)
  		return ret;
b700e7f03   Seth Jennings   livepatch: kernel...
307

7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
308
  	return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
b700e7f03   Seth Jennings   livepatch: kernel...
309
  }
b700e7f03   Seth Jennings   livepatch: kernel...
310
311
312
313
314
315
  /*
   * Sysfs Interface
   *
   * /sys/kernel/livepatch
   * /sys/kernel/livepatch/<patch>
   * /sys/kernel/livepatch/<patch>/enabled
d83a7cb37   Josh Poimboeuf   livepatch: change...
316
   * /sys/kernel/livepatch/<patch>/transition
c99a2be79   Miroslav Benes   livepatch: force ...
317
   * /sys/kernel/livepatch/<patch>/force
b700e7f03   Seth Jennings   livepatch: kernel...
318
   * /sys/kernel/livepatch/<patch>/<object>
444f9e99a   Chris J Arges   livepatch: functi...
319
   * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
b700e7f03   Seth Jennings   livepatch: kernel...
320
   */
26c3e98e2   Petr Mladek   livepatch: Shuffl...
321
  static int __klp_disable_patch(struct klp_patch *patch);
b700e7f03   Seth Jennings   livepatch: kernel...
322
323
324
325
326
327
  
  static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
  			     const char *buf, size_t count)
  {
  	struct klp_patch *patch;
  	int ret;
68ae4b2b6   Josh Poimboeuf   livepatch: use ks...
328
  	bool enabled;
b700e7f03   Seth Jennings   livepatch: kernel...
329

68ae4b2b6   Josh Poimboeuf   livepatch: use ks...
330
  	ret = kstrtobool(buf, &enabled);
b700e7f03   Seth Jennings   livepatch: kernel...
331
  	if (ret)
68ae4b2b6   Josh Poimboeuf   livepatch: use ks...
332
  		return ret;
b700e7f03   Seth Jennings   livepatch: kernel...
333
334
335
336
  
  	patch = container_of(kobj, struct klp_patch, kobj);
  
  	mutex_lock(&klp_mutex);
68ae4b2b6   Josh Poimboeuf   livepatch: use ks...
337
  	if (patch->enabled == enabled) {
b700e7f03   Seth Jennings   livepatch: kernel...
338
339
  		/* already in requested state */
  		ret = -EINVAL;
958ef1e39   Petr Mladek   livepatch: Simpli...
340
  		goto out;
b700e7f03   Seth Jennings   livepatch: kernel...
341
  	}
958ef1e39   Petr Mladek   livepatch: Simpli...
342
343
344
345
346
347
348
349
  	/*
  	 * Allow to reverse a pending transition in both ways. It might be
  	 * necessary to complete the transition without forcing and breaking
  	 * the system integrity.
  	 *
  	 * Do not allow to re-enable a disabled patch.
  	 */
  	if (patch == klp_transition_patch)
d83a7cb37   Josh Poimboeuf   livepatch: change...
350
  		klp_reverse_transition();
958ef1e39   Petr Mladek   livepatch: Simpli...
351
  	else if (!enabled)
b700e7f03   Seth Jennings   livepatch: kernel...
352
  		ret = __klp_disable_patch(patch);
958ef1e39   Petr Mladek   livepatch: Simpli...
353
354
  	else
  		ret = -EINVAL;
b700e7f03   Seth Jennings   livepatch: kernel...
355

958ef1e39   Petr Mladek   livepatch: Simpli...
356
  out:
b700e7f03   Seth Jennings   livepatch: kernel...
357
  	mutex_unlock(&klp_mutex);
958ef1e39   Petr Mladek   livepatch: Simpli...
358
359
  	if (ret)
  		return ret;
b700e7f03   Seth Jennings   livepatch: kernel...
360
  	return count;
b700e7f03   Seth Jennings   livepatch: kernel...
361
362
363
364
365
366
367
368
  }
  
  static ssize_t enabled_show(struct kobject *kobj,
  			    struct kobj_attribute *attr, char *buf)
  {
  	struct klp_patch *patch;
  
  	patch = container_of(kobj, struct klp_patch, kobj);
0dade9f37   Josh Poimboeuf   livepatch: separa...
369
370
  	return snprintf(buf, PAGE_SIZE-1, "%d
  ", patch->enabled);
b700e7f03   Seth Jennings   livepatch: kernel...
371
  }
d83a7cb37   Josh Poimboeuf   livepatch: change...
372
373
374
375
376
377
378
379
380
  static ssize_t transition_show(struct kobject *kobj,
  			       struct kobj_attribute *attr, char *buf)
  {
  	struct klp_patch *patch;
  
  	patch = container_of(kobj, struct klp_patch, kobj);
  	return snprintf(buf, PAGE_SIZE-1, "%d
  ",
  			patch == klp_transition_patch);
b700e7f03   Seth Jennings   livepatch: kernel...
381
  }
c99a2be79   Miroslav Benes   livepatch: force ...
382
383
384
385
386
387
  static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
  			   const char *buf, size_t count)
  {
  	struct klp_patch *patch;
  	int ret;
  	bool val;
c99a2be79   Miroslav Benes   livepatch: force ...
388
389
390
  	ret = kstrtobool(buf, &val);
  	if (ret)
  		return ret;
8869016d3   Miroslav Benes   livepatch: add lo...
391
392
393
394
395
396
397
398
399
400
401
402
403
404
  	if (!val)
  		return count;
  
  	mutex_lock(&klp_mutex);
  
  	patch = container_of(kobj, struct klp_patch, kobj);
  	if (patch != klp_transition_patch) {
  		mutex_unlock(&klp_mutex);
  		return -EINVAL;
  	}
  
  	klp_force_transition();
  
  	mutex_unlock(&klp_mutex);
c99a2be79   Miroslav Benes   livepatch: force ...
405
406
407
  
  	return count;
  }
b700e7f03   Seth Jennings   livepatch: kernel...
408
  static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
d83a7cb37   Josh Poimboeuf   livepatch: change...
409
  static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
c99a2be79   Miroslav Benes   livepatch: force ...
410
  static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
b700e7f03   Seth Jennings   livepatch: kernel...
411
412
  static struct attribute *klp_patch_attrs[] = {
  	&enabled_kobj_attr.attr,
d83a7cb37   Josh Poimboeuf   livepatch: change...
413
  	&transition_kobj_attr.attr,
c99a2be79   Miroslav Benes   livepatch: force ...
414
  	&force_kobj_attr.attr,
b700e7f03   Seth Jennings   livepatch: kernel...
415
416
  	NULL
  };
70283454c   Kimberly Brown   livepatch: Replac...
417
  ATTRIBUTE_GROUPS(klp_patch);
b700e7f03   Seth Jennings   livepatch: kernel...
418

e1452b607   Jason Baron   livepatch: Add at...
419
420
421
422
423
  static void klp_free_object_dynamic(struct klp_object *obj)
  {
  	kfree(obj->name);
  	kfree(obj);
  }
f68d67cf2   Petr Mladek   livepatch: Remove...
424
425
426
427
  static void klp_init_func_early(struct klp_object *obj,
  				struct klp_func *func);
  static void klp_init_object_early(struct klp_patch *patch,
  				  struct klp_object *obj);
4d141ab34   Petr Mladek   livepatch: Remove...
428

f68d67cf2   Petr Mladek   livepatch: Remove...
429
430
  static struct klp_object *klp_alloc_object_dynamic(const char *name,
  						   struct klp_patch *patch)
e1452b607   Jason Baron   livepatch: Add at...
431
432
433
434
435
436
437
438
439
440
441
442
443
444
  {
  	struct klp_object *obj;
  
  	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  	if (!obj)
  		return NULL;
  
  	if (name) {
  		obj->name = kstrdup(name, GFP_KERNEL);
  		if (!obj->name) {
  			kfree(obj);
  			return NULL;
  		}
  	}
f68d67cf2   Petr Mladek   livepatch: Remove...
445
  	klp_init_object_early(patch, obj);
e1452b607   Jason Baron   livepatch: Add at...
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
  	obj->dynamic = true;
  
  	return obj;
  }
  
  static void klp_free_func_nop(struct klp_func *func)
  {
  	kfree(func->old_name);
  	kfree(func);
  }
  
  static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
  					   struct klp_object *obj)
  {
  	struct klp_func *func;
  
  	func = kzalloc(sizeof(*func), GFP_KERNEL);
  	if (!func)
  		return NULL;
  
  	if (old_func->old_name) {
  		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
  		if (!func->old_name) {
  			kfree(func);
  			return NULL;
  		}
  	}
f68d67cf2   Petr Mladek   livepatch: Remove...
473
  	klp_init_func_early(obj, func);
e1452b607   Jason Baron   livepatch: Add at...
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
  	/*
  	 * func->new_func is same as func->old_func. These addresses are
  	 * set when the object is loaded, see klp_init_object_loaded().
  	 */
  	func->old_sympos = old_func->old_sympos;
  	func->nop = true;
  
  	return func;
  }
  
  static int klp_add_object_nops(struct klp_patch *patch,
  			       struct klp_object *old_obj)
  {
  	struct klp_object *obj;
  	struct klp_func *func, *old_func;
  
  	obj = klp_find_object(patch, old_obj);
  
  	if (!obj) {
f68d67cf2   Petr Mladek   livepatch: Remove...
493
  		obj = klp_alloc_object_dynamic(old_obj->name, patch);
e1452b607   Jason Baron   livepatch: Add at...
494
495
  		if (!obj)
  			return -ENOMEM;
e1452b607   Jason Baron   livepatch: Add at...
496
497
498
499
500
501
502
503
504
505
  	}
  
  	klp_for_each_func(old_obj, old_func) {
  		func = klp_find_func(obj, old_func);
  		if (func)
  			continue;
  
  		func = klp_alloc_func_nop(old_func, obj);
  		if (!func)
  			return -ENOMEM;
e1452b607   Jason Baron   livepatch: Add at...
506
507
508
509
510
511
512
513
514
515
516
517
518
519
  	}
  
  	return 0;
  }
  
  /*
   * Add 'nop' functions which simply return to the caller to run
   * the original function. The 'nop' functions are added to a
   * patch to facilitate a 'replace' mode.
   */
  static int klp_add_nops(struct klp_patch *patch)
  {
  	struct klp_patch *old_patch;
  	struct klp_object *old_obj;
ecba29f43   Petr Mladek   livepatch: Introd...
520
  	klp_for_each_patch(old_patch) {
e1452b607   Jason Baron   livepatch: Add at...
521
522
523
524
525
526
527
528
529
530
531
  		klp_for_each_object(old_patch, old_obj) {
  			int err;
  
  			err = klp_add_object_nops(patch, old_obj);
  			if (err)
  				return err;
  		}
  	}
  
  	return 0;
  }
b700e7f03   Seth Jennings   livepatch: kernel...
532
533
  static void klp_kobj_release_patch(struct kobject *kobj)
  {
3ec24776b   Josh Poimboeuf   livepatch: allow ...
534
535
536
537
  	struct klp_patch *patch;
  
  	patch = container_of(kobj, struct klp_patch, kobj);
  	complete(&patch->finish);
b700e7f03   Seth Jennings   livepatch: kernel...
538
539
540
541
542
  }
  
  static struct kobj_type klp_ktype_patch = {
  	.release = klp_kobj_release_patch,
  	.sysfs_ops = &kobj_sysfs_ops,
70283454c   Kimberly Brown   livepatch: Replac...
543
  	.default_groups = klp_patch_groups,
b700e7f03   Seth Jennings   livepatch: kernel...
544
  };
cad706df7   Miroslav Benes   livepatch: make k...
545
546
  static void klp_kobj_release_object(struct kobject *kobj)
  {
e1452b607   Jason Baron   livepatch: Add at...
547
548
549
550
551
552
  	struct klp_object *obj;
  
  	obj = container_of(kobj, struct klp_object, kobj);
  
  	if (obj->dynamic)
  		klp_free_object_dynamic(obj);
cad706df7   Miroslav Benes   livepatch: make k...
553
554
555
556
557
558
  }
  
  static struct kobj_type klp_ktype_object = {
  	.release = klp_kobj_release_object,
  	.sysfs_ops = &kobj_sysfs_ops,
  };
b700e7f03   Seth Jennings   livepatch: kernel...
559
560
  static void klp_kobj_release_func(struct kobject *kobj)
  {
e1452b607   Jason Baron   livepatch: Add at...
561
562
563
564
565
566
  	struct klp_func *func;
  
  	func = container_of(kobj, struct klp_func, kobj);
  
  	if (func->nop)
  		klp_free_func_nop(func);
b700e7f03   Seth Jennings   livepatch: kernel...
567
568
569
570
571
572
  }
  
  static struct kobj_type klp_ktype_func = {
  	.release = klp_kobj_release_func,
  	.sysfs_ops = &kobj_sysfs_ops,
  };
d697bad58   Petr Mladek   livepatch: Remove...
573
  static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
b700e7f03   Seth Jennings   livepatch: kernel...
574
  {
e1452b607   Jason Baron   livepatch: Add at...
575
  	struct klp_func *func, *tmp_func;
b700e7f03   Seth Jennings   livepatch: kernel...
576

e1452b607   Jason Baron   livepatch: Add at...
577
  	klp_for_each_func_safe(obj, func, tmp_func) {
d697bad58   Petr Mladek   livepatch: Remove...
578
579
580
581
  		if (nops_only && !func->nop)
  			continue;
  
  		list_del(&func->node);
4d141ab34   Petr Mladek   livepatch: Remove...
582
  		kobject_put(&func->kobj);
0430f78bf   Petr Mladek   livepatch: Consol...
583
  	}
b700e7f03   Seth Jennings   livepatch: kernel...
584
585
586
587
588
589
590
591
  }
  
  /* Clean up when a patched object is unloaded */
  static void klp_free_object_loaded(struct klp_object *obj)
  {
  	struct klp_func *func;
  
  	obj->mod = NULL;
e1452b607   Jason Baron   livepatch: Add at...
592
  	klp_for_each_func(obj, func) {
19514910d   Petr Mladek   livepatch: Change...
593
  		func->old_func = NULL;
e1452b607   Jason Baron   livepatch: Add at...
594
595
596
597
  
  		if (func->nop)
  			func->new_func = NULL;
  	}
b700e7f03   Seth Jennings   livepatch: kernel...
598
  }
d697bad58   Petr Mladek   livepatch: Remove...
599
  static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
b700e7f03   Seth Jennings   livepatch: kernel...
600
  {
e1452b607   Jason Baron   livepatch: Add at...
601
  	struct klp_object *obj, *tmp_obj;
b700e7f03   Seth Jennings   livepatch: kernel...
602

e1452b607   Jason Baron   livepatch: Add at...
603
  	klp_for_each_object_safe(patch, obj, tmp_obj) {
d697bad58   Petr Mladek   livepatch: Remove...
604
605
606
607
608
609
  		__klp_free_funcs(obj, nops_only);
  
  		if (nops_only && !obj->dynamic)
  			continue;
  
  		list_del(&obj->node);
4d141ab34   Petr Mladek   livepatch: Remove...
610
  		kobject_put(&obj->kobj);
b700e7f03   Seth Jennings   livepatch: kernel...
611
612
  	}
  }
d697bad58   Petr Mladek   livepatch: Remove...
613
614
615
616
617
618
619
620
621
  static void klp_free_objects(struct klp_patch *patch)
  {
  	__klp_free_objects(patch, false);
  }
  
  static void klp_free_objects_dynamic(struct klp_patch *patch)
  {
  	__klp_free_objects(patch, true);
  }
0430f78bf   Petr Mladek   livepatch: Consol...
622
623
624
625
626
627
628
  /*
   * This function implements the free operations that can be called safely
   * under klp_mutex.
   *
   * The operation must be completed by calling klp_free_patch_finish()
   * outside klp_mutex.
   */
7e35e4eb7   Petr Mladek   livepatch: Keep r...
629
  static void klp_free_patch_start(struct klp_patch *patch)
b700e7f03   Seth Jennings   livepatch: kernel...
630
  {
b700e7f03   Seth Jennings   livepatch: kernel...
631
632
  	if (!list_empty(&patch->list))
  		list_del(&patch->list);
0430f78bf   Petr Mladek   livepatch: Consol...
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
  
  	klp_free_objects(patch);
  }
  
  /*
   * This function implements the free part that must be called outside
   * klp_mutex.
   *
   * It must be called after klp_free_patch_start(). And it has to be
   * the last function accessing the livepatch structures when the patch
   * gets disabled.
   */
  static void klp_free_patch_finish(struct klp_patch *patch)
  {
  	/*
  	 * Avoid deadlock with enabled_store() sysfs callback by
  	 * calling this outside klp_mutex. It is safe because
  	 * this is called when the patch gets disabled and it
  	 * cannot get enabled again.
  	 */
4d141ab34   Petr Mladek   livepatch: Remove...
653
654
  	kobject_put(&patch->kobj);
  	wait_for_completion(&patch->finish);
958ef1e39   Petr Mladek   livepatch: Simpli...
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
  
  	/* Put the module after the last access to struct klp_patch. */
  	if (!patch->forced)
  		module_put(patch->mod);
  }
  
  /*
   * The livepatch might be freed from sysfs interface created by the patch.
   * This work allows to wait until the interface is destroyed in a separate
   * context.
   */
  static void klp_free_patch_work_fn(struct work_struct *work)
  {
  	struct klp_patch *patch =
  		container_of(work, struct klp_patch, free_work);
  
  	klp_free_patch_finish(patch);
b700e7f03   Seth Jennings   livepatch: kernel...
672
  }
7e35e4eb7   Petr Mladek   livepatch: Keep r...
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
  void klp_free_patch_async(struct klp_patch *patch)
  {
  	klp_free_patch_start(patch);
  	schedule_work(&patch->free_work);
  }
  
  void klp_free_replaced_patches_async(struct klp_patch *new_patch)
  {
  	struct klp_patch *old_patch, *tmp_patch;
  
  	klp_for_each_patch_safe(old_patch, tmp_patch) {
  		if (old_patch == new_patch)
  			return;
  		klp_free_patch_async(old_patch);
  	}
  }
b700e7f03   Seth Jennings   livepatch: kernel...
689
690
  static int klp_init_func(struct klp_object *obj, struct klp_func *func)
  {
e1452b607   Jason Baron   livepatch: Add at...
691
692
693
694
695
696
697
698
  	if (!func->old_name)
  		return -EINVAL;
  
  	/*
  	 * NOPs get the address later. The patched module must be loaded,
  	 * see klp_init_object_loaded().
  	 */
  	if (!func->new_func && !func->nop)
f09d90864   Miroslav Benes   livepatch: make o...
699
  		return -EINVAL;
6e9df95b7   Kamalesh Babulal   livepatch: Valida...
700
701
  	if (strlen(func->old_name) >= KSYM_NAME_LEN)
  		return -EINVAL;
3c33f5b99   Josh Poimboeuf   livepatch: suppor...
702
  	INIT_LIST_HEAD(&func->stack_node);
0dade9f37   Josh Poimboeuf   livepatch: separa...
703
  	func->patched = false;
d83a7cb37   Josh Poimboeuf   livepatch: change...
704
  	func->transition = false;
b700e7f03   Seth Jennings   livepatch: kernel...
705

444f9e99a   Chris J Arges   livepatch: functi...
706
707
708
709
710
  	/* The format for the sysfs directory is <function,sympos> where sympos
  	 * is the nth occurrence of this symbol in kallsyms for the patched
  	 * object. If the user selects 0 for old_sympos, then 1 will be used
  	 * since a unique symbol will be the first occurrence.
  	 */
4d141ab34   Petr Mladek   livepatch: Remove...
711
712
713
  	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
  			   func->old_name,
  			   func->old_sympos ? func->old_sympos : 1);
b700e7f03   Seth Jennings   livepatch: kernel...
714
  }
a4ae16f65   Samuel Zou   livepatch: Make k...
715
716
  static int klp_apply_object_relocs(struct klp_patch *patch,
  				   struct klp_object *obj)
7c8e2bdd5   Josh Poimboeuf   livepatch: Apply ...
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
  {
  	int i, ret;
  	struct klp_modinfo *info = patch->mod->klp_info;
  
  	for (i = 1; i < info->hdr.e_shnum; i++) {
  		Elf_Shdr *sec = info->sechdrs + i;
  
  		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
  			continue;
  
  		ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
  					       info->secstrings,
  					       patch->mod->core_kallsyms.strtab,
  					       info->symndx, i, obj->name);
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
b700e7f03   Seth Jennings   livepatch: kernel...
737
738
739
740
741
742
/* parts of the initialization that is done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (klp_is_module(obj)) {
		/*
		 * Only write module-specific relocations here
		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
		 * written earlier during the initialization of the klp module
		 * itself.
		 */
		ret = klp_apply_object_relocs(patch, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		/* Resolve the address of the function to be patched. */
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		/*
		 * Record the size of the original function; presumably used
		 * by the consistency-model stack checking — see transition
		 * code.
		 */
		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		/* A NOP redirects to the original code, see klp_init_func(). */
		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}
  
/*
 * Initialize one patched object: validate it, create its sysfs directory,
 * initialize all its functions, and — if the target is already loaded —
 * finish the load-time part as well.
 */
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	/* Sets obj->mod when the patched module is currently loaded. */
	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";

	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	/*
	 * ret is 0 here (kobject_add and the loop succeeded); it is only
	 * overwritten when the load-time initialization runs.
	 */
	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}
f68d67cf2   Petr Mladek   livepatch: Remove...
812
813
814
815
816
817
818
819
820
821
822
823
824
825
/*
 * Early, never-failing part of func initialization: set up the kobject
 * and link the func into its object's list.
 */
static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}
  
/*
 * Early, never-failing part of object initialization: set up the kobject
 * and link the object into its patch's list.
 */
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}
0430f78bf   Petr Mladek   livepatch: Consol...
826
/*
 * Early initialization of a patch: validate the static description,
 * initialize list heads, kobjects and state flags, and pin the patch
 * module.  Must not fail in a way that leaves kobjects registered in
 * sysfs — only kobject_init() is used here, kobject_add() comes later.
 */
static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	/* Signalled by the kobject release, see klp_free_patch_finish(). */
	init_completion(&patch->finish);

	/* Walk the statically declared arrays, not the runtime lists. */
	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}

	/* Hold a reference on the livepatch module while it is in use. */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}
  
/*
 * Second-stage initialization: publish the patch in sysfs, create the
 * dynamic NOP entries for atomic-replace patches, initialize all objects,
 * and add the patch to the global list.
 */
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	/*
	 * An atomic-replace patch needs NOP funcs for functions patched by
	 * older patches but not by this one, so they get reverted.
	 */
	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}
b700e7f03   Seth Jennings   livepatch: kernel...
877

26c3e98e2   Petr Mladek   livepatch: Shuffl...
878
879
880
881
882
883
884
885
886
/*
 * Start the transition that reverts an enabled patch.  Called with
 * klp_mutex held (see the locking rules at the top of this file).
 */
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	/* Only one transition may be in progress at a time. */
	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	/* Try to finish immediately; otherwise it completes asynchronously. */
	klp_try_complete_transition();

	return 0;
}
26c3e98e2   Petr Mladek   livepatch: Shuffl...
908
909
910
911
912
913
914
915
916
917
/*
 * Start the transition that applies a freshly initialized patch.  Called
 * with klp_mutex held (see the locking rules at the top of this file).
 */
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	/* Only one transition may be in progress at a time. */
	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		/* Not-yet-loaded modules get patched in klp_module_coming(). */
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	/* Try to finish immediately; otherwise it completes asynchronously. */
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	/* Roll back whatever klp_init_transition()/klp_patch_object() did. */
	klp_cancel_transition();
	return ret;
}
  
/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations,
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	/* The root kobject is created in klp_init(). */
	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_compatible(patch)) {
		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
			patch->mod->name);
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	ret = klp_init_patch_early(patch);
	if (ret) {
		/*
		 * Nothing was registered in sysfs yet, so a plain return is
		 * enough — no klp_free_patch_*() needed here.
		 */
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	/* Must run outside klp_mutex, see klp_free_patch_finish(). */
	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
ef8daf8ee   Joe Lawrence   livepatch: unpatc...
1033
/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessed from the ftrace handler.
 * All functions are redirected by the klp_transition_patch. They
 * use either a new code or they are in the original code because
 * of the special nop function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. Also
 * this is handled transparently by patch->module_put.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch;

	/* Patches preceding @new_patch in the list are the replaced ones. */
	klp_for_each_patch(old_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
	}
}
  
/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding unnecessary delay by the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOPs structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	/*
	 * NOTE(review): @new_patch is unused here; the NOPs belong to
	 * klp_transition_patch.  Presumably kept for interface symmetry
	 * with klp_unpatch_replaced_patches() — confirm with callers.
	 */
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}
  
/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			/*
			 * The transition patch's callbacks are handled by the
			 * transition code itself; skip them here.
			 */
			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			/* Drop load-time state, including obj->mod. */
			klp_free_object_loaded(obj);
			break;
		}
	}
}
7e545d6ec   Jessica Yu   livepatch/module:...
1115
  int klp_module_coming(struct module *mod)
b700e7f03   Seth Jennings   livepatch: kernel...
1116
  {
b700e7f03   Seth Jennings   livepatch: kernel...
1117
  	int ret;
7e545d6ec   Jessica Yu   livepatch/module:...
1118
1119
  	struct klp_patch *patch;
  	struct klp_object *obj;
b700e7f03   Seth Jennings   livepatch: kernel...
1120

7e545d6ec   Jessica Yu   livepatch/module:...
1121
1122
  	if (WARN_ON(mod->state != MODULE_STATE_COMING))
  		return -EINVAL;
b700e7f03   Seth Jennings   livepatch: kernel...
1123

dcf550e52   Josh Poimboeuf   livepatch: Disall...
1124
1125
1126
1127
  	if (!strcmp(mod->name, "vmlinux")) {
  		pr_err("vmlinux.ko: invalid module name");
  		return -EINVAL;
  	}
7e545d6ec   Jessica Yu   livepatch/module:...
1128
1129
1130
1131
1132
1133
1134
  	mutex_lock(&klp_mutex);
  	/*
  	 * Each module has to know that klp_module_coming()
  	 * has been called. We never know what module will
  	 * get patched by a new patch.
  	 */
  	mod->klp_alive = true;
b700e7f03   Seth Jennings   livepatch: kernel...
1135

ecba29f43   Petr Mladek   livepatch: Introd...
1136
  	klp_for_each_patch(patch) {
7e545d6ec   Jessica Yu   livepatch/module:...
1137
1138
1139
  		klp_for_each_object(patch, obj) {
  			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
  				continue;
b700e7f03   Seth Jennings   livepatch: kernel...
1140

7e545d6ec   Jessica Yu   livepatch/module:...
1141
  			obj->mod = mod;
b700e7f03   Seth Jennings   livepatch: kernel...
1142

7e545d6ec   Jessica Yu   livepatch/module:...
1143
1144
1145
1146
1147
1148
1149
  			ret = klp_init_object_loaded(patch, obj);
  			if (ret) {
  				pr_warn("failed to initialize patch '%s' for module '%s' (%d)
  ",
  					patch->mod->name, obj->mod->name, ret);
  				goto err;
  			}
b700e7f03   Seth Jennings   livepatch: kernel...
1150

7e545d6ec   Jessica Yu   livepatch/module:...
1151
1152
1153
  			pr_notice("applying patch '%s' to loading module '%s'
  ",
  				  patch->mod->name, obj->mod->name);
93862e385   Joe Lawrence   livepatch: add (u...
1154
1155
1156
1157
1158
1159
1160
  			ret = klp_pre_patch_callback(obj);
  			if (ret) {
  				pr_warn("pre-patch callback failed for object '%s'
  ",
  					obj->name);
  				goto err;
  			}
0dade9f37   Josh Poimboeuf   livepatch: separa...
1161
  			ret = klp_patch_object(obj);
7e545d6ec   Jessica Yu   livepatch/module:...
1162
1163
1164
1165
  			if (ret) {
  				pr_warn("failed to apply patch '%s' to module '%s' (%d)
  ",
  					patch->mod->name, obj->mod->name, ret);
93862e385   Joe Lawrence   livepatch: add (u...
1166

5aaf1ab55   Petr Mladek   livepatch: Correc...
1167
  				klp_post_unpatch_callback(obj);
7e545d6ec   Jessica Yu   livepatch/module:...
1168
1169
  				goto err;
  			}
93862e385   Joe Lawrence   livepatch: add (u...
1170
1171
  			if (patch != klp_transition_patch)
  				klp_post_patch_callback(obj);
7e545d6ec   Jessica Yu   livepatch/module:...
1172
1173
1174
  			break;
  		}
  	}
b700e7f03   Seth Jennings   livepatch: kernel...
1175

7e545d6ec   Jessica Yu   livepatch/module:...
1176
  	mutex_unlock(&klp_mutex);
b700e7f03   Seth Jennings   livepatch: kernel...
1177

7e545d6ec   Jessica Yu   livepatch/module:...
1178
  	return 0;
b700e7f03   Seth Jennings   livepatch: kernel...
1179

7e545d6ec   Jessica Yu   livepatch/module:...
1180
1181
1182
1183
1184
1185
1186
1187
1188
  err:
  	/*
  	 * If a patch is unsuccessfully applied, return
  	 * error to the module loader.
  	 */
  	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'
  ",
  		patch->mod->name, obj->mod->name, obj->mod->name);
  	mod->klp_alive = false;
4ff96fb52   Miroslav Benes   livepatch: Nullif...
1189
  	obj->mod = NULL;
ef8daf8ee   Joe Lawrence   livepatch: unpatc...
1190
  	klp_cleanup_module_patches_limited(mod, patch);
7e545d6ec   Jessica Yu   livepatch/module:...
1191
1192
1193
  	mutex_unlock(&klp_mutex);
  
  	return ret;
b700e7f03   Seth Jennings   livepatch: kernel...
1194
  }
7e545d6ec   Jessica Yu   livepatch/module:...
1195
/*
 * Module-loader notifier: revert the relevant parts of all patches from a
 * module that is being removed (or whose load failed after coming up).
 */
void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	/* NULL limit: process all registered patches. */
	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}
26029d88a   Minfei Huang   livepatch: annota...
1213
  static int __init klp_init(void)
b700e7f03   Seth Jennings   livepatch: kernel...
1214
  {
b700e7f03   Seth Jennings   livepatch: kernel...
1215
  	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
7e545d6ec   Jessica Yu   livepatch/module:...
1216
1217
  	if (!klp_root_kobj)
  		return -ENOMEM;
b700e7f03   Seth Jennings   livepatch: kernel...
1218
1219
  
  	return 0;
b700e7f03   Seth Jennings   livepatch: kernel...
1220
1221
1222
  }
  
  module_init(klp_init);