Blame view

kernel/livepatch/core.c 23.7 KB
b700e7f03   Seth Jennings   livepatch: kernel...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
  /*
   * core.c - Kernel Live Patching Core
   *
   * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
   * Copyright (C) 2014 SUSE
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * as published by the Free Software Foundation; either version 2
   * of the License, or (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>
  #include <linux/ftrace.h>
  #include <linux/list.h>
  #include <linux/kallsyms.h>
  #include <linux/livepatch.h>
425595a7f   Jessica Yu   livepatch: reuse ...
31
32
  #include <linux/elf.h>
  #include <linux/moduleloader.h>
b56b36ee6   Josh Poimboeuf   livepatch: Cleanu...
33
  #include <asm/cacheflush.h>
b700e7f03   Seth Jennings   livepatch: kernel...
34

3c33f5b99   Josh Poimboeuf   livepatch: suppor...
35
36
37
38
39
40
41
42
43
44
45
46
/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
b700e7f03   Seth Jennings   livepatch: kernel...
53

3c33f5b99   Josh Poimboeuf   livepatch: suppor...
54
55
56
57
58
59
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

/* All registered patches, in registration order. */
static LIST_HEAD(klp_patches);

/* All live klp_ops structs; one per patched old_addr (see klp_find_ops()). */
static LIST_HEAD(klp_ops);

/*
 * Root of the livepatch sysfs tree (/sys/kernel/livepatch); non-NULL once
 * the core has initialized -- tested by klp_initialized().
 */
static struct kobject *klp_root_kobj;
3c33f5b99   Josh Poimboeuf   livepatch: suppor...
66
67
68
69
70
71
72
73
74
75
76
77
78
79
  static struct klp_ops *klp_find_ops(unsigned long old_addr)
  {
  	struct klp_ops *ops;
  	struct klp_func *func;
  
  	list_for_each_entry(ops, &klp_ops, node) {
  		func = list_first_entry(&ops->func_stack, struct klp_func,
  					stack_node);
  		if (func->old_addr == old_addr)
  			return ops;
  	}
  
  	return NULL;
  }
b700e7f03   Seth Jennings   livepatch: kernel...
80
81
82
83
84
85
86
87
88
89
90
91
92
  static bool klp_is_module(struct klp_object *obj)
  {
  	return obj->name;
  }
  
  static bool klp_is_object_loaded(struct klp_object *obj)
  {
  	return !obj->name || obj->mod;
  }
  
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	/* vmlinux objects have no backing module to look up */
	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess work of klp_module_coming() and klp_module_going().
	 * Note that the patch might still be needed before klp_module_going()
	 * is called. Module functions can be called even in the GOING state
	 * until mod->exit() finishes. This is especially important for
	 * patches that modify semantic of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
  
  /* klp_mutex must be held by caller */
  static bool klp_is_patch_registered(struct klp_patch *patch)
  {
  	struct klp_patch *mypatch;
  
  	list_for_each_entry(mypatch, &klp_patches, list)
  		if (mypatch == patch)
  			return true;
  
  	return false;
  }
  
  static bool klp_initialized(void)
  {
e76ff06a9   Nicholas Mc Guire   livepatch: match ...
130
  	return !!klp_root_kobj;
b700e7f03   Seth Jennings   livepatch: kernel...
131
132
133
134
135
136
  }
  
/*
 * Context passed through kallsyms_on_each_symbol() to klp_find_callback().
 */
struct klp_find_arg {
	const char *objname;	/* module name to match; NULL means vmlinux */
	const char *name;	/* symbol name being searched for */
	unsigned long addr;	/* out: address of the last matching symbol */
	unsigned long count;	/* number of matches seen so far */
	unsigned long pos;	/* wanted occurrence (sympos); 0 = must be unique */
};
  
/*
 * kallsyms_on_each_symbol() callback: match symbols against a klp_find_arg.
 *
 * Returns 1 to stop the walk (enough matches found), 0 to continue.
 */
static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	/* vmlinux symbols have mod == NULL; reject the wrong namespace */
	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}
  
/*
 * Resolve @name within @objname (NULL = vmlinux) to an address via kallsyms.
 *
 * @sympos selects the nth occurrence of a non-unique symbol; 0 requires the
 * symbol to be unique.  On success *addr is set and 0 is returned; on any
 * failure *addr is zeroed and -EINVAL is returned.
 */
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	/* module_mutex keeps the module list stable during the walk */
	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
425595a7f   Jessica Yu   livepatch: reuse ...
203
  static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
b700e7f03   Seth Jennings   livepatch: kernel...
204
  {
425595a7f   Jessica Yu   livepatch: reuse ...
205
206
207
208
209
210
211
  	int i, cnt, vmlinux, ret;
  	char objname[MODULE_NAME_LEN];
  	char symname[KSYM_NAME_LEN];
  	char *strtab = pmod->core_kallsyms.strtab;
  	Elf_Rela *relas;
  	Elf_Sym *sym;
  	unsigned long sympos, addr;
b700e7f03   Seth Jennings   livepatch: kernel...
212

b2b018ef4   Chris J Arges   livepatch: add ol...
213
  	/*
425595a7f   Jessica Yu   livepatch: reuse ...
214
215
216
217
218
219
220
221
  	 * Since the field widths for objname and symname in the sscanf()
  	 * call are hard-coded and correspond to MODULE_NAME_LEN and
  	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
  	 * and KSYM_NAME_LEN have the values we expect them to have.
  	 *
  	 * Because the value of MODULE_NAME_LEN can differ among architectures,
  	 * we use the smallest/strictest upper bound possible (56, based on
  	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
b2b018ef4   Chris J Arges   livepatch: add ol...
222
  	 */
425595a7f   Jessica Yu   livepatch: reuse ...
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
  	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
  
  	relas = (Elf_Rela *) relasec->sh_addr;
  	/* For each rela in this klp relocation section */
  	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
  		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
  		if (sym->st_shndx != SHN_LIVEPATCH) {
  			pr_err("symbol %s is not marked as a livepatch symbol",
  			       strtab + sym->st_name);
  			return -EINVAL;
  		}
  
  		/* Format: .klp.sym.objname.symname,sympos */
  		cnt = sscanf(strtab + sym->st_name,
  			     ".klp.sym.%55[^.].%127[^,],%lu",
  			     objname, symname, &sympos);
  		if (cnt != 3) {
  			pr_err("symbol %s has an incorrectly formatted name",
  			       strtab + sym->st_name);
  			return -EINVAL;
  		}
  
  		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
  		vmlinux = !strcmp(objname, "vmlinux");
  		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
  					     symname, sympos, &addr);
  		if (ret)
  			return ret;
  
  		sym->st_value = addr;
  	}
  
  	return 0;
b700e7f03   Seth Jennings   livepatch: kernel...
256
257
258
259
260
  }
  
  static int klp_write_object_relocations(struct module *pmod,
  					struct klp_object *obj)
  {
425595a7f   Jessica Yu   livepatch: reuse ...
261
262
263
264
  	int i, cnt, ret = 0;
  	const char *objname, *secname;
  	char sec_objname[MODULE_NAME_LEN];
  	Elf_Shdr *sec;
b700e7f03   Seth Jennings   livepatch: kernel...
265
266
267
  
  	if (WARN_ON(!klp_is_object_loaded(obj)))
  		return -EINVAL;
425595a7f   Jessica Yu   livepatch: reuse ...
268
  	objname = klp_is_module(obj) ? obj->name : "vmlinux";
b700e7f03   Seth Jennings   livepatch: kernel...
269

b56b36ee6   Josh Poimboeuf   livepatch: Cleanu...
270
  	module_disable_ro(pmod);
425595a7f   Jessica Yu   livepatch: reuse ...
271
272
273
274
275
276
  	/* For each klp relocation section */
  	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
  		sec = pmod->klp_info->sechdrs + i;
  		secname = pmod->klp_info->secstrings + sec->sh_name;
  		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
  			continue;
b56b36ee6   Josh Poimboeuf   livepatch: Cleanu...
277

425595a7f   Jessica Yu   livepatch: reuse ...
278
279
280
281
282
283
284
285
286
287
288
289
  		/*
  		 * Format: .klp.rela.sec_objname.section_name
  		 * See comment in klp_resolve_symbols() for an explanation
  		 * of the selected field width value.
  		 */
  		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
  		if (cnt != 1) {
  			pr_err("section %s has an incorrectly formatted name",
  			       secname);
  			ret = -EINVAL;
  			break;
  		}
b56b36ee6   Josh Poimboeuf   livepatch: Cleanu...
290

425595a7f   Jessica Yu   livepatch: reuse ...
291
292
  		if (strcmp(objname, sec_objname))
  			continue;
b56b36ee6   Josh Poimboeuf   livepatch: Cleanu...
293

425595a7f   Jessica Yu   livepatch: reuse ...
294
  		ret = klp_resolve_symbols(sec, pmod);
064c89df6   Chris J Arges   livepatch: add sy...
295
  		if (ret)
425595a7f   Jessica Yu   livepatch: reuse ...
296
  			break;
064c89df6   Chris J Arges   livepatch: add sy...
297

425595a7f   Jessica Yu   livepatch: reuse ...
298
299
300
301
302
  		ret = apply_relocate_add(pmod->klp_info->sechdrs,
  					 pmod->core_kallsyms.strtab,
  					 pmod->klp_info->symndx, i, pmod);
  		if (ret)
  			break;
b700e7f03   Seth Jennings   livepatch: kernel...
303
  	}
444d13ff1   Jessica Yu   modules: add ro_a...
304
  	module_enable_ro(pmod, true);
b56b36ee6   Josh Poimboeuf   livepatch: Cleanu...
305
  	return ret;
b700e7f03   Seth Jennings   livepatch: kernel...
306
307
308
309
  }
  
/*
 * ftrace handler hooked at every patched function's entry.
 *
 * Redirects execution to the replacement on top of the ops' func_stack by
 * rewriting the saved program counter in @regs.  Runs in ftrace context, so
 * the func_stack is read under RCU rather than klp_mutex; marked notrace to
 * avoid recursing into ftrace.
 */
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	/* top of the stack is the currently-active replacement */
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
28e7cbd3e   Michael Ellerman   livepatch: Allow ...
328
329
330
331
332
333
334
335
336
337
338
339
  /*
   * Convert a function address into the appropriate ftrace location.
   *
   * Usually this is just the address of the function, but on some architectures
   * it's more complicated so allow them to provide a custom behaviour.
   */
  #ifndef klp_get_ftrace_location
  static unsigned long klp_get_ftrace_location(unsigned long faddr)
  {
  	return faddr;
  }
  #endif
0937e3b02   Josh Poimboeuf   livepatch: simpli...
340
/*
 * Deactivate one patched function.
 *
 * Pops @func off its klp_ops func_stack.  If it was the only func on the
 * stack, the ftrace ops is unregistered, its filter removed, and the klp_ops
 * freed; otherwise the next func on the stack silently takes over via
 * klp_ftrace_handler().  Caller holds klp_mutex.
 */
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		/* unregister before dropping the filter and freeing ops */
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}
  
/*
 * Activate one patched function.
 *
 * If no klp_ops yet exists for func->old_addr, allocate one, push @func on
 * its stack and register the shared ftrace handler with SAVE_REGS/IPMODIFY
 * (needed so klp_ftrace_handler() can rewrite the PC).  Otherwise just push
 * @func on the existing stack, instantly making it the active replacement.
 * Caller holds klp_mutex.  Returns 0 or a negative errno.
 */
static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		/* the func must be on the stack before the handler can fire */
		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			/* undo the filter set just above before unwinding */
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;
	return 0;

err:
	/* unwind the partially-constructed ops; only reached on the !ops path */
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
0937e3b02   Josh Poimboeuf   livepatch: simpli...
435
/*
 * Disable every enabled function of @obj, then mark the object disabled.
 * Also used to roll back a partially-enabled object (hence the per-func
 * state check).  Caller holds klp_mutex.
 */
static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}
  
/*
 * Enable every function of @obj.  On the first failure the whole object is
 * rolled back via klp_disable_object().  The object must be loaded.
 * Caller holds klp_mutex.  Returns 0 or a negative errno.
 */
static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			/* roll back funcs already enabled in this loop */
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}
  
/*
 * Disable all objects of @patch.  Caller holds klp_mutex and has verified
 * the patch is registered and currently enabled.  Returns 0 or -EBUSY when
 * a later patch on the stack is still enabled.
 */
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		/* objects whose module never loaded were never enabled */
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}
  
  /**
   * klp_disable_patch() - disables a registered patch
   * @patch:	The registered, enabled patch to be disabled
   *
   * Unregisters the patched functions from ftrace.
   *
   * Return: 0 on success, otherwise error
   */
  int klp_disable_patch(struct klp_patch *patch)
  {
  	int ret;
  
  	mutex_lock(&klp_mutex);
  
  	if (!klp_is_patch_registered(patch)) {
  		ret = -EINVAL;
  		goto err;
  	}
  
  	if (patch->state == KLP_DISABLED) {
  		ret = -EINVAL;
  		goto err;
  	}
  
  	ret = __klp_disable_patch(patch);
  
  err:
  	mutex_unlock(&klp_mutex);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(klp_disable_patch);
  
/*
 * Enable all loaded objects of @patch; objects whose modules are not yet
 * loaded are skipped (klp_module_coming() handles them later).  On failure
 * everything enabled so far is rolled back.  Taints the kernel with
 * TAINT_LIVEPATCH.  Caller holds klp_mutex.
 */
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	/* roll back the objects that were enabled before the failure */
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
  
  /**
   * klp_enable_patch() - enables a registered patch
   * @patch:	The registered, disabled patch to be enabled
   *
   * Performs the needed symbol lookups and code relocations,
   * then registers the patched functions with ftrace.
   *
   * Return: 0 on success, otherwise error
   */
  int klp_enable_patch(struct klp_patch *patch)
  {
  	int ret;
  
  	mutex_lock(&klp_mutex);
  
  	if (!klp_is_patch_registered(patch)) {
  		ret = -EINVAL;
  		goto err;
  	}
  
  	ret = __klp_enable_patch(patch);
  
  err:
  	mutex_unlock(&klp_mutex);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(klp_enable_patch);
  
  /*
   * Sysfs Interface
   *
   * /sys/kernel/livepatch
   * /sys/kernel/livepatch/<patch>
   * /sys/kernel/livepatch/<patch>/enabled
   * /sys/kernel/livepatch/<patch>/<object>
444f9e99a   Chris J Arges   livepatch: functi...
590
   * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
b700e7f03   Seth Jennings   livepatch: kernel...
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
   */
  
/*
 * sysfs write handler for /sys/kernel/livepatch/<patch>/enabled.
 *
 * Accepts "0" (disable) or "1" (enable); writing the current state is an
 * error.  Returns @count on success or a negative errno.
 */
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	/* KLP_DISABLED/KLP_ENABLED are the only valid inputs */
	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
  
/* sysfs read handler: report the patch state (0 disabled / 1 enabled). */
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}
  
/* "enabled" attribute wired to enabled_show()/enabled_store() above */
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

/* kobj_type for the per-patch sysfs directory */
static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};
cad706df7   Miroslav Benes   livepatch: make k...
665
666
667
668
669
670
671
672
/* klp_object kobjects are embedded in caller-owned structs: nothing to free */
static void klp_kobj_release_object(struct kobject *kobj)
{
}

/* kobj_type for the per-object sysfs directory */
static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};
b700e7f03   Seth Jennings   livepatch: kernel...
673
674
/* klp_func kobjects are embedded in caller-owned structs: nothing to free */
static void klp_kobj_release_func(struct kobject *kobj)
{
}

/* kobj_type for the per-function sysfs directory */
static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
  
  /*
   * Free all functions' kobjects in the array up to some limit. When limit is
   * NULL, all kobjects are freed.
   */
  static void klp_free_funcs_limited(struct klp_object *obj,
  				   struct klp_func *limit)
  {
  	struct klp_func *func;
  
  	for (func = obj->funcs; func->old_name && func != limit; func++)
  		kobject_put(&func->kobj);
  }
  
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	/* stale addresses must not survive a module reload */
	klp_for_each_func(obj, func)
		func->old_addr = 0;
}
  
/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	/* the objs array is terminated by an entry with NULL funcs */
	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}
  
/*
 * Release everything a patch owns: all object/func kobjects, its entry on
 * klp_patches (if registered), and finally the patch kobject itself.
 */
static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	/* the patch may not have made it onto the global list yet */
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}
  
/*
 * Initialize one klp_func and create its sysfs directory under @obj.
 * Returns 0 or a negative errno from kobject_init_and_add().
 */
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/* The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
  
/* parts of the initialization that is done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	/* relocations first: symbol resolution may depend on them */
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret)
		return ret;

	/* resolve each to-be-patched function to its kallsyms address */
	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}
  
/*
 * Initialize one klp_object: create its sysfs directory, initialize all of
 * its funcs, and -- when the target object is already loaded -- perform the
 * load-time initialization too.  On failure everything created so far for
 * this object is torn down.  Returns 0 or a negative errno.
 */
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	/* sets obj->mod if the target module is already loaded and alive */
	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	/* @func marks the first uninitialized entry; earlier ones are freed */
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}
  
  static int klp_init_patch(struct klp_patch *patch)
  {
  	struct klp_object *obj;
  	int ret;
  
  	if (!patch->objs)
  		return -EINVAL;
  
  	mutex_lock(&klp_mutex);
  
  	patch->state = KLP_DISABLED;
  
  	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
e0b561ee7   Jiri Kosina   livepatch: fix fo...
818
  				   klp_root_kobj, "%s", patch->mod->name);
b700e7f03   Seth Jennings   livepatch: kernel...
819
820
  	if (ret)
  		goto unlock;
8cdd043ab   Jiri Slaby   livepatch: introd...
821
  	klp_for_each_object(patch, obj) {
b700e7f03   Seth Jennings   livepatch: kernel...
822
823
824
825
  		ret = klp_init_object(patch, obj);
  		if (ret)
  			goto free;
  	}
99590ba56   Josh Poimboeuf   livepatch: fix de...
826
  	list_add_tail(&patch->list, &klp_patches);
b700e7f03   Seth Jennings   livepatch: kernel...
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
  
  	mutex_unlock(&klp_mutex);
  
  	return 0;
  
  free:
  	klp_free_objects_limited(patch, obj);
  	kobject_put(&patch->kobj);
  unlock:
  	mutex_unlock(&klp_mutex);
  	return ret;
  }
  
  /**
   * klp_unregister_patch() - unregisters a patch
   * @patch:	Disabled patch to be unregistered
   *
   * Frees the data structures and removes the sysfs interface.
   *
   * Return: 0 on success, otherwise error
   */
  int klp_unregister_patch(struct klp_patch *patch)
  {
  	int ret = 0;
  
  	mutex_lock(&klp_mutex);
  
  	if (!klp_is_patch_registered(patch)) {
  		ret = -EINVAL;
  		goto out;
  	}
  
  	if (patch->state == KLP_ENABLED) {
  		ret = -EBUSY;
  		goto out;
  	}
  
  	klp_free_patch(patch);
  
  out:
  	mutex_unlock(&klp_mutex);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(klp_unregister_patch);
  
  /**
   * klp_register_patch() - registers a patch
   * @patch:	Patch to be registered
   *
   * Initializes the data structure associated with the patch and
   * creates the sysfs interface.
   *
   * Return: 0 on success, otherwise error
   */
  int klp_register_patch(struct klp_patch *patch)
  {
  	int ret;
b700e7f03   Seth Jennings   livepatch: kernel...
884
885
  	if (!patch || !patch->mod)
  		return -EINVAL;
425595a7f   Jessica Yu   livepatch: reuse ...
886
887
888
889
890
  	if (!is_livepatch_module(patch->mod)) {
  		pr_err("module %s is not marked as a livepatch module",
  		       patch->mod->name);
  		return -EINVAL;
  	}
b700e7f03   Seth Jennings   livepatch: kernel...
891
892
  	if (!klp_initialized())
  		return -ENODEV;
b700e7f03   Seth Jennings   livepatch: kernel...
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
  	/*
  	 * A reference is taken on the patch module to prevent it from being
  	 * unloaded.  Right now, we don't allow patch modules to unload since
  	 * there is currently no method to determine if a thread is still
  	 * running in the patched code contained in the patch module once
  	 * the ftrace registration is successful.
  	 */
  	if (!try_module_get(patch->mod))
  		return -ENODEV;
  
  	ret = klp_init_patch(patch);
  	if (ret)
  		module_put(patch->mod);
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(klp_register_patch);
7e545d6ec   Jessica Yu   livepatch/module:...
910
  int klp_module_coming(struct module *mod)
b700e7f03   Seth Jennings   livepatch: kernel...
911
  {
b700e7f03   Seth Jennings   livepatch: kernel...
912
  	int ret;
7e545d6ec   Jessica Yu   livepatch/module:...
913
914
  	struct klp_patch *patch;
  	struct klp_object *obj;
b700e7f03   Seth Jennings   livepatch: kernel...
915

7e545d6ec   Jessica Yu   livepatch/module:...
916
917
  	if (WARN_ON(mod->state != MODULE_STATE_COMING))
  		return -EINVAL;
b700e7f03   Seth Jennings   livepatch: kernel...
918

7e545d6ec   Jessica Yu   livepatch/module:...
919
920
921
922
923
924
925
  	mutex_lock(&klp_mutex);
  	/*
  	 * Each module has to know that klp_module_coming()
  	 * has been called. We never know what module will
  	 * get patched by a new patch.
  	 */
  	mod->klp_alive = true;
b700e7f03   Seth Jennings   livepatch: kernel...
926

7e545d6ec   Jessica Yu   livepatch/module:...
927
928
929
930
  	list_for_each_entry(patch, &klp_patches, list) {
  		klp_for_each_object(patch, obj) {
  			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
  				continue;
b700e7f03   Seth Jennings   livepatch: kernel...
931

7e545d6ec   Jessica Yu   livepatch/module:...
932
  			obj->mod = mod;
b700e7f03   Seth Jennings   livepatch: kernel...
933

7e545d6ec   Jessica Yu   livepatch/module:...
934
935
936
937
938
939
940
  			ret = klp_init_object_loaded(patch, obj);
  			if (ret) {
  				pr_warn("failed to initialize patch '%s' for module '%s' (%d)
  ",
  					patch->mod->name, obj->mod->name, ret);
  				goto err;
  			}
b700e7f03   Seth Jennings   livepatch: kernel...
941

7e545d6ec   Jessica Yu   livepatch/module:...
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
  			if (patch->state == KLP_DISABLED)
  				break;
  
  			pr_notice("applying patch '%s' to loading module '%s'
  ",
  				  patch->mod->name, obj->mod->name);
  
  			ret = klp_enable_object(obj);
  			if (ret) {
  				pr_warn("failed to apply patch '%s' to module '%s' (%d)
  ",
  					patch->mod->name, obj->mod->name, ret);
  				goto err;
  			}
  
  			break;
  		}
  	}
b700e7f03   Seth Jennings   livepatch: kernel...
960

7e545d6ec   Jessica Yu   livepatch/module:...
961
  	mutex_unlock(&klp_mutex);
b700e7f03   Seth Jennings   livepatch: kernel...
962

7e545d6ec   Jessica Yu   livepatch/module:...
963
  	return 0;
b700e7f03   Seth Jennings   livepatch: kernel...
964

7e545d6ec   Jessica Yu   livepatch/module:...
965
966
967
968
969
970
971
972
973
  err:
  	/*
  	 * If a patch is unsuccessfully applied, return
  	 * error to the module loader.
  	 */
  	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'
  ",
  		patch->mod->name, obj->mod->name, obj->mod->name);
  	mod->klp_alive = false;
b700e7f03   Seth Jennings   livepatch: kernel...
974
  	klp_free_object_loaded(obj);
7e545d6ec   Jessica Yu   livepatch/module:...
975
976
977
  	mutex_unlock(&klp_mutex);
  
  	return ret;
b700e7f03   Seth Jennings   livepatch: kernel...
978
  }
7e545d6ec   Jessica Yu   livepatch/module:...
979
  void klp_module_going(struct module *mod)
b700e7f03   Seth Jennings   livepatch: kernel...
980
  {
b700e7f03   Seth Jennings   livepatch: kernel...
981
982
  	struct klp_patch *patch;
  	struct klp_object *obj;
7e545d6ec   Jessica Yu   livepatch/module:...
983
984
985
  	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
  		    mod->state != MODULE_STATE_COMING))
  		return;
b700e7f03   Seth Jennings   livepatch: kernel...
986
987
  
  	mutex_lock(&klp_mutex);
8cb2c2dc4   Petr Mladek   livepatch: Fix su...
988
  	/*
7e545d6ec   Jessica Yu   livepatch/module:...
989
990
991
  	 * Each module has to know that klp_module_going()
  	 * has been called. We never know what module will
  	 * get patched by a new patch.
8cb2c2dc4   Petr Mladek   livepatch: Fix su...
992
  	 */
7e545d6ec   Jessica Yu   livepatch/module:...
993
  	mod->klp_alive = false;
8cb2c2dc4   Petr Mladek   livepatch: Fix su...
994

b700e7f03   Seth Jennings   livepatch: kernel...
995
  	list_for_each_entry(patch, &klp_patches, list) {
8cdd043ab   Jiri Slaby   livepatch: introd...
996
  		klp_for_each_object(patch, obj) {
b700e7f03   Seth Jennings   livepatch: kernel...
997
998
  			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
  				continue;
7e545d6ec   Jessica Yu   livepatch/module:...
999
1000
1001
1002
1003
1004
  			if (patch->state != KLP_DISABLED) {
  				pr_notice("reverting patch '%s' on unloading module '%s'
  ",
  					  patch->mod->name, obj->mod->name);
  				klp_disable_object(obj);
  			}
b700e7f03   Seth Jennings   livepatch: kernel...
1005

7e545d6ec   Jessica Yu   livepatch/module:...
1006
  			klp_free_object_loaded(obj);
b700e7f03   Seth Jennings   livepatch: kernel...
1007
1008
1009
1010
1011
  			break;
  		}
  	}
  
  	mutex_unlock(&klp_mutex);
b700e7f03   Seth Jennings   livepatch: kernel...
1012
  }
26029d88a   Minfei Huang   livepatch: annota...
1013
  static int __init klp_init(void)
b700e7f03   Seth Jennings   livepatch: kernel...
1014
1015
  {
  	int ret;
b9dfe0bed   Jiri Kosina   livepatch: handle...
1016
1017
1018
1019
1020
1021
  	ret = klp_check_compiler_support();
  	if (ret) {
  		pr_info("Your compiler is too old; turning off.
  ");
  		return -EINVAL;
  	}
b700e7f03   Seth Jennings   livepatch: kernel...
1022
  	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
7e545d6ec   Jessica Yu   livepatch/module:...
1023
1024
  	if (!klp_root_kobj)
  		return -ENOMEM;
b700e7f03   Seth Jennings   livepatch: kernel...
1025
1026
  
  	return 0;
b700e7f03   Seth Jennings   livepatch: kernel...
1027
1028
1029
  }
  
  module_init(klp_init);