kernel/scs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */
#include <linux/cpuhotplug.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>

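/*
 * Account the shadow stack's memory in the NR_KERNEL_SCS_KB node
 * statistic; @account is +1 when a stack is allocated and -1 when it
 * is freed.
 */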
static void __scs_account(void *s, int account)
{
	struct page *scs_page = vmalloc_to_page(s);

	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / SZ_1K));
}

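/*
 * A small per-CPU cache of recently freed shadow stacks, so that the
 * allocation and free paths do not always have to go through vmalloc.
 */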
/* Matches NR_CACHED_STACKS for VMAP_STACK */
#define NR_CACHED_SCS 2
static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);

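/*
 * Reuse a shadow stack from this CPU's cache if one is available,
 * otherwise allocate a fresh one from the vmalloc area.
 */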
static void *__scs_alloc(int node)
{
	int i;
	void *s;

	for (i = 0; i < NR_CACHED_SCS; i++) {
		s = this_cpu_xchg(scs_cache[i], NULL);
		if (s) {
			kasan_unpoison_vmalloc(s, SCS_SIZE);
			memset(s, 0, SCS_SIZE);
			return s;
		}
	}

	return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
				    GFP_SCS, PAGE_KERNEL, 0, node,
				    __builtin_return_address(0));
}

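/*
 * Allocate a shadow stack and write SCS_END_MAGIC via __scs_magic() so
 * that scs_release() can later detect corruption of the stack end.
 */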
void *scs_alloc(int node)
{
	void *s;

	s = __scs_alloc(node);
	if (!s)
		return NULL;

	*__scs_magic(s) = SCS_END_MAGIC;

	/*
	 * Poison the allocation to catch unintentional accesses to
	 * the shadow stack when KASAN is enabled.
	 */
	kasan_poison_vmalloc(s, SCS_SIZE);
	__scs_account(s, 1);
	return s;
}

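/*
 * Free a shadow stack: park it in the per-CPU cache if a slot is
 * empty, otherwise release it with vfree_atomic().
 */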
void scs_free(void *s)
{
	int i;

	__scs_account(s, -1);

	/*
	 * We cannot sleep as this can be called in interrupt context,
	 * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
	 * to free the stack.
	 */

	for (i = 0; i < NR_CACHED_SCS; i++)
		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
			return;

	vfree_atomic(s);
}

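/* CPU hotplug teardown callback: drop the shadow stacks cached for @cpu. */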
static int scs_cleanup(unsigned int cpu)
{
	int i;
	void **cache = per_cpu_ptr(scs_cache, cpu);

	for (i = 0; i < NR_CACHED_SCS; i++) {
		vfree(cache[i]);
		cache[i] = NULL;
	}

	return 0;
}

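/* Register the hotplug callback that drains the per-CPU cache. */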
void __init scs_init(void)
{
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
			  scs_cleanup);
}

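/*
 * Allocate a shadow stack for a new task and initialise its base and
 * current shadow stack pointer.
 */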
int scs_prepare(struct task_struct *tsk, int node)
{
	void *s = scs_alloc(node);

	if (!s)
		return -ENOMEM;

	task_scs(tsk) = task_scs_sp(tsk) = s;
	return 0;
}

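/*
 * With CONFIG_DEBUG_STACK_USAGE, track the highest shadow stack usage
 * seen so far and log whenever an exiting task raises that watermark.
 */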
static void scs_check_usage(struct task_struct *tsk)
{
	static unsigned long highest;

	unsigned long *p, prev, curr = highest, used = 0;

	if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
		return;

	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
		if (!READ_ONCE_NOCHECK(*p))
			break;

		used += sizeof(*p);
	}

	while (used > curr) {
		prev = cmpxchg_relaxed(&highest, curr, used);

		if (prev == curr) {
			pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
				tsk->comm, task_pid_nr(tsk), used);
			break;
		}

		curr = prev;
	}
}

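/*
 * Warn if the end marker written by scs_alloc() was overwritten, report
 * the task's shadow stack usage, then free the stack.
 */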
void scs_release(struct task_struct *tsk)
{
	void *s = task_scs(tsk);

	if (!s)
		return;

	WARN(task_scs_end_corrupted(tsk),
	     "corrupted shadow stack detected when freeing task\n");
	scs_check_usage(tsk);
	scs_free(s);
}