kernel/kcsan/core.c

  // SPDX-License-Identifier: GPL-2.0
  #define pr_fmt(fmt) "kcsan: " fmt
  #include <linux/atomic.h>
  #include <linux/bug.h>
  #include <linux/delay.h>
  #include <linux/export.h>
  #include <linux/init.h>
  #include <linux/kernel.h>
  #include <linux/list.h>
  #include <linux/moduleparam.h>
  #include <linux/percpu.h>
  #include <linux/preempt.h>
  #include <linux/random.h>
  #include <linux/sched.h>
  #include <linux/uaccess.h>
  
  #include "atomic.h"
  #include "encoding.h"
  #include "kcsan.h"
  static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
  unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
  unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
  static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
  static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
  
  #ifdef MODULE_PARAM_PREFIX
  #undef MODULE_PARAM_PREFIX
  #endif
  #define MODULE_PARAM_PREFIX "kcsan."
  module_param_named(early_enable, kcsan_early_enable, bool, 0);
  module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
  module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
  module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
  module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

  bool kcsan_enabled;
  
  /* Per-CPU kcsan_ctx for interrupts */
  static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
  	.disable_count		= 0,
  	.atomic_next		= 0,
  	.atomic_nest_count	= 0,
  	.in_flat_atomic		= false,
  	.access_mask		= 0,
  	.scoped_accesses	= {LIST_POISON1, NULL},
  };
  
  /*
   * Helper macros to index into adjacent slots, starting from address slot
   * itself, followed by the right and left slots.
   *
   * The purpose is 2-fold:
   *
   *	1. if during insertion the address slot is already occupied, check if
   *	   any adjacent slots are free;
   *	2. accesses that straddle a slot boundary due to size that exceeds a
   *	   slot's range may check adjacent slots if any watchpoint matches.
   *
   * Note that accesses with very large size may still miss a watchpoint; however,
   * given this should be rare, this is a reasonable trade-off to make, since this
   * will avoid:
   *
   *	1. excessive contention between watchpoint checks and setup;
   *	2. larger number of simultaneous watchpoints without sacrificing
   *	   performance.
   *
   * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
   *
   *   slot=0:  [ 1,  2,  0]
   *   slot=9:  [10, 11,  9]
   *   slot=63: [64, 65, 63]
   */
  #define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
  
  /*
   * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
   * slot (middle) is fine if we assume that races occur rarely. The set of
   * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
   * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
   */
  #define SLOT_IDX_FAST(slot, i) (slot + i)
  
  /*
   * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
   * able to safely update and access a watchpoint without introducing locking
   * overhead, we encode each watchpoint as a single atomic long. The initial
   * zero-initialized state matches INVALID_WATCHPOINT.
   *
   * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
   * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
   */
  static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
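
/*
 * Illustrative sizing example (added note, not from the original source): with
 * KCSAN_CHECK_ADJACENT == 1, NUM_SLOTS is 3, so a hypothetical
 * CONFIG_KCSAN_NUM_WATCHPOINTS of 64 yields 66 array entries; the highest
 * fast-path index, SLOT_IDX_FAST(63, 2) == 65, then stays in bounds without
 * needing a modulo.
 */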
  
  /*
   * Instructions to skip watching counter, used in should_watch(). We use a
   * per-CPU counter to avoid excessive contention.
   */
  static DEFINE_PER_CPU(long, kcsan_skip);
  /* For kcsan_prandom_u32_max(). */
  static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
  static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
  						      size_t size,
  						      bool expect_write,
  						      long *encoded_watchpoint)
  {
  	const int slot = watchpoint_slot(addr);
  	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
  	atomic_long_t *watchpoint;
  	unsigned long wp_addr_masked;
  	size_t wp_size;
  	bool is_write;
  	int i;
  
  	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);
  
  	for (i = 0; i < NUM_SLOTS; ++i) {
  		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
  		*encoded_watchpoint = atomic_long_read(watchpoint);
  		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
  				       &wp_size, &is_write))
  			continue;
  
  		if (expect_write && !is_write)
  			continue;
  
  		/* Check if the watchpoint matches the access. */
  		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
  			return watchpoint;
  	}
  
  	return NULL;
  }
  static inline atomic_long_t *
  insert_watchpoint(unsigned long addr, size_t size, bool is_write)
  {
  	const int slot = watchpoint_slot(addr);
  	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
  	atomic_long_t *watchpoint;
  	int i;
  
  	/* Check slot index logic, ensuring we stay within array bounds. */
  	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
  	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
  	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
  	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);
  
  	for (i = 0; i < NUM_SLOTS; ++i) {
  		long expect_val = INVALID_WATCHPOINT;
  
  		/* Try to acquire this slot. */
  		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
  		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
  			return watchpoint;
  	}
  
  	return NULL;
  }
  
  /*
   * Return true if watchpoint was successfully consumed, false otherwise.
   *
   * This may return false if:
   *
   *	1. another thread already consumed the watchpoint;
   *	2. the thread that set up the watchpoint already removed it;
   *	3. the watchpoint was removed and then re-used.
   */
  static __always_inline bool
  try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
  {
  	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
  }
  /* Return true if watchpoint was not touched, false if already consumed. */
  static inline bool consume_watchpoint(atomic_long_t *watchpoint)
  {
  	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
  }
  
  /* Remove the watchpoint -- its slot may be reused after. */
  static inline void remove_watchpoint(atomic_long_t *watchpoint)
  {
  	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
  }
  static __always_inline struct kcsan_ctx *get_ctx(void)
  {
  	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
  	 * also result in calls that generate warnings in uaccess regions.
  	 */
  	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
  }
  /* Check scoped accesses; never inline because this is a slow-path! */
  static noinline void kcsan_check_scoped_accesses(void)
  {
  	struct kcsan_ctx *ctx = get_ctx();
  	struct list_head *prev_save = ctx->scoped_accesses.prev;
  	struct kcsan_scoped_access *scoped_access;
  
  	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
  	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
  		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
  	ctx->scoped_accesses.prev = prev_save;
  }
  /* Rules for generic atomic accesses. Called from fast-path. */
  static __always_inline bool
  is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
  {
  	if (type & KCSAN_ACCESS_ATOMIC)
  		return true;

  	/*
  	 * Unless explicitly declared atomic, never consider an assertion access
  	 * as atomic. This allows using them also in atomic regions, such as
  	 * seqlocks, without implicitly changing their semantics.
  	 */
  	if (type & KCSAN_ACCESS_ASSERT)
  		return false;
  	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
  	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
  	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
  		return true; /* Assume aligned writes up to word size are atomic. */
  	if (ctx->atomic_next > 0) {
  		/*
  		 * Because we do not have separate contexts for nested
  		 * interrupts, in case atomic_next is set, we simply assume that
  		 * the outer interrupt set atomic_next. In the worst case, we
  		 * will conservatively consider operations as atomic. This is a
  		 * reasonable trade-off to make, since this case should be
  		 * extremely rare; however, even if extremely rare, it could
  		 * lead to false positives otherwise.
  		 */
  		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
  			--ctx->atomic_next; /* in task, or outer interrupt */
  		return true;
  	}

  	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
  }
  static __always_inline bool
  should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
  {
  	/*
  	 * Never set up watchpoints when memory operations are atomic.
  	 *
  	 * Need to check this first, before kcsan_skip check below: (1) atomics
  	 * should not count towards skipped instructions, and (2) to actually
  	 * decrement kcsan_atomic_next for consecutive instruction stream.
  	 */
  	if (is_atomic(ptr, size, type, ctx))
  		return false;
  
  	if (this_cpu_dec_return(kcsan_skip) >= 0)
  		return false;
  
  	/*
  	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
  	 * via reset_kcsan_skip() to avoid underflow.
  	 */
  
  	/* this operation should be watched */
  	return true;
  }
  /*
   * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
   * for more details.
   *
   * The open-coded version here is using only safe primitives for all contexts
   * where we can have KCSAN instrumentation. In particular, we cannot use
   * prandom_u32() directly, as its tracepoint could cause recursion.
   */
  static u32 kcsan_prandom_u32_max(u32 ep_ro)
  {
  	struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
  	const u32 res = prandom_u32_state(state);
  
  	put_cpu_var(kcsan_rand_state);
  	return (u32)(((u64) res * ep_ro) >> 32);
  }
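
/*
 * Added note (sketch of the multiply-shift mapping above): for a 32-bit
 * pseudo-random res, the product (u64)res * ep_ro lies in [0, ep_ro << 32),
 * so keeping only the top 32 bits via ">> 32" yields a value in [0, ep_ro)
 * without a division, at the cost of a slight bias whenever ep_ro does not
 * divide 2^32.
 */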
  static inline void reset_kcsan_skip(void)
  {
  	long skip_count = kcsan_skip_watch -
  			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
  				   kcsan_prandom_u32_max(kcsan_skip_watch) :
  				   0);
  	this_cpu_write(kcsan_skip, skip_count);
  }
  static __always_inline bool kcsan_is_enabled(void)
  {
  	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
  }
  /* Introduce delay depending on context and configuration. */
  static void delay_access(int type)
  {
  	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
  	/* For certain access types, skew the random delay to be longer. */
  	unsigned int skew_delay_order =
  		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;
  	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
  			       kcsan_prandom_u32_max(delay >> skew_delay_order) :
  			       0;
  	udelay(delay);
  }
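
/*
 * Worked example with hypothetical values (added note, not from the original
 * source): for kcsan_udelay_task == 80 and a compound or ASSERT access
 * (skew_delay_order == 1) with CONFIG_KCSAN_DELAY_RANDOMIZE, the subtracted
 * amount is drawn from [0, 40), so the delay stays in (40, 80] microseconds,
 * i.e. skewed towards longer delays than plain accesses, which may lose up to
 * the full 80.
 */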
  void kcsan_save_irqtrace(struct task_struct *task)
  {
  #ifdef CONFIG_TRACE_IRQFLAGS
  	task->kcsan_save_irqtrace = task->irqtrace;
  #endif
  }
  
  void kcsan_restore_irqtrace(struct task_struct *task)
  {
  #ifdef CONFIG_TRACE_IRQFLAGS
  	task->irqtrace = task->kcsan_save_irqtrace;
  #endif
  }
  /*
   * Pull everything together: check_access() below contains the performance
   * critical operations; the fast-path (including check_access) functions should
   * all be inlinable by the instrumentation functions.
   *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
   * be filtered from the stacktrace, as well as give them unique names for the
   * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
   * since they do not access any user memory, but instrumentation is still
   * emitted in UACCESS regions.
   */
  
  static noinline void kcsan_found_watchpoint(const volatile void *ptr,
  					    size_t size,
  					    int type,
  					    atomic_long_t *watchpoint,
  					    long encoded_watchpoint)
  {
  	unsigned long flags;
  	bool consumed;
  
  	if (!kcsan_is_enabled())
  		return;
  
  	/*
  	 * The access_mask check relies on value-change comparison. To avoid
  	 * reporting a race where e.g. the writer set up the watchpoint, but the
  	 * reader has access_mask!=0, we have to ignore the found watchpoint.
  	 */
  	if (get_ctx()->access_mask != 0)
  		return;
  	/*
  	 * Consume the watchpoint as soon as possible, to minimize the chances
  	 * of !consumed. Consuming the watchpoint must always be guarded by
	 * kcsan_is_enabled() check, as otherwise we might erroneously
	 * trigger reports when disabled.
  	 */
  	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);
  
  	/* keep this after try_consume_watchpoint */
  	flags = user_access_save();
  
  	if (consumed) {
  		kcsan_save_irqtrace(current);
  		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
  			     KCSAN_REPORT_CONSUMED_WATCHPOINT,
  			     watchpoint - watchpoints);
  		kcsan_restore_irqtrace(current);
  	} else {
  		/*
  		 * The other thread may not print any diagnostics, as it has
  		 * already removed the watchpoint, or another thread consumed
  		 * the watchpoint before this thread.
  		 */
  		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
  	}
  
  	if ((type & KCSAN_ACCESS_ASSERT) != 0)
  		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
  	else
  		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);
  
  	user_access_restore(flags);
  }
  static noinline void
  kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
  {
  	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
  	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
  	atomic_long_t *watchpoint;
  	union {
  		u8 _1;
  		u16 _2;
  		u32 _4;
  		u64 _8;
  	} expect_value;
  	unsigned long access_mask;
  	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
  	unsigned long ua_flags = user_access_save();
  	unsigned long irq_flags = 0;
  
  	/*
  	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
  	 * should_watch().
  	 */
  	reset_kcsan_skip();
  
  	if (!kcsan_is_enabled())
  		goto out;
  	/*
  	 * Special atomic rules: unlikely to be true, so we check them here in
  	 * the slow-path, and not in the fast-path in is_atomic(). Call after
  	 * kcsan_is_enabled(), as we may access memory that is not yet
  	 * initialized during early boot.
  	 */
  	if (!is_assert && kcsan_is_atomic_special(ptr))
  		goto out;
  	if (!check_encodable((unsigned long)ptr, size)) {
  		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
  		goto out;
  	}
  	/*
  	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
  	 * runtime is entered for every memory access, and potentially useful
  	 * information is lost if dirtied by KCSAN.
  	 */
  	kcsan_save_irqtrace(current);
  	if (!kcsan_interrupt_watcher)
  		local_irq_save(irq_flags);
  
  	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
  	if (watchpoint == NULL) {
  		/*
  		 * Out of capacity: the size of 'watchpoints', and the frequency
  		 * with which should_watch() returns true should be tweaked so
  		 * that this case happens very rarely.
  		 */
  		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
  		goto out_unlock;
  	}
  	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
  	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
  
  	/*
  	 * Read the current value, to later check and infer a race if the data
  	 * was modified via a non-instrumented access, e.g. from a device.
  	 */
  	expect_value._8 = 0;
  	switch (size) {
  	case 1:
  		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
  		break;
  	case 2:
  		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
  		break;
  	case 4:
  		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
  		break;
  	case 8:
  		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
  		break;
  	default:
  		break; /* ignore; we do not diff the values */
  	}
  
  	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
  		kcsan_disable_current();
  		pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]
  ",
  		       is_write ? "write" : "read", size, ptr,
  		       watchpoint_slot((unsigned long)ptr),
  		       encode_watchpoint((unsigned long)ptr, size, is_write));
  		kcsan_enable_current();
  	}
  
  	/*
  	 * Delay this thread, to increase probability of observing a racy
  	 * conflicting access.
  	 */
  	delay_access(type);
  
  	/*
  	 * Re-read value, and check if it is as expected; if not, we infer a
  	 * racy access.
  	 */
  	access_mask = get_ctx()->access_mask;
  	switch (size) {
  	case 1:
  		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
  		if (access_mask)
  			expect_value._1 &= (u8)access_mask;
  		break;
  	case 2:
  		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
  		if (access_mask)
  			expect_value._2 &= (u16)access_mask;
  		break;
  	case 4:
  		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
  		if (access_mask)
  			expect_value._4 &= (u32)access_mask;
  		break;
  	case 8:
  		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
  		if (access_mask)
  			expect_value._8 &= (u64)access_mask;
  		break;
  	default:
  		break; /* ignore; we do not diff the values */
  	}
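	/*
	 * Added note: at this point expect_value holds (old XOR new), masked by
	 * access_mask if one was set, so any non-zero bit below indicates that
	 * the watched bits changed while the watchpoint was armed.
	 */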
  	/* Were we able to observe a value-change? */
  	if (expect_value._8 != 0)
  		value_change = KCSAN_VALUE_CHANGE_TRUE;
  	/* Check if this access raced with another. */
  	if (!consume_watchpoint(watchpoint)) {
  		/*
  		 * Depending on the access type, map a value_change of MAYBE to
  		 * TRUE (always report) or FALSE (never report).
  		 */
  		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
  			if (access_mask != 0) {
  				/*
  				 * For access with access_mask, we require a
  				 * value-change, as it is likely that races on
  				 * ~access_mask bits are expected.
  				 */
  				value_change = KCSAN_VALUE_CHANGE_FALSE;
  			} else if (size > 8 || is_assert) {
  				/* Always assume a value-change. */
  				value_change = KCSAN_VALUE_CHANGE_TRUE;
  			}
  		}
  
  		/*
  		 * No need to increment 'data_races' counter, as the racing
  		 * thread already did.
  		 *
  		 * Count 'assert_failures' for each failed ASSERT access,
  		 * therefore both this thread and the racing thread may
  		 * increment this counter.
  		 */
  		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
  			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

  		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
  			     watchpoint - watchpoints);
  	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
  		/* Inferring a race, since the value should not have changed. */

  		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
  		if (is_assert)
  			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
  
  		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
  			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
  				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
  				     watchpoint - watchpoints);
  	}
  	/*
  	 * Remove watchpoint; must be after reporting, since the slot may be
  	 * reused after this point.
  	 */
  	remove_watchpoint(watchpoint);
  	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
  out_unlock:
  	if (!kcsan_interrupt_watcher)
  		local_irq_restore(irq_flags);
  	kcsan_restore_irqtrace(current);
  out:
  	user_access_restore(ua_flags);
  }
  
  static __always_inline void check_access(const volatile void *ptr, size_t size,
  					 int type)
  {
  	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
  	atomic_long_t *watchpoint;
  	long encoded_watchpoint;
  
  	/*
  	 * Do nothing for 0 sized check; this comparison will be optimized out
  	 * for constant sized instrumentation (__tsan_{read,write}N).
  	 */
  	if (unlikely(size == 0))
  		return;
  
  	/*
  	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
  	 * user_access_save, as the address that ptr points to is only used to
  	 * check if a watchpoint exists; ptr is never dereferenced.
  	 */
  	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
  				     &encoded_watchpoint);
  	/*
  	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
  	 * slow-path, as long as no state changes that cause a race to be
  	 * detected and reported have occurred until kcsan_is_enabled() is
  	 * checked.
  	 */
  
  	if (unlikely(watchpoint != NULL))
  		kcsan_found_watchpoint(ptr, size, type, watchpoint,
  				       encoded_watchpoint);
  	else {
  		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
  
  		if (unlikely(should_watch(ptr, size, type, ctx)))
  			kcsan_setup_watchpoint(ptr, size, type);
  		else if (unlikely(ctx->scoped_accesses.prev))
  			kcsan_check_scoped_accesses();
  	}
  }
  
  /* === Public interface ===================================================== */
  
  void __init kcsan_init(void)
  {
  	BUG_ON(!in_task());
  
  	kcsan_debugfs_init();
  	prandom_seed_full_state(&kcsan_rand_state);
  
  	/*
  	 * We are in the init task, and no other tasks should be running;
  	 * WRITE_ONCE without memory barrier is sufficient.
  	 */
  	if (kcsan_early_enable) {
  		pr_info("enabled early
  ");
  		WRITE_ONCE(kcsan_enabled, true);
  	}
  }
  
  /* === Exported interface =================================================== */
  
  void kcsan_disable_current(void)
  {
  	++get_ctx()->disable_count;
  }
  EXPORT_SYMBOL(kcsan_disable_current);
  
  void kcsan_enable_current(void)
  {
  	if (get_ctx()->disable_count-- == 0) {
  		/*
  		 * Warn if kcsan_enable_current() calls are unbalanced with
  		 * kcsan_disable_current() calls, which causes disable_count to
  		 * become negative and should not happen.
  		 */
  		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
  		kcsan_disable_current(); /* disable to generate warning */
  		WARN(1, "Unbalanced %s()", __func__);
  		kcsan_enable_current();
  	}
  }
  EXPORT_SYMBOL(kcsan_enable_current);
  void kcsan_enable_current_nowarn(void)
  {
  	if (get_ctx()->disable_count-- == 0)
  		kcsan_disable_current();
  }
  EXPORT_SYMBOL(kcsan_enable_current_nowarn);
  void kcsan_nestable_atomic_begin(void)
  {
  	/*
  	 * Do *not* check and warn if we are in a flat atomic region: nestable
  	 * and flat atomic regions are independent from each other.
  	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
  	 * comments.
  	 */
  
  	++get_ctx()->atomic_nest_count;
  }
  EXPORT_SYMBOL(kcsan_nestable_atomic_begin);
  
  void kcsan_nestable_atomic_end(void)
  {
  	if (get_ctx()->atomic_nest_count-- == 0) {
  		/*
  		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
  		 * kcsan_nestable_atomic_begin() calls, which causes
  		 * atomic_nest_count to become negative and should not happen.
  		 */
  		kcsan_nestable_atomic_begin(); /* restore to 0 */
  		kcsan_disable_current(); /* disable to generate warning */
  		WARN(1, "Unbalanced %s()", __func__);
  		kcsan_enable_current();
  	}
  }
  EXPORT_SYMBOL(kcsan_nestable_atomic_end);
  
  void kcsan_flat_atomic_begin(void)
  {
  	get_ctx()->in_flat_atomic = true;
  }
  EXPORT_SYMBOL(kcsan_flat_atomic_begin);
  
  void kcsan_flat_atomic_end(void)
  {
  	get_ctx()->in_flat_atomic = false;
  }
  EXPORT_SYMBOL(kcsan_flat_atomic_end);
  
  void kcsan_atomic_next(int n)
  {
  	get_ctx()->atomic_next = n;
  }
  EXPORT_SYMBOL(kcsan_atomic_next);
  void kcsan_set_access_mask(unsigned long mask)
  {
  	get_ctx()->access_mask = mask;
  }
  EXPORT_SYMBOL(kcsan_set_access_mask);
  struct kcsan_scoped_access *
  kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
  			  struct kcsan_scoped_access *sa)
  {
  	struct kcsan_ctx *ctx = get_ctx();
  
  	__kcsan_check_access(ptr, size, type);
  
  	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
  
  	INIT_LIST_HEAD(&sa->list);
  	sa->ptr = ptr;
  	sa->size = size;
  	sa->type = type;
  
  	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
  		INIT_LIST_HEAD(&ctx->scoped_accesses);
  	list_add(&sa->list, &ctx->scoped_accesses);
  
  	ctx->disable_count--;
  	return sa;
  }
  EXPORT_SYMBOL(kcsan_begin_scoped_access);
  
  void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
  {
  	struct kcsan_ctx *ctx = get_ctx();
  
  	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
  		return;
  
  	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
  
  	list_del(&sa->list);
  	if (list_empty(&ctx->scoped_accesses))
  		/*
  		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoid requiring list_empty()
  		 * in the fast-path (to avoid a READ_ONCE() and potential
  		 * uaccess warning).
  		 */
  		ctx->scoped_accesses.prev = NULL;
  
  	ctx->disable_count--;
  
  	__kcsan_check_access(sa->ptr, sa->size, sa->type);
  }
  EXPORT_SYMBOL(kcsan_end_scoped_access);
  void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
  {
  	check_access(ptr, size, type);
  }
  EXPORT_SYMBOL(__kcsan_check_access);
  
  /*
   * KCSAN uses the same instrumentation that is emitted by supported compilers
   * for ThreadSanitizer (TSAN).
   *
   * When enabled, the compiler emits instrumentation calls (the functions
   * prefixed with "__tsan" below) for all loads and stores that it generated;
   * inline asm is not instrumented.
   *
   * Note that, not all supported compiler versions distinguish aligned/unaligned
   * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
   * version to the generic version, which can handle both.
   */
  
  #define DEFINE_TSAN_READ_WRITE(size)                                           \
  	void __tsan_read##size(void *ptr);                                     \
  	void __tsan_read##size(void *ptr)                                      \
  	{                                                                      \
  		check_access(ptr, size, 0);                                    \
  	}                                                                      \
  	EXPORT_SYMBOL(__tsan_read##size);                                      \
  	void __tsan_unaligned_read##size(void *ptr)                            \
  		__alias(__tsan_read##size);                                    \
  	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
  	void __tsan_write##size(void *ptr);                                    \
  	void __tsan_write##size(void *ptr)                                     \
  	{                                                                      \
  		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
  	}                                                                      \
  	EXPORT_SYMBOL(__tsan_write##size);                                     \
  	void __tsan_unaligned_write##size(void *ptr)                           \
  		__alias(__tsan_write##size);                                   \
  	EXPORT_SYMBOL(__tsan_unaligned_write##size);                           \
  	void __tsan_read_write##size(void *ptr);                               \
  	void __tsan_read_write##size(void *ptr)                                \
  	{                                                                      \
  		check_access(ptr, size,                                        \
  			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE);      \
  	}                                                                      \
  	EXPORT_SYMBOL(__tsan_read_write##size);                                \
  	void __tsan_unaligned_read_write##size(void *ptr)                      \
  		__alias(__tsan_read_write##size);                              \
  	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)
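
/*
 * Illustrative expansion (added note, for reference only):
 * DEFINE_TSAN_READ_WRITE(1) defines __tsan_read1(), __tsan_write1() and
 * __tsan_read_write1(), each calling check_access() with size 1 and access
 * type 0, KCSAN_ACCESS_WRITE, or KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE
 * respectively, plus the __tsan_unaligned_*1() aliases of the same functions.
 */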
  
  DEFINE_TSAN_READ_WRITE(1);
  DEFINE_TSAN_READ_WRITE(2);
  DEFINE_TSAN_READ_WRITE(4);
  DEFINE_TSAN_READ_WRITE(8);
  DEFINE_TSAN_READ_WRITE(16);
  void __tsan_read_range(void *ptr, size_t size);
  void __tsan_read_range(void *ptr, size_t size)
  {
  	check_access(ptr, size, 0);
  }
  EXPORT_SYMBOL(__tsan_read_range);
  void __tsan_write_range(void *ptr, size_t size);
  void __tsan_write_range(void *ptr, size_t size)
  {
  	check_access(ptr, size, KCSAN_ACCESS_WRITE);
  }
  EXPORT_SYMBOL(__tsan_write_range);
  
  /*
 * Use of explicit volatile is generally disallowed [1]; however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
   * [1] https://lwn.net/Articles/233479/
   *
   * We only consider volatile accesses atomic if they are aligned and would pass
   * the size-check of compiletime_assert_rwonce_type().
   */
  #define DEFINE_TSAN_VOLATILE_READ_WRITE(size)                                  \
  	void __tsan_volatile_read##size(void *ptr);                            \
  	void __tsan_volatile_read##size(void *ptr)                             \
  	{                                                                      \
  		const bool is_atomic = size <= sizeof(long long) &&            \
  				       IS_ALIGNED((unsigned long)ptr, size);   \
  		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
  			return;                                                \
  		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0);  \
  	}                                                                      \
  	EXPORT_SYMBOL(__tsan_volatile_read##size);                             \
  	void __tsan_unaligned_volatile_read##size(void *ptr)                   \
  		__alias(__tsan_volatile_read##size);                           \
  	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);                   \
  	void __tsan_volatile_write##size(void *ptr);                           \
  	void __tsan_volatile_write##size(void *ptr)                            \
  	{                                                                      \
  		const bool is_atomic = size <= sizeof(long long) &&            \
  				       IS_ALIGNED((unsigned long)ptr, size);   \
  		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
  			return;                                                \
  		check_access(ptr, size,                                        \
  			     KCSAN_ACCESS_WRITE |                              \
  				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0));   \
  	}                                                                      \
  	EXPORT_SYMBOL(__tsan_volatile_write##size);                            \
  	void __tsan_unaligned_volatile_write##size(void *ptr)                  \
  		__alias(__tsan_volatile_write##size);                          \
  	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)
  
  DEFINE_TSAN_VOLATILE_READ_WRITE(1);
  DEFINE_TSAN_VOLATILE_READ_WRITE(2);
  DEFINE_TSAN_VOLATILE_READ_WRITE(4);
  DEFINE_TSAN_VOLATILE_READ_WRITE(8);
  DEFINE_TSAN_VOLATILE_READ_WRITE(16);
  
  /*
   * The below are not required by KCSAN, but can still be emitted by the
   * compiler.
   */
  void __tsan_func_entry(void *call_pc);
  void __tsan_func_entry(void *call_pc)
  {
  }
  EXPORT_SYMBOL(__tsan_func_entry);
  void __tsan_func_exit(void);
  void __tsan_func_exit(void)
  {
  }
  EXPORT_SYMBOL(__tsan_func_exit);
  void __tsan_init(void);
  void __tsan_init(void)
  {
  }
  EXPORT_SYMBOL(__tsan_init);
  
  /*
   * Instrumentation for atomic builtins (__atomic_*, __sync_*).
   *
   * Normal kernel code _should not_ be using them directly, but some
   * architectures may implement some or all atomics using the compilers'
   * builtins.
   *
   * Note: If an architecture decides to fully implement atomics using the
   * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
   * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
   * atomic-instrumented) is no longer necessary.
   *
   * TSAN instrumentation replaces atomic accesses with calls to any of the below
   * functions, whose job is to also execute the operation itself.
   */
  
  #define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)                                                        \
  	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);                      \
  	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)                       \
  	{                                                                                          \
  		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
  			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);              \
  		}                                                                                  \
  		return __atomic_load_n(ptr, memorder);                                             \
  	}                                                                                          \
  	EXPORT_SYMBOL(__tsan_atomic##bits##_load);                                                 \
  	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder);                   \
  	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder)                    \
  	{                                                                                          \
  		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
  			check_access(ptr, bits / BITS_PER_BYTE,                                    \
  				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC);                    \
  		}                                                                                  \
  		__atomic_store_n(ptr, v, memorder);                                                \
  	}                                                                                          \
  	EXPORT_SYMBOL(__tsan_atomic##bits##_store)
  
  #define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)                                                   \
  	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder);                 \
  	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder)                  \
  	{                                                                                          \
  		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
  			check_access(ptr, bits / BITS_PER_BYTE,                                    \
  				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
  					     KCSAN_ACCESS_ATOMIC);                                 \
  		}                                                                                  \
  		return __atomic_##op##suffix(ptr, v, memorder);                                    \
  	}                                                                                          \
  	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
  
  /*
   * Note: CAS operations are always classified as write, even in case they
   * fail. We cannot perform check_access() after a write, as it might lead to
   * false positives, in cases such as:
   *
   *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
   *
   *	T1: if (__atomic_load_n(&p->flag, ...)) {
   *		modify *p;
   *		p->flag = 0;
   *	    }
   *
   * The only downside is that, if there are 3 threads, with one CAS that
   * succeeds, another CAS that fails, and an unmarked racing operation, we may
   * point at the wrong CAS as the source of the race. However, if we assume that
   * all CAS can succeed in some other execution, the data race is still valid.
   */
  #define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)                                           \
  	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp,          \
  							      u##bits val, int mo, int fail_mo);   \
  	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp,          \
  							      u##bits val, int mo, int fail_mo)    \
  	{                                                                                          \
  		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
  			check_access(ptr, bits / BITS_PER_BYTE,                                    \
  				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
  					     KCSAN_ACCESS_ATOMIC);                                 \
  		}                                                                                  \
  		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo);              \
  	}                                                                                          \
  	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)
  
  #define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)                                                       \
  	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
  							   int mo, int fail_mo);                   \
  	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
  							   int mo, int fail_mo)                    \
  	{                                                                                          \
  		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
  			check_access(ptr, bits / BITS_PER_BYTE,                                    \
  				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
  					     KCSAN_ACCESS_ATOMIC);                                 \
  		}                                                                                  \
  		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);                       \
  		return exp;                                                                        \
  	}                                                                                          \
  	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)
  
  #define DEFINE_TSAN_ATOMIC_OPS(bits)                                                               \
  	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);                                                       \
  	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);                                                \
  	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );                                                 \
  	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );                                                 \
  	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );                                                 \
  	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );                                                  \
  	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );                                                 \
  	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );                                                \
  	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);                                               \
  	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);                                                 \
  	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)
  
  DEFINE_TSAN_ATOMIC_OPS(8);
  DEFINE_TSAN_ATOMIC_OPS(16);
  DEFINE_TSAN_ATOMIC_OPS(32);
  DEFINE_TSAN_ATOMIC_OPS(64);
  
  void __tsan_atomic_thread_fence(int memorder);
  void __tsan_atomic_thread_fence(int memorder)
  {
  	__atomic_thread_fence(memorder);
  }
  EXPORT_SYMBOL(__tsan_atomic_thread_fence);
  
  void __tsan_atomic_signal_fence(int memorder);
  void __tsan_atomic_signal_fence(int memorder) { }
  EXPORT_SYMBOL(__tsan_atomic_signal_fence);