Blame view

net/bpf/test_run.c 10.5 KB
25763b3c8   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-only
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
2
  /* Copyright (c) 2017 Facebook
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
3
4
5
6
7
8
9
   */
  #include <linux/bpf.h>
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
  #include <linux/etherdevice.h>
  #include <linux/filter.h>
  #include <linux/sched/signal.h>
6ac99e8f2   Martin KaFai Lau   bpf: Introduce bp...
10
  #include <net/bpf_sk_storage.h>
2cb494a36   Song Liu   bpf: add tests fo...
11
12
  #include <net/sock.h>
  #include <net/tcp.h>
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
13

e950e8433   Matt Mullins   selftests: bpf: t...
14
15
  #define CREATE_TRACE_POINTS
  #include <trace/events/bpf_test_run.h>
df1a2cb7c   Stanislav Fomichev   bpf/test_run: fix...
16
17
/* Run @prog against @ctx @repeat times (at least once) with cgroup local
 * storage set up for the duration of the run.
 *
 * On success, *@retval holds the program's return value from the last
 * iteration and *@time the mean per-iteration duration in nanoseconds
 * (clamped to U32_MAX).  Time spent rescheduled in cond_resched() is
 * excluded from the measurement.
 *
 * Returns 0 on success, -ENOMEM if storage allocation fails, or -EINTR
 * if a signal arrived mid-loop.
 */
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			/* Reset the failed slot, then free every slot; the
			 * array started out all-NULL, so unallocated slots
			 * pass NULL to bpf_cgroup_storage_free() — assumed
			 * to be a no-op for NULL (TODO confirm).
			 */
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	/* The program must run with preemption disabled and under RCU. */
	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			/* Pause the clock, drop the locks and yield, then
			 * restart the measurement window.
			 */
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	/* Mean duration per iteration, saturated to u32. */
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
78e522723   David Miller   bpf: Do not deref...
70
71
  static int bpf_test_finish(const union bpf_attr *kattr,
  			   union bpf_attr __user *uattr, const void *data,
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
72
73
  			   u32 size, u32 retval, u32 duration)
  {
78e522723   David Miller   bpf: Do not deref...
74
  	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
75
  	int err = -EFAULT;
b5a36b1e1   Lorenz Bauer   bpf: respect size...
76
  	u32 copy_size = size;
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
77

b5a36b1e1   Lorenz Bauer   bpf: respect size...
78
79
80
81
82
83
84
85
86
87
  	/* Clamp copy if the user has provided a size hint, but copy the full
  	 * buffer if not to retain old behaviour.
  	 */
  	if (kattr->test.data_size_out &&
  	    copy_size > kattr->test.data_size_out) {
  		copy_size = kattr->test.data_size_out;
  		err = -ENOSPC;
  	}
  
  	if (data_out && copy_to_user(data_out, data, copy_size))
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
88
89
90
91
92
93
94
  		goto out;
  	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
  		goto out;
  	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
  		goto out;
  	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
  		goto out;
b5a36b1e1   Lorenz Bauer   bpf: respect size...
95
96
  	if (err != -ENOSPC)
  		err = 0;
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
97
  out:
e950e8433   Matt Mullins   selftests: bpf: t...
98
  	trace_bpf_test_finish(&err);
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
  	return err;
  }
  
  static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
  			   u32 headroom, u32 tailroom)
  {
  	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
  	void *data;
  
  	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
  		return ERR_PTR(-EINVAL);
  
  	data = kzalloc(size + headroom + tailroom, GFP_USER);
  	if (!data)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(data + headroom, data_in, size)) {
  		kfree(data);
  		return ERR_PTR(-EFAULT);
  	}
  	return data;
  }
b0b9395d8   Stanislav Fomichev   bpf: support inpu...
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
  static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
  {
  	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
  	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
  	u32 size = kattr->test.ctx_size_in;
  	void *data;
  	int err;
  
  	if (!data_in && !data_out)
  		return NULL;
  
  	data = kzalloc(max_size, GFP_USER);
  	if (!data)
  		return ERR_PTR(-ENOMEM);
  
  	if (data_in) {
  		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
  		if (err) {
  			kfree(data);
  			return ERR_PTR(err);
  		}
  
  		size = min_t(u32, max_size, size);
  		if (copy_from_user(data, data_in, size)) {
  			kfree(data);
  			return ERR_PTR(-EFAULT);
  		}
  	}
  	return data;
  }
  
  static int bpf_ctx_finish(const union bpf_attr *kattr,
  			  union bpf_attr __user *uattr, const void *data,
  			  u32 size)
  {
  	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
  	int err = -EFAULT;
  	u32 copy_size = size;
  
  	if (!data || !data_out)
  		return 0;
  
  	if (copy_size > kattr->test.ctx_size_out) {
  		copy_size = kattr->test.ctx_size_out;
  		err = -ENOSPC;
  	}
  
  	if (copy_to_user(data_out, data, copy_size))
  		goto out;
  	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
  		goto out;
  	if (err != -ENOSPC)
  		err = 0;
  out:
  	return err;
  }
  
/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if the whole range [from, to) of @buf is
 * zero, i.e. contains no non-zero byte.  (memchr_inv() returns NULL
 * when every byte matches, so the negation means "all zero".)
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
  
  static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
  {
  	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
  
  	if (!__skb)
  		return 0;
  
  	/* make sure the fields we don't use are zeroed */
  	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
  		return -EINVAL;
  
  	/* priority is allowed */
  
  	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
  			   FIELD_SIZEOF(struct __sk_buff, priority),
  			   offsetof(struct __sk_buff, cb)))
  		return -EINVAL;
  
  	/* cb is allowed */
  
  	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
  			   FIELD_SIZEOF(struct __sk_buff, cb),
  			   sizeof(struct __sk_buff)))
  		return -EINVAL;
  
  	skb->priority = __skb->priority;
  	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
  
  	return 0;
  }
  
  static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
  {
  	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
  
  	if (!__skb)
  		return;
  
  	__skb->priority = skb->priority;
  	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
  }
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
233
234
235
236
237
238
/* BPF_PROG_TEST_RUN handler for skb-based program types.  Builds a real
 * skb (with a dummy socket) around the user-supplied packet, runs the
 * program, and copies the resulting packet, context, return value and
 * duration back to userspace.
 */
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	/* Reserve headroom and room for the shared info so build_skb()
	 * can use this buffer directly.
	 */
	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	/* Dummy socket in the caller's netns so sk-based helpers work. */
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	/* From here on the skb owns @data; error paths use kfree_skb(). */
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	/* L2 programs see the MAC header; eth_type_trans() pulled it. */
	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (!is_l2) {
		/* Re-add an (all-zero) L2 header so the output always
		 * starts at the MAC header; grow headroom if the program
		 * consumed it.
		 */
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}
  
  int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
  			  union bpf_attr __user *uattr)
  {
  	u32 size = kattr->test.data_size_in;
  	u32 repeat = kattr->test.repeat;
65073a673   Daniel Borkmann   bpf: fix null poi...
334
  	struct netdev_rx_queue *rxqueue;
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
335
336
337
338
  	struct xdp_buff xdp = {};
  	u32 retval, duration;
  	void *data;
  	int ret;
947e8b595   Stanislav Fomichev   bpf: explicitly p...
339
340
  	if (kattr->test.ctx_in || kattr->test.ctx_out)
  		return -EINVAL;
586f85259   David Miller   bpf: Align packet...
341
  	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
342
343
344
345
  	if (IS_ERR(data))
  		return PTR_ERR(data);
  
  	xdp.data_hard_start = data;
586f85259   David Miller   bpf: Align packet...
346
  	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
de8f3a83b   Daniel Borkmann   bpf: add meta poi...
347
  	xdp.data_meta = xdp.data;
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
348
  	xdp.data_end = xdp.data + size;
65073a673   Daniel Borkmann   bpf: fix null poi...
349
350
  	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
  	xdp.rxq = &rxqueue->xdp_rxq;
dcb40590e   Roman Gushchin   bpf: refactor bpf...
351
352
353
  	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
  	if (ret)
  		goto out;
587b80cce   Nikita V. Shirokov   bpf: making bpf_p...
354
355
  	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
  	    xdp.data_end != xdp.data + size)
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
356
  		size = xdp.data_end - xdp.data;
78e522723   David Miller   bpf: Do not deref...
357
  	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
dcb40590e   Roman Gushchin   bpf: refactor bpf...
358
  out:
1cf1cae96   Alexei Starovoitov   bpf: introduce BP...
359
360
361
  	kfree(data);
  	return ret;
  }
b7a1848e8   Stanislav Fomichev   bpf: add BPF_PROG...
362
363
364
365
366
367
  
/* BPF_PROG_TEST_RUN handler for flow-dissector programs.  Runs
 * bpf_flow_dissect() on the user packet @repeat times and reports the
 * resulting flow_keys, the last return value, and the mean duration.
 * The timing/resched loop mirrors bpf_test_run().
 */
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	/* Flow-dissector test runs take no input/output context. */
	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* Need at least an Ethernet header to read h_proto below. */
	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	/* flow_keys is written by each bpf_flow_dissect() call —
	 * presumably initialized inside it; TODO confirm.
	 */
	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, 0);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			/* Pause the clock, yield, then restart timing. */
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	/* Mean duration per iteration, saturated to u32. */
	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
out:
	kfree(data);
	return ret;
}