net/xdp/xdp_umem.c

// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);
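
/* Unpin the user pages backing the umem and free the page array.
 * The pages are marked dirty, as the kernel may have written packet
 * data into them.
 */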
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}
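
/* Undo the RLIMIT_MEMLOCK accounting done in xdp_umem_account_pages()
 * and drop the reference on the owning user's uid structure.
 */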
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}
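
/* Map and unmap the pinned pages into one contiguous kernel virtual
 * address range, so umem contents can be addressed linearly no matter
 * how the underlying physical pages are scattered.
 */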
static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
	vunmap(umem->addrs);
	umem->addrs = NULL;
}

static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
			     u32 nr_pages)
{
	umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!umem->addrs)
		return -ENOMEM;
	return 0;
}
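
/* Tear down a umem once its last user is gone: release its id back to
 * the IDA, unmap the kernel mapping, unpin and unaccount the pages,
 * and free the structure itself.
 */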
static void xdp_umem_release(struct xdp_umem *umem)
{
	umem->zc = false;
	ida_simple_remove(&umem_ida, umem->id);
	xdp_umem_addr_unmap(umem);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}
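
/* Work-queue callback used when the final put happens in a context
 * that must not release the umem synchronously.
 */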
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}
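
/* Take a reference on the umem. */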
void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}
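
/* Drop a reference on the umem. On the final put, release it either
 * inline or, if @defer_cleanup is true, from a workqueue.
 */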
void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		if (defer_cleanup) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		} else {
			xdp_umem_release(umem);
		}
	}
}
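
/* Pin the user pages backing the umem with long-term references
 * (FOLL_LONGTERM), so they cannot be migrated or reclaimed while the
 * umem is in use.
 */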
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}
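
/* Charge the umem's pages to the owning user's RLIMIT_MEMLOCK budget;
 * processes with CAP_IPC_LOCK are exempt. The cmpxchg loop updates
 * locked_vm atomically so concurrent registrations cannot overshoot
 * the limit.
 */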
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}
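
/* Validate a registration request from user space and initialize the
 * umem from it: check chunk size, alignment and headroom, then
 * account, pin and map the user pages.
 */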
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_rem;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* The memory area has to be page size aligned, for
		 * simplicity. This might change later.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
	if (npgs_rem)
		npgs++;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks && chunks_rem)
		return -EINVAL;

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;

	INIT_LIST_HEAD(&umem->xsk_dma_list);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
	if (err)
		goto out_unpin;

	return 0;

out_unpin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}
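
/* Allocate a umem, give it an id from the IDA, and register the
 * user-supplied memory with it. Returns the umem on success or an
 * ERR_PTR on failure.
 */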
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}
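
/* Usage sketch (illustrative only, not part of this file): user space
 * reaches xdp_umem_create() via the XDP_UMEM_REG setsockopt on an
 * AF_XDP socket. Assuming `buf` is a page-aligned buffer of `size`
 * bytes, a registration looks roughly like:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)buf,
 *		.len = size,
 *		.chunk_size = 2048,	// >= XDP_UMEM_MIN_CHUNK_SIZE,
 *					// <= PAGE_SIZE
 *		.headroom = 0,
 *		.flags = 0,
 *	};
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *
 * On failure, errno maps to one of the -E* values produced by the
 * validation in xdp_umem_reg() above.
 */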