lib/iov_iter.c
  // SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

  #define PIPE_PARANOIA /* for now */
  #define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
  	size_t left;					\
  	size_t wanted = n;				\
  	__p = i->iov;					\
  	__v.iov_len = min(n, __p->iov_len - skip);	\
  	if (likely(__v.iov_len)) {			\
  		__v.iov_base = __p->iov_base + skip;	\
  		left = (STEP);				\
  		__v.iov_len -= left;			\
  		skip += __v.iov_len;			\
  		n -= __v.iov_len;			\
  	} else {					\
  		left = 0;				\
  	}						\
  	while (unlikely(!left && n)) {			\
  		__p++;					\
  		__v.iov_len = min(n, __p->iov_len);	\
  		if (unlikely(!__v.iov_len))		\
  			continue;			\
  		__v.iov_base = __p->iov_base;		\
  		left = (STEP);				\
  		__v.iov_len -= left;			\
  		skip = __v.iov_len;			\
  		n -= __v.iov_len;			\
  	}						\
  	n = wanted - n;					\
  }
  #define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
  	size_t wanted = n;				\
  	__p = i->kvec;					\
  	__v.iov_len = min(n, __p->iov_len - skip);	\
  	if (likely(__v.iov_len)) {			\
  		__v.iov_base = __p->iov_base + skip;	\
  		(void)(STEP);				\
  		skip += __v.iov_len;			\
  		n -= __v.iov_len;			\
  	}						\
  	while (unlikely(n)) {				\
  		__p++;					\
  		__v.iov_len = min(n, __p->iov_len);	\
  		if (unlikely(!__v.iov_len))		\
  			continue;			\
  		__v.iov_base = __p->iov_base;		\
  		(void)(STEP);				\
  		skip = __v.iov_len;			\
  		n -= __v.iov_len;			\
  	}						\
  	n = wanted;					\
  }
  #define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
  	struct bvec_iter __start;			\
  	__start.bi_size = n;				\
  	__start.bi_bvec_done = skip;			\
  	__start.bi_idx = 0;				\
  	for_each_bvec(__v, i->bvec, __bi, __start) {	\
  		if (!__v.bv_len)			\
  			continue;			\
  		(void)(STEP);				\
  	}						\
  }
  #define iterate_all_kinds(i, n, v, I, B, K) {			\
  	if (likely(n)) {					\
  		size_t skip = i->iov_offset;			\
  		if (unlikely(i->type & ITER_BVEC)) {		\
  			struct bio_vec v;			\
  			struct bvec_iter __bi;			\
  			iterate_bvec(i, n, v, __bi, skip, (B))	\
  		} else if (unlikely(i->type & ITER_KVEC)) {	\
  			const struct kvec *kvec;		\
  			struct kvec v;				\
  			iterate_kvec(i, n, v, kvec, skip, (K))	\
  		} else if (unlikely(i->type & ITER_DISCARD)) {	\
  		} else {					\
  			const struct iovec *iov;		\
  			struct iovec v;				\
  			iterate_iovec(i, n, v, iov, skip, (I))	\
  		}						\
  	}							\
  }
  #define iterate_and_advance(i, n, v, I, B, K) {			\
  	if (unlikely(i->count < n))				\
  		n = i->count;					\
  	if (i->count) {						\
  		size_t skip = i->iov_offset;			\
  		if (unlikely(i->type & ITER_BVEC)) {		\
  			const struct bio_vec *bvec = i->bvec;	\
  			struct bio_vec v;			\
  			struct bvec_iter __bi;			\
  			iterate_bvec(i, n, v, __bi, skip, (B))	\
  			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
  			i->nr_segs -= i->bvec - bvec;		\
  			skip = __bi.bi_bvec_done;		\
  		} else if (unlikely(i->type & ITER_KVEC)) {	\
  			const struct kvec *kvec;		\
  			struct kvec v;				\
  			iterate_kvec(i, n, v, kvec, skip, (K))	\
  			if (skip == kvec->iov_len) {		\
  				kvec++;				\
  				skip = 0;			\
  			}					\
  			i->nr_segs -= kvec - i->kvec;		\
  			i->kvec = kvec;				\
  		} else if (unlikely(i->type & ITER_DISCARD)) {	\
  			skip += n;				\
  		} else {					\
  			const struct iovec *iov;		\
  			struct iovec v;				\
  			iterate_iovec(i, n, v, iov, skip, (I))	\
  			if (skip == iov->iov_len) {		\
  				iov++;				\
  				skip = 0;			\
  			}					\
  			i->nr_segs -= iov - i->iov;		\
  			i->iov = iov;				\
  		}						\
  		i->count -= n;					\
  		i->iov_offset = skip;				\
  	}							\
  }
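/*
 * Illustrative note (not part of the original file): iterate_all_kinds()
 * runs one caller-supplied step per contiguous segment, with 'v' bound to
 * the current iovec, bio_vec or kvec; iterate_and_advance() does the same
 * and then consumes what was processed.  A sketch of a caller that only
 * sums segment lengths:
 *
 *	size_t seen = 0;
 *	iterate_all_kinds(i, bytes, v,
 *		({ seen += v.iov_len; 0; }),	// ITER_IOVEC: returns bytes left
 *		seen += v.bv_len,		// ITER_BVEC
 *		seen += v.iov_len		// ITER_KVEC
 *	)
 *
 * The iovec step is an expression whose value is the number of bytes it
 * failed to process (0 here); the bvec and kvec steps are plain statements.
 */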
  static int copyout(void __user *to, const void *from, size_t n)
  {
  	if (should_fail_usercopy())
  		return n;
  	if (access_ok(to, n)) {
  		instrument_copy_to_user(to, from, n);
  		n = raw_copy_to_user(to, from, n);
  	}
  	return n;
  }
  
  static int copyin(void *to, const void __user *from, size_t n)
  {
  	if (should_fail_usercopy())
  		return n;
  	if (access_ok(from, n)) {
  		instrument_copy_from_user(to, from, n);
  		n = raw_copy_from_user(to, from, n);
  	}
  	return n;
  }
  static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
  			 struct iov_iter *i)
  {
  	size_t skip, copy, left, wanted;
  	const struct iovec *iov;
  	char __user *buf;
  	void *kaddr, *from;
  
  	if (unlikely(bytes > i->count))
  		bytes = i->count;
  
  	if (unlikely(!bytes))
  		return 0;
  	might_fault();
  	wanted = bytes;
  	iov = i->iov;
  	skip = i->iov_offset;
  	buf = iov->iov_base + skip;
  	copy = min(bytes, iov->iov_len - skip);
  	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
  		kaddr = kmap_atomic(page);
  		from = kaddr + offset;
  
  		/* first chunk, usually the only one */
  		left = copyout(buf, from, copy);
  		copy -= left;
  		skip += copy;
  		from += copy;
  		bytes -= copy;
  
  		while (unlikely(!left && bytes)) {
  			iov++;
  			buf = iov->iov_base;
  			copy = min(bytes, iov->iov_len);
  			left = copyout(buf, from, copy);
  			copy -= left;
  			skip = copy;
  			from += copy;
  			bytes -= copy;
  		}
  		if (likely(!bytes)) {
  			kunmap_atomic(kaddr);
  			goto done;
  		}
  		offset = from - kaddr;
  		buf += copy;
  		kunmap_atomic(kaddr);
  		copy = min(bytes, iov->iov_len - skip);
  	}
  	/* Too bad - revert to non-atomic kmap */

  	kaddr = kmap(page);
  	from = kaddr + offset;
  	left = copyout(buf, from, copy);
  	copy -= left;
  	skip += copy;
  	from += copy;
  	bytes -= copy;
  	while (unlikely(!left && bytes)) {
  		iov++;
  		buf = iov->iov_base;
  		copy = min(bytes, iov->iov_len);
  		left = copyout(buf, from, copy);
  		copy -= left;
  		skip = copy;
  		from += copy;
  		bytes -= copy;
  	}
  	kunmap(page);

  done:
  	if (skip == iov->iov_len) {
  		iov++;
  		skip = 0;
  	}
  	i->count -= wanted - bytes;
  	i->nr_segs -= iov - i->iov;
  	i->iov = iov;
  	i->iov_offset = skip;
  	return wanted - bytes;
  }

  static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
  			 struct iov_iter *i)
  {
  	size_t skip, copy, left, wanted;
  	const struct iovec *iov;
  	char __user *buf;
  	void *kaddr, *to;
  
  	if (unlikely(bytes > i->count))
  		bytes = i->count;
  
  	if (unlikely(!bytes))
  		return 0;
  	might_fault();
  	wanted = bytes;
  	iov = i->iov;
  	skip = i->iov_offset;
  	buf = iov->iov_base + skip;
  	copy = min(bytes, iov->iov_len - skip);
  	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
  		kaddr = kmap_atomic(page);
  		to = kaddr + offset;
  
  		/* first chunk, usually the only one */
  		left = copyin(to, buf, copy);
  		copy -= left;
  		skip += copy;
  		to += copy;
  		bytes -= copy;
  
  		while (unlikely(!left && bytes)) {
  			iov++;
  			buf = iov->iov_base;
  			copy = min(bytes, iov->iov_len);
  			left = copyin(to, buf, copy);
  			copy -= left;
  			skip = copy;
  			to += copy;
  			bytes -= copy;
  		}
  		if (likely(!bytes)) {
  			kunmap_atomic(kaddr);
  			goto done;
  		}
  		offset = to - kaddr;
  		buf += copy;
  		kunmap_atomic(kaddr);
  		copy = min(bytes, iov->iov_len - skip);
  	}
  	/* Too bad - revert to non-atomic kmap */

  	kaddr = kmap(page);
  	to = kaddr + offset;
  	left = copyin(to, buf, copy);
  	copy -= left;
  	skip += copy;
  	to += copy;
  	bytes -= copy;
  	while (unlikely(!left && bytes)) {
  		iov++;
  		buf = iov->iov_base;
  		copy = min(bytes, iov->iov_len);
  		left = copyin(to, buf, copy);
  		copy -= left;
  		skip = copy;
  		to += copy;
  		bytes -= copy;
  	}
  	kunmap(page);

  done:
  	if (skip == iov->iov_len) {
  		iov++;
  		skip = 0;
  	}
  	i->count -= wanted - bytes;
  	i->nr_segs -= iov - i->iov;
  	i->iov = iov;
  	i->iov_offset = skip;
  	return wanted - bytes;
  }

  #ifdef PIPE_PARANOIA
  static bool sanity(const struct iov_iter *i)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	unsigned int p_head = pipe->head;
  	unsigned int p_tail = pipe->tail;
  	unsigned int p_mask = pipe->ring_size - 1;
  	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
  	unsigned int i_head = i->head;
  	unsigned int idx;
  	if (i->iov_offset) {
  		struct pipe_buffer *p;
  		if (unlikely(p_occupancy == 0))
  			goto Bad;	// pipe must be non-empty
  		if (unlikely(i_head != p_head - 1))
  			goto Bad;	// must be at the last buffer...
  		p = &pipe->bufs[i_head & p_mask];
  		if (unlikely(p->offset + p->len != i->iov_offset))
  			goto Bad;	// ... at the end of segment
  	} else {
  		if (i_head != p_head)
  			goto Bad;	// must be right after the last buffer
  	}
  	return true;
  Bad:
  	printk(KERN_ERR "idx = %d, offset = %zd
  ", i_head, i->iov_offset);
  	printk(KERN_ERR "head = %d, tail = %d, buffers = %d
  ",
  			p_head, p_tail, pipe->ring_size);
  	for (idx = 0; idx < pipe->ring_size; idx++)
  		printk(KERN_ERR "[%p %p %d %d]
  ",
  			pipe->bufs[idx].ops,
  			pipe->bufs[idx].page,
  			pipe->bufs[idx].offset,
  			pipe->bufs[idx].len);
  	WARN_ON(1);
  	return false;
  }
  #else
  #define sanity(i) true
  #endif
  static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
  			 struct iov_iter *i)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	struct pipe_buffer *buf;
  	unsigned int p_tail = pipe->tail;
  	unsigned int p_mask = pipe->ring_size - 1;
  	unsigned int i_head = i->head;
  	size_t off;
  
  	if (unlikely(bytes > i->count))
  		bytes = i->count;
  
  	if (unlikely(!bytes))
  		return 0;
  
  	if (!sanity(i))
  		return 0;
  
  	off = i->iov_offset;
  	buf = &pipe->bufs[i_head & p_mask];
  	if (off) {
  		if (offset == off && buf->page == page) {
  			/* merge with the last one */
  			buf->len += bytes;
  			i->iov_offset += bytes;
  			goto out;
  		}
  		i_head++;
  		buf = &pipe->bufs[i_head & p_mask];
  	}
  	if (pipe_full(i_head, p_tail, pipe->max_usage))
  		return 0;

  	buf->ops = &page_cache_pipe_buf_ops;
  	get_page(page);
  	buf->page = page;
  	buf->offset = offset;
  	buf->len = bytes;
  
  	pipe->head = i_head + 1;
  	i->iov_offset = offset + bytes;
  	i->head = i_head;
  out:
  	i->count -= bytes;
  	return bytes;
  }
  /*
   * Fault in one or more iovecs of the given iov_iter, to a maximum length of
   * bytes.  For each iovec, fault in each page that constitutes the iovec.
   *
   * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
   * because it is an invalid address).
   */
  int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
  {
  	size_t skip = i->iov_offset;
  	const struct iovec *iov;
  	int err;
  	struct iovec v;
  
  	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
  		iterate_iovec(i, bytes, v, iov, skip, ({
  			err = fault_in_pages_readable(v.iov_base, v.iov_len);
  			if (unlikely(err))
  			return err;
  		0;}))
  	}
  	return 0;
  }
  EXPORT_SYMBOL(iov_iter_fault_in_readable);
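/*
 * Illustrative caller pattern (a sketch, not part of the original file):
 * buffered-write paths fault the source pages in up front, then do the
 * copy with the atomic helper and retry if it comes up short:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	iov_iter_advance(i, copied);
 *	if (unlikely(!copied))
 *		... shrink 'bytes' and retry rather than looping forever ...
 */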

  void iov_iter_init(struct iov_iter *i, unsigned int direction,
  			const struct iovec *iov, unsigned long nr_segs,
  			size_t count)
  {
  	WARN_ON(direction & ~(READ | WRITE));
  	direction &= READ | WRITE;
  	/* It will get better.  Eventually... */
  	if (uaccess_kernel()) {
  		i->type = ITER_KVEC | direction;
  		i->kvec = (struct kvec *)iov;
  	} else {
  		i->type = ITER_IOVEC | direction;
  		i->iov = iov;
  	}
  	i->nr_segs = nr_segs;
  	i->iov_offset = 0;
  	i->count = count;
  }
  EXPORT_SYMBOL(iov_iter_init);
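/*
 * Example (illustrative, not part of the original file): describing a
 * single user buffer and filling it from a kernel buffer:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	n = copy_to_iter(kbuf, len, &iter);	// may be short on -EFAULT
 *
 * READ means data flows *into* the memory the iterator describes; WRITE
 * means the iterator is the source of the transfer.
 */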

  static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
  {
  	char *from = kmap_atomic(page);
  	memcpy(to, from + offset, len);
  	kunmap_atomic(from);
  }
  static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
  {
  	char *to = kmap_atomic(page);
  	memcpy(to + offset, from, len);
  	kunmap_atomic(to);
  }
  static void memzero_page(struct page *page, size_t offset, size_t len)
  {
  	char *addr = kmap_atomic(page);
  	memset(addr + offset, 0, len);
  	kunmap_atomic(addr);
  }
  static inline bool allocated(struct pipe_buffer *buf)
  {
  	return buf->ops == &default_pipe_buf_ops;
  }
  static inline void data_start(const struct iov_iter *i,
  			      unsigned int *iter_headp, size_t *offp)
  {
  	unsigned int p_mask = i->pipe->ring_size - 1;
  	unsigned int iter_head = i->head;
  	size_t off = i->iov_offset;
  
  	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
  		    off == PAGE_SIZE)) {
  		iter_head++;
  		off = 0;
  	}
  	*iter_headp = iter_head;
  	*offp = off;
  }
  
  static size_t push_pipe(struct iov_iter *i, size_t size,
  			int *iter_headp, size_t *offp)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	unsigned int p_tail = pipe->tail;
  	unsigned int p_mask = pipe->ring_size - 1;
  	unsigned int iter_head;
  	size_t off;
  	ssize_t left;
  
  	if (unlikely(size > i->count))
  		size = i->count;
  	if (unlikely(!size))
  		return 0;
  
  	left = size;
  	data_start(i, &iter_head, &off);
  	*iter_headp = iter_head;
  	*offp = off;
  	if (off) {
  		left -= PAGE_SIZE - off;
  		if (left <= 0) {
  			pipe->bufs[iter_head & p_mask].len += size;
  			return size;
  		}
  		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
  		iter_head++;
  	}
  	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
  		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
  		struct page *page = alloc_page(GFP_USER);
  		if (!page)
  			break;
  
  		buf->ops = &default_pipe_buf_ops;
  		buf->page = page;
  		buf->offset = 0;
  		buf->len = min_t(ssize_t, left, PAGE_SIZE);
  		left -= buf->len;
  		iter_head++;
  		pipe->head = iter_head;
  
  		if (left == 0)
  			return size;
  	}
  	return size - left;
  }
  
  static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
  				struct iov_iter *i)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	unsigned int p_mask = pipe->ring_size - 1;
  	unsigned int i_head;
  	size_t n, off;
  
  	if (!sanity(i))
  		return 0;
  	bytes = n = push_pipe(i, bytes, &i_head, &off);
  	if (unlikely(!n))
  		return 0;
  	do {
  		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
  		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
  		i->head = i_head;
  		i->iov_offset = off + chunk;
  		n -= chunk;
  		addr += chunk;
  		off = 0;
  		i_head++;
  	} while (n);
  	i->count -= bytes;
  	return bytes;
  }
  static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
  			      __wsum sum, size_t off)
  {
  	__wsum next = csum_partial_copy_nocheck(from, to, len);
  	return csum_block_add(sum, next, off);
  }
  static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
  				__wsum *csum, struct iov_iter *i)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	unsigned int p_mask = pipe->ring_size - 1;
  	unsigned int i_head;
  	size_t n, r;
  	size_t off = 0;
  	__wsum sum = *csum;
  
  	if (!sanity(i))
  		return 0;
  	bytes = n = push_pipe(i, bytes, &i_head, &r);
  	if (unlikely(!n))
  		return 0;
  	do {
  		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
  		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
  		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
  		kunmap_atomic(p);
  		i->head = i_head;
  		i->iov_offset = r + chunk;
  		n -= chunk;
  		off += chunk;
  		addr += chunk;
  		r = 0;
  		i_head++;
  	} while (n);
  	i->count -= bytes;
  	*csum = sum;
  	return bytes;
  }
  size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  {
  	const char *from = addr;
  	if (unlikely(iov_iter_is_pipe(i)))
  		return copy_pipe_to_iter(addr, bytes, i);
  	if (iter_is_iovec(i))
  		might_fault();
  	iterate_and_advance(i, bytes, v,
  		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
  		memcpy_to_page(v.bv_page, v.bv_offset,
  			       (from += v.bv_len) - v.bv_len, v.bv_len),
  		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
  	)

  	return bytes;
  }
  EXPORT_SYMBOL(_copy_to_iter);
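/*
 * Note (illustrative): callers normally go through copy_to_iter() from
 * <linux/uio.h>, a thin hardened-usercopy wrapper around this export,
 * roughly:
 *
 *	if (unlikely(!check_copy_size(addr, bytes, true)))
 *		return 0;
 *	return _copy_to_iter(addr, bytes, i);
 *
 * The return value is the number of bytes actually copied, which can be
 * short if a user address faults part-way through.
 */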

  #ifdef CONFIG_ARCH_HAS_COPY_MC
  static int copyout_mc(void __user *to, const void *from, size_t n)
  {
  	if (access_ok(to, n)) {
  		instrument_copy_to_user(to, from, n);
  		n = copy_mc_to_user((__force void *) to, from, n);
  	}
  	return n;
  }
  static unsigned long copy_mc_to_page(struct page *page, size_t offset,
  		const char *from, size_t len)
  {
  	unsigned long ret;
  	char *to;
  
  	to = kmap_atomic(page);
  	ret = copy_mc_to_kernel(to + offset, from, len);
  	kunmap_atomic(to);
  
  	return ret;
  }
  static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
  				struct iov_iter *i)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	unsigned int p_mask = pipe->ring_size - 1;
  	unsigned int i_head;
  	size_t n, off, xfer = 0;
  
  	if (!sanity(i))
  		return 0;
  	bytes = n = push_pipe(i, bytes, &i_head, &off);
  	if (unlikely(!n))
  		return 0;
  	do {
  		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
  		unsigned long rem;
  		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
  					    off, addr, chunk);
  		i->head = i_head;
  		i->iov_offset = off + chunk - rem;
  		xfer += chunk - rem;
  		if (rem)
  			break;
  		n -= chunk;
  		addr += chunk;
  		off = 0;
  		i_head++;
  	} while (n);
  	i->count -= xfer;
  	return xfer;
  }
  /**
   * _copy_mc_to_iter - copy to iter with source memory error exception handling
   * @addr: source kernel address
   * @bytes: total transfer length
   * @iter: destination iterator
   *
   * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypassing the page cache and the
 * block layer). Upon #MC, read(2) aborts and returns EIO or the number of
 * bytes successfully copied.
   *
 * The main differences between this and typical _copy_to_iter() are:
   *
   * * Typical tail/residue handling after a fault retries the copy
   *   byte-by-byte until the fault happens again. Re-triggering machine
   *   checks is potentially fatal so the implementation uses source
   *   alignment and poison alignment assumptions to avoid re-triggering
   *   hardware exceptions.
   *
   * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
   *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
   *   a short copy.
   */
  size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  {
  	const char *from = addr;
  	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
  	if (unlikely(iov_iter_is_pipe(i)))
  		return copy_mc_pipe_to_iter(addr, bytes, i);
  	if (iter_is_iovec(i))
  		might_fault();
  	iterate_and_advance(i, bytes, v,
  		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
  			   v.iov_len),
  		({
  		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
  				      (from += v.bv_len) - v.bv_len, v.bv_len);
  		if (rem) {
  			curr_addr = (unsigned long) from;
  			bytes = curr_addr - s_addr - rem;
  			return bytes;
  		}
  		}),
  		({
  		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
  					- v.iov_len, v.iov_len);
  		if (rem) {
  			curr_addr = (unsigned long) from;
  			bytes = curr_addr - s_addr - rem;
  			return bytes;
  		}
  		})
  	)
  
  	return bytes;
  }
  EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
  #endif /* CONFIG_ARCH_HAS_COPY_MC */
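/*
 * Illustrative caller sketch (not part of the original file, and assuming
 * the copy_mc_to_iter() wrapper from <linux/uio.h>): a pmem/dax read can
 * treat a short return as a poisoned source:
 *
 *	copied = copy_mc_to_iter(kaddr, len, iter);
 *	if (copied != len)
 *		return copied ? copied : -EIO;
 */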

  size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
  {
  	char *to = addr;
  	if (unlikely(iov_iter_is_pipe(i))) {
  		WARN_ON(1);
  		return 0;
  	}
  	if (iter_is_iovec(i))
  		might_fault();
  	iterate_and_advance(i, bytes, v,
  		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
  		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  				 v.bv_offset, v.bv_len),
  		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  	)
  
  	return bytes;
  }
  EXPORT_SYMBOL(_copy_from_iter);

  bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
  {
  	char *to = addr;
  	if (unlikely(iov_iter_is_pipe(i))) {
  		WARN_ON(1);
  		return false;
  	}
  	if (unlikely(i->count < bytes))
  		return false;
  	if (iter_is_iovec(i))
  		might_fault();
  	iterate_all_kinds(i, bytes, v, ({
  		if (copyin((to += v.iov_len) - v.iov_len,
  				      v.iov_base, v.iov_len))
  			return false;
  		0;}),
  		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  				 v.bv_offset, v.bv_len),
  		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  	)
  
  	iov_iter_advance(i, bytes);
  	return true;
  }
  EXPORT_SYMBOL(_copy_from_iter_full);
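/*
 * Illustrative contrast (not part of the original file): the _full variant
 * is all-or-nothing and leaves the iterator unadvanced on failure, which
 * suits fixed-size headers pulled out of a message:
 *
 *	struct my_hdr hdr;	// hypothetical structure, for illustration
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 *
 * whereas copy_from_iter() may consume and report a short count.
 */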

  size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
  {
  	char *to = addr;
  	if (unlikely(iov_iter_is_pipe(i))) {
  		WARN_ON(1);
  		return 0;
  	}
  	iterate_and_advance(i, bytes, v,
  		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
  					 v.iov_base, v.iov_len),
  		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  				 v.bv_offset, v.bv_len),
  		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  	)
  
  	return bytes;
  }
  EXPORT_SYMBOL(_copy_from_iter_nocache);

  #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
  /**
   * _copy_from_iter_flushcache - write destination through cpu cache
   * @addr: destination kernel address
   * @bytes: total transfer length
   * @iter: source iterator
   *
   * The pmem driver arranges for filesystem-dax to use this facility via
   * dax_copy_from_iter() for ensuring that writes to persistent memory
   * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
   * bypass the cache for the ITER_IOVEC case, and on some archs may use
   * instructions that strand dirty-data in the cache.
   */
  size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
  {
  	char *to = addr;
  	if (unlikely(iov_iter_is_pipe(i))) {
  		WARN_ON(1);
  		return 0;
  	}
  	iterate_and_advance(i, bytes, v,
  		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
  					 v.iov_base, v.iov_len),
  		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
  				 v.bv_offset, v.bv_len),
  		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
  			v.iov_len)
  	)
  
  	return bytes;
  }
  EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
  #endif
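/*
 * Illustrative sketch (not part of the original file): a filesystem-dax
 * write path pushes user data straight to persistent memory with the
 * flushcache variant so no dirty lines are left behind in the CPU cache:
 *
 *	len = copy_from_iter_flushcache(pmem_addr, bytes, iter);
 *	// len < bytes means a user page faulted; the caller handles it
 *
 * Without CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE the <linux/uio.h> wrapper
 * falls back to the plain nocache copy.
 */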
  bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
  {
  	char *to = addr;
  	if (unlikely(iov_iter_is_pipe(i))) {
  		WARN_ON(1);
  		return false;
  	}
  	if (unlikely(i->count < bytes))
  		return false;
  	iterate_all_kinds(i, bytes, v, ({
  		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
  					     v.iov_base, v.iov_len))
  			return false;
  		0;}),
  		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  				 v.bv_offset, v.bv_len),
  		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  	)
  
  	iov_iter_advance(i, bytes);
  	return true;
  }
  EXPORT_SYMBOL(_copy_from_iter_full_nocache);

  static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
  {
  	struct page *head;
  	size_t v = n + offset;
  
  	/*
  	 * The general case needs to access the page order in order
  	 * to compute the page size.
  	 * However, we mostly deal with order-0 pages and thus can
  	 * avoid a possible cache line miss for requests that fit all
  	 * page orders.
  	 */
  	if (n <= v && v <= PAGE_SIZE)
  		return true;
  
  	head = compound_head(page);
  	v += (page - head) << PAGE_SHIFT;

  	if (likely(n <= v && v <= (page_size(head))))
  		return true;
  	WARN_ON(1);
  	return false;
  }

  size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
  			 struct iov_iter *i)
  {
  	if (unlikely(!page_copy_sane(page, offset, bytes)))
  		return 0;
  	if (i->type & (ITER_BVEC|ITER_KVEC)) {
  		void *kaddr = kmap_atomic(page);
  		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
  		kunmap_atomic(kaddr);
  		return wanted;
  	} else if (unlikely(iov_iter_is_discard(i)))
  		return bytes;
  	else if (likely(!iov_iter_is_pipe(i)))
  		return copy_page_to_iter_iovec(page, offset, bytes, i);
  	else
  		return copy_page_to_iter_pipe(page, offset, bytes, i);
  }
  EXPORT_SYMBOL(copy_page_to_iter);
  
  size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
  			 struct iov_iter *i)
  {
  	if (unlikely(!page_copy_sane(page, offset, bytes)))
  		return 0;
  	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
  		WARN_ON(1);
  		return 0;
  	}
  	if (i->type & (ITER_BVEC|ITER_KVEC)) {
  		void *kaddr = kmap_atomic(page);
  		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
  		kunmap_atomic(kaddr);
  		return wanted;
  	} else
  		return copy_page_from_iter_iovec(page, offset, bytes, i);
  }
  EXPORT_SYMBOL(copy_page_from_iter);
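/*
 * Illustrative caller sketch (not part of the original file): a
 * filemap-style read copies each up-to-date page cache page into the
 * user's iterator and stops early on a short copy:
 *
 *	copied = copy_page_to_iter(page, offset, want, iter);
 *	if (copied < want)
 *		return copied ? copied : -EFAULT;
 *
 * As the code above shows, ITER_BVEC/ITER_KVEC destinations reduce to a
 * kmap + memcpy, ITER_PIPE splices a page reference instead of copying,
 * and ITER_DISCARD just drops the data.
 */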
  static size_t pipe_zero(size_t bytes, struct iov_iter *i)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	unsigned int p_mask = pipe->ring_size - 1;
  	unsigned int i_head;
  	size_t n, off;
  
  	if (!sanity(i))
  		return 0;
  	bytes = n = push_pipe(i, bytes, &i_head, &off);
  	if (unlikely(!n))
  		return 0;
  	do {
  		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
  		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
  		i->head = i_head;
  		i->iov_offset = off + chunk;
  		n -= chunk;
  		off = 0;
  		i_head++;
  	} while (n);
  	i->count -= bytes;
  	return bytes;
  }
  size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
  {
  	if (unlikely(iov_iter_is_pipe(i)))
  		return pipe_zero(bytes, i);
  	iterate_and_advance(i, bytes, v,
  		clear_user(v.iov_base, v.iov_len),
  		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
  		memset(v.iov_base, 0, v.iov_len)
  	)
  
  	return bytes;
  }
  EXPORT_SYMBOL(iov_iter_zero);
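/*
 * Illustrative use (not part of the original file): readers commonly
 * zero-fill the tail of a request that runs past the available data,
 * e.g. past EOF or into a hole:
 *
 *	if (bytes_left)
 *		zeroed = iov_iter_zero(bytes_left, iter);
 *
 * Like the copy helpers, the return value can be short if clear_user()
 * faults on an ITER_IOVEC destination.
 */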
  size_t iov_iter_copy_from_user_atomic(struct page *page,
  		struct iov_iter *i, unsigned long offset, size_t bytes)
  {
  	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
  	if (unlikely(!page_copy_sane(page, offset, bytes))) {
  		kunmap_atomic(kaddr);
  		return 0;
  	}
  	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
  		kunmap_atomic(kaddr);
  		WARN_ON(1);
  		return 0;
  	}
  	iterate_all_kinds(i, bytes, v,
  		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
  		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
  				 v.bv_offset, v.bv_len),
  		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  	)
  	kunmap_atomic(kaddr);
  	return bytes;
  }
  EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
  static inline void pipe_truncate(struct iov_iter *i)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	unsigned int p_tail = pipe->tail;
  	unsigned int p_head = pipe->head;
  	unsigned int p_mask = pipe->ring_size - 1;
  
  	if (!pipe_empty(p_head, p_tail)) {
  		struct pipe_buffer *buf;
  		unsigned int i_head = i->head;
  		size_t off = i->iov_offset;

  		if (off) {
  			buf = &pipe->bufs[i_head & p_mask];
  			buf->len = off - buf->offset;
  			i_head++;
  		}
  		while (p_head != i_head) {
  			p_head--;
  			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
  		}
  
  		pipe->head = p_head;
  	}
  }
  static void pipe_advance(struct iov_iter *i, size_t size)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	if (unlikely(i->count < size))
  		size = i->count;
  	if (size) {
  		struct pipe_buffer *buf;
  		unsigned int p_mask = pipe->ring_size - 1;
  		unsigned int i_head = i->head;
  		size_t off = i->iov_offset, left = size;

  		if (off) /* make it relative to the beginning of buffer */
  			left += off - pipe->bufs[i_head & p_mask].offset;
  		while (1) {
  			buf = &pipe->bufs[i_head & p_mask];
  			if (left <= buf->len)
  				break;
  			left -= buf->len;
  			i_head++;
  		}
  		i->head = i_head;
  		i->iov_offset = buf->offset + left;
  	}
  	i->count -= size;
  	/* ... and discard everything past that point */
  	pipe_truncate(i);
  }
  void iov_iter_advance(struct iov_iter *i, size_t size)
  {
  	if (unlikely(iov_iter_is_pipe(i))) {
  		pipe_advance(i, size);
  		return;
  	}
  	if (unlikely(iov_iter_is_discard(i))) {
  		i->count -= size;
  		return;
  	}
  	iterate_and_advance(i, size, v, 0, 0, 0)
  }
  EXPORT_SYMBOL(iov_iter_advance);
  void iov_iter_revert(struct iov_iter *i, size_t unroll)
  {
  	if (!unroll)
  		return;
  	if (WARN_ON(unroll > MAX_RW_COUNT))
  		return;
  	i->count += unroll;
  	if (unlikely(iov_iter_is_pipe(i))) {
  		struct pipe_inode_info *pipe = i->pipe;
  		unsigned int p_mask = pipe->ring_size - 1;
  		unsigned int i_head = i->head;
  		size_t off = i->iov_offset;
  		while (1) {
  			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
  			size_t n = off - b->offset;
  			if (unroll < n) {
  				off -= unroll;
  				break;
  			}
  			unroll -= n;
  			if (!unroll && i_head == i->start_head) {
  				off = 0;
  				break;
  			}
  			i_head--;
  			b = &pipe->bufs[i_head & p_mask];
  			off = b->offset + b->len;
  		}
  		i->iov_offset = off;
  		i->head = i_head;
  		pipe_truncate(i);
  		return;
  	}
  	if (unlikely(iov_iter_is_discard(i)))
  		return;
  	if (unroll <= i->iov_offset) {
  		i->iov_offset -= unroll;
  		return;
  	}
  	unroll -= i->iov_offset;
  	if (iov_iter_is_bvec(i)) {
  		const struct bio_vec *bvec = i->bvec;
  		while (1) {
  			size_t n = (--bvec)->bv_len;
  			i->nr_segs++;
  			if (unroll <= n) {
  				i->bvec = bvec;
  				i->iov_offset = n - unroll;
  				return;
  			}
  			unroll -= n;
  		}
  	} else { /* same logics for iovec and kvec */
  		const struct iovec *iov = i->iov;
  		while (1) {
  			size_t n = (--iov)->iov_len;
  			i->nr_segs++;
  			if (unroll <= n) {
  				i->iov = iov;
  				i->iov_offset = n - unroll;
  				return;
  			}
  			unroll -= n;
  		}
  	}
  }
  EXPORT_SYMBOL(iov_iter_revert);
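/*
 * Illustrative pairing (not part of the original file): a caller that
 * advanced the iterator optimistically can hand the bytes back after a
 * failure so its own caller sees a consistent residual count:
 *
 *	size_t before = iov_iter_count(iter);
 *	ret = do_transfer(iter);		// hypothetical helper
 *	if (ret < 0)
 *		iov_iter_revert(iter, before - iov_iter_count(iter));
 *
 * iov_iter_count() is the i->count accessor from <linux/uio.h>.
 */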
  /*
   * Return the count of just the current iov_iter segment.
   */
  size_t iov_iter_single_seg_count(const struct iov_iter *i)
  {
  	if (unlikely(iov_iter_is_pipe(i)))
  		return i->count;	// it is a silly place, anyway
  	if (i->nr_segs == 1)
  		return i->count;
  	if (unlikely(iov_iter_is_discard(i)))
  		return i->count;
  	else if (iov_iter_is_bvec(i))
  		return min(i->count, i->bvec->bv_len - i->iov_offset);
  	else
  		return min(i->count, i->iov->iov_len - i->iov_offset);
  }
  EXPORT_SYMBOL(iov_iter_single_seg_count);
  void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
  			const struct kvec *kvec, unsigned long nr_segs,
  			size_t count)
  {
  	WARN_ON(direction & ~(READ | WRITE));
  	i->type = ITER_KVEC | (direction & (READ | WRITE));
  	i->kvec = kvec;
  	i->nr_segs = nr_segs;
  	i->iov_offset = 0;
  	i->count = count;
  }
  EXPORT_SYMBOL(iov_iter_kvec);
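/*
 * Example (illustrative, not part of the original file): a kernel buffer
 * can be described by a single kvec so the same copy helpers work with no
 * user addresses involved:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE, &kv, 1, len);	// iter is a data source
 *
 * iov_iter_bvec() below is the analogous constructor for page/offset/len
 * triples (struct bio_vec), as used by the block layer.
 */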
  void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
  			const struct bio_vec *bvec, unsigned long nr_segs,
  			size_t count)
  {
  	WARN_ON(direction & ~(READ | WRITE));
  	i->type = ITER_BVEC | (direction & (READ | WRITE));
  	i->bvec = bvec;
  	i->nr_segs = nr_segs;
  	i->iov_offset = 0;
  	i->count = count;
  }
  EXPORT_SYMBOL(iov_iter_bvec);
  void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
  			struct pipe_inode_info *pipe,
  			size_t count)
  {
  	BUG_ON(direction != READ);
  	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
  	i->type = ITER_PIPE | READ;
  	i->pipe = pipe;
  	i->head = pipe->head;
241699cd7   Al Viro   new iov_iter flav...
1162
1163
  	i->iov_offset = 0;
  	i->count = count;
8cefc107c   David Howells   pipe: Use head an...
1164
  	i->start_head = i->head;
241699cd7   Al Viro   new iov_iter flav...
1165
1166
  }
  EXPORT_SYMBOL(iov_iter_pipe);
  /**
   * iov_iter_discard - Initialise an I/O iterator that discards data
   * @i: The iterator to initialise.
   * @direction: The direction of the transfer.
   * @count: The size of the I/O buffer in bytes.
   *
   * Set up an I/O iterator that just discards everything that's written to it.
   * It's only available as a READ iterator.
   */
  void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
  {
  	BUG_ON(direction != READ);
  	i->type = ITER_DISCARD | READ;
  	i->count = count;
  	i->iov_offset = 0;
  }
  EXPORT_SYMBOL(iov_iter_discard);
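
/*
 * Usage sketch (illustrative): a discard iterator lets a producer "consume"
 * bytes without storing them anywhere, e.g. to skip over unwanted data.
 * "junk" and "junk_len" are hypothetical.
 *
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_discard(&iter, READ, junk_len);
 *	copied = copy_to_iter(junk, junk_len, &iter);
 *	// "copied" counts as transferred, but the bytes are simply dropped.
 */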
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
  }
  EXPORT_SYMBOL(iov_iter_alignment);
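
/*
 * Usage sketch (illustrative): direct I/O paths typically reject requests
 * whose buffer addresses, offsets or length are not aligned to the device's
 * logical block size.  "iter" and "bdev" are hypothetical caller variables.
 *
 *	unsigned int blksz = bdev_logical_block_size(bdev);
 *
 *	if (iov_iter_alignment(iter) & (blksz - 1))
 *		return -EINVAL;	// misaligned base, offset or length
 */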
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}
	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
  }
  EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
  		n -= PAGE_SIZE;
  	}
  
  	return maxsize;
  }
  
  static ssize_t pipe_get_pages(struct iov_iter *i,
  		   struct page **pages, size_t maxsize, unsigned maxpages,
  		   size_t *start)
  {
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
  }
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
  }
  EXPORT_SYMBOL(iov_iter_get_pages);
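
/*
 * Usage sketch (illustrative): pin the user pages backing the next chunk of
 * an iovec-backed iterator.  iov_iter_get_pages() does not advance the
 * iterator; the caller advances it once the bytes are consumed and must drop
 * the page references itself.  MAX_PAGES is a hypothetical caller limit.
 *
 *	struct page *pages[MAX_PAGES];
 *	size_t start;
 *	ssize_t bytes;
 *	int n;
 *
 *	bytes = iov_iter_get_pages(iter, pages, SIZE_MAX, MAX_PAGES, &start);
 *	if (bytes <= 0)
 *		return bytes;
 *	// pages[0] holds data starting at offset "start"; after use:
 *	for (n = DIV_ROUND_UP(bytes + start, PAGE_SIZE); n > 0; n--)
 *		put_page(pages[n - 1]);
 *	iov_iter_advance(iter, bytes);
 */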
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

  static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
  		   struct page ***pages, size_t maxsize,
  		   size_t *start)
  {
  	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
  	if (n > 0)
  		*pages = p;
  	else
  		kvfree(p);
  	return n;
  }
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
  }
  EXPORT_SYMBOL(iov_iter_get_pages_alloc);
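
/*
 * Usage sketch (illustrative): same idea as above, but the page array is
 * allocated for the caller and must be released with kvfree() after the
 * individual page references have been dropped.  "maxsize" is hypothetical.
 *
 *	struct page **pages;
 *	size_t start;
 *	ssize_t bytes;
 *	int n;
 *
 *	bytes = iov_iter_get_pages_alloc(iter, &pages, maxsize, &start);
 *	if (bytes <= 0)
 *		return bytes;
 *	// ... use the pages ...
 *	for (n = DIV_ROUND_UP(bytes + start, PAGE_SIZE); n > 0; n--)
 *		put_page(pages[n - 1]);
 *	kvfree(pages);
 */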
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
  		off += v.iov_len;
  	})
  	)
  	*csum = sum;
  	return bytes;
  }
  EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (!next)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
  		off += v.iov_len;
  	})
  	)
  	*csum = sum;
  	iov_iter_advance(i, bytes);
  	return true;
  }
  EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum *csum = csump;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
	sum = *csum;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
  		off += v.iov_len;
  	})
  	)
  	*csum = sum;
  	return bytes;
  }
  EXPORT_SYMBOL(csum_and_copy_to_iter);
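
/*
 * Usage sketch (illustrative): networking uses this to fold the Internet
 * checksum into the copy toward the destination iterator, e.g. when
 * delivering a datagram payload.  "data", "len" and "iter" are hypothetical;
 * the seed here is simply zero.
 *
 *	__wsum csum = 0;
 *	size_t done = csum_and_copy_to_iter(data, len, &csum, iter);
 *
 *	// "csum" now covers the "done" bytes that were copied out.
 */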
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
  }
  EXPORT_SYMBOL(hash_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;
		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
  EXPORT_SYMBOL(iov_iter_npages);
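
/*
 * Usage sketch (illustrative): callers that need to size a bio or a page
 * array up front can ask how many pages the next i->count bytes span, capped
 * at a limit of their choosing.  "iter" is hypothetical; BIO_MAX_PAGES is
 * the usual cap in the block layer of this era.
 *
 *	struct bio *bio;
 *	int nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
 *
 *	bio = bio_alloc(GFP_KERNEL, nr_pages);
 */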
  
  const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
  {
  	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
  		return new->bvec = kmemdup(new->bvec,
  				    new->nr_segs * sizeof(struct bio_vec),
  				    flags);
  	else
  		/* iovec and kvec have identical layout */
  		return new->iov = kmemdup(new->iov,
  				   new->nr_segs * sizeof(struct iovec),
  				   flags);
  }
  EXPORT_SYMBOL(dup_iter);

  static int copy_compat_iovec_from_user(struct iovec *iov,
  		const struct iovec __user *uvec, unsigned long nr_segs)
  {
  	const struct compat_iovec __user *uiov =
  		(const struct compat_iovec __user *)uvec;
  	int ret = -EFAULT, i;
  
  	if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
  		return -EFAULT;
  
  	for (i = 0; i < nr_segs; i++) {
  		compat_uptr_t buf;
  		compat_ssize_t len;
  
  		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
  		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
  
  		/* check for compat_size_t not fitting in compat_ssize_t .. */
  		if (len < 0) {
  			ret = -EINVAL;
  			goto uaccess_end;
  		}
  		iov[i].iov_base = compat_ptr(buf);
  		iov[i].iov_len = len;
  	}
  
  	ret = 0;
  uaccess_end:
  	user_access_end();
  	return ret;
  }
  
  static int copy_iovec_from_user(struct iovec *iov,
  		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}
  	return 0;
  }
  
  struct iovec *iovec_from_user(const struct iovec __user *uvec,
  		unsigned long nr_segs, unsigned long fast_segs,
  		struct iovec *fast_iov, bool compat)
  {
  	struct iovec *iov = fast_iov;
  	int ret;
	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}
  
  	if (compat)
  		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
  	else
  		ret = copy_iovec_from_user(iov, uvec, nr_segs);
  	if (ret) {
  		if (iov != fast_iov)
  			kfree(iov);
  		return ERR_PTR(ret);
  	}
  
  	return iov;
  }
  
  ssize_t __import_iovec(int type, const struct iovec __user *uvec,
  		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
  		 struct iov_iter *i, bool compat)
  {
  	ssize_t total_len = 0;
  	unsigned long seg;
  	struct iovec *iov;
  
  	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
  	if (IS_ERR(iov)) {
  		*iovp = NULL;
  		return PTR_ERR(iov);
  	}
  
  	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}
  
  	iov_iter_init(i, type, iov, nr_segs, total_len);
  	if (iov == *iovp)
  		*iovp = NULL;
  	else
  		*iovp = iov;
  	return total_len;
  }
  
  /**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success.
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
  }
  EXPORT_SYMBOL(import_iovec);
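
/*
 * Usage sketch (illustrative): the typical readv()/writev() pattern keeps a
 * small on-stack iovec array and lets import_iovec() fall back to a heap
 * allocation only for large vectors; kfree(iov) is always safe afterwards.
 * "uvec", "nr_segs" and do_the_io() are hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);
 *	kfree(iov);
 *	return ret;
 */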
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
  		return -EFAULT;
  
  	iov->iov_base = buf;
  	iov->iov_len = len;
  	iov_iter_init(i, rw, iov, 1, len);
  	return 0;
  }
  EXPORT_SYMBOL(import_single_range);
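
/*
 * Usage sketch (illustrative): single-buffer syscalls (read()/write() style)
 * can build their iterator straight from the user pointer and length.
 * "ubuf" and "len" are hypothetical.
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret = import_single_range(WRITE, ubuf, len, &iov, &iter);
 *
 *	if (ret)
 *		return ret;
 *	// "iter" now describes at most MAX_RW_COUNT bytes at ubuf.
 */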
  
  int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
  			    int (*f)(struct kvec *vec, void *context),
  			    void *context)
  {
  	struct kvec w;
  	int err = -EINVAL;
  	if (!bytes)
  		return 0;
  
  	iterate_all_kinds(i, bytes, v, -EINVAL, ({
  		w.iov_base = kmap(v.bv_page) + v.bv_offset;
  		w.iov_len = v.bv_len;
  		err = f(&w, context);
  		kunmap(v.bv_page);
  		err;}), ({
  		w = v;
  		err = f(&w, context);})
  	)
  	return err;
  }
  EXPORT_SYMBOL(iov_iter_for_each_range);
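
/*
 * Usage sketch (illustrative): walk the first "bytes" of a kvec- or
 * bvec-backed iterator one mapped range at a time (plain user-space iovecs
 * are not handled and yield -EINVAL).  The callback and the running total
 * below are hypothetical.
 *
 *	static int sum_range(struct kvec *vec, void *context)
 *	{
 *		size_t *total = context;
 *
 *		*total += vec->iov_len;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	int err = iov_iter_for_each_range(iter, bytes, sum_range, &total);
 */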