Blame view

lib/iov_iter.c 28.8 KB
4f18cd317   Al Viro   take iov_iter stu...
1
2
3
  #include <linux/export.h>
  #include <linux/uio.h>
  #include <linux/pagemap.h>
91f79c43d   Al Viro   new helper: iov_i...
4
5
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
241699cd7   Al Viro   new iov_iter flav...
6
  #include <linux/splice.h>
a604ec7e9   Al Viro   csum_and_copy_......
7
  #include <net/checksum.h>
4f18cd317   Al Viro   take iov_iter stu...
8

241699cd7   Al Viro   new iov_iter flav...
9
  #define PIPE_PARANOIA /* for now */
04a311655   Al Viro   iov_iter.c: macro...
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
  #define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
  	size_t left;					\
  	size_t wanted = n;				\
  	__p = i->iov;					\
  	__v.iov_len = min(n, __p->iov_len - skip);	\
  	if (likely(__v.iov_len)) {			\
  		__v.iov_base = __p->iov_base + skip;	\
  		left = (STEP);				\
  		__v.iov_len -= left;			\
  		skip += __v.iov_len;			\
  		n -= __v.iov_len;			\
  	} else {					\
  		left = 0;				\
  	}						\
  	while (unlikely(!left && n)) {			\
  		__p++;					\
  		__v.iov_len = min(n, __p->iov_len);	\
  		if (unlikely(!__v.iov_len))		\
  			continue;			\
  		__v.iov_base = __p->iov_base;		\
  		left = (STEP);				\
  		__v.iov_len -= left;			\
  		skip = __v.iov_len;			\
  		n -= __v.iov_len;			\
  	}						\
  	n = wanted - n;					\
  }
a280455fa   Al Viro   iov_iter.c: handl...
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
  #define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
  	size_t wanted = n;				\
  	__p = i->kvec;					\
  	__v.iov_len = min(n, __p->iov_len - skip);	\
  	if (likely(__v.iov_len)) {			\
  		__v.iov_base = __p->iov_base + skip;	\
  		(void)(STEP);				\
  		skip += __v.iov_len;			\
  		n -= __v.iov_len;			\
  	}						\
  	while (unlikely(n)) {				\
  		__p++;					\
  		__v.iov_len = min(n, __p->iov_len);	\
  		if (unlikely(!__v.iov_len))		\
  			continue;			\
  		__v.iov_base = __p->iov_base;		\
  		(void)(STEP);				\
  		skip = __v.iov_len;			\
  		n -= __v.iov_len;			\
  	}						\
  	n = wanted;					\
  }
1bdc76aea   Ming Lei   iov_iter: use bve...
59
60
61
62
63
64
65
  #define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
  	struct bvec_iter __start;			\
  	__start.bi_size = n;				\
  	__start.bi_bvec_done = skip;			\
  	__start.bi_idx = 0;				\
  	for_each_bvec(__v, i->bvec, __bi, __start) {	\
  		if (!__v.bv_len)			\
04a311655   Al Viro   iov_iter.c: macro...
66
  			continue;			\
04a311655   Al Viro   iov_iter.c: macro...
67
  		(void)(STEP);				\
04a311655   Al Viro   iov_iter.c: macro...
68
  	}						\
04a311655   Al Viro   iov_iter.c: macro...
69
  }
a280455fa   Al Viro   iov_iter.c: handl...
70
  #define iterate_all_kinds(i, n, v, I, B, K) {			\
04a311655   Al Viro   iov_iter.c: macro...
71
72
  	size_t skip = i->iov_offset;				\
  	if (unlikely(i->type & ITER_BVEC)) {			\
04a311655   Al Viro   iov_iter.c: macro...
73
  		struct bio_vec v;				\
1bdc76aea   Ming Lei   iov_iter: use bve...
74
75
  		struct bvec_iter __bi;				\
  		iterate_bvec(i, n, v, __bi, skip, (B))		\
a280455fa   Al Viro   iov_iter.c: handl...
76
77
78
79
  	} else if (unlikely(i->type & ITER_KVEC)) {		\
  		const struct kvec *kvec;			\
  		struct kvec v;					\
  		iterate_kvec(i, n, v, kvec, skip, (K))		\
04a311655   Al Viro   iov_iter.c: macro...
80
81
82
83
84
85
  	} else {						\
  		const struct iovec *iov;			\
  		struct iovec v;					\
  		iterate_iovec(i, n, v, iov, skip, (I))		\
  	}							\
  }
a280455fa   Al Viro   iov_iter.c: handl...
86
  #define iterate_and_advance(i, n, v, I, B, K) {			\
dd254f5a3   Al Viro   fold checks into ...
87
88
  	if (unlikely(i->count < n))				\
  		n = i->count;					\
19f184593   Al Viro   do "fold checks i...
89
  	if (i->count) {						\
dd254f5a3   Al Viro   fold checks into ...
90
91
  		size_t skip = i->iov_offset;			\
  		if (unlikely(i->type & ITER_BVEC)) {		\
1bdc76aea   Ming Lei   iov_iter: use bve...
92
  			const struct bio_vec *bvec = i->bvec;	\
dd254f5a3   Al Viro   fold checks into ...
93
  			struct bio_vec v;			\
1bdc76aea   Ming Lei   iov_iter: use bve...
94
95
96
97
98
  			struct bvec_iter __bi;			\
  			iterate_bvec(i, n, v, __bi, skip, (B))	\
  			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
  			i->nr_segs -= i->bvec - bvec;		\
  			skip = __bi.bi_bvec_done;		\
dd254f5a3   Al Viro   fold checks into ...
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
  		} else if (unlikely(i->type & ITER_KVEC)) {	\
  			const struct kvec *kvec;		\
  			struct kvec v;				\
  			iterate_kvec(i, n, v, kvec, skip, (K))	\
  			if (skip == kvec->iov_len) {		\
  				kvec++;				\
  				skip = 0;			\
  			}					\
  			i->nr_segs -= kvec - i->kvec;		\
  			i->kvec = kvec;				\
  		} else {					\
  			const struct iovec *iov;		\
  			struct iovec v;				\
  			iterate_iovec(i, n, v, iov, skip, (I))	\
  			if (skip == iov->iov_len) {		\
  				iov++;				\
  				skip = 0;			\
  			}					\
  			i->nr_segs -= iov - i->iov;		\
  			i->iov = iov;				\
7ce2a91e5   Al Viro   iov_iter.c: itera...
119
  		}						\
dd254f5a3   Al Viro   fold checks into ...
120
121
  		i->count -= n;					\
  		i->iov_offset = skip;				\
7ce2a91e5   Al Viro   iov_iter.c: itera...
122
  	}							\
7ce2a91e5   Al Viro   iov_iter.c: itera...
123
  }
62a8067a7   Al Viro   bio_vec-backed io...
124
  static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
4f18cd317   Al Viro   take iov_iter stu...
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
  			 struct iov_iter *i)
  {
  	size_t skip, copy, left, wanted;
  	const struct iovec *iov;
  	char __user *buf;
  	void *kaddr, *from;
  
  	if (unlikely(bytes > i->count))
  		bytes = i->count;
  
  	if (unlikely(!bytes))
  		return 0;
  
  	wanted = bytes;
  	iov = i->iov;
  	skip = i->iov_offset;
  	buf = iov->iov_base + skip;
  	copy = min(bytes, iov->iov_len - skip);
3fa6c5073   Mikulas Patocka   mm: optimize copy...
143
  	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
4f18cd317   Al Viro   take iov_iter stu...
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
  		kaddr = kmap_atomic(page);
  		from = kaddr + offset;
  
  		/* first chunk, usually the only one */
  		left = __copy_to_user_inatomic(buf, from, copy);
  		copy -= left;
  		skip += copy;
  		from += copy;
  		bytes -= copy;
  
  		while (unlikely(!left && bytes)) {
  			iov++;
  			buf = iov->iov_base;
  			copy = min(bytes, iov->iov_len);
  			left = __copy_to_user_inatomic(buf, from, copy);
  			copy -= left;
  			skip = copy;
  			from += copy;
  			bytes -= copy;
  		}
  		if (likely(!bytes)) {
  			kunmap_atomic(kaddr);
  			goto done;
  		}
  		offset = from - kaddr;
  		buf += copy;
  		kunmap_atomic(kaddr);
  		copy = min(bytes, iov->iov_len - skip);
  	}
  	/* Too bad - revert to non-atomic kmap */
3fa6c5073   Mikulas Patocka   mm: optimize copy...
174

4f18cd317   Al Viro   take iov_iter stu...
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
  	kaddr = kmap(page);
  	from = kaddr + offset;
  	left = __copy_to_user(buf, from, copy);
  	copy -= left;
  	skip += copy;
  	from += copy;
  	bytes -= copy;
  	while (unlikely(!left && bytes)) {
  		iov++;
  		buf = iov->iov_base;
  		copy = min(bytes, iov->iov_len);
  		left = __copy_to_user(buf, from, copy);
  		copy -= left;
  		skip = copy;
  		from += copy;
  		bytes -= copy;
  	}
  	kunmap(page);
3fa6c5073   Mikulas Patocka   mm: optimize copy...
193

4f18cd317   Al Viro   take iov_iter stu...
194
  done:
81055e584   Al Viro   optimize copy_pag...
195
196
197
198
  	if (skip == iov->iov_len) {
  		iov++;
  		skip = 0;
  	}
4f18cd317   Al Viro   take iov_iter stu...
199
200
201
202
203
204
  	i->count -= wanted - bytes;
  	i->nr_segs -= iov - i->iov;
  	i->iov = iov;
  	i->iov_offset = skip;
  	return wanted - bytes;
  }
4f18cd317   Al Viro   take iov_iter stu...
205

62a8067a7   Al Viro   bio_vec-backed io...
206
  static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
f0d1bec9d   Al Viro   new helper: copy_...
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
  			 struct iov_iter *i)
  {
  	size_t skip, copy, left, wanted;
  	const struct iovec *iov;
  	char __user *buf;
  	void *kaddr, *to;
  
  	if (unlikely(bytes > i->count))
  		bytes = i->count;
  
  	if (unlikely(!bytes))
  		return 0;
  
  	wanted = bytes;
  	iov = i->iov;
  	skip = i->iov_offset;
  	buf = iov->iov_base + skip;
  	copy = min(bytes, iov->iov_len - skip);
3fa6c5073   Mikulas Patocka   mm: optimize copy...
225
  	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
f0d1bec9d   Al Viro   new helper: copy_...
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
  		kaddr = kmap_atomic(page);
  		to = kaddr + offset;
  
  		/* first chunk, usually the only one */
  		left = __copy_from_user_inatomic(to, buf, copy);
  		copy -= left;
  		skip += copy;
  		to += copy;
  		bytes -= copy;
  
  		while (unlikely(!left && bytes)) {
  			iov++;
  			buf = iov->iov_base;
  			copy = min(bytes, iov->iov_len);
  			left = __copy_from_user_inatomic(to, buf, copy);
  			copy -= left;
  			skip = copy;
  			to += copy;
  			bytes -= copy;
  		}
  		if (likely(!bytes)) {
  			kunmap_atomic(kaddr);
  			goto done;
  		}
  		offset = to - kaddr;
  		buf += copy;
  		kunmap_atomic(kaddr);
  		copy = min(bytes, iov->iov_len - skip);
  	}
  	/* Too bad - revert to non-atomic kmap */
3fa6c5073   Mikulas Patocka   mm: optimize copy...
256

f0d1bec9d   Al Viro   new helper: copy_...
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
  	kaddr = kmap(page);
  	to = kaddr + offset;
  	left = __copy_from_user(to, buf, copy);
  	copy -= left;
  	skip += copy;
  	to += copy;
  	bytes -= copy;
  	while (unlikely(!left && bytes)) {
  		iov++;
  		buf = iov->iov_base;
  		copy = min(bytes, iov->iov_len);
  		left = __copy_from_user(to, buf, copy);
  		copy -= left;
  		skip = copy;
  		to += copy;
  		bytes -= copy;
  	}
  	kunmap(page);
3fa6c5073   Mikulas Patocka   mm: optimize copy...
275

f0d1bec9d   Al Viro   new helper: copy_...
276
  done:
81055e584   Al Viro   optimize copy_pag...
277
278
279
280
  	if (skip == iov->iov_len) {
  		iov++;
  		skip = 0;
  	}
f0d1bec9d   Al Viro   new helper: copy_...
281
282
283
284
285
286
  	i->count -= wanted - bytes;
  	i->nr_segs -= iov - i->iov;
  	i->iov = iov;
  	i->iov_offset = skip;
  	return wanted - bytes;
  }
f0d1bec9d   Al Viro   new helper: copy_...
287

241699cd7   Al Viro   new iov_iter flav...
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
  #ifdef PIPE_PARANOIA
  static bool sanity(const struct iov_iter *i)
  {
  	struct pipe_inode_info *pipe = i->pipe;
  	int idx = i->idx;
  	int next = pipe->curbuf + pipe->nrbufs;
  	if (i->iov_offset) {
  		struct pipe_buffer *p;
  		if (unlikely(!pipe->nrbufs))
  			goto Bad;	// pipe must be non-empty
  		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
  			goto Bad;	// must be at the last buffer...
  
  		p = &pipe->bufs[idx];
  		if (unlikely(p->offset + p->len != i->iov_offset))
  			goto Bad;	// ... at the end of segment
  	} else {
  		if (idx != (next & (pipe->buffers - 1)))
  			goto Bad;	// must be right after the last buffer
  	}
  	return true;
  Bad:
  	printk(KERN_ERR "idx = %d, offset = %zd
  ", i->idx, i->iov_offset);
  	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d
  ",
  			pipe->curbuf, pipe->nrbufs, pipe->buffers);
  	for (idx = 0; idx < pipe->buffers; idx++)
  		printk(KERN_ERR "[%p %p %d %d]
  ",
  			pipe->bufs[idx].ops,
  			pipe->bufs[idx].page,
  			pipe->bufs[idx].offset,
  			pipe->bufs[idx].len);
  	WARN_ON(1);
  	return false;
  }
  #else
  #define sanity(i) true
  #endif
  
  static inline int next_idx(int idx, struct pipe_inode_info *pipe)
  {
  	return (idx + 1) & (pipe->buffers - 1);
  }
  
/*
 * "Copy" @bytes of @page at @offset into a pipe-backed iterator by stealing
 * a reference to the page rather than copying data: either extend the last
 * pipe buffer (when it ends exactly where this chunk begins on the same
 * page) or occupy the next free slot with a new page_cache buffer.
 * Returns bytes accounted, or 0 when the pipe is full or the iterator
 * fails the sanity check.
 */
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		/* can't merge - move on to the next slot */
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	/* next slot already occupied means the ring is full */
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}
4f18cd317   Al Viro   take iov_iter stu...
377
  /*
171a02032   Anton Altaparmakov   VFS: Add iov_iter...
378
379
380
381
382
383
   * Fault in one or more iovecs of the given iov_iter, to a maximum length of
   * bytes.  For each iovec, fault in each page that constitutes the iovec.
   *
   * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
   * because it is an invalid address).
   */
d4690f1e1   Al Viro   fix iov_iter_faul...
384
  int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
171a02032   Anton Altaparmakov   VFS: Add iov_iter...
385
386
387
388
389
390
391
392
  {
  	size_t skip = i->iov_offset;
  	const struct iovec *iov;
  	int err;
  	struct iovec v;
  
  	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
  		iterate_iovec(i, bytes, v, iov, skip, ({
4bce9f6ee   Al Viro   get rid of separa...
393
  			err = fault_in_pages_readable(v.iov_base, v.iov_len);
171a02032   Anton Altaparmakov   VFS: Add iov_iter...
394
395
396
397
398
399
  			if (unlikely(err))
  			return err;
  		0;}))
  	}
  	return 0;
  }
d4690f1e1   Al Viro   fix iov_iter_faul...
400
  EXPORT_SYMBOL(iov_iter_fault_in_readable);
171a02032   Anton Altaparmakov   VFS: Add iov_iter...
401

71d8e532b   Al Viro   start adding the ...
402
403
404
405
406
  void iov_iter_init(struct iov_iter *i, int direction,
  			const struct iovec *iov, unsigned long nr_segs,
  			size_t count)
  {
  	/* It will get better.  Eventually... */
a280455fa   Al Viro   iov_iter.c: handl...
407
  	if (segment_eq(get_fs(), KERNEL_DS)) {
62a8067a7   Al Viro   bio_vec-backed io...
408
  		direction |= ITER_KVEC;
a280455fa   Al Viro   iov_iter.c: handl...
409
410
411
412
413
414
  		i->type = direction;
  		i->kvec = (struct kvec *)iov;
  	} else {
  		i->type = direction;
  		i->iov = iov;
  	}
71d8e532b   Al Viro   start adding the ...
415
416
417
418
419
  	i->nr_segs = nr_segs;
  	i->iov_offset = 0;
  	i->count = count;
  }
  EXPORT_SYMBOL(iov_iter_init);
7b2c99d15   Al Viro   new helper: iov_i...
420

62a8067a7   Al Viro   bio_vec-backed io...
421
422
423
424
425
426
  static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
  {
  	char *from = kmap_atomic(page);
  	memcpy(to, from + offset, len);
  	kunmap_atomic(from);
  }
36f7a8a4c   Al Viro   iov_iter: constif...
427
  static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
62a8067a7   Al Viro   bio_vec-backed io...
428
429
430
431
432
  {
  	char *to = kmap_atomic(page);
  	memcpy(to + offset, from, len);
  	kunmap_atomic(to);
  }
c35e02480   Matthew Wilcox   Add copy_to_iter(...
433
434
435
436
437
438
  static void memzero_page(struct page *page, size_t offset, size_t len)
  {
  	char *addr = kmap_atomic(page);
  	memset(addr + offset, 0, len);
  	kunmap_atomic(addr);
  }
241699cd7   Al Viro   new iov_iter flav...
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
  static inline bool allocated(struct pipe_buffer *buf)
  {
  	return buf->ops == &default_pipe_buf_ops;
  }
  
/*
 * Find where new data would start being appended in the pipe: normally
 * (idx, iov_offset), but if the current buffer can't be appended to (it is
 * a stolen page-cache page, or it is already full) the start moves to
 * offset 0 of the next slot.
 */
static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}
  
/*
 * Make room for up to @size bytes in the pipe: top up the current
 * appendable buffer (if any) and then allocate fresh pages into free ring
 * slots until size bytes are covered, the ring fills up, or allocation
 * fails.  (*idxp, *offp) are set to where the data will start.  Returns
 * the number of bytes of capacity actually secured (may be short).
 */
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		/* room left in the current buffer's page */
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	/* fill free slots with freshly allocated pages */
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}
  
/*
 * Copy @bytes from kernel buffer @addr into a pipe-backed iterator:
 * reserve capacity with push_pipe(), then memcpy page-sized chunks into
 * the pipe buffers, advancing i->idx/iov_offset as each chunk lands.
 * Returns bytes copied (may be short if the pipe could not be extended).
 */
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}
36f7a8a4c   Al Viro   iov_iter: constif...
525
  size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
62a8067a7   Al Viro   bio_vec-backed io...
526
  {
36f7a8a4c   Al Viro   iov_iter: constif...
527
  	const char *from = addr;
241699cd7   Al Viro   new iov_iter flav...
528
529
  	if (unlikely(i->type & ITER_PIPE))
  		return copy_pipe_to_iter(addr, bytes, i);
3d4d3e482   Al Viro   iov_iter.c: conve...
530
531
532
533
  	iterate_and_advance(i, bytes, v,
  		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
  			       v.iov_len),
  		memcpy_to_page(v.bv_page, v.bv_offset,
a280455fa   Al Viro   iov_iter.c: handl...
534
535
  			       (from += v.bv_len) - v.bv_len, v.bv_len),
  		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
3d4d3e482   Al Viro   iov_iter.c: conve...
536
  	)
62a8067a7   Al Viro   bio_vec-backed io...
537

3d4d3e482   Al Viro   iov_iter.c: conve...
538
  	return bytes;
c35e02480   Matthew Wilcox   Add copy_to_iter(...
539
  }
d271524a3   Al Viro   iov_iter.c: get r...
540
  EXPORT_SYMBOL(copy_to_iter);
c35e02480   Matthew Wilcox   Add copy_to_iter(...
541

d271524a3   Al Viro   iov_iter.c: get r...
542
  size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
c35e02480   Matthew Wilcox   Add copy_to_iter(...
543
  {
0dbca9a4b   Al Viro   iov_iter.c: conve...
544
  	char *to = addr;
241699cd7   Al Viro   new iov_iter flav...
545
546
547
548
  	if (unlikely(i->type & ITER_PIPE)) {
  		WARN_ON(1);
  		return 0;
  	}
0dbca9a4b   Al Viro   iov_iter.c: conve...
549
550
551
552
  	iterate_and_advance(i, bytes, v,
  		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
  				 v.iov_len),
  		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
a280455fa   Al Viro   iov_iter.c: handl...
553
554
  				 v.bv_offset, v.bv_len),
  		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
0dbca9a4b   Al Viro   iov_iter.c: conve...
555
556
557
  	)
  
  	return bytes;
c35e02480   Matthew Wilcox   Add copy_to_iter(...
558
  }
d271524a3   Al Viro   iov_iter.c: get r...
559
  EXPORT_SYMBOL(copy_from_iter);
c35e02480   Matthew Wilcox   Add copy_to_iter(...
560

aa583096d   Al Viro   copy_from_iter_no...
561
562
563
  size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
  {
  	char *to = addr;
241699cd7   Al Viro   new iov_iter flav...
564
565
566
567
  	if (unlikely(i->type & ITER_PIPE)) {
  		WARN_ON(1);
  		return 0;
  	}
aa583096d   Al Viro   copy_from_iter_no...
568
569
570
571
572
573
574
575
576
577
578
  	iterate_and_advance(i, bytes, v,
  		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
  					 v.iov_base, v.iov_len),
  		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
  				 v.bv_offset, v.bv_len),
  		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
  	)
  
  	return bytes;
  }
  EXPORT_SYMBOL(copy_from_iter_nocache);
62a8067a7   Al Viro   bio_vec-backed io...
579
580
581
  size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
  			 struct iov_iter *i)
  {
d271524a3   Al Viro   iov_iter.c: get r...
582
583
584
585
586
  	if (i->type & (ITER_BVEC|ITER_KVEC)) {
  		void *kaddr = kmap_atomic(page);
  		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
  		kunmap_atomic(kaddr);
  		return wanted;
241699cd7   Al Viro   new iov_iter flav...
587
  	} else if (likely(!(i->type & ITER_PIPE)))
62a8067a7   Al Viro   bio_vec-backed io...
588
  		return copy_page_to_iter_iovec(page, offset, bytes, i);
241699cd7   Al Viro   new iov_iter flav...
589
590
  	else
  		return copy_page_to_iter_pipe(page, offset, bytes, i);
62a8067a7   Al Viro   bio_vec-backed io...
591
592
593
594
595
596
  }
  EXPORT_SYMBOL(copy_page_to_iter);
  
  size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
  			 struct iov_iter *i)
  {
241699cd7   Al Viro   new iov_iter flav...
597
598
599
600
  	if (unlikely(i->type & ITER_PIPE)) {
  		WARN_ON(1);
  		return 0;
  	}
a280455fa   Al Viro   iov_iter.c: handl...
601
  	if (i->type & (ITER_BVEC|ITER_KVEC)) {
d271524a3   Al Viro   iov_iter.c: get r...
602
603
604
605
606
  		void *kaddr = kmap_atomic(page);
  		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
  		kunmap_atomic(kaddr);
  		return wanted;
  	} else
62a8067a7   Al Viro   bio_vec-backed io...
607
608
609
  		return copy_page_from_iter_iovec(page, offset, bytes, i);
  }
  EXPORT_SYMBOL(copy_page_from_iter);
241699cd7   Al Viro   new iov_iter flav...
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
/*
 * Zero-fill @bytes of a pipe-backed iterator: reserve capacity with
 * push_pipe(), then memset page-sized chunks, advancing i->idx/iov_offset.
 * Returns bytes zeroed (may be short if the pipe could not be extended).
 */
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}
c35e02480   Matthew Wilcox   Add copy_to_iter(...
633
634
  size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
  {
241699cd7   Al Viro   new iov_iter flav...
635
636
  	if (unlikely(i->type & ITER_PIPE))
  		return pipe_zero(bytes, i);
8442fa46c   Al Viro   iov_iter.c: conve...
637
638
  	iterate_and_advance(i, bytes, v,
  		__clear_user(v.iov_base, v.iov_len),
a280455fa   Al Viro   iov_iter.c: handl...
639
640
  		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
  		memset(v.iov_base, 0, v.iov_len)
8442fa46c   Al Viro   iov_iter.c: conve...
641
642
643
  	)
  
  	return bytes;
c35e02480   Matthew Wilcox   Add copy_to_iter(...
644
645
  }
  EXPORT_SYMBOL(iov_iter_zero);
62a8067a7   Al Viro   bio_vec-backed io...
646
647
648
  size_t iov_iter_copy_from_user_atomic(struct page *page,
  		struct iov_iter *i, unsigned long offset, size_t bytes)
  {
04a311655   Al Viro   iov_iter.c: macro...
649
  	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
241699cd7   Al Viro   new iov_iter flav...
650
651
652
653
654
  	if (unlikely(i->type & ITER_PIPE)) {
  		kunmap_atomic(kaddr);
  		WARN_ON(1);
  		return 0;
  	}
04a311655   Al Viro   iov_iter.c: macro...
655
656
657
658
  	iterate_all_kinds(i, bytes, v,
  		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
  					  v.iov_base, v.iov_len),
  		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
a280455fa   Al Viro   iov_iter.c: handl...
659
660
  				 v.bv_offset, v.bv_len),
  		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
04a311655   Al Viro   iov_iter.c: macro...
661
662
663
  	)
  	kunmap_atomic(kaddr);
  	return bytes;
62a8067a7   Al Viro   bio_vec-backed io...
664
665
  }
  EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
d06367ac1   Al Viro   fix a fencepost e...
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
/*
 * Release every pipe buffer past the iterator's current position and trim
 * the final (partially used) buffer's length so the pipe ends exactly at
 * (i->idx, i->iov_offset).
 */
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		/* number of buffers that survive, counting a partial one */
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}
241699cd7   Al Viro   new iov_iter flav...
685
686
687
  static void pipe_advance(struct iov_iter *i, size_t size)
  {
  	struct pipe_inode_info *pipe = i->pipe;
241699cd7   Al Viro   new iov_iter flav...
688
689
  	if (unlikely(i->count < size))
  		size = i->count;
241699cd7   Al Viro   new iov_iter flav...
690
  	if (size) {
d06367ac1   Al Viro   fix a fencepost e...
691
692
693
  		struct pipe_buffer *buf;
  		size_t off = i->iov_offset, left = size;
  		int idx = i->idx;
241699cd7   Al Viro   new iov_iter flav...
694
  		if (off) /* make it relative to the beginning of buffer */
d06367ac1   Al Viro   fix a fencepost e...
695
  			left += off - pipe->bufs[idx].offset;
241699cd7   Al Viro   new iov_iter flav...
696
697
  		while (1) {
  			buf = &pipe->bufs[idx];
d06367ac1   Al Viro   fix a fencepost e...
698
  			if (left <= buf->len)
241699cd7   Al Viro   new iov_iter flav...
699
  				break;
d06367ac1   Al Viro   fix a fencepost e...
700
  			left -= buf->len;
241699cd7   Al Viro   new iov_iter flav...
701
702
  			idx = next_idx(idx, pipe);
  		}
241699cd7   Al Viro   new iov_iter flav...
703
  		i->idx = idx;
d06367ac1   Al Viro   fix a fencepost e...
704
  		i->iov_offset = buf->offset + left;
241699cd7   Al Viro   new iov_iter flav...
705
  	}
d06367ac1   Al Viro   fix a fencepost e...
706
707
708
  	i->count -= size;
  	/* ... and discard everything past that point */
  	pipe_truncate(i);
241699cd7   Al Viro   new iov_iter flav...
709
  }
62a8067a7   Al Viro   bio_vec-backed io...
710
711
  void iov_iter_advance(struct iov_iter *i, size_t size)
  {
241699cd7   Al Viro   new iov_iter flav...
712
713
714
715
  	if (unlikely(i->type & ITER_PIPE)) {
  		pipe_advance(i, size);
  		return;
  	}
a280455fa   Al Viro   iov_iter.c: handl...
716
  	iterate_and_advance(i, size, v, 0, 0, 0)
62a8067a7   Al Viro   bio_vec-backed io...
717
718
719
720
721
722
723
724
  }
  EXPORT_SYMBOL(iov_iter_advance);
  
  /*
   * Return the count of just the current iov_iter segment.
   */
  size_t iov_iter_single_seg_count(const struct iov_iter *i)
  {
241699cd7   Al Viro   new iov_iter flav...
725
726
  	if (unlikely(i->type & ITER_PIPE))
  		return i->count;	// it is a silly place, anyway
62a8067a7   Al Viro   bio_vec-backed io...
727
728
729
  	if (i->nr_segs == 1)
  		return i->count;
  	else if (i->type & ITER_BVEC)
62a8067a7   Al Viro   bio_vec-backed io...
730
  		return min(i->count, i->bvec->bv_len - i->iov_offset);
ad0eab929   Paul Mackerras   Fix thinko in iov...
731
732
  	else
  		return min(i->count, i->iov->iov_len - i->iov_offset);
62a8067a7   Al Viro   bio_vec-backed io...
733
734
  }
  EXPORT_SYMBOL(iov_iter_single_seg_count);
abb78f875   Al Viro   new helper: iov_i...
735
  void iov_iter_kvec(struct iov_iter *i, int direction,
05afcb77e   Al Viro   new helper: iov_i...
736
  			const struct kvec *kvec, unsigned long nr_segs,
abb78f875   Al Viro   new helper: iov_i...
737
738
739
740
  			size_t count)
  {
  	BUG_ON(!(direction & ITER_KVEC));
  	i->type = direction;
05afcb77e   Al Viro   new helper: iov_i...
741
  	i->kvec = kvec;
abb78f875   Al Viro   new helper: iov_i...
742
743
744
745
746
  	i->nr_segs = nr_segs;
  	i->iov_offset = 0;
  	i->count = count;
  }
  EXPORT_SYMBOL(iov_iter_kvec);
05afcb77e   Al Viro   new helper: iov_i...
747
748
749
750
751
752
753
754
755
756
757
758
  void iov_iter_bvec(struct iov_iter *i, int direction,
  			const struct bio_vec *bvec, unsigned long nr_segs,
  			size_t count)
  {
  	BUG_ON(!(direction & ITER_BVEC));
  	i->type = direction;
  	i->bvec = bvec;
  	i->nr_segs = nr_segs;
  	i->iov_offset = 0;
  	i->count = count;
  }
  EXPORT_SYMBOL(iov_iter_bvec);
241699cd7   Al Viro   new iov_iter flav...
759
760
761
762
763
  void iov_iter_pipe(struct iov_iter *i, int direction,
  			struct pipe_inode_info *pipe,
  			size_t count)
  {
  	BUG_ON(direction != ITER_PIPE);
d06367ac1   Al Viro   fix a fencepost e...
764
  	WARN_ON(pipe->nrbufs == pipe->buffers);
241699cd7   Al Viro   new iov_iter flav...
765
766
767
768
769
770
771
  	i->type = direction;
  	i->pipe = pipe;
  	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
  	i->iov_offset = 0;
  	i->count = count;
  }
  EXPORT_SYMBOL(iov_iter_pipe);
62a8067a7   Al Viro   bio_vec-backed io...
772
773
  unsigned long iov_iter_alignment(const struct iov_iter *i)
  {
04a311655   Al Viro   iov_iter.c: macro...
774
775
776
777
778
  	unsigned long res = 0;
  	size_t size = i->count;
  
  	if (!size)
  		return 0;
241699cd7   Al Viro   new iov_iter flav...
779
780
781
782
783
  	if (unlikely(i->type & ITER_PIPE)) {
  		if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
  			return size | i->iov_offset;
  		return size;
  	}
04a311655   Al Viro   iov_iter.c: macro...
784
785
  	iterate_all_kinds(i, size, v,
  		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
a280455fa   Al Viro   iov_iter.c: handl...
786
787
  		res |= v.bv_offset | v.bv_len,
  		res |= (unsigned long)v.iov_base | v.iov_len
04a311655   Al Viro   iov_iter.c: macro...
788
789
  	)
  	return res;
62a8067a7   Al Viro   bio_vec-backed io...
790
791
  }
  EXPORT_SYMBOL(iov_iter_alignment);
357f435d8   Al Viro   fix the copy vs. ...
792
793
794
795
796
797
  unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
  {
          unsigned long res = 0;
  	size_t size = i->count;
  	if (!size)
  		return 0;
241699cd7   Al Viro   new iov_iter flav...
798
799
800
801
  	if (unlikely(i->type & ITER_PIPE)) {
  		WARN_ON(1);
  		return ~0U;
  	}
357f435d8   Al Viro   fix the copy vs. ...
802
803
804
805
806
807
808
809
810
811
812
  	iterate_all_kinds(i, size, v,
  		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
  			(size != v.iov_len ? size : 0), 0),
  		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
  			(size != v.bv_len ? size : 0)),
  		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
  			(size != v.iov_len ? size : 0))
  		);
  		return res;
  }
  EXPORT_SYMBOL(iov_iter_gap_alignment);
241699cd7   Al Viro   new iov_iter flav...
813
814
815
816
817
818
819
  static inline size_t __pipe_get_pages(struct iov_iter *i,
  				size_t maxsize,
  				struct page **pages,
  				int idx,
  				size_t *start)
  {
  	struct pipe_inode_info *pipe = i->pipe;
1689c73a7   Al Viro   Fix off-by-one in...
820
  	ssize_t n = push_pipe(i, maxsize, &idx, start);
241699cd7   Al Viro   new iov_iter flav...
821
822
823
824
825
  	if (!n)
  		return -EFAULT;
  
  	maxsize = n;
  	n += *start;
1689c73a7   Al Viro   Fix off-by-one in...
826
  	while (n > 0) {
241699cd7   Al Viro   new iov_iter flav...
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
  		get_page(*pages++ = pipe->bufs[idx].page);
  		idx = next_idx(idx, pipe);
  		n -= PAGE_SIZE;
  	}
  
  	return maxsize;
  }
  
  /*
   * pipe_get_pages - pipe backend of iov_iter_get_pages().
   *
   * Works out how many pipe buffers are still usable, converts that into a
   * byte capacity, and lets __pipe_get_pages() grab the page references.
   */
  static ssize_t pipe_get_pages(struct iov_iter *i,
  		   struct page **pages, size_t maxsize, unsigned maxpages,
  		   size_t *start)
  {
  	size_t capacity;
  	unsigned npages;
  	int idx;
  
  	if (!sanity(i))
  		return -EFAULT;
  
  	data_start(i, &idx, start);
  	/* the tail of the current buffer plus every buffer after it */
  	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
  	if (npages > maxpages)
  		npages = maxpages;
  	capacity = (size_t)npages * PAGE_SIZE - *start;
  	if (maxsize > capacity)
  		maxsize = capacity;
  
  	return __pipe_get_pages(i, maxsize, pages, idx, start);
  }
62a8067a7   Al Viro   bio_vec-backed io...
853
  /*
   * iov_iter_get_pages - get references on the pages backing the next
   * chunk of @i, without advancing the iterator.
   *
   * Stores up to @maxpages page pointers in @pages and sets @*start to the
   * data's offset within the first page.  Returns the number of bytes the
   * stored pages cover (clamped to @maxsize and to the first segment),
   * 0 for an empty iterator, or a negative errno; the kvec arm below
   * returns -EFAULT.
   */
  ssize_t iov_iter_get_pages(struct iov_iter *i,
2c80929c4   Miklos Szeredi   fuse: honour max_...
854
  		   struct page **pages, size_t maxsize, unsigned maxpages,
62a8067a7   Al Viro   bio_vec-backed io...
855
856
  		   size_t *start)
  {
e5393fae3   Al Viro   iov_iter.c: conve...
857
858
859
860
861
  	if (maxsize > i->count)
  		maxsize = i->count;
  
  	if (!maxsize)
  		return 0;
241699cd7   Al Viro   new iov_iter flav...
862
863
  	/* pipe-backed iterators take their pages from the pipe's buffers */
  	if (unlikely(i->type & ITER_PIPE))
  		return pipe_get_pages(i, pages, maxsize, maxpages, start);
e5393fae3   Al Viro   iov_iter.c: conve...
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
  	/* note: each arm returns straight out of the macro for the first segment */
  	iterate_all_kinds(i, maxsize, v, ({
  		unsigned long addr = (unsigned long)v.iov_base;
  		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
  		int n;
  		int res;
  
  		if (len > maxpages * PAGE_SIZE)
  			len = maxpages * PAGE_SIZE;
  		addr &= ~(PAGE_SIZE - 1);
  		n = DIV_ROUND_UP(len, PAGE_SIZE);
  		/* gup write flag: set unless this iter is a WRITE (data-source) iter */
  		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
  		if (unlikely(res < 0))
  			return res;
  		/* partial pin: report only the bytes actually covered */
  		return (res == n ? len : res * PAGE_SIZE) - *start;
  	0;}),({
  		/* can't be more than PAGE_SIZE */
  		*start = v.bv_offset;
  		get_page(*pages = v.bv_page);
  		return v.bv_len;
a280455fa   Al Viro   iov_iter.c: handl...
883
884
  	}),({
  		/* kvec segments are not supported here */
  		return -EFAULT;
e5393fae3   Al Viro   iov_iter.c: conve...
885
886
887
  	})
  	)
  	return 0;
62a8067a7   Al Viro   bio_vec-backed io...
888
889
  }
  EXPORT_SYMBOL(iov_iter_get_pages);
1b17f1f2e   Al Viro   iov_iter.c: conve...
890
891
892
893
894
895
896
  /*
   * get_pages_array - allocate an array of @n page pointers, trying
   * kmalloc first and falling back to vmalloc; free with kvfree().
   */
  static struct page **get_pages_array(size_t n)
  {
  	size_t size = n * sizeof(struct page *);
  	struct page **p = kmalloc(size, GFP_KERNEL);
  
  	return p ? p : vmalloc(size);
  }
241699cd7   Al Viro   new iov_iter flav...
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
  /*
   * pipe_get_pages_alloc - pipe backend of iov_iter_get_pages_alloc().
   *
   * Sizes and allocates the page-pointer array, then lets
   * __pipe_get_pages() grab the references.  On success the array is
   * handed to the caller through @pages (free with kvfree()); on failure
   * it is freed here.
   *
   * @n must be signed: __pipe_get_pages() reports -EFAULT, and keeping
   * the result in a size_t (as this function used to) made that error
   * compare as "> 0", leaking the array and setting *pages on failure.
   */
  static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
  		   struct page ***pages, size_t maxsize,
  		   size_t *start)
  {
  	struct page **p;
  	size_t len;
  	ssize_t n;
  	int idx;
  	int npages;
  
  	if (!sanity(i))
  		return -EFAULT;
  
  	data_start(i, &idx, start);
  	/* some of this one + all after this one */
  	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
  	len = npages * PAGE_SIZE - *start;
  	if (maxsize > len)
  		maxsize = len;
  	else
  		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
  	p = get_pages_array(npages);
  	if (!p)
  		return -ENOMEM;
  	n = __pipe_get_pages(i, maxsize, p, idx, start);
  	if (n > 0)
  		*pages = p;
  	else
  		kvfree(p);
  	return n;
  }
62a8067a7   Al Viro   bio_vec-backed io...
927
928
929
930
  /*
   * iov_iter_get_pages_alloc - like iov_iter_get_pages(), but allocates
   * the page-pointer array itself (see get_pages_array()) and hands it to
   * the caller through @pages.
   *
   * On success returns the number of bytes covered and the caller owns
   * the array (free with kvfree()); on failure the array is freed here
   * and a negative errno is returned.  The kvec arm yields -EFAULT.
   */
  ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
  		   struct page ***pages, size_t maxsize,
  		   size_t *start)
  {
1b17f1f2e   Al Viro   iov_iter.c: conve...
931
932
933
934
935
936
937
  	struct page **p;
  
  	if (maxsize > i->count)
  		maxsize = i->count;
  
  	if (!maxsize)
  		return 0;
241699cd7   Al Viro   new iov_iter flav...
938
939
  	/* pipe-backed iterators take their pages from the pipe's buffers */
  	if (unlikely(i->type & ITER_PIPE))
  		return pipe_get_pages_alloc(i, pages, maxsize, start);
1b17f1f2e   Al Viro   iov_iter.c: conve...
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
  	/* note: each arm returns straight out of the macro for the first segment */
  	iterate_all_kinds(i, maxsize, v, ({
  		unsigned long addr = (unsigned long)v.iov_base;
  		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
  		int n;
  		int res;
  
  		addr &= ~(PAGE_SIZE - 1);
  		n = DIV_ROUND_UP(len, PAGE_SIZE);
  		p = get_pages_array(n);
  		if (!p)
  			return -ENOMEM;
  		/* gup write flag: set unless this iter is a WRITE (data-source) iter */
  		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
  		if (unlikely(res < 0)) {
  			kvfree(p);
  			return res;
  		}
  		*pages = p;
  		/* partial pin: report only the bytes actually covered */
  		return (res == n ? len : res * PAGE_SIZE) - *start;
  	0;}),({
  		/* can't be more than PAGE_SIZE */
  		*start = v.bv_offset;
  		*pages = p = get_pages_array(1);
  		if (!p)
  			return -ENOMEM;
  		get_page(*p = v.bv_page);
  		return v.bv_len;
a280455fa   Al Viro   iov_iter.c: handl...
966
967
  	}),({
  		/* kvec segments are not supported here */
  		return -EFAULT;
1b17f1f2e   Al Viro   iov_iter.c: conve...
968
969
970
  	})
  	)
  	return 0;
62a8067a7   Al Viro   bio_vec-backed io...
971
972
  }
  EXPORT_SYMBOL(iov_iter_get_pages_alloc);
a604ec7e9   Al Viro   csum_and_copy_......
973
974
975
976
977
978
  /*
   * csum_and_copy_from_iter - copy @bytes from @i into @addr while
   * accumulating an internet checksum of the copied data into @*csum.
   *
   * Advances the iterator.  The user-copy step reports a fault through
   * its error flag and stops at the failing segment; the kmap/kernel
   * steps cannot fault.  NOTE(review): the return value is whatever
   * iterate_and_advance() leaves in @bytes -- presumably the amount
   * actually processed; the macro definition is outside this view,
   * confirm against it.  Pipe iterators are rejected (WARN_ON, 0).
   */
  size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
  			       struct iov_iter *i)
  {
  	char *to = addr;
  	__wsum sum, next;
  	size_t off = 0;
a604ec7e9   Al Viro   csum_and_copy_......
979
  	sum = *csum;
241699cd7   Al Viro   new iov_iter flav...
980
981
982
983
  	if (unlikely(i->type & ITER_PIPE)) {
  		WARN_ON(1);
  		return 0;
  	}
a604ec7e9   Al Viro   csum_and_copy_......
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
  	iterate_and_advance(i, bytes, v, ({
  		int err = 0;
  		next = csum_and_copy_from_user(v.iov_base, 
  					       (to += v.iov_len) - v.iov_len,
  					       v.iov_len, 0, &err);
  		if (!err) {
  			/* fold this segment's sum in at its byte offset */
  			sum = csum_block_add(sum, next, off);
  			off += v.iov_len;
  		}
  		err ? v.iov_len : 0;
  	}), ({
  		char *p = kmap_atomic(v.bv_page);
  		next = csum_partial_copy_nocheck(p + v.bv_offset,
  						 (to += v.bv_len) - v.bv_len,
  						 v.bv_len, 0);
  		kunmap_atomic(p);
  		sum = csum_block_add(sum, next, off);
  		off += v.bv_len;
  	}),({
  		next = csum_partial_copy_nocheck(v.iov_base,
  						 (to += v.iov_len) - v.iov_len,
  						 v.iov_len, 0);
  		sum = csum_block_add(sum, next, off);
  		off += v.iov_len;
  	})
  	)
  	*csum = sum;
  	return bytes;
  }
  EXPORT_SYMBOL(csum_and_copy_from_iter);
36f7a8a4c   Al Viro   iov_iter: constif...
1014
  /*
   * csum_and_copy_to_iter - copy @bytes from @addr out to @i while
   * accumulating an internet checksum of the copied data into @*csum.
   *
   * Mirror image of csum_and_copy_from_iter(): advances the iterator,
   * and the user-copy step stops at a faulting segment via its error
   * flag.  Pipe iterators are rejected (WARN_ON, 0) for now.
   */
  size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
a604ec7e9   Al Viro   csum_and_copy_......
1015
1016
  			     struct iov_iter *i)
  {
36f7a8a4c   Al Viro   iov_iter: constif...
1017
  	const char *from = addr;
a604ec7e9   Al Viro   csum_and_copy_......
1018
1019
  	__wsum sum, next;
  	size_t off = 0;
a604ec7e9   Al Viro   csum_and_copy_......
1020
  	sum = *csum;
241699cd7   Al Viro   new iov_iter flav...
1021
1022
1023
1024
  	if (unlikely(i->type & ITER_PIPE)) {
  		WARN_ON(1);	/* for now */
  		return 0;
  	}
a604ec7e9   Al Viro   csum_and_copy_......
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
  	iterate_and_advance(i, bytes, v, ({
  		int err = 0;
  		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
  					     v.iov_base, 
  					     v.iov_len, 0, &err);
  		if (!err) {
  			/* fold this segment's sum in at its byte offset */
  			sum = csum_block_add(sum, next, off);
  			off += v.iov_len;
  		}
  		err ? v.iov_len : 0;
  	}), ({
  		char *p = kmap_atomic(v.bv_page);
  		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
  						 p + v.bv_offset,
  						 v.bv_len, 0);
  		kunmap_atomic(p);
  		sum = csum_block_add(sum, next, off);
  		off += v.bv_len;
  	}),({
  		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
  						 v.iov_base,
  						 v.iov_len, 0);
  		sum = csum_block_add(sum, next, off);
  		off += v.iov_len;
  	})
  	)
  	*csum = sum;
  	return bytes;
  }
  EXPORT_SYMBOL(csum_and_copy_to_iter);
62a8067a7   Al Viro   bio_vec-backed io...
1055
1056
  /*
   * iov_iter_npages - number of pages touched by the first @i->count
   * bytes of the iterator, capped at @maxpages.
   *
   * iovec/kvec segments may start and end mid-page, hence the
   * DIV_ROUND_UP(end) - start/PAGE_SIZE arithmetic; each bvec entry
   * counts as one page.  For pipes, counts the occupied buffers from
   * the current data start.
   */
  int iov_iter_npages(const struct iov_iter *i, int maxpages)
  {
e0f2dc406   Al Viro   iov_iter.c: conve...
1057
1058
1059
1060
1061
  	size_t size = i->count;
  	int npages = 0;
  
  	if (!size)
  		return 0;
241699cd7   Al Viro   new iov_iter flav...
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
  	if (unlikely(i->type & ITER_PIPE)) {
  		struct pipe_inode_info *pipe = i->pipe;
  		size_t off;
  		int idx;
  
  		if (!sanity(i))
  			return 0;
  
  		data_start(i, &idx, &off);
  		/* some of this one + all after this one */
  		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
  		if (npages >= maxpages)
  			return maxpages;
  	} else iterate_all_kinds(i, size, v, ({
e0f2dc406   Al Viro   iov_iter.c: conve...
1076
1077
1078
1079
1080
1081
1082
1083
1084
  		unsigned long p = (unsigned long)v.iov_base;
  		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
  			- p / PAGE_SIZE;
  		if (npages >= maxpages)
  			return maxpages;
  	0;}),({
  		npages++;
  		if (npages >= maxpages)
  			return maxpages;
a280455fa   Al Viro   iov_iter.c: handl...
1085
1086
1087
1088
1089
1090
  	}),({
  		unsigned long p = (unsigned long)v.iov_base;
  		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
  			- p / PAGE_SIZE;
  		if (npages >= maxpages)
  			return maxpages;
e0f2dc406   Al Viro   iov_iter.c: conve...
1091
1092
1093
  	})
  	)
  	return npages;
62a8067a7   Al Viro   bio_vec-backed io...
1094
  }
f67da30c1   Al Viro   new helper: iov_i...
1095
  EXPORT_SYMBOL(iov_iter_npages);
4b8164b91   Al Viro   new helper: dup_i...
1096
1097
1098
1099
  
  /*
   * dup_iter - clone an iov_iter, deep-copying its segment array.
   *
   * Copies *old into *new and duplicates the backing iovec/kvec/bvec
   * array with kmemdup().  Returns the duplicated array (also stored in
   * *new), or NULL on allocation failure or for pipe iterators, which
   * are not supported.
   */
  const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
  {
  	*new = *old;
  
  	if (unlikely(new->type & ITER_PIPE)) {
  		WARN_ON(1);
  		return NULL;
  	}
  
  	if (new->type & ITER_BVEC) {
  		new->bvec = kmemdup(new->bvec,
  				    new->nr_segs * sizeof(struct bio_vec),
  				    flags);
  		return new->bvec;
  	}
  
  	/* iovec and kvec have identical layout */
  	new->iov = kmemdup(new->iov,
  			   new->nr_segs * sizeof(struct iovec),
  			   flags);
  	return new->iov;
  }
  EXPORT_SYMBOL(dup_iter);
bc917be81   Al Viro   saner iov_iter in...
1115

ffecee4f2   Vegard Nossum   iov_iter: kernel-...
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
  /**
   * import_iovec() - Copy an array of &struct iovec from userspace
   *     into the kernel, check that it is valid, and initialize a new
   *     &struct iov_iter iterator to access it.
   *
   * @type: One of %READ or %WRITE.
   * @uvector: Pointer to the userspace array.
   * @nr_segs: Number of elements in userspace array.
   * @fast_segs: Number of elements in @iov.
   * @iov: (input and output parameter) Pointer to pointer to (usually small
   *     on-stack) kernel array.
   * @i: Pointer to iterator that will be initialized on success.
   *
   * If the array pointed to by *@iov is large enough to hold all @nr_segs,
   * then this function places %NULL in *@iov on return. Otherwise, a new
   * array will be allocated and the result placed in *@iov. This means that
   * the caller may call kfree() on *@iov regardless of whether the small
   * on-stack array was used or not (and regardless of whether this function
   * returns an error or not).
   *
   * Return: 0 on success or negative error code on error.
   */
bc917be81   Al Viro   saner iov_iter in...
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
  int import_iovec(int type, const struct iovec __user * uvector,
  		 unsigned nr_segs, unsigned fast_segs,
  		 struct iovec **iov, struct iov_iter *i)
  {
  	ssize_t n;
  	struct iovec *p;
  	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
  				  *iov, &p);
  	if (n < 0) {
  		if (p != *iov)
  			kfree(p);
  		*iov = NULL;
  		return n;
  	}
  	iov_iter_init(i, type, p, nr_segs, n);
  	*iov = p == *iov ? NULL : p;
  	return 0;
  }
  EXPORT_SYMBOL(import_iovec);
  
  #ifdef CONFIG_COMPAT
  #include <linux/compat.h>
  
  /*
   * compat_import_iovec - compat counterpart of import_iovec(): copies a
   * compat_iovec array from userspace, validates it and initializes @i.
   * Ownership of *@iov follows the same rules as import_iovec().
   */
  int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
  		 unsigned nr_segs, unsigned fast_segs,
  		 struct iovec **iov, struct iov_iter *i)
  {
  	struct iovec *p;
  	ssize_t n;
  
  	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
  				  *iov, &p);
  	if (n >= 0) {
  		/* success: n is the total byte count across all segments */
  		iov_iter_init(i, type, p, nr_segs, n);
  		*iov = (p == *iov) ? NULL : p;
  		return 0;
  	}
  
  	/* only free an array compat_rw_copy_check_uvector() allocated for us */
  	if (p != *iov)
  		kfree(p);
  	*iov = NULL;
  	return n;
  }
  #endif
  
  /*
   * import_single_range - build a single-segment iov_iter over one user
   * buffer, clamping the length to MAX_RW_COUNT and verifying access.
   * Returns 0 on success or -EFAULT if the range fails access_ok().
   */
  int import_single_range(int rw, void __user *buf, size_t len,
  		 struct iovec *iov, struct iov_iter *i)
  {
  	size_t todo = min_t(size_t, len, MAX_RW_COUNT);
  
  	if (unlikely(!access_ok(!rw, buf, todo)))
  		return -EFAULT;
  
  	iov->iov_len = todo;
  	iov->iov_base = buf;
  	iov_iter_init(i, rw, iov, 1, todo);
  	return 0;
  }
  EXPORT_SYMBOL(import_single_range);