Blame view

fs/ext4/file.c 23 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  // SPDX-License-Identifier: GPL-2.0
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
2
  /*
617ba13b3   Mingming Cao   [PATCH] ext4: ren...
3
   *  linux/fs/ext4/file.c
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
4
5
6
7
8
9
10
11
12
13
14
15
   *
   * Copyright (C) 1992, 1993, 1994, 1995
   * Remy Card (card@masi.ibp.fr)
   * Laboratoire MASI - Institut Blaise Pascal
   * Universite Pierre et Marie Curie (Paris VI)
   *
   *  from
   *
   *  linux/fs/minix/file.c
   *
   *  Copyright (C) 1991, 1992  Linus Torvalds
   *
617ba13b3   Mingming Cao   [PATCH] ext4: ren...
16
   *  ext4 fs regular file handling primitives
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
17
18
19
20
21
22
23
   *
   *  64-bit file support on 64-bit platforms by Jakub Jelinek
   *	(jj@sunsite.ms.mff.cuni.cz)
   */
  
  #include <linux/time.h>
  #include <linux/fs.h>
545052e9e   Christoph Hellwig   ext4: Switch to i...
24
  #include <linux/iomap.h>
bc0b0d6d6   Theodore Ts'o   ext4: update the ...
25
26
  #include <linux/mount.h>
  #include <linux/path.h>
c94c2acf8   Matthew Wilcox   dax: move DAX-rel...
27
  #include <linux/dax.h>
871a29315   Christoph Hellwig   dquot: cleanup dq...
28
  #include <linux/quotaops.h>
c8c0df241   Zheng Liu   ext4: introduce l...
29
  #include <linux/pagevec.h>
e2e40f2c1   Christoph Hellwig   fs: move struct k...
30
  #include <linux/uio.h>
b8a6176c2   Jan Kara   ext4: Support for...
31
  #include <linux/mman.h>
378f32bab   Matthew Bobrowski   ext4: introduce d...
32
  #include <linux/backing-dev.h>
3dcf54515   Christoph Hellwig   ext4: move header...
33
34
  #include "ext4.h"
  #include "ext4_jbd2.h"
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
35
36
  #include "xattr.h"
  #include "acl.h"
569342dc2   Matthew Bobrowski   ext4: move inode ...
37
  #include "truncate.h"
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
38

141f59b91   Eric Biggers   ANDROID: ext4, f2...
39
  static bool ext4_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
b1b4705d5   Matthew Bobrowski   ext4: introduce d...
40
  {
141f59b91   Eric Biggers   ANDROID: ext4, f2...
41
  	struct inode *inode = file_inode(iocb->ki_filp);
8d6c90c9d   Eric Biggers   ANDROID: fscrypt:...
42
43
  	if (!fscrypt_dio_supported(iocb, iter))
  		return false;
b1b4705d5   Matthew Bobrowski   ext4: introduce d...
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
  	if (fsverity_active(inode))
  		return false;
  	if (ext4_should_journal_data(inode))
  		return false;
  	if (ext4_has_inline_data(inode))
  		return false;
  	return true;
  }
  
/*
 * Direct I/O read. Takes the inode lock shared (trylock under
 * IOCB_NOWAIT, returning -EAGAIN if contended) and falls back to
 * buffered I/O when the inode cannot do direct I/O.
 */
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	if (!ext4_dio_supported(iocb, to)) {
		inode_unlock_shared(inode);
		/*
		 * Fallback to buffered I/O if the operation being performed on
		 * the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	/* Shared lock is held across the direct I/O itself. */
	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
			   is_sync_kiocb(iocb));
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
364443cbc   Jan Kara   ext4: convert DAX...
84
85
86
87
88
  #ifdef CONFIG_FS_DAX
  static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
  {
  	struct inode *inode = file_inode(iocb->ki_filp);
  	ssize_t ret;
f629afe33   Ritesh Harjani   ext4: fix ext4_da...
89
90
  	if (iocb->ki_flags & IOCB_NOWAIT) {
  		if (!inode_trylock_shared(inode))
728fbc0e1   Goldwyn Rodrigues   ext4: nowait aio ...
91
  			return -EAGAIN;
f629afe33   Ritesh Harjani   ext4: fix ext4_da...
92
  	} else {
728fbc0e1   Goldwyn Rodrigues   ext4: nowait aio ...
93
94
  		inode_lock_shared(inode);
  	}
364443cbc   Jan Kara   ext4: convert DAX...
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
  	/*
  	 * Recheck under inode lock - at this point we are sure it cannot
  	 * change anymore
  	 */
  	if (!IS_DAX(inode)) {
  		inode_unlock_shared(inode);
  		/* Fallback to buffered IO in case we cannot support DAX */
  		return generic_file_read_iter(iocb, to);
  	}
  	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
  	inode_unlock_shared(inode);
  
  	file_accessed(iocb->ki_filp);
  	return ret;
  }
  #endif
  
/*
 * Top-level ->read_iter dispatcher: rejects I/O on a shut-down
 * filesystem, then routes to the DAX, direct, or buffered read path.
 * The DAX check must precede the IOCB_DIRECT check so DAX inodes never
 * enter the block-based direct I/O path.
 */
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
129
130
  /*
   * Called when an inode is released. Note that this is different
617ba13b3   Mingming Cao   [PATCH] ext4: ren...
131
   * from ext4_file_open: open gets called at every open, but release
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
132
133
   * gets called only when /all/ the files are closed.
   */
af5bc92dd   Theodore Ts'o   ext4: Fix whitesp...
134
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * Force allocation of any delayed-allocation blocks that were
	 * flagged to be written out on close.
	 */
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks) {
		/* i_data_sem (write) protects the preallocation lists. */
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	/* Free htree directory-iteration state, if any was allocated. */
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}
e9e3bcecf   Eric Sandeen   ext4: serialize u...
153
154
155
156
157
158
159
160
161
  /*
   * This tests whether the IO in question is block-aligned or not.
   * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
   * are converted to written only after the IO is complete.  Until they are
   * mapped, these blocks appear as holes, so dio_zero_block() will assume that
   * it needs to zero out portions of the start and/or end block.  If 2 AIO
   * threads are at work on the same unwritten block, they must be synchronized
   * or one thread will zero the other's data, causing corruption.
   */
aa9714d0e   Ritesh Harjani   ext4: Start with ...
162
163
  static bool
  ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
e9e3bcecf   Eric Sandeen   ext4: serialize u...
164
165
  {
  	struct super_block *sb = inode->i_sb;
aa9714d0e   Ritesh Harjani   ext4: Start with ...
166
  	unsigned long blockmask = sb->s_blocksize - 1;
e9e3bcecf   Eric Sandeen   ext4: serialize u...
167

9b884164d   Al Viro   convert ext4 to -...
168
  	if ((pos | iov_iter_alignment(from)) & blockmask)
aa9714d0e   Ritesh Harjani   ext4: Start with ...
169
  		return true;
e9e3bcecf   Eric Sandeen   ext4: serialize u...
170

aa9714d0e   Ritesh Harjani   ext4: Start with ...
171
172
173
174
175
176
177
178
179
180
  	return false;
  }
  
  static bool
  ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
  {
  	if (offset + len > i_size_read(inode) ||
  	    offset + len > EXT4_I(inode)->i_disksize)
  		return true;
  	return false;
e9e3bcecf   Eric Sandeen   ext4: serialize u...
181
  }
213bcd9cc   Jan Kara   ext4: factor out ...
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
  /* Is IO overwriting allocated and initialized blocks? */
  static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
  {
  	struct ext4_map_blocks map;
  	unsigned int blkbits = inode->i_blkbits;
  	int err, blklen;
  
  	if (pos + len > i_size_read(inode))
  		return false;
  
  	map.m_lblk = pos >> blkbits;
  	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
  	blklen = map.m_len;
  
  	err = ext4_map_blocks(NULL, inode, &map, 0);
  	/*
  	 * 'err==len' means that all of the blocks have been preallocated,
  	 * regardless of whether they have been initialized or not. To exclude
  	 * unwritten extents, we need to check m_flags.
  	 */
  	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
  }
aa9714d0e   Ritesh Harjani   ext4: Start with ...
204
205
/*
 * ext4-specific pre-write validation: rejects writes to immutable
 * inodes, runs the VFS generic_write_checks(), and clamps the iter to
 * the smaller size limit of bitmap-mapped (non-extent) files.
 *
 * Returns the number of bytes that may be written (possibly truncated),
 * 0 for a zero-length write, or a negative errno.
 */
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}
  
  static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
  {
  	ssize_t ret, count;
  
  	count = ext4_generic_write_checks(iocb, from);
  	if (count <= 0)
  		return count;
378f32bab   Matthew Bobrowski   ext4: introduce d...
237
238
239
  	ret = file_modified(iocb->ki_filp);
  	if (ret)
  		return ret;
aa9714d0e   Ritesh Harjani   ext4: Start with ...
240
  	return count;
213bcd9cc   Jan Kara   ext4: factor out ...
241
  }
378f32bab   Matthew Bobrowski   ext4: introduce d...
242
243
244
245
246
247
248
249
/*
 * Buffered write path. Holds the inode lock exclusively for the whole
 * write; IOCB_NOWAIT is not supported here. The fast-commit update
 * window brackets the locked region.
 */
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	ext4_fc_start_update(inode);
	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/* Point writeback accounting at this inode's bdi for the copy. */
	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	ext4_fc_stop_update(inode);
	/* Advance the file position and honour O_(D)SYNC on success. */
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}
569342dc2   Matthew Bobrowski   ext4: move inode ...
270
271
272
273
274
275
276
/*
 * Finish a size-extending write: update i_size/i_disksize, truncate any
 * blocks that were allocated beyond what was actually written, and
 * remove the inode from the orphan list it was placed on before the
 * I/O started.
 *
 * @offset:  start of the write
 * @written: bytes actually written (or a negative errno from the I/O)
 * @count:   bytes that were requested
 *
 * Returns @written (possibly replaced by a journal errno). Note the
 * goto into the "if (truncate)" body: error paths jump straight to the
 * truncate/orphan-cleanup code.
 */
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;
	int ret;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written)) {
		ret = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(ret)) {
			written = ret;
			ext4_journal_stop(handle);
			goto truncate;
		}
	}

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try
		 * remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}
378f32bab   Matthew Bobrowski   ext4: introduce d...
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
  static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
  				 int error, unsigned int flags)
  {
  	loff_t offset = iocb->ki_pos;
  	struct inode *inode = file_inode(iocb->ki_filp);
  
  	if (error)
  		return error;
  
  	if (size && flags & IOMAP_DIO_UNWRITTEN)
  		return ext4_convert_unwritten_extents(NULL, inode,
  						      offset, size);
  
  	return 0;
  }
  
/* Completion hooks passed to iomap_dio_rw() on the DIO write path. */
static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};
aa9714d0e   Ritesh Harjani   ext4: Start with ...
375
376
377
378
379
380
381
382
383
384
385
  /*
   * The intention here is to start with shared lock acquired then see if any
   * condition requires an exclusive inode lock. If yes, then we restart the
   * whole operation by releasing the shared lock and acquiring exclusive lock.
   *
   * - For unaligned_io we never take shared lock as it may cause data corruption
   *   when two unaligned IO tries to modify the same block e.g. while zeroing.
   *
   * - For extending writes case we don't take the shared lock, since it requires
   *   updating inode i_disksize and/or orphan handling with exclusive lock.
   *
bc6385dab   Ritesh Harjani   ext4: Move to sha...
386
387
   * - shared locking will only be true mostly with overwrites. Otherwise we will
   *   switch to exclusive i_rwsem lock.
aa9714d0e   Ritesh Harjani   ext4: Start with ...
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
   */
/*
 * Pre-write checks for direct I/O, called with the inode lock already
 * held (shared iff *ilock_shared). If the shared lock turns out to be
 * insufficient (security update needed, extending write, or not a pure
 * overwrite), the lock is upgraded to exclusive and the checks restart.
 *
 * On success returns the writable byte count with the lock still held;
 * on failure returns <= 0 with the lock RELEASED.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the IO operation will overwrite allocated
	 * and initialized blocks.
	 * We need exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		/* Upgrading the lock would block; honour IOCB_NOWAIT. */
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}
378f32bab   Matthew Bobrowski   ext4: introduce d...
437
438
439
/*
 * Direct I/O write path. Starts optimistically with a shared inode
 * lock (exclusive for unaligned or apparently-extending I/O), falls
 * back to buffered I/O when direct I/O is unsupported, adds the inode
 * to the orphan list around extending writes, and completes any
 * short/failed extension via ext4_handle_inode_extension().
 */
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with shared inode lock unless it is
	 * unaligned IO which needs exclusive lock anyways.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is extending
	 * IO. A more reliable check is done in ext4_dio_write_checks() with
	 * proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}
	/* Fallback to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_dio_supported(iocb, from)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	/* On failure the checks helper has already dropped the lock. */
	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	/* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
	if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
		ret = -EAGAIN;
		goto out;
	}

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct IO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned IOs can result in data
	 * corruption.
	 *
	 * So we make sure we don't allow any unaligned IO in flight.
	 * For IOs where we need not wait (like unaligned non-AIO DIO),
	 * below inode_dio_wait() may anyway become a no-op, since we start
	 * with exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		/* Park the inode on the orphan list in case we crash
		 * mid-extension; removed in ext4_handle_inode_extension(). */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		ext4_fc_start_update(inode);
		ret = ext4_orphan_add(handle, inode);
		ext4_fc_stop_update(inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_journal_stop(handle);
	}

	/* Pure overwrites can use the cheaper overwrite iomap ops. */
	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io || extend);
	if (ret == -ENOTBLK)
		ret = 0;

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	/* Complete any remainder via buffered I/O, then write back and
	 * invalidate the affected page-cache range to keep DIO semantics. */
	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is in attempt to preserve the expected
		 * direct I/O semantics in the case we fallback to buffered I/O
		 * to complete off the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}
776722e85   Jan Kara   ext4: DAX iomap w...
570
571
572
573
  #ifdef CONFIG_FS_DAX
  static ssize_t
  ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
  {
776722e85   Jan Kara   ext4: DAX iomap w...
574
  	ssize_t ret;
569342dc2   Matthew Bobrowski   ext4: move inode ...
575
576
  	size_t count;
  	loff_t offset;
0b9f230b9   Matthew Bobrowski   ext4: move inode ...
577
578
  	handle_t *handle;
  	bool extend = false;
569342dc2   Matthew Bobrowski   ext4: move inode ...
579
  	struct inode *inode = file_inode(iocb->ki_filp);
776722e85   Jan Kara   ext4: DAX iomap w...
580

f629afe33   Ritesh Harjani   ext4: fix ext4_da...
581
582
  	if (iocb->ki_flags & IOCB_NOWAIT) {
  		if (!inode_trylock(inode))
728fbc0e1   Goldwyn Rodrigues   ext4: nowait aio ...
583
  			return -EAGAIN;
f629afe33   Ritesh Harjani   ext4: fix ext4_da...
584
  	} else {
728fbc0e1   Goldwyn Rodrigues   ext4: nowait aio ...
585
586
  		inode_lock(inode);
  	}
378f32bab   Matthew Bobrowski   ext4: introduce d...
587

776722e85   Jan Kara   ext4: DAX iomap w...
588
589
590
  	ret = ext4_write_checks(iocb, from);
  	if (ret <= 0)
  		goto out;
776722e85   Jan Kara   ext4: DAX iomap w...
591

569342dc2   Matthew Bobrowski   ext4: move inode ...
592
593
  	offset = iocb->ki_pos;
  	count = iov_iter_count(from);
0b9f230b9   Matthew Bobrowski   ext4: move inode ...
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
  
  	if (offset + count > EXT4_I(inode)->i_disksize) {
  		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
  		if (IS_ERR(handle)) {
  			ret = PTR_ERR(handle);
  			goto out;
  		}
  
  		ret = ext4_orphan_add(handle, inode);
  		if (ret) {
  			ext4_journal_stop(handle);
  			goto out;
  		}
  
  		extend = true;
  		ext4_journal_stop(handle);
  	}
776722e85   Jan Kara   ext4: DAX iomap w...
611
  	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
0b9f230b9   Matthew Bobrowski   ext4: move inode ...
612
613
614
  
  	if (extend)
  		ret = ext4_handle_inode_extension(inode, offset, ret, count);
776722e85   Jan Kara   ext4: DAX iomap w...
615
  out:
ff5462e39   Christoph Hellwig   ext4: fix DAX wri...
616
  	inode_unlock(inode);
776722e85   Jan Kara   ext4: DAX iomap w...
617
618
619
620
621
  	if (ret > 0)
  		ret = generic_write_sync(iocb, ret);
  	return ret;
  }
  #endif
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
622
  static ssize_t
9b884164d   Al Viro   convert ext4 to -...
623
  ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
624
  {
8ad2850f4   Theodore Ts'o   ext4: move ext4_f...
625
  	struct inode *inode = file_inode(iocb->ki_filp);
7608e6104   Theodore Ts'o   ext4: inline gene...
626

0db1ff222   Theodore Ts'o   ext4: add shutdow...
627
628
  	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
  		return -EIO;
776722e85   Jan Kara   ext4: DAX iomap w...
629
630
631
632
  #ifdef CONFIG_FS_DAX
  	if (IS_DAX(inode))
  		return ext4_dax_write_iter(iocb, from);
  #endif
378f32bab   Matthew Bobrowski   ext4: introduce d...
633
634
  	if (iocb->ki_flags & IOCB_DIRECT)
  		return ext4_dio_write_iter(iocb, from);
aa75f4d3d   Harshad Shirwadkar   ext4: main fast-c...
635
636
  	else
  		return ext4_buffered_write_iter(iocb, from);
ac27a0ec1   Dave Kleikamp   [PATCH] ext4: ini...
637
  }
923ae0ff9   Ross Zwisler   ext4: add DAX fun...
638
#ifdef CONFIG_FS_DAX
/*
 * Page-fault handler for DAX mappings at any page-entry size. Write
 * faults run under sb_start_pagefault() with a journal handle open so
 * block allocation can be journalled; allocation failures with -ENOSPC
 * are retried via ext4_should_retry_alloc(). i_mmap_sem (read) is held
 * across the fault in both the read and write cases.
 */
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		/* Retry transient ENOSPC caused by pending frees. */
		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}
71fe98996   Souptick Joarder   fs: ext4: add new...
697
/*
 * PTE-sized DAX fault entry point: delegates to ext4_dax_huge_fault() with
 * PE_SIZE_PTE.  Also reused below for ->page_mkwrite and ->pfn_mkwrite,
 * which are always single-page (PTE) events.
 */
static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}
923ae0ff9   Ross Zwisler   ext4: add DAX fun...
701
702
/* VM operations used for DAX mappings (see ext4_file_mmap() below). */
static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	/* Write faults on read-only PTEs take the same DAX fault path. */
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
/* Without DAX support, fall back to the regular page-cache vm_ops. */
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif
f0f37e2f7   Alexey Dobriyan   const: mark struc...
710
/* VM operations for ordinary (non-DAX) page-cache backed mappings. */
static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};
  
  static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
  {
c9c7429c2   Michael Halcrow   ext4 crypto: impl...
718
  	struct inode *inode = file->f_mapping->host;
e46bfc3f0   Pankaj Gupta   ext4: disable map...
719
720
  	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  	struct dax_device *dax_dev = sbi->s_daxdev;
c9c7429c2   Michael Halcrow   ext4 crypto: impl...
721

e46bfc3f0   Pankaj Gupta   ext4: disable map...
722
  	if (unlikely(ext4_forced_shutdown(sbi)))
0db1ff222   Theodore Ts'o   ext4: add shutdow...
723
  		return -EIO;
b8a6176c2   Jan Kara   ext4: Support for...
724
  	/*
e46bfc3f0   Pankaj Gupta   ext4: disable map...
725
726
  	 * We don't support synchronous mappings for non-DAX files and
  	 * for DAX files if underneath dax_device is not synchronous.
b8a6176c2   Jan Kara   ext4: Support for...
727
  	 */
e46bfc3f0   Pankaj Gupta   ext4: disable map...
728
  	if (!daxdev_mapping_supported(vma, dax_dev))
b8a6176c2   Jan Kara   ext4: Support for...
729
  		return -EOPNOTSUPP;
2e9ee8503   Aneesh Kumar K.V   ext4: Use page_mk...
730
  	file_accessed(file);
923ae0ff9   Ross Zwisler   ext4: add DAX fun...
731
732
  	if (IS_DAX(file_inode(file))) {
  		vma->vm_ops = &ext4_dax_vm_ops;
e1fb4a086   Dave Jiang   dax: remove VM_MI...
733
  		vma->vm_flags |= VM_HUGEPAGE;
923ae0ff9   Ross Zwisler   ext4: add DAX fun...
734
735
736
  	} else {
  		vma->vm_ops = &ext4_file_vm_ops;
  	}
2e9ee8503   Aneesh Kumar K.V   ext4: Use page_mk...
737
738
  	return 0;
  }
833a95088   Amir Goldstein   ext4: factor out ...
739
740
  static int ext4_sample_last_mounted(struct super_block *sb,
  				    struct vfsmount *mnt)
bc0b0d6d6   Theodore Ts'o   ext4: update the ...
741
  {
833a95088   Amir Goldstein   ext4: factor out ...
742
  	struct ext4_sb_info *sbi = EXT4_SB(sb);
bc0b0d6d6   Theodore Ts'o   ext4: update the ...
743
744
  	struct path path;
  	char buf[64], *cp;
833a95088   Amir Goldstein   ext4: factor out ...
745
746
  	handle_t *handle;
  	int err;
9b5f6c9b8   Harshad Shirwadkar   ext4: make s_moun...
747
  	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
833a95088   Amir Goldstein   ext4: factor out ...
748
  		return 0;
db6516a5e   Amir Goldstein   ext4: do not upda...
749
  	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
833a95088   Amir Goldstein   ext4: factor out ...
750
  		return 0;
9b5f6c9b8   Harshad Shirwadkar   ext4: make s_moun...
751
  	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
833a95088   Amir Goldstein   ext4: factor out ...
752
753
754
755
756
757
758
759
760
761
  	/*
  	 * Sample where the filesystem has been mounted and
  	 * store it in the superblock for sysadmin convenience
  	 * when trying to sort through large numbers of block
  	 * devices or filesystem images.
  	 */
  	memset(buf, 0, sizeof(buf));
  	path.mnt = mnt;
  	path.dentry = mnt->mnt_root;
  	cp = d_path(&path, buf, sizeof(buf));
db6516a5e   Amir Goldstein   ext4: do not upda...
762
  	err = 0;
833a95088   Amir Goldstein   ext4: factor out ...
763
  	if (IS_ERR(cp))
db6516a5e   Amir Goldstein   ext4: do not upda...
764
  		goto out;
833a95088   Amir Goldstein   ext4: factor out ...
765
766
  
  	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
db6516a5e   Amir Goldstein   ext4: do not upda...
767
  	err = PTR_ERR(handle);
833a95088   Amir Goldstein   ext4: factor out ...
768
  	if (IS_ERR(handle))
db6516a5e   Amir Goldstein   ext4: do not upda...
769
  		goto out;
833a95088   Amir Goldstein   ext4: factor out ...
770
771
772
  	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
  	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
  	if (err)
db6516a5e   Amir Goldstein   ext4: do not upda...
773
  		goto out_journal;
85958f60e   Theodore Ts'o   ext4: don't leak ...
774
  	strncpy(sbi->s_es->s_last_mounted, cp,
833a95088   Amir Goldstein   ext4: factor out ...
775
776
  		sizeof(sbi->s_es->s_last_mounted));
  	ext4_handle_dirty_super(handle, sb);
db6516a5e   Amir Goldstein   ext4: do not upda...
777
  out_journal:
833a95088   Amir Goldstein   ext4: factor out ...
778
  	ext4_journal_stop(handle);
db6516a5e   Amir Goldstein   ext4: do not upda...
779
780
  out:
  	sb_end_intwrite(sb);
833a95088   Amir Goldstein   ext4: factor out ...
781
782
  	return err;
  }
e030a2881   Dio Putra   ext4: fix coding ...
783
/*
 * ->open for ext4 regular files.
 *
 * Runs a strictly ordered series of checks/setup steps, any of which can
 * veto the open: forced-shutdown check, last-mounted sampling, fscrypt and
 * fsverity setup, jbd2 inode attachment for writers, and finally quota
 * accounting via dquot_file_open().  Returns 0 or a negative errno.
 */
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	/* Refuse new opens once the filesystem has been forcibly shut down. */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	/* Record the mount point in the superblock on first open after mount. */
	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	/* Encryption and verity state must be set up before any I/O. */
	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	/* ext4 supports RWF_NOWAIT and async buffered reads. */
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return dquot_file_open(inode, filp);
}
e0d10bfa9   Toshiyuki Okajima   ext4: improve lls...
813
  /*
ec7268ce2   Eric Sandeen   ext4: use core vf...
814
815
816
   * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
   * by calling generic_file_llseek_size() with the appropriate maxbytes
   * value for each.
e0d10bfa9   Toshiyuki Okajima   ext4: improve lls...
817
   */
965c8e59c   Andrew Morton   lseek: the "whenc...
818
  loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
e0d10bfa9   Toshiyuki Okajima   ext4: improve lls...
819
820
821
822
823
824
825
826
  {
  	struct inode *inode = file->f_mapping->host;
  	loff_t maxbytes;
  
  	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
  		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
  	else
  		maxbytes = inode->i_sb->s_maxbytes;
e0d10bfa9   Toshiyuki Okajima   ext4: improve lls...
827

965c8e59c   Andrew Morton   lseek: the "whenc...
828
  	switch (whence) {
545052e9e   Christoph Hellwig   ext4: Switch to i...
829
  	default:
965c8e59c   Andrew Morton   lseek: the "whenc...
830
  		return generic_file_llseek_size(file, offset, whence,
c8c0df241   Zheng Liu   ext4: introduce l...
831
  						maxbytes, i_size_read(inode));
c8c0df241   Zheng Liu   ext4: introduce l...
832
  	case SEEK_HOLE:
545052e9e   Christoph Hellwig   ext4: Switch to i...
833
  		inode_lock_shared(inode);
09edf4d38   Matthew Bobrowski   ext4: introduce n...
834
835
  		offset = iomap_seek_hole(inode, offset,
  					 &ext4_iomap_report_ops);
545052e9e   Christoph Hellwig   ext4: Switch to i...
836
837
838
839
  		inode_unlock_shared(inode);
  		break;
  	case SEEK_DATA:
  		inode_lock_shared(inode);
09edf4d38   Matthew Bobrowski   ext4: introduce n...
840
841
  		offset = iomap_seek_data(inode, offset,
  					 &ext4_iomap_report_ops);
545052e9e   Christoph Hellwig   ext4: Switch to i...
842
843
  		inode_unlock_shared(inode);
  		break;
c8c0df241   Zheng Liu   ext4: introduce l...
844
  	}
545052e9e   Christoph Hellwig   ext4: Switch to i...
845
846
847
  	if (offset < 0)
  		return offset;
  	return vfs_setpos(file, offset, maxbytes);
e0d10bfa9   Toshiyuki Okajima   ext4: improve lls...
848
  }
617ba13b3   Mingming Cao   [PATCH] ext4: ren...
849
/* File operations for ext4 regular files. */
const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	/* Allow MAP_SYNC; ext4_file_mmap() rejects it when unsupported. */
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};
754661f14   Arjan van de Ven   [PATCH] mark stru...
868
/* Inode operations for ext4 regular files. */
const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};