/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
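
/*
 * Read completion handler: if the bio carries an fscrypt context, hand the
 * pages to fscrypt for decryption (or release the context on error);
 * otherwise mark each page uptodate or failed and unlock it.
 */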
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
		bio->bi_error = -EIO;
#endif
	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}
	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
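
/*
 * Write completion handler: restore fscrypt bounce pages to the original
 * pagecache pages, record mapping errors (stopping checkpoints on I/O
 * failure), end page writeback, and wake the checkpoint waiter once the
 * last in-flight write bio has completed.
 */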
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;
	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}
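
/*
 * Submit a bio, accounting writes in sbi->nr_wb_bios so the checkpoint
 * path can wait for them, and finishing the current plug for DATA/NODE
 * writes when the superblock is mounted with the HMSMR feature.
 */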
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		atomic_inc(&sbi->nr_wb_bios);
		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;
	if (is_read_io(fio->op))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
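
/*
 * Check whether the pending bio in @io already holds a page that belongs
 * to @inode, matches @page, or is a node page of @ino; with all three
 * left NULL/0 it only reports whether any bio is pending.
 */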
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;
	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {
		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}
	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;
	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		if (test_opt(sbi, NOBARRIER))
			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
								REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
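
/*
 * Merged I/O path: append the page to the pending bio for its page type,
 * submitting that bio first when the target block is not contiguous with
 * it or the op/op_flags differ, and allocating a new bio as needed.
 */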
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->op);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}
	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}
	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;
	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
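
/*
 * Read one data page at @index: encrypted regular files go through
 * read_mapping_page(), the extent cache is consulted before a dnode
 * lookup, NEW_ADDR blocks are served as zero-filled pages, and otherwise
 * a read bio is submitted for the block address that was found.
 */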
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);
	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);
	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such the case, its blkaddr can be remained as NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);
	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occur,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
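
/*
 * Allocate an on-disk block for the address slot referenced by @dn:
 * account the block unless it was already reserved as NEW_ADDR, pick a
 * log type (CURSEG_DIRECT_IO for slot 0 of the inode page, CURSEG_WARM_DATA
 * otherwise), store the new block address and push i_size forward when the
 * block extends the file.
 */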
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;
alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;
	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}
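
/*
 * Pre-map the blocks a write is about to touch: direct I/O converts any
 * inline data and maps with F2FS_GET_BLOCK_PRE_DIO, while buffered writes
 * that outgrow the inline limit are converted and, once no longer inline,
 * mapped with F2FS_GET_BLOCK_PRE_AIO.
 */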
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;
	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	if (!maxblocks)
		return 0;
	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(inode, FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;
	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;
	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;
		allocated = dn.node_changed;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;
	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
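
/*
 * get_block_t-style adapter: translate a buffer_head request into an
 * f2fs_map_blocks() call and copy the mapping result back into @bh.
 */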
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}
	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;
	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes util pass the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* Found a hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;
	inode_unlock(inode);
	return ret;
}
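
/*
 * Allocate a read bio aimed at @blkaddr; for encrypted regular files an
 * fscrypt context is attached as bi_private so f2fs_read_end_io() can
 * decrypt the pages on completion.
 */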
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
				 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;
			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;
	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
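
/*
 * Write one data page out to its block address.  Look up the current
 * block for the page, bail out if the page was truncated in the meantime
 * (NULL_ADDR), encrypt the payload for encrypted regular files, and then
 * either rewrite the block in place (IPU) when SSR prefers it, or
 * allocate a new block (OPU) through write_data_page().
 */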
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do an
	 * in-place write for the updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(inode, FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
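
/*
 * ->writepage for a single data page.  Pages entirely beyond i_size are
 * dropped and the tail of a partially valid last page is zeroed.  The
 * page is redirtied or skipped in special states (recovery in progress,
 * drop-cache/volatile files, checkpoint error).  Dentry blocks are
 * written without f2fs_lock_op() since checkpoint controls them, while
 * regular data takes the lock and tries inline data first before falling
 * back to do_write_data_page().
 */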
  
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return err;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is that it writes the cold data pages separately from
 * the warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int nwritten = 0;

	pagevec_init(&pvec, 0);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = mapping->a_ops->writepage(page, wbc);
			if (unlikely(ret)) {
				done_index = page->index + 1;
				done = 1;
				break;
			} else {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
							NULL, 0, DATA, WRITE);

	return ret;
}
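
/*
 * ->writepages entry point.  Writeback is skipped when it is pointless
 * or not allowed: no dirty pages under WB_SYNC_NONE, small dirty dentry
 * sets, an ongoing defragment, or recovery in progress.  Otherwise the
 * dirty pages are pushed through f2fs_write_cache_pages() inside a block
 * plug.
 */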
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
	blk_finish_plug(&plug);
	/*
	 * if some pages were truncated, we cannot guarantee that its
	 * mapping->host can detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
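
/*
 * Roll back after a failed write: if the write was extending the file,
 * drop the page cache and the blocks allocated beyond the current i_size.
 */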
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}
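
/*
 * Work out which block backs the page being written and whether node
 * pages changed along the way.  A whole-page overwrite of a non-inline
 * file needs no lookup at all; inline data is either read back in place
 * or converted; holes get a block reserved.  f2fs_lock_op() is only
 * taken when conversion or allocation is actually needed.
 */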
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}
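
/*
 * ->write_begin: convert inline data first when writing any page other
 * than #0 (to keep the page #0 -> inode page locking order), grab the
 * page, prepare its block address, balance free sections if node pages
 * changed, and read or zero the old contents when only part of the page
 * will be overwritten.
 */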
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}
		bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

		__submit_bio(sbi, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}
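
/*
 * ->write_end: a short copy is only accepted for pages that were already
 * up to date; otherwise copied is reset to 0 so generic_perform_write()
 * retries.  The page is then marked dirty and i_size is extended when
 * the write went past the current end of file.
 */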
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied
	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != PAGE_SIZE))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);
	clear_cold_data(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
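
/*
 * Direct I/O: the file offset and every iovec must be block aligned.
 * Encrypted regular files and LFS-mode mounts bail out with 0 so no
 * direct I/O is attempted for them; everything else goes through
 * blockdev_direct_IO() under dio_rwsem.
 */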
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;
	if (test_opt(F2FS_I_SB(inode), LFS))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
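
/*
 * Page invalidation and release keep f2fs's dirty page counters in sync
 * and leave atomic written pages alone: their Private state marks pages
 * that were registered through register_inmem_page().
 */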
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers which gives higher performance
 * in very high speed storages (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return;
}
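
/*
 * Dirtying a data page: pages of an atomic-write file are diverted to
 * the in-memory page list via register_inmem_page() instead of the
 * normal dirty accounting; everything else goes through the nobuffers
 * helper above plus f2fs's own dirty page bookkeeping.
 */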
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
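
/*
 * ->bmap: inline-data files have no block mapping to report, and dirty
 * pages are flushed first so that the generic helper sees allocated
 * blocks.
 */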
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written && !mutex_trylock(&fi->inmem_lock))
		return -EAGAIN;

	/*
	 * A reference is expected if PagePrivate is set when the mapping is
	 * moved, however F2FS breaks this for maintaining dirty page counts
	 * when truncating pages. So here adjusting the 'extra_count' makes
	 * it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};