fs/ext4/extents.c
  /*
   * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
   * Written by Alex Tomas <alex@clusterfs.com>
   *
   * Architecture independence:
   *   Copyright (c) 2005, Bull S.A.
   *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
   */
  
  /*
   * Extents support for EXT4
   *
   * TODO:
   *   - ext4*_error() should be used in some situations
   *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
   *   - smart tree reduction
   */
  #include <linux/fs.h>
  #include <linux/time.h>
  #include <linux/jbd2.h>
  #include <linux/highuid.h>
  #include <linux/pagemap.h>
  #include <linux/quotaops.h>
  #include <linux/string.h>
  #include <linux/slab.h>
  #include <linux/falloc.h>
  #include <asm/uaccess.h>
  #include <linux/fiemap.h>
  #include "ext4_jbd2.h"

  #include <trace/events/ext4.h>
  static int ext4_split_extent(handle_t *handle,
  				struct inode *inode,
  				struct ext4_ext_path *path,
  				struct ext4_map_blocks *map,
  				int split_flag,
  				int flags);
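  
  /*
   * Make sure that 'handle' has at least 'needed' credits.  If not, try
   * to extend the running transaction, and if that is not possible,
   * restart it.  Returns 0 when the handle can be used as-is, -EAGAIN
   * after the transaction had to be restarted (the caller should then
   * retry), or a negative error code.
   */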
  static int ext4_ext_truncate_extend_restart(handle_t *handle,
  					    struct inode *inode,
  					    int needed)
  {
  	int err;
  	if (!ext4_handle_valid(handle))
  		return 0;
  	if (handle->h_buffer_credits > needed)
  		return 0;
  	err = ext4_journal_extend(handle, needed);
  	if (err <= 0)
  		return err;
  	err = ext4_truncate_restart_trans(handle, inode, needed);
  	if (err == 0)
  		err = -EAGAIN;
  
  	return err;
  }
  
  /*
   * could return:
   *  - EROFS
   *  - ENOMEM
   */
  static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
  				struct ext4_ext_path *path)
  {
  	if (path->p_bh) {
  		/* path points to block */
  		return ext4_journal_get_write_access(handle, path->p_bh);
  	}
  	/* path points to leaf/index in inode body */
  	/* we use in-core data, no need to protect them */
  	return 0;
  }
  
  /*
   * could return:
   *  - EROFS
   *  - ENOMEM
   *  - EIO
   */
  #define ext4_ext_dirty(handle, inode, path) \
  		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
  static int __ext4_ext_dirty(const char *where, unsigned int line,
  			    handle_t *handle, struct inode *inode,
  			    struct ext4_ext_path *path)
  {
  	int err;
  	if (path->p_bh) {
  		/* path points to block */
  		err = __ext4_handle_dirty_metadata(where, line, handle,
  						   inode, path->p_bh);
  	} else {
  		/* path points to leaf/index in inode body */
  		err = ext4_mark_inode_dirty(handle, inode);
  	}
  	return err;
  }
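  
  /*
   * ext4_ext_find_goal:
   * pick a preferred physical block for an allocation at logical block
   * @block: near the closest extent found in @path if there is one,
   * else the block holding the deepest index/leaf on the path, else a
   * goal derived from the inode's block group.
   */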
  static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
  			      struct ext4_ext_path *path,
  			      ext4_lblk_t block)
  {
  	if (path) {
  		int depth = path->p_depth;
  		struct ext4_extent *ex;

  		/*
  		 * Try to predict block placement assuming that we are
  		 * filling in a file which will eventually be
  		 * non-sparse --- i.e., in the case of libbfd writing
  		 * an ELF object sections out-of-order but in a way
  		 * that eventually results in a contiguous object or
  		 * executable file, or some database extending a table
  		 * space file.  However, this is actually somewhat
  		 * non-ideal if we are writing a sparse file such as
  		 * qemu or KVM writing a raw image file that is going
  		 * to stay fairly sparse, since it will end up
  		 * fragmenting the file system's free space.  Maybe we
  		 * should have some heuristics or some way to allow
  		 * userspace to pass a hint to the file system,
  		 * especially if the latter case turns out to be
  		 * common.
  		 */
  		ex = path[depth].p_ext;
  		if (ex) {
  			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
  			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
  
  			if (block > ext_block)
  				return ext_pblk + (block - ext_block);
  			else
  				return ext_pblk - (ext_block - block);
  		}

  		/* it looks like index is empty;
  		 * try to find starting block from index itself */
  		if (path[depth].p_bh)
  			return path[depth].p_bh->b_blocknr;
  	}
  
  	/* OK. use inode's group */
  	return ext4_inode_to_goal_block(inode);
  }
  /*
   * Allocation for a meta data block
   */
  static ext4_fsblk_t
  ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
  			struct ext4_ext_path *path,
  			struct ext4_extent *ex, int *err, unsigned int flags)
  {
  	ext4_fsblk_t goal, newblock;
  
  	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
  	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
  					NULL, err);
  	return newblock;
  }
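  
  /*
   * The ext4_ext_space_*() helpers below return how many extent or index
   * entries fit in one tree node: a full block for on-disk nodes, or the
   * room left in the inode body after the header for the root.  With
   * AGGRESSIVE_TEST defined (and @check unset) the limits are clamped to
   * tiny values so that deep trees are easy to exercise.
   */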
  static inline int ext4_ext_space_block(struct inode *inode, int check)
  {
  	int size;
  
  	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
  			/ sizeof(struct ext4_extent);
  #ifdef AGGRESSIVE_TEST
  	if (!check && size > 6)
  		size = 6;
  #endif
  	return size;
  }
  static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
  {
  	int size;
  
  	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
  			/ sizeof(struct ext4_extent_idx);
  #ifdef AGGRESSIVE_TEST
  	if (!check && size > 5)
  		size = 5;
  #endif
  	return size;
  }
  static inline int ext4_ext_space_root(struct inode *inode, int check)
  {
  	int size;
  
  	size = sizeof(EXT4_I(inode)->i_data);
  	size -= sizeof(struct ext4_extent_header);
  	size /= sizeof(struct ext4_extent);
  #ifdef AGGRESSIVE_TEST
  	if (!check && size > 3)
  		size = 3;
  #endif
  	return size;
  }
  static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
  {
  	int size;
  
  	size = sizeof(EXT4_I(inode)->i_data);
  	size -= sizeof(struct ext4_extent_header);
  	size /= sizeof(struct ext4_extent_idx);
  #ifdef AGGRESSIVE_TEST
  	if (!check && size > 4)
  		size = 4;
  #endif
  	return size;
  }
  /*
   * Calculate the number of metadata blocks needed
   * to allocate @blocks
   * Worst case is one block per extent
   */
  int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
  {
  	struct ext4_inode_info *ei = EXT4_I(inode);
  	int idxs;

  	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
  		/ sizeof(struct ext4_extent_idx));
  
  	/*
  	 * If the new delayed allocation block is contiguous with the
  	 * previous da block, it can share index blocks with the
  	 * previous block, so we only need to allocate a new index
  	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
  	 * an additional index block, and at idxs**3 blocks, yet
  	 * another index block.
  	 */
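  	/*
  	 * For example, with an assumed 4 KiB block size, idxs is
  	 * (4096 - 12) / 12 = 340: one extra index block is charged every
  	 * 340 contiguous delayed blocks, another every 340^2, and so on.
  	 */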
  	if (ei->i_da_metadata_calc_len &&
  	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
  		int num = 0;
  		if ((ei->i_da_metadata_calc_len % idxs) == 0)
  			num++;
  		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
  			num++;
  		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
  			num++;
  			ei->i_da_metadata_calc_len = 0;
  		} else
  			ei->i_da_metadata_calc_len++;
  		ei->i_da_metadata_calc_last_lblock++;
  		return num;
  	}

  	/*
  	 * In the worst case we need a new set of index blocks at
  	 * every level of the inode's extent tree.
  	 */
  	ei->i_da_metadata_calc_len = 1;
  	ei->i_da_metadata_calc_last_lblock = lblock;
  	return ext_depth(inode) + 1;
  }
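  
  /*
   * Return the maximum number of entries a node at the given depth may
   * claim in eh_max: the root in the inode body and on-disk blocks have
   * different capacities, as do leaf and index nodes.
   */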
  static int
  ext4_ext_max_entries(struct inode *inode, int depth)
  {
  	int max;
  
  	if (depth == ext_depth(inode)) {
  		if (depth == 0)
  			max = ext4_ext_space_root(inode, 1);
  		else
  			max = ext4_ext_space_root_idx(inode, 1);
  	} else {
  		if (depth == 0)
  			max = ext4_ext_space_block(inode, 1);
  		else
  			max = ext4_ext_space_block_idx(inode, 1);
  	}
  
  	return max;
  }
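  
  /*
   * Validity helpers used by __ext4_ext_check(): make sure an extent or
   * index entry points at physical blocks that lie inside the filesystem.
   */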
  static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
  {
  	ext4_fsblk_t block = ext4_ext_pblock(ext);
  	int len = ext4_ext_get_actual_len(ext);

  	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
  }
  
  static int ext4_valid_extent_idx(struct inode *inode,
  				struct ext4_extent_idx *ext_idx)
  {
  	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

  	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
  }
  
  static int ext4_valid_extent_entries(struct inode *inode,
  				struct ext4_extent_header *eh,
  				int depth)
  {
  	unsigned short entries;
  	if (eh->eh_entries == 0)
  		return 1;
  
  	entries = le16_to_cpu(eh->eh_entries);
  
  	if (depth == 0) {
  		/* leaf entries */
  		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
  		while (entries) {
  			if (!ext4_valid_extent(inode, ext))
  				return 0;
  			ext++;
  			entries--;
  		}
  	} else {
  		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
  		while (entries) {
  			if (!ext4_valid_extent_idx(inode, ext_idx))
  				return 0;
  			ext_idx++;
  			entries--;
  		}
  	}
  	return 1;
  }
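  
  /*
   * __ext4_ext_check:
   * sanity-check an extent tree node header (magic, depth, eh_max bound,
   * eh_entries) and the entries below it; report corruption through
   * ext4_error_inode() and return -EIO.
   */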
  static int __ext4_ext_check(const char *function, unsigned int line,
  			    struct inode *inode, struct ext4_extent_header *eh,
  			    int depth)
  {
  	const char *error_msg;
  	int max = 0;
  
  	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
  		error_msg = "invalid magic";
  		goto corrupted;
  	}
  	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
  		error_msg = "unexpected eh_depth";
  		goto corrupted;
  	}
  	if (unlikely(eh->eh_max == 0)) {
  		error_msg = "invalid eh_max";
  		goto corrupted;
  	}
  	max = ext4_ext_max_entries(inode, depth);
  	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
  		error_msg = "too large eh_max";
  		goto corrupted;
  	}
  	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
  		error_msg = "invalid eh_entries";
  		goto corrupted;
  	}
  	if (!ext4_valid_extent_entries(inode, eh, depth)) {
  		error_msg = "invalid extent entries";
  		goto corrupted;
  	}
  	return 0;
  
  corrupted:
  	ext4_error_inode(inode, function, line, 0,
  			"bad header/extent: %s - magic %x, "
  			"entries %u, max %u(%u), depth %u(%u)",
  			error_msg, le16_to_cpu(eh->eh_magic),
  			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
  			max, le16_to_cpu(eh->eh_depth), depth);
  
  	return -EIO;
  }
  #define ext4_ext_check(inode, eh, depth)	\
  	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

  int ext4_ext_check_inode(struct inode *inode)
  {
  	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
  }
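  
  /*
   * The debugging helpers below produce output only when EXT_DEBUG is
   * defined; otherwise the #else branch turns them into empty macros.
   */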
  #ifdef EXT_DEBUG
  static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
  {
  	int k, l = path->p_depth;
  
  	ext_debug("path:");
  	for (k = 0; k <= l; k++, path++) {
  		if (path->p_idx) {
  		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
  			    ext4_idx_pblock(path->p_idx));
  		} else if (path->p_ext) {
  			ext_debug("  %d:[%d]%d:%llu ",
  				  le32_to_cpu(path->p_ext->ee_block),
  				  ext4_ext_is_uninitialized(path->p_ext),
  				  ext4_ext_get_actual_len(path->p_ext),
  				  ext4_ext_pblock(path->p_ext));
  		} else
  			ext_debug("  []");
  	}
  	ext_debug("\n");
  }
  
  static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
  {
  	int depth = ext_depth(inode);
  	struct ext4_extent_header *eh;
  	struct ext4_extent *ex;
  	int i;
  
  	if (!path)
  		return;
  
  	eh = path[depth].p_hdr;
  	ex = EXT_FIRST_EXTENT(eh);
  	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
  	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
  		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
  			  ext4_ext_is_uninitialized(ex),
  			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
  	}
  	ext_debug("\n");
  }
  
  static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
  			ext4_fsblk_t newblock, int level)
  {
  	int depth = ext_depth(inode);
  	struct ext4_extent *ex;
  
  	if (depth != level) {
  		struct ext4_extent_idx *idx;
  		idx = path[level].p_idx;
  		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
  			ext_debug("%d: move %d:%llu in new index %llu\n", level,
  					le32_to_cpu(idx->ei_block),
  					ext4_idx_pblock(idx),
  					newblock);
  			idx++;
  		}
  
  		return;
  	}
  
  	ex = path[depth].p_ext;
  	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
  		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
  				le32_to_cpu(ex->ee_block),
  				ext4_ext_pblock(ex),
  				ext4_ext_is_uninitialized(ex),
  				ext4_ext_get_actual_len(ex),
  				newblock);
  		ex++;
  	}
  }
  #else
  #define ext4_ext_show_path(inode, path)
  #define ext4_ext_show_leaf(inode, path)
  #define ext4_ext_show_move(inode, path, newblock, level)
  #endif
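  
  /*
   * ext4_ext_drop_refs:
   * release the buffer_head reference held at each level of @path.
   */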
  void ext4_ext_drop_refs(struct ext4_ext_path *path)
  {
  	int depth = path->p_depth;
  	int i;
  
  	for (i = 0; i <= depth; i++, path++)
  		if (path->p_bh) {
  			brelse(path->p_bh);
  			path->p_bh = NULL;
  		}
  }
  
  /*
   * ext4_ext_binsearch_idx:
   * binary search for the closest index of the given block
   * the header must be checked before calling this
   */
  static void
  ext4_ext_binsearch_idx(struct inode *inode,
  			struct ext4_ext_path *path, ext4_lblk_t block)
  {
  	struct ext4_extent_header *eh = path->p_hdr;
  	struct ext4_extent_idx *r, *l, *m;

  	ext_debug("binsearch for %u(idx):  ", block);
  
  	l = EXT_FIRST_INDEX(eh) + 1;
  	r = EXT_LAST_INDEX(eh);
  	while (l <= r) {
  		m = l + (r - l) / 2;
  		if (block < le32_to_cpu(m->ei_block))
  			r = m - 1;
  		else
  			l = m + 1;
  		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
  				m, le32_to_cpu(m->ei_block),
  				r, le32_to_cpu(r->ei_block));
  	}
  
  	path->p_idx = l - 1;
  	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
  		  ext4_idx_pblock(path->p_idx));
  
  #ifdef CHECK_BINSEARCH
  	{
  		struct ext4_extent_idx *chix, *ix;
  		int k;
  
  		chix = ix = EXT_FIRST_INDEX(eh);
  		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
  		  if (k != 0 &&
  		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
  				printk(KERN_DEBUG "k=%d, ix=0x%p, "
  				       "first=0x%p\n", k,
  				       ix, EXT_FIRST_INDEX(eh));
  				printk(KERN_DEBUG "%u <= %u\n",
  				       le32_to_cpu(ix->ei_block),
  				       le32_to_cpu(ix[-1].ei_block));
  			}
  			BUG_ON(k && le32_to_cpu(ix->ei_block)
  					   <= le32_to_cpu(ix[-1].ei_block));
  			if (block < le32_to_cpu(ix->ei_block))
  				break;
  			chix = ix;
  		}
  		BUG_ON(chix != path->p_idx);
  	}
  #endif
  
  }
  
  /*
   * ext4_ext_binsearch:
   * binary search for closest extent of the given block
   * the header must be checked before calling this
   */
  static void
  ext4_ext_binsearch(struct inode *inode,
  		struct ext4_ext_path *path, ext4_lblk_t block)
  {
  	struct ext4_extent_header *eh = path->p_hdr;
  	struct ext4_extent *r, *l, *m;
  	if (eh->eh_entries == 0) {
  		/*
  		 * this leaf is empty:
  		 * we get such a leaf in split/add case
  		 */
  		return;
  	}
  	ext_debug("binsearch for %u:  ", block);
  
  	l = EXT_FIRST_EXTENT(eh) + 1;
  	r = EXT_LAST_EXTENT(eh);
  
  	while (l <= r) {
  		m = l + (r - l) / 2;
  		if (block < le32_to_cpu(m->ee_block))
  			r = m - 1;
  		else
  			l = m + 1;
  		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
  				m, le32_to_cpu(m->ee_block),
  				r, le32_to_cpu(r->ee_block));
  	}
  
  	path->p_ext = l - 1;
  	ext_debug("  -> %d:%llu:[%d]%d ",
  			le32_to_cpu(path->p_ext->ee_block),
  			ext4_ext_pblock(path->p_ext),
  			ext4_ext_is_uninitialized(path->p_ext),
  			ext4_ext_get_actual_len(path->p_ext));
  
  #ifdef CHECK_BINSEARCH
  	{
  		struct ext4_extent *chex, *ex;
  		int k;
  
  		chex = ex = EXT_FIRST_EXTENT(eh);
  		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
  			BUG_ON(k && le32_to_cpu(ex->ee_block)
  					  <= le32_to_cpu(ex[-1].ee_block));
  			if (block < le32_to_cpu(ex->ee_block))
  				break;
  			chex = ex;
  		}
  		BUG_ON(chex != path->p_ext);
  	}
  #endif
  
  }
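  
  /*
   * ext4_ext_tree_init:
   * set up an empty extent tree in the inode body: depth 0, no entries,
   * and eh_max sized for the space i_data leaves after the header.
   */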
  
  int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
  {
  	struct ext4_extent_header *eh;
  
  	eh = ext_inode_hdr(inode);
  	eh->eh_depth = 0;
  	eh->eh_entries = 0;
  	eh->eh_magic = EXT4_EXT_MAGIC;
  	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
  	ext4_mark_inode_dirty(handle, inode);
  	ext4_ext_invalidate_cache(inode);
  	return 0;
  }
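  
  /*
   * ext4_ext_find_extent:
   * walk the tree from the root down to the leaf covering logical block
   * @block, recording the entry chosen at each level in @path (allocated
   * here if the caller passed NULL).  The caller is responsible for
   * ext4_ext_drop_refs() and, if the path was allocated here, kfree().
   */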
  
  struct ext4_ext_path *
  ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
  					struct ext4_ext_path *path)
  {
  	struct ext4_extent_header *eh;
  	struct buffer_head *bh;
  	short int depth, i, ppos = 0, alloc = 0;
  
  	eh = ext_inode_hdr(inode);
  	depth = ext_depth(inode);
  
  	/* account possible depth increase */
  	if (!path) {
  		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
  				GFP_NOFS);
  		if (!path)
  			return ERR_PTR(-ENOMEM);
  		alloc = 1;
  	}
  	path[0].p_hdr = eh;
  	path[0].p_bh = NULL;

  	i = depth;
  	/* walk through the tree */
  	while (i) {
  		int need_to_validate = 0;
  		ext_debug("depth %d: num %d, max %d\n",
  			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

  		ext4_ext_binsearch_idx(inode, path + ppos, block);
  		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
  		path[ppos].p_depth = i;
  		path[ppos].p_ext = NULL;
  		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
  		if (unlikely(!bh))
  			goto err;
  		if (!bh_uptodate_or_lock(bh)) {
  			trace_ext4_ext_load_extent(inode, block,
  						path[ppos].p_block);
  			if (bh_submit_read(bh) < 0) {
  				put_bh(bh);
  				goto err;
  			}
  			/* validate the extent entries */
  			need_to_validate = 1;
  		}
  		eh = ext_block_hdr(bh);
  		ppos++;
  		if (unlikely(ppos > depth)) {
  			put_bh(bh);
  			EXT4_ERROR_INODE(inode,
  					 "ppos %d > depth %d", ppos, depth);
  			goto err;
  		}
  		path[ppos].p_bh = bh;
  		path[ppos].p_hdr = eh;
  		i--;
  		if (need_to_validate && ext4_ext_check(inode, eh, i))
  			goto err;
  	}
  
  	path[ppos].p_depth = i;
  	path[ppos].p_ext = NULL;
  	path[ppos].p_idx = NULL;
  	/* find extent */
  	ext4_ext_binsearch(inode, path + ppos, block);
  	/* if not an empty leaf */
  	if (path[ppos].p_ext)
  		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
  
  	ext4_ext_show_path(inode, path);
  
  	return path;
  
  err:
  	ext4_ext_drop_refs(path);
  	if (alloc)
  		kfree(path);
  	return ERR_PTR(-EIO);
  }
  
  /*
   * ext4_ext_insert_index:
   * insert new index [@logical;@ptr] into the block at @curp;
   * check where to insert: before @curp or after @curp
   */
  static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
  				 struct ext4_ext_path *curp,
  				 int logical, ext4_fsblk_t ptr)
  {
  	struct ext4_extent_idx *ix;
  	int len, err;
  	err = ext4_ext_get_access(handle, inode, curp);
  	if (err)
  		return err;
  	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
  		EXT4_ERROR_INODE(inode,
  				 "logical %d == ei_block %d!",
  				 logical, le32_to_cpu(curp->p_idx->ei_block));
  		return -EIO;
  	}
  
  	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
  			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
  		EXT4_ERROR_INODE(inode,
  				 "eh_entries %d >= eh_max %d!",
  				 le16_to_cpu(curp->p_hdr->eh_entries),
  				 le16_to_cpu(curp->p_hdr->eh_max));
  		return -EIO;
  	}
  	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
  		/* insert after */
  		ext_debug("insert new index %d after: %llu\n", logical, ptr);
  		ix = curp->p_idx + 1;
  	} else {
  		/* insert before */
  		ext_debug("insert new index %d before: %llu\n", logical, ptr);
  		ix = curp->p_idx;
  	}
  	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
  	BUG_ON(len < 0);
  	if (len > 0) {
  		ext_debug("insert new index %d: "
  				"move %d indices from 0x%p to 0x%p\n",
  				logical, len, ix, ix + 1);
  		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
  	}
  	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
  		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
  		return -EIO;
  	}
  	ix->ei_block = cpu_to_le32(logical);
  	ext4_idx_store_pblock(ix, ptr);
  	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

  	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
  		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
  		return -EIO;
  	}
  
  	err = ext4_ext_dirty(handle, inode, curp);
  	ext4_std_error(inode->i_sb, err);
  
  	return err;
  }
  
  /*
   * ext4_ext_split:
   * inserts new subtree into the path, using free index entry
   * at depth @at:
   * - allocates all needed blocks (new leaf and all intermediate index blocks)
   * - makes decision where to split
   * - moves remaining extents and index entries (right to the split point)
   *   into the newly allocated blocks
   * - initializes subtree
   */
  static int ext4_ext_split(handle_t *handle, struct inode *inode,
  			  unsigned int flags,
  			  struct ext4_ext_path *path,
  			  struct ext4_extent *newext, int at)
  {
  	struct buffer_head *bh = NULL;
  	int depth = ext_depth(inode);
  	struct ext4_extent_header *neh;
  	struct ext4_extent_idx *fidx;
  	int i = at, k, m, a;
  	ext4_fsblk_t newblock, oldblock;
  	__le32 border;
  	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
  	int err = 0;
  
  	/* make decision: where to split? */
  	/* FIXME: now decision is simplest: at current extent */

  	/* if current leaf will be split, then we should use
  	 * border from split point */
  	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
  		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
  		return -EIO;
  	}
  	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
  		border = path[depth].p_ext[1].ee_block;
  		ext_debug("leaf will be split."
  				" next leaf starts at %d\n",
  				  le32_to_cpu(border));
  	} else {
  		border = newext->ee_block;
  		ext_debug("leaf will be added."
  				" next leaf starts at %d\n",
  				le32_to_cpu(border));
  	}
  
  	/*
  	 * If an error occurs, we stop processing and mark the
  	 * filesystem read-only.  The index won't be inserted and the
  	 * tree will stay consistent.  The next mount will repair the
  	 * buffers too.
  	 */
  
  	/*
  	 * Get array to track all allocated blocks.
  	 * We need this so that we can free the allocated
  	 * blocks if an error occurs.
  	 */
  	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
  	if (!ablocks)
  		return -ENOMEM;
  
  	/* allocate all needed blocks */
  	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
  	for (a = 0; a < depth - at; a++) {
  		newblock = ext4_ext_new_meta_block(handle, inode, path,
  						   newext, &err, flags);
  		if (newblock == 0)
  			goto cleanup;
  		ablocks[a] = newblock;
  	}
  
  	/* initialize new leaf */
  	newblock = ablocks[--a];
  	if (unlikely(newblock == 0)) {
  		EXT4_ERROR_INODE(inode, "newblock == 0!");
  		err = -EIO;
  		goto cleanup;
  	}
  	bh = sb_getblk(inode->i_sb, newblock);
  	if (!bh) {
  		err = -EIO;
  		goto cleanup;
  	}
  	lock_buffer(bh);
  	err = ext4_journal_get_create_access(handle, bh);
  	if (err)
  		goto cleanup;
  
  	neh = ext_block_hdr(bh);
  	neh->eh_entries = 0;
  	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
  	neh->eh_magic = EXT4_EXT_MAGIC;
  	neh->eh_depth = 0;

  	/* move remainder of path[depth] to the new leaf */
  	if (unlikely(path[depth].p_hdr->eh_entries !=
  		     path[depth].p_hdr->eh_max)) {
  		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
  				 path[depth].p_hdr->eh_entries,
  				 path[depth].p_hdr->eh_max);
  		err = -EIO;
  		goto cleanup;
  	}
  	/* start copy from next extent */
  	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
  	ext4_ext_show_move(inode, path, newblock, depth);
  	if (m) {
  		struct ext4_extent *ex;
  		ex = EXT_FIRST_EXTENT(neh);
  		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
  		le16_add_cpu(&neh->eh_entries, m);
  	}
  
  	set_buffer_uptodate(bh);
  	unlock_buffer(bh);
  	err = ext4_handle_dirty_metadata(handle, inode, bh);
  	if (err)
  		goto cleanup;
  	brelse(bh);
  	bh = NULL;
  
  	/* correct old leaf */
  	if (m) {
  		err = ext4_ext_get_access(handle, inode, path + depth);
  		if (err)
  			goto cleanup;
  		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
  		err = ext4_ext_dirty(handle, inode, path + depth);
  		if (err)
  			goto cleanup;
  
  	}
  
  	/* create intermediate indexes */
  	k = depth - at - 1;
  	if (unlikely(k < 0)) {
  		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
  		err = -EIO;
  		goto cleanup;
  	}
  	if (k)
  		ext_debug("create %d intermediate indices\n", k);
  	/* insert new index into current index block */
  	/* current depth stored in i var */
  	i = depth - 1;
  	while (k--) {
  		oldblock = newblock;
  		newblock = ablocks[--a];
  		bh = sb_getblk(inode->i_sb, newblock);
  		if (!bh) {
  			err = -EIO;
  			goto cleanup;
  		}
  		lock_buffer(bh);
  		err = ext4_journal_get_create_access(handle, bh);
  		if (err)
  			goto cleanup;
  
  		neh = ext_block_hdr(bh);
  		neh->eh_entries = cpu_to_le16(1);
  		neh->eh_magic = EXT4_EXT_MAGIC;
  		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
  		neh->eh_depth = cpu_to_le16(depth - i);
  		fidx = EXT_FIRST_INDEX(neh);
  		fidx->ei_block = border;
  		ext4_idx_store_pblock(fidx, oldblock);

  		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
  				i, newblock, le32_to_cpu(border), oldblock);

  		/* move remainder of path[i] to the new index block */
  		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
  					EXT_LAST_INDEX(path[i].p_hdr))) {
  			EXT4_ERROR_INODE(inode,
  					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
  					 le32_to_cpu(path[i].p_ext->ee_block));
  			err = -EIO;
  			goto cleanup;
  		}
  		/* start copy indexes */
  		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
  		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
  				EXT_MAX_INDEX(path[i].p_hdr));
  		ext4_ext_show_move(inode, path, newblock, i);
  		if (m) {
  			memmove(++fidx, path[i].p_idx,
  				sizeof(struct ext4_extent_idx) * m);
  			le16_add_cpu(&neh->eh_entries, m);
  		}
  		set_buffer_uptodate(bh);
  		unlock_buffer(bh);
  		err = ext4_handle_dirty_metadata(handle, inode, bh);
  		if (err)
  			goto cleanup;
  		brelse(bh);
  		bh = NULL;
  
  		/* correct old index */
  		if (m) {
  			err = ext4_ext_get_access(handle, inode, path + i);
  			if (err)
  				goto cleanup;
  			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
  			err = ext4_ext_dirty(handle, inode, path + i);
  			if (err)
  				goto cleanup;
  		}
  
  		i--;
  	}
  
  	/* insert new index */
  	err = ext4_ext_insert_index(handle, inode, path + at,
  				    le32_to_cpu(border), newblock);
  
  cleanup:
  	if (bh) {
  		if (buffer_locked(bh))
  			unlock_buffer(bh);
  		brelse(bh);
  	}
  
  	if (err) {
  		/* free all allocated blocks in error case */
  		for (i = 0; i < depth; i++) {
  			if (!ablocks[i])
  				continue;
  			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
  					 EXT4_FREE_BLOCKS_METADATA);
  		}
  	}
  	kfree(ablocks);
  
  	return err;
  }
  
  /*
   * ext4_ext_grow_indepth:
   * implements tree growing procedure:
   * - allocates new block
   * - moves top-level data (index block or leaf) into the new block
   * - initializes new top-level, creating index that points to the
   *   just created block
   */
  static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
  				 unsigned int flags,
  				 struct ext4_extent *newext)
  {
  	struct ext4_extent_header *neh;
  	struct buffer_head *bh;
  	ext4_fsblk_t newblock;
  	int err = 0;
  	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
  		newext, &err, flags);
  	if (newblock == 0)
  		return err;
  
  	bh = sb_getblk(inode->i_sb, newblock);
  	if (!bh) {
  		err = -EIO;
  		ext4_std_error(inode->i_sb, err);
  		return err;
  	}
  	lock_buffer(bh);
  	err = ext4_journal_get_create_access(handle, bh);
  	if (err) {
  		unlock_buffer(bh);
  		goto out;
  	}
  
  	/* move top-level index/leaf into new block */
  	memmove(bh->b_data, EXT4_I(inode)->i_data,
  		sizeof(EXT4_I(inode)->i_data));
  
  	/* set size of new block */
  	neh = ext_block_hdr(bh);
  	/* old root could have indexes or leaves,
  	 * so calculate eh_max the right way */
  	if (ext_depth(inode))
  		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
  	else
  		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
  	neh->eh_magic = EXT4_EXT_MAGIC;
  	set_buffer_uptodate(bh);
  	unlock_buffer(bh);
  	err = ext4_handle_dirty_metadata(handle, inode, bh);
  	if (err)
  		goto out;
  	/* Update top-level index: num,max,pointer */
  	neh = ext_inode_hdr(inode);
  	neh->eh_entries = cpu_to_le16(1);
  	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
  	if (neh->eh_depth == 0) {
  		/* Root extent block becomes index block */
  		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
  		EXT_FIRST_INDEX(neh)->ei_block =
  			EXT_FIRST_EXTENT(neh)->ee_block;
  	}
  	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
  		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
  		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
  		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

  	neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
  	ext4_mark_inode_dirty(handle, inode);
  out:
  	brelse(bh);
  
  	return err;
  }
  
  /*
   * ext4_ext_create_new_leaf:
   * finds empty index and adds new leaf.
   * if no free index is found, then it requests in-depth growing.
   */
  static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
  				    unsigned int flags,
  				    struct ext4_ext_path *path,
  				    struct ext4_extent *newext)
  {
  	struct ext4_ext_path *curp;
  	int depth, i, err = 0;
  
  repeat:
  	i = depth = ext_depth(inode);
  
  	/* walk up to the tree and look for free index entry */
  	curp = path + depth;
  	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
  		i--;
  		curp--;
  	}
  	/* we use already allocated block for index block,
  	 * so subsequent data blocks should be contiguous */
  	if (EXT_HAS_FREE_INDEX(curp)) {
  		/* if we found index with free entry, then use that
  		 * entry: create all needed subtree and add new leaf */
  		err = ext4_ext_split(handle, inode, flags, path, newext, i);
  		if (err)
  			goto out;
  
  		/* refill path */
  		ext4_ext_drop_refs(path);
  		path = ext4_ext_find_extent(inode,
  				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
  				    path);
  		if (IS_ERR(path))
  			err = PTR_ERR(path);
  	} else {
  		/* tree is full, time to grow in depth */
  		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
  		if (err)
  			goto out;
  
  		/* refill path */
  		ext4_ext_drop_refs(path);
  		path = ext4_ext_find_extent(inode,
  				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
  				    path);
  		if (IS_ERR(path)) {
  			err = PTR_ERR(path);
  			goto out;
  		}
  
  		/*
  		 * only first (depth 0 -> 1) produces free space;
  		 * in all other cases we have to split the grown tree
  		 */
  		depth = ext_depth(inode);
  		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
  			/* now we need to split */
  			goto repeat;
  		}
  	}
  
  out:
  	return err;
  }
  
  /*
   * search the closest allocated block to the left for *logical
   * and returns it at @logical + its physical address at @phys
   * if *logical is the smallest allocated block, the function
   * returns 0 at @phys
   * return value contains 0 (success) or error code
   */
  static int ext4_ext_search_left(struct inode *inode,
  				struct ext4_ext_path *path,
  				ext4_lblk_t *logical, ext4_fsblk_t *phys)
  {
  	struct ext4_extent_idx *ix;
  	struct ext4_extent *ex;
  	int depth, ee_len;

  	if (unlikely(path == NULL)) {
  		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
  		return -EIO;
  	}
  	depth = path->p_depth;
  	*phys = 0;
  
  	if (depth == 0 && path->p_ext == NULL)
  		return 0;
  
  	/* usually extent in the path covers blocks smaller
   * than *logical, but it can be that the extent is the
  	 * first one in the file */
  
  	ex = path[depth].p_ext;
  	ee_len = ext4_ext_get_actual_len(ex);
  	if (*logical < le32_to_cpu(ex->ee_block)) {
  		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
  			EXT4_ERROR_INODE(inode,
  					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
  					 *logical, le32_to_cpu(ex->ee_block));
  			return -EIO;
  		}
  		while (--depth >= 0) {
  			ix = path[depth].p_idx;
  			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
  				EXT4_ERROR_INODE(inode,
  				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
  				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
273df556b   Frank Mayhar   ext4: Convert BUG...
1194
  				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
6ee3b2122   Tao Ma   ext4: use le32_to...
1195
  		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
273df556b   Frank Mayhar   ext4: Convert BUG...
1196
1197
1198
  				  depth);
  				return -EIO;
  			}
1988b51e4   Alex Tomas   ext4: Add new fun...
1199
1200
1201
  		}
  		return 0;
  	}
273df556b   Frank Mayhar   ext4: Convert BUG...
1202
1203
1204
1205
1206
1207
  	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
  		EXT4_ERROR_INODE(inode,
  				 "logical %d < ee_block %d + ee_len %d!",
  				 *logical, le32_to_cpu(ex->ee_block), ee_len);
  		return -EIO;
  	}
1988b51e4   Alex Tomas   ext4: Add new fun...
1208

b939e3766   Aneesh Kumar K.V   ext4: Use the ext...
1209
  	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1210
  	*phys = ext4_ext_pblock(ex) + ee_len - 1;
1988b51e4   Alex Tomas   ext4: Add new fun...
1211
1212
1213
1214
1215
1216
  	return 0;
  }
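/*
 * Illustrative sketch (not part of the original file): the arithmetic the
 * function above ends with once the covering extent is known.  The closest
 * allocated block to the left of *logical inside an extent of "len" blocks
 * is simply the last block of that extent.  Plain integers stand in for
 * the little-endian on-disk fields and kernel types.
 */
#if 0
#include <stdio.h>

static void demo_left_neighbor(unsigned int ee_block, unsigned long long pblock,
			       unsigned short len,
			       unsigned int *logical, unsigned long long *phys)
{
	*logical = ee_block + len - 1;	/* last logical block of the extent */
	*phys = pblock + len - 1;	/* matching physical block */
}

int main(void)
{
	unsigned int logical;
	unsigned long long phys;

	/* extent: 16 blocks at logical 100, physical 5000 */
	demo_left_neighbor(100, 5000, 16, &logical, &phys);
	printf("left neighbour: %u -> %llu\n", logical, phys);	/* 115 -> 5015 */
	return 0;
}
#endif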
  
  /*
   * search the closest allocated block to the right of *logical
   * and returns it at @logical, with its physical address at @phys
df3ab1707   Tao Ma   ext4: fix the com...
1217
   * if *logical is the largest allocated block, the function
1988b51e4   Alex Tomas   ext4: Add new fun...
1218
1219
1220
   * returns 0 at @phys
   * return value contains 0 (success) or error code
   */
1f109d5a1   Theodore Ts'o   ext4: make variou...
1221
1222
  static int ext4_ext_search_right(struct inode *inode,
  				 struct ext4_ext_path *path,
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
1223
1224
  				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
  				 struct ext4_extent **ret_ex)
1988b51e4   Alex Tomas   ext4: Add new fun...
1225
1226
1227
1228
1229
1230
  {
  	struct buffer_head *bh = NULL;
  	struct ext4_extent_header *eh;
  	struct ext4_extent_idx *ix;
  	struct ext4_extent *ex;
  	ext4_fsblk_t block;
395a87bfe   Eric Sandeen   ext4: fix header ...
1231
1232
  	int depth;	/* Note, NOT eh_depth; depth from top of tree */
  	int ee_len;
1988b51e4   Alex Tomas   ext4: Add new fun...
1233

273df556b   Frank Mayhar   ext4: Convert BUG...
1234
1235
1236
1237
  	if (unlikely(path == NULL)) {
  		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
  		return -EIO;
  	}
1988b51e4   Alex Tomas   ext4: Add new fun...
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
  	depth = path->p_depth;
  	*phys = 0;
  
  	if (depth == 0 && path->p_ext == NULL)
  		return 0;
  
  	/* usually the extent in the path covers blocks smaller
  	 * than *logical, but it can be that the extent is the
  	 * first one in the file */
  
  	ex = path[depth].p_ext;
b939e3766   Aneesh Kumar K.V   ext4: Use the ext...
1249
  	ee_len = ext4_ext_get_actual_len(ex);
1988b51e4   Alex Tomas   ext4: Add new fun...
1250
  	if (*logical < le32_to_cpu(ex->ee_block)) {
273df556b   Frank Mayhar   ext4: Convert BUG...
1251
1252
1253
1254
1255
1256
  		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
  			EXT4_ERROR_INODE(inode,
  					 "first_extent(path[%d].p_hdr) != ex",
  					 depth);
  			return -EIO;
  		}
1988b51e4   Alex Tomas   ext4: Add new fun...
1257
1258
  		while (--depth >= 0) {
  			ix = path[depth].p_idx;
273df556b   Frank Mayhar   ext4: Convert BUG...
1259
1260
1261
1262
1263
1264
  			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
  				EXT4_ERROR_INODE(inode,
  						 "ix != EXT_FIRST_INDEX *logical %d!",
  						 *logical);
  				return -EIO;
  			}
1988b51e4   Alex Tomas   ext4: Add new fun...
1265
  		}
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
1266
  		goto found_extent;
1988b51e4   Alex Tomas   ext4: Add new fun...
1267
  	}
273df556b   Frank Mayhar   ext4: Convert BUG...
1268
1269
1270
1271
1272
1273
  	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
  		EXT4_ERROR_INODE(inode,
  				 "logical %d < ee_block %d + ee_len %d!",
  				 *logical, le32_to_cpu(ex->ee_block), ee_len);
  		return -EIO;
  	}
1988b51e4   Alex Tomas   ext4: Add new fun...
1274
1275
1276
1277
  
  	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
  		/* next allocated block in this leaf */
  		ex++;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
1278
  		goto found_extent;
1988b51e4   Alex Tomas   ext4: Add new fun...
1279
1280
1281
1282
1283
1284
  	}
  
  	/* go up and search for index to the right */
  	while (--depth >= 0) {
  		ix = path[depth].p_idx;
  		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
25f1ee3ab   Wu Fengguang   ext4: fix build w...
1285
  			goto got_index;
1988b51e4   Alex Tomas   ext4: Add new fun...
1286
  	}
25f1ee3ab   Wu Fengguang   ext4: fix build w...
1287
1288
  	/* we've gone up to the root and found no index to the right */
  	return 0;
1988b51e4   Alex Tomas   ext4: Add new fun...
1289

25f1ee3ab   Wu Fengguang   ext4: fix build w...
1290
  got_index:
1988b51e4   Alex Tomas   ext4: Add new fun...
1291
1292
1293
1294
  	/* we've found index to the right, let's
  	 * follow it and find the closest allocated
  	 * block to the right */
  	ix++;
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1295
  	block = ext4_idx_pblock(ix);
1988b51e4   Alex Tomas   ext4: Add new fun...
1296
1297
1298
1299
1300
  	while (++depth < path->p_depth) {
  		bh = sb_bread(inode->i_sb, block);
  		if (bh == NULL)
  			return -EIO;
  		eh = ext_block_hdr(bh);
395a87bfe   Eric Sandeen   ext4: fix header ...
1301
  		/* subtract from p_depth to get proper eh_depth */
56b19868a   Aneesh Kumar K.V   ext4: Add checks ...
1302
  		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
1988b51e4   Alex Tomas   ext4: Add new fun...
1303
1304
1305
1306
  			put_bh(bh);
  			return -EIO;
  		}
  		ix = EXT_FIRST_INDEX(eh);
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1307
  		block = ext4_idx_pblock(ix);
1988b51e4   Alex Tomas   ext4: Add new fun...
1308
1309
1310
1311
1312
1313
1314
  		put_bh(bh);
  	}
  
  	bh = sb_bread(inode->i_sb, block);
  	if (bh == NULL)
  		return -EIO;
  	eh = ext_block_hdr(bh);
56b19868a   Aneesh Kumar K.V   ext4: Add checks ...
1315
  	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
1988b51e4   Alex Tomas   ext4: Add new fun...
1316
1317
1318
1319
  		put_bh(bh);
  		return -EIO;
  	}
  	ex = EXT_FIRST_EXTENT(eh);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
1320
  found_extent:
1988b51e4   Alex Tomas   ext4: Add new fun...
1321
  	*logical = le32_to_cpu(ex->ee_block);
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1322
  	*phys = ext4_ext_pblock(ex);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
1323
1324
1325
  	*ret_ex = ex;
  	if (bh)
  		put_bh(bh);
1988b51e4   Alex Tomas   ext4: Add new fun...
1326
  	return 0;
1988b51e4   Alex Tomas   ext4: Add new fun...
1327
1328
1329
  }
  
  /*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1330
   * ext4_ext_next_allocated_block:
f17722f91   Lukas Czerner   ext4: Fix max fil...
1331
   * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1332
1333
1334
   * NOTE: it considers the block number from an index entry as an
   * allocated block. Thus, index entries have to be consistent
   * with leaves.
a86c61812   Alex Tomas   [PATCH] ext3: add...
1335
   */
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1336
  static ext4_lblk_t
a86c61812   Alex Tomas   [PATCH] ext3: add...
1337
1338
1339
1340
1341
1342
1343
1344
  ext4_ext_next_allocated_block(struct ext4_ext_path *path)
  {
  	int depth;
  
  	BUG_ON(path == NULL);
  	depth = path->p_depth;
  
  	if (depth == 0 && path->p_ext == NULL)
f17722f91   Lukas Czerner   ext4: Fix max fil...
1345
  		return EXT_MAX_BLOCKS;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1346
1347
1348
1349
  
  	while (depth >= 0) {
  		if (depth == path->p_depth) {
  			/* leaf */
6f8ff5372   Curt Wohlgemuth   ext4: handle NULL...
1350
1351
  			if (path[depth].p_ext &&
  				path[depth].p_ext !=
a86c61812   Alex Tomas   [PATCH] ext3: add...
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
  					EXT_LAST_EXTENT(path[depth].p_hdr))
  			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
  		} else {
  			/* index */
  			if (path[depth].p_idx !=
  					EXT_LAST_INDEX(path[depth].p_hdr))
  			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
  		}
  		depth--;
  	}
f17722f91   Lukas Czerner   ext4: Fix max fil...
1362
  	return EXT_MAX_BLOCKS;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1363
1364
1365
  }
  
  /*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1366
   * ext4_ext_next_leaf_block:
f17722f91   Lukas Czerner   ext4: Fix max fil...
1367
   * returns first allocated block from next leaf or EXT_MAX_BLOCKS
a86c61812   Alex Tomas   [PATCH] ext3: add...
1368
   */
5718789da   Robin Dong   ext4: remove unus...
1369
  static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1370
1371
1372
1373
1374
1375
1376
1377
  {
  	int depth;
  
  	BUG_ON(path == NULL);
  	depth = path->p_depth;
  
  	/* zero-tree has no leaf blocks at all */
  	if (depth == 0)
f17722f91   Lukas Czerner   ext4: Fix max fil...
1378
  		return EXT_MAX_BLOCKS;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1379
1380
1381
1382
1383
1384
1385
  
  	/* go to index block */
  	depth--;
  
  	while (depth >= 0) {
  		if (path[depth].p_idx !=
  				EXT_LAST_INDEX(path[depth].p_hdr))
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1386
1387
  			return (ext4_lblk_t)
  				le32_to_cpu(path[depth].p_idx[1].ei_block);
a86c61812   Alex Tomas   [PATCH] ext3: add...
1388
1389
  		depth--;
  	}
f17722f91   Lukas Czerner   ext4: Fix max fil...
1390
  	return EXT_MAX_BLOCKS;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1391
1392
1393
  }
  
  /*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1394
1395
1396
   * ext4_ext_correct_indexes:
   * if leaf gets modified and modified extent is first in the leaf,
   * then we have to correct all indexes above.
a86c61812   Alex Tomas   [PATCH] ext3: add...
1397
1398
   * TODO: do we need to correct tree in all cases?
   */
1d03ec984   Aneesh Kumar K.V   ext4: Fix sparse...
1399
  static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
a86c61812   Alex Tomas   [PATCH] ext3: add...
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
  				struct ext4_ext_path *path)
  {
  	struct ext4_extent_header *eh;
  	int depth = ext_depth(inode);
  	struct ext4_extent *ex;
  	__le32 border;
  	int k, err = 0;
  
  	eh = path[depth].p_hdr;
  	ex = path[depth].p_ext;
273df556b   Frank Mayhar   ext4: Convert BUG...
1410
1411
1412
1413
1414
1415
  
  	if (unlikely(ex == NULL || eh == NULL)) {
  		EXT4_ERROR_INODE(inode,
  				 "ex %p == NULL or eh %p == NULL", ex, eh);
  		return -EIO;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
  
  	if (depth == 0) {
  		/* there is no tree at all */
  		return 0;
  	}
  
  	if (ex != EXT_FIRST_EXTENT(eh)) {
  		/* we only correct the tree if the first extent in the leaf was modified */
  		return 0;
  	}
  
  	/*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1428
  	 * TODO: we need correction if border is smaller than current one
a86c61812   Alex Tomas   [PATCH] ext3: add...
1429
1430
1431
  	 */
  	k = depth - 1;
  	border = path[depth].p_ext->ee_block;
7e0289766   Avantika Mathur   [PATCH] ext4: if ...
1432
1433
  	err = ext4_ext_get_access(handle, inode, path + k);
  	if (err)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1434
1435
  		return err;
  	path[k].p_idx->ei_block = border;
7e0289766   Avantika Mathur   [PATCH] ext4: if ...
1436
1437
  	err = ext4_ext_dirty(handle, inode, path + k);
  	if (err)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1438
1439
1440
1441
1442
1443
  		return err;
  
  	while (k--) {
  		/* change all left-side indexes */
  		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
  			break;
7e0289766   Avantika Mathur   [PATCH] ext4: if ...
1444
1445
  		err = ext4_ext_get_access(handle, inode, path + k);
  		if (err)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1446
1447
  			break;
  		path[k].p_idx->ei_block = border;
7e0289766   Avantika Mathur   [PATCH] ext4: if ...
1448
1449
  		err = ext4_ext_dirty(handle, inode, path + k);
  		if (err)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1450
1451
1452
1453
1454
  			break;
  	}
  
  	return err;
  }
748de6736   Akira Fujita   ext4: online defr...
1455
  int
a86c61812   Alex Tomas   [PATCH] ext3: add...
1456
1457
1458
  ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
  				struct ext4_extent *ex2)
  {
749269fac   Amit Arora   Change on-disk fo...
1459
  	unsigned short ext1_ee_len, ext2_ee_len, max_len;
a2df2a634   Amit Arora   fallocate support...
1460
1461
1462
1463
1464
1465
1466
  
  	/*
  	 * Make sure that either both extents are uninitialized, or
  	 * both are _not_.
  	 */
  	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
  		return 0;
749269fac   Amit Arora   Change on-disk fo...
1467
1468
1469
1470
  	if (ext4_ext_is_uninitialized(ex1))
  		max_len = EXT_UNINIT_MAX_LEN;
  	else
  		max_len = EXT_INIT_MAX_LEN;
a2df2a634   Amit Arora   fallocate support...
1471
1472
1473
1474
  	ext1_ee_len = ext4_ext_get_actual_len(ex1);
  	ext2_ee_len = ext4_ext_get_actual_len(ex2);
  
  	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
63f579335   Andrew Morton   [PATCH] ext4 whit...
1475
  			le32_to_cpu(ex2->ee_block))
a86c61812   Alex Tomas   [PATCH] ext3: add...
1476
  		return 0;
471d4011a   Suparna Bhattacharya   [PATCH] ext4: uni...
1477
1478
1479
  	/*
  	 * To allow future support for preallocated extents to be added
  	 * as an RO_COMPAT feature, refuse to merge two extents if
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1480
  	 * this can result in the top bit of ee_len being set.
471d4011a   Suparna Bhattacharya   [PATCH] ext4: uni...
1481
  	 */
749269fac   Amit Arora   Change on-disk fo...
1482
  	if (ext1_ee_len + ext2_ee_len > max_len)
471d4011a   Suparna Bhattacharya   [PATCH] ext4: uni...
1483
  		return 0;
bbf2f9fb1   Robert P. J. Day   Fix misspellings ...
1484
  #ifdef AGGRESSIVE_TEST
b939e3766   Aneesh Kumar K.V   ext4: Use the ext...
1485
  	if (ext1_ee_len >= 4)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1486
1487
  		return 0;
  #endif
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1488
  	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
a86c61812   Alex Tomas   [PATCH] ext3: add...
1489
1490
1491
1492
1493
  		return 1;
  	return 0;
  }
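/*
 * Illustrative sketch (not part of the original file): a simplified,
 * userspace model of the merge test above.  The struct and the maximum
 * lengths (32768 initialized / 32767 uninitialized) are stand-ins for the
 * on-disk ext4_extent and EXT_INIT_MAX_LEN/EXT_UNINIT_MAX_LEN; the real
 * code reads little-endian fields via le32_to_cpu() and
 * ext4_ext_get_actual_len().
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct demo_extent {
	unsigned int lblock;		/* first logical block */
	unsigned long long pblock;	/* first physical block */
	unsigned short len;		/* number of blocks */
	bool uninit;			/* uninitialized (preallocated)? */
};

static bool demo_can_merge(const struct demo_extent *e1,
			   const struct demo_extent *e2)
{
	unsigned int max_len = e1->uninit ? 32767 : 32768;

	if (e1->uninit != e2->uninit)
		return false;				/* mixed init state */
	if (e1->lblock + e1->len != e2->lblock)
		return false;				/* not logically contiguous */
	if (e1->len + e2->len > max_len)
		return false;				/* would overflow ee_len */
	return e1->pblock + e1->len == e2->pblock;	/* physically contiguous? */
}

int main(void)
{
	struct demo_extent a = { 0, 1000, 10, false };
	struct demo_extent b = { 10, 1010, 5, false };

	printf("mergeable: %d\n", demo_can_merge(&a, &b));	/* 1 */
	return 0;
}
#endif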
  
  /*
56055d3ae   Amit Arora   write support for...
1494
1495
1496
1497
1498
1499
   * This function tries to merge the "ex" extent with the next extent in the tree.
   * It always tries to merge towards the right. If you want to merge towards
   * the left, pass "ex - 1" as the argument instead of "ex".
   * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
   * 1 if they got merged.
   */
197217a5a   Yongqiang Yang   ext4: add a funct...
1500
  static int ext4_ext_try_to_merge_right(struct inode *inode,
1f109d5a1   Theodore Ts'o   ext4: make variou...
1501
1502
  				 struct ext4_ext_path *path,
  				 struct ext4_extent *ex)
56055d3ae   Amit Arora   write support for...
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
  {
  	struct ext4_extent_header *eh;
  	unsigned int depth, len;
  	int merge_done = 0;
  	int uninitialized = 0;
  
  	depth = ext_depth(inode);
  	BUG_ON(path[depth].p_hdr == NULL);
  	eh = path[depth].p_hdr;
  
  	while (ex < EXT_LAST_EXTENT(eh)) {
  		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
  			break;
  		/* merge with next extent! */
  		if (ext4_ext_is_uninitialized(ex))
  			uninitialized = 1;
  		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
  				+ ext4_ext_get_actual_len(ex + 1));
  		if (uninitialized)
  			ext4_ext_mark_uninitialized(ex);
  
  		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
  			len = (EXT_LAST_EXTENT(eh) - ex - 1)
  				* sizeof(struct ext4_extent);
  			memmove(ex + 1, ex + 2, len);
  		}
e8546d061   Marcin Slusarz   ext4: le*_add_cpu...
1529
  		le16_add_cpu(&eh->eh_entries, -1);
56055d3ae   Amit Arora   write support for...
1530
1531
1532
  		merge_done = 1;
  		WARN_ON(eh->eh_entries == 0);
  		if (!eh->eh_entries)
24676da46   Theodore Ts'o   ext4: Convert cal...
1533
  			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
56055d3ae   Amit Arora   write support for...
1534
1535
1536
1537
1538
1539
  	}
  
  	return merge_done;
  }
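/*
 * Illustrative sketch (not part of the original file): the array
 * manipulation the merge loop above performs, on a simplified in-memory
 * leaf.  When entry i absorbs entry i + 1, its length grows, the remaining
 * entries are shifted down with memmove(), and the entry count drops by
 * one.  Journaling, uninitialized-state handling and on-disk layout are
 * left out.
 */
#if 0
#include <stdio.h>
#include <string.h>

struct demo_extent {
	unsigned int lblock;
	unsigned long long pblock;
	unsigned short len;
};

static void demo_merge_with_next(struct demo_extent *leaf,
				 unsigned int *nr, unsigned int i)
{
	leaf[i].len += leaf[i + 1].len;		/* absorb the next extent */
	if (i + 2 < *nr)			/* close the gap, if any */
		memmove(&leaf[i + 1], &leaf[i + 2],
			(*nr - i - 2) * sizeof(leaf[0]));
	(*nr)--;
}

int main(void)
{
	struct demo_extent leaf[3] = {
		{ 0, 1000, 8 }, { 8, 1008, 4 }, { 100, 9000, 2 },
	};
	unsigned int nr = 3;

	demo_merge_with_next(leaf, &nr, 0);
	printf("%u extents, first len %u, second lblock %u\n",
	       nr, (unsigned int)leaf[0].len, leaf[1].lblock);	/* 2, 12, 100 */
	return 0;
}
#endif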
  
  /*
197217a5a   Yongqiang Yang   ext4: add a funct...
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
   * This function tries to merge the @ex extent with its neighbours in the
   * tree, attempting a merge to the left first and then to the right.
   * Returns 1 if @ex was merged with the extent to its right, 0 otherwise.
   */
  static int ext4_ext_try_to_merge(struct inode *inode,
  				  struct ext4_ext_path *path,
  				  struct ext4_extent *ex) {
  	struct ext4_extent_header *eh;
  	unsigned int depth;
  	int merge_done = 0;
  	int ret = 0;
  
  	depth = ext_depth(inode);
  	BUG_ON(path[depth].p_hdr == NULL);
  	eh = path[depth].p_hdr;
  
  	if (ex > EXT_FIRST_EXTENT(eh))
  		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
  
  	if (!merge_done)
  		ret = ext4_ext_try_to_merge_right(inode, path, ex);
  
  	return ret;
  }
  
  /*
25d14f983   Amit Arora   ext4: Extent over...
1565
1566
1567
1568
1569
1570
1571
   * check if a portion of the "newext" extent overlaps with an
   * existing extent.
   *
   * If there is an overlap discovered, it updates the length of the newext
   * such that there will be no overlap, and then returns 1.
   * If there is no overlap found, it returns 0.
   */
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
1572
1573
  static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
  					   struct inode *inode,
1f109d5a1   Theodore Ts'o   ext4: make variou...
1574
1575
  					   struct ext4_extent *newext,
  					   struct ext4_ext_path *path)
25d14f983   Amit Arora   ext4: Extent over...
1576
  {
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1577
  	ext4_lblk_t b1, b2;
25d14f983   Amit Arora   ext4: Extent over...
1578
1579
1580
1581
  	unsigned int depth, len1;
  	unsigned int ret = 0;
  
  	b1 = le32_to_cpu(newext->ee_block);
a2df2a634   Amit Arora   fallocate support...
1582
  	len1 = ext4_ext_get_actual_len(newext);
25d14f983   Amit Arora   ext4: Extent over...
1583
1584
1585
1586
  	depth = ext_depth(inode);
  	if (!path[depth].p_ext)
  		goto out;
  	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
1587
  	b2 &= ~(sbi->s_cluster_ratio - 1);
25d14f983   Amit Arora   ext4: Extent over...
1588
1589
1590
  
  	/*
  	 * get the next allocated block if the extent in the path
2b2d6d019   Theodore Ts'o   ext4: Cleanup whi...
1591
  	 * is before the requested block(s)
25d14f983   Amit Arora   ext4: Extent over...
1592
1593
1594
  	 */
  	if (b2 < b1) {
  		b2 = ext4_ext_next_allocated_block(path);
f17722f91   Lukas Czerner   ext4: Fix max fil...
1595
  		if (b2 == EXT_MAX_BLOCKS)
25d14f983   Amit Arora   ext4: Extent over...
1596
  			goto out;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
1597
  		b2 &= ~(sbi->s_cluster_ratio - 1);
25d14f983   Amit Arora   ext4: Extent over...
1598
  	}
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1599
  	/* check for wrap through zero on extent logical start block*/
25d14f983   Amit Arora   ext4: Extent over...
1600
  	if (b1 + len1 < b1) {
f17722f91   Lukas Czerner   ext4: Fix max fil...
1601
  		len1 = EXT_MAX_BLOCKS - b1;
25d14f983   Amit Arora   ext4: Extent over...
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
  		newext->ee_len = cpu_to_le16(len1);
  		ret = 1;
  	}
  
  	/* check for overlap */
  	if (b1 + len1 > b2) {
  		newext->ee_len = cpu_to_le16(b2 - b1);
  		ret = 1;
  	}
  out:
  	return ret;
  }
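/*
 * Illustrative sketch (not part of the original file): how a requested
 * range [b1, b1 + len1) is clipped against the next allocated block b2,
 * mirroring ext4_ext_check_overlap() above.  Plain integers stand in for
 * the little-endian fields, EXT_MAX_BLOCKS is written out as 0xffffffff,
 * and the bigalloc cluster rounding is omitted.
 */
#if 0
#include <stdio.h>

static unsigned int demo_clip_len(unsigned int b1, unsigned int len1,
				  unsigned int b2)
{
	if (b1 + len1 < b1)			/* wrap through zero */
		len1 = 0xffffffffu - b1;	/* EXT_MAX_BLOCKS - b1 */
	if (b1 + len1 > b2)			/* overlap with next allocation */
		len1 = b2 - b1;
	return len1;
}

int main(void)
{
	/* request 100 blocks at 50, but block 120 is already allocated */
	printf("clipped len = %u\n", demo_clip_len(50, 100, 120));	/* 70 */
	return 0;
}
#endif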
  
  /*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1616
1617
1618
1619
   * ext4_ext_insert_extent:
   * tries to merge the requested extent into an existing extent or
   * inserts the requested extent as a new one into the tree,
   * creating a new leaf in the no-space case.
a86c61812   Alex Tomas   [PATCH] ext3: add...
1620
1621
1622
   */
  int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
  				struct ext4_ext_path *path,
0031462b5   Mingming Cao   ext4: Split unini...
1623
  				struct ext4_extent *newext, int flag)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1624
  {
af5bc92dd   Theodore Ts'o   ext4: Fix whitesp...
1625
  	struct ext4_extent_header *eh;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1626
1627
1628
  	struct ext4_extent *ex, *fex;
  	struct ext4_extent *nearex; /* nearest extent */
  	struct ext4_ext_path *npath = NULL;
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1629
1630
  	int depth, len, err;
  	ext4_lblk_t next;
a2df2a634   Amit Arora   fallocate support...
1631
  	unsigned uninitialized = 0;
55f020db6   Allison Henderson   ext4: add flag to...
1632
  	int flags = 0;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1633

273df556b   Frank Mayhar   ext4: Convert BUG...
1634
1635
1636
1637
  	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
  		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
  		return -EIO;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
1638
1639
  	depth = ext_depth(inode);
  	ex = path[depth].p_ext;
273df556b   Frank Mayhar   ext4: Convert BUG...
1640
1641
1642
1643
  	if (unlikely(path[depth].p_hdr == NULL)) {
  		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
  		return -EIO;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
1644
1645
  
  	/* try to insert block into found extent and return */
744692dc0   Jiaying Zhang   ext4: use ext4_ge...
1646
  	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
0031462b5   Mingming Cao   ext4: Split unini...
1647
  		&& ext4_can_extents_be_merged(inode, ex, newext)) {
32de67569   Yongqiang Yang   ext4: fix a synta...
1648
1649
  		ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)
  ",
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1650
1651
1652
1653
1654
1655
  			  ext4_ext_is_uninitialized(newext),
  			  ext4_ext_get_actual_len(newext),
  			  le32_to_cpu(ex->ee_block),
  			  ext4_ext_is_uninitialized(ex),
  			  ext4_ext_get_actual_len(ex),
  			  ext4_ext_pblock(ex));
7e0289766   Avantika Mathur   [PATCH] ext4: if ...
1656
1657
  		err = ext4_ext_get_access(handle, inode, path + depth);
  		if (err)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1658
  			return err;
a2df2a634   Amit Arora   fallocate support...
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
  
  		/*
  		 * ext4_can_extents_be_merged should have checked that either
  		 * both extents are uninitialized, or both aren't. Thus we
  		 * need to check only one of them here.
  		 */
  		if (ext4_ext_is_uninitialized(ex))
  			uninitialized = 1;
  		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
  					+ ext4_ext_get_actual_len(newext));
  		if (uninitialized)
  			ext4_ext_mark_uninitialized(ex);
a86c61812   Alex Tomas   [PATCH] ext3: add...
1671
1672
1673
1674
  		eh = path[depth].p_hdr;
  		nearex = ex;
  		goto merge;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
1675
1676
1677
1678
1679
1680
1681
  	depth = ext_depth(inode);
  	eh = path[depth].p_hdr;
  	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
  		goto has_space;
  
  	/* probably next leaf has space for us? */
  	fex = EXT_LAST_EXTENT(eh);
598dbdf24   Robin Dong   ext4: avoid unnee...
1682
1683
  	next = EXT_MAX_BLOCKS;
  	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
5718789da   Robin Dong   ext4: remove unus...
1684
  		next = ext4_ext_next_leaf_block(path);
598dbdf24   Robin Dong   ext4: avoid unnee...
1685
  	if (next != EXT_MAX_BLOCKS) {
32de67569   Yongqiang Yang   ext4: fix a synta...
1686
1687
  		ext_debug("next leaf block - %u
  ", next);
a86c61812   Alex Tomas   [PATCH] ext3: add...
1688
1689
1690
1691
1692
1693
1694
  		BUG_ON(npath != NULL);
  		npath = ext4_ext_find_extent(inode, next, NULL);
  		if (IS_ERR(npath))
  			return PTR_ERR(npath);
  		BUG_ON(npath->p_depth != path->p_depth);
  		eh = npath[depth].p_hdr;
  		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
25985edce   Lucas De Marchi   Fix common misspe...
1695
1696
  			ext_debug("next leaf isn't full(%d)
  ",
a86c61812   Alex Tomas   [PATCH] ext3: add...
1697
1698
  				  le16_to_cpu(eh->eh_entries));
  			path = npath;
ffb505ff0   Robin Dong   ext4: remove redu...
1699
  			goto has_space;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1700
1701
1702
1703
1704
1705
1706
  		}
  		ext_debug("next leaf has no free space(%d,%d)
  ",
  			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
  	}
  
  	/*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1707
1708
  	 * There is no free space in the found leaf.
  	 * We're going to add a new leaf in the tree.
a86c61812   Alex Tomas   [PATCH] ext3: add...
1709
  	 */
55f020db6   Allison Henderson   ext4: add flag to...
1710
1711
1712
  	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
  		flags = EXT4_MB_USE_ROOT_BLOCKS;
  	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
a86c61812   Alex Tomas   [PATCH] ext3: add...
1713
1714
1715
1716
1717
1718
1719
  	if (err)
  		goto cleanup;
  	depth = ext_depth(inode);
  	eh = path[depth].p_hdr;
  
  has_space:
  	nearex = path[depth].p_ext;
7e0289766   Avantika Mathur   [PATCH] ext4: if ...
1720
1721
  	err = ext4_ext_get_access(handle, inode, path + depth);
  	if (err)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1722
1723
1724
1725
  		goto cleanup;
  
  	if (!nearex) {
  		/* there is no extent in this leaf, create first one */
32de67569   Yongqiang Yang   ext4: fix a synta...
1726
1727
  		ext_debug("first extent in the leaf: %u:%llu:[%d]%d
  ",
8c55e2041   Dave Kleikamp   EXT4: Fix whitespace
1728
  				le32_to_cpu(newext->ee_block),
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1729
  				ext4_ext_pblock(newext),
553f90089   Mingming   ext4: Show unwrit...
1730
  				ext4_ext_is_uninitialized(newext),
a2df2a634   Amit Arora   fallocate support...
1731
  				ext4_ext_get_actual_len(newext));
80e675f90   Eric Gouriou   ext4: optimize me...
1732
1733
1734
  		nearex = EXT_FIRST_EXTENT(eh);
  	} else {
  		if (le32_to_cpu(newext->ee_block)
8c55e2041   Dave Kleikamp   EXT4: Fix whitespace
1735
  			   > le32_to_cpu(nearex->ee_block)) {
80e675f90   Eric Gouriou   ext4: optimize me...
1736
  			/* Insert after */
32de67569   Yongqiang Yang   ext4: fix a synta...
1737
1738
1739
  			ext_debug("insert %u:%llu:[%d]%d before: "
  					"nearest %p
  ",
80e675f90   Eric Gouriou   ext4: optimize me...
1740
1741
1742
1743
1744
1745
1746
1747
1748
  					le32_to_cpu(newext->ee_block),
  					ext4_ext_pblock(newext),
  					ext4_ext_is_uninitialized(newext),
  					ext4_ext_get_actual_len(newext),
  					nearex);
  			nearex++;
  		} else {
  			/* Insert before */
  			BUG_ON(newext->ee_block == nearex->ee_block);
32de67569   Yongqiang Yang   ext4: fix a synta...
1749
1750
1751
  			ext_debug("insert %u:%llu:[%d]%d after: "
  					"nearest %p
  ",
8c55e2041   Dave Kleikamp   EXT4: Fix whitespace
1752
  					le32_to_cpu(newext->ee_block),
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1753
  					ext4_ext_pblock(newext),
553f90089   Mingming   ext4: Show unwrit...
1754
  					ext4_ext_is_uninitialized(newext),
a2df2a634   Amit Arora   fallocate support...
1755
  					ext4_ext_get_actual_len(newext),
80e675f90   Eric Gouriou   ext4: optimize me...
1756
1757
1758
1759
  					nearex);
  		}
  		len = EXT_LAST_EXTENT(eh) - nearex + 1;
  		if (len > 0) {
32de67569   Yongqiang Yang   ext4: fix a synta...
1760
  			ext_debug("insert %u:%llu:[%d]%d: "
80e675f90   Eric Gouriou   ext4: optimize me...
1761
1762
1763
1764
1765
1766
1767
1768
1769
  					"move %d extents from 0x%p to 0x%p
  ",
  					le32_to_cpu(newext->ee_block),
  					ext4_ext_pblock(newext),
  					ext4_ext_is_uninitialized(newext),
  					ext4_ext_get_actual_len(newext),
  					len, nearex, nearex + 1);
  			memmove(nearex + 1, nearex,
  				len * sizeof(struct ext4_extent));
a86c61812   Alex Tomas   [PATCH] ext3: add...
1770
  		}
a86c61812   Alex Tomas   [PATCH] ext3: add...
1771
  	}
e8546d061   Marcin Slusarz   ext4: le*_add_cpu...
1772
  	le16_add_cpu(&eh->eh_entries, 1);
80e675f90   Eric Gouriou   ext4: optimize me...
1773
  	path[depth].p_ext = nearex;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1774
  	nearex->ee_block = newext->ee_block;
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1775
  	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
a86c61812   Alex Tomas   [PATCH] ext3: add...
1776
  	nearex->ee_len = newext->ee_len;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1777
1778
1779
  
  merge:
  	/* try to merge extents to the right */
744692dc0   Jiaying Zhang   ext4: use ext4_ge...
1780
  	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
0031462b5   Mingming Cao   ext4: Split unini...
1781
  		ext4_ext_try_to_merge(inode, path, nearex);
a86c61812   Alex Tomas   [PATCH] ext3: add...
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
  
  	/* try to merge extents to the left */
  
  	/* time to correct all indexes above */
  	err = ext4_ext_correct_indexes(handle, inode, path);
  	if (err)
  		goto cleanup;
  
  	err = ext4_ext_dirty(handle, inode, path + depth);
  
  cleanup:
  	if (npath) {
  		ext4_ext_drop_refs(npath);
  		kfree(npath);
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
1797
1798
1799
  	ext4_ext_invalidate_cache(inode);
  	return err;
  }
1f109d5a1   Theodore Ts'o   ext4: make variou...
1800
1801
1802
  static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
  			       ext4_lblk_t num, ext_prepare_callback func,
  			       void *cbdata)
6873fa0de   Eric Sandeen   Hook ext4 to the ...
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
  {
  	struct ext4_ext_path *path = NULL;
  	struct ext4_ext_cache cbex;
  	struct ext4_extent *ex;
  	ext4_lblk_t next, start = 0, end = 0;
  	ext4_lblk_t last = block + num;
  	int depth, exists, err = 0;
  
  	BUG_ON(func == NULL);
  	BUG_ON(inode == NULL);
f17722f91   Lukas Czerner   ext4: Fix max fil...
1813
  	while (block < last && block != EXT_MAX_BLOCKS) {
6873fa0de   Eric Sandeen   Hook ext4 to the ...
1814
1815
  		num = last - block;
  		/* find extent for this block */
fab3a549e   Theodore Ts'o   ext4: Fix potenti...
1816
  		down_read(&EXT4_I(inode)->i_data_sem);
6873fa0de   Eric Sandeen   Hook ext4 to the ...
1817
  		path = ext4_ext_find_extent(inode, block, path);
fab3a549e   Theodore Ts'o   ext4: Fix potenti...
1818
  		up_read(&EXT4_I(inode)->i_data_sem);
6873fa0de   Eric Sandeen   Hook ext4 to the ...
1819
1820
1821
1822
1823
1824
1825
  		if (IS_ERR(path)) {
  			err = PTR_ERR(path);
  			path = NULL;
  			break;
  		}
  
  		depth = ext_depth(inode);
273df556b   Frank Mayhar   ext4: Convert BUG...
1826
1827
1828
1829
1830
  		if (unlikely(path[depth].p_hdr == NULL)) {
  			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
  			err = -EIO;
  			break;
  		}
6873fa0de   Eric Sandeen   Hook ext4 to the ...
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
  		ex = path[depth].p_ext;
  		next = ext4_ext_next_allocated_block(path);
  
  		exists = 0;
  		if (!ex) {
  			/* there is no extent yet, so try to allocate
  			 * all requested space */
  			start = block;
  			end = block + num;
  		} else if (le32_to_cpu(ex->ee_block) > block) {
  			/* need to allocate space before found extent */
  			start = block;
  			end = le32_to_cpu(ex->ee_block);
  			if (block + num < end)
  				end = block + num;
  		} else if (block >= le32_to_cpu(ex->ee_block)
  					+ ext4_ext_get_actual_len(ex)) {
  			/* need to allocate space after found extent */
  			start = block;
  			end = block + num;
  			if (end >= next)
  				end = next;
  		} else if (block >= le32_to_cpu(ex->ee_block)) {
  			/*
  			 * some part of requested space is covered
  			 * by found extent
  			 */
  			start = block;
  			end = le32_to_cpu(ex->ee_block)
  				+ ext4_ext_get_actual_len(ex);
  			if (block + num < end)
  				end = block + num;
  			exists = 1;
  		} else {
  			BUG();
  		}
  		BUG_ON(end <= start);
  
  		if (!exists) {
  			cbex.ec_block = start;
  			cbex.ec_len = end - start;
  			cbex.ec_start = 0;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
1873
1874
1875
  		} else {
  			cbex.ec_block = le32_to_cpu(ex->ee_block);
  			cbex.ec_len = ext4_ext_get_actual_len(ex);
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
1876
  			cbex.ec_start = ext4_ext_pblock(ex);
6873fa0de   Eric Sandeen   Hook ext4 to the ...
1877
  		}
273df556b   Frank Mayhar   ext4: Convert BUG...
1878
1879
1880
1881
1882
  		if (unlikely(cbex.ec_len == 0)) {
  			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
  			err = -EIO;
  			break;
  		}
c03f8aa9a   Lukas Czerner   ext4: use FIEMAP_...
1883
  		err = func(inode, next, &cbex, ex, cbdata);
6873fa0de   Eric Sandeen   Hook ext4 to the ...
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
  		ext4_ext_drop_refs(path);
  
  		if (err < 0)
  			break;
  
  		if (err == EXT_REPEAT)
  			continue;
  		else if (err == EXT_BREAK) {
  			err = 0;
  			break;
  		}
  
  		if (ext_depth(inode) != depth) {
  			/* depth was changed. we have to realloc path */
  			kfree(path);
  			path = NULL;
  		}
  
  		block = cbex.ec_block + cbex.ec_len;
  	}
  
  	if (path) {
  		ext4_ext_drop_refs(path);
  		kfree(path);
  	}
  
  	return err;
  }
09b882520   Avantika Mathur   [PATCH] ext4: Eli...
1912
  static void
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1913
  ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
1914
  			__u32 len, ext4_fsblk_t start)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1915
1916
1917
  {
  	struct ext4_ext_cache *cex;
  	BUG_ON(len == 0);
2ec0ae3ac   Theodore Ts'o   ext4: Fix race in...
1918
  	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
d8990240d   Aditya Kali   ext4: add some tr...
1919
  	trace_ext4_ext_put_in_cache(inode, block, len, start);
a86c61812   Alex Tomas   [PATCH] ext3: add...
1920
  	cex = &EXT4_I(inode)->i_cached_extent;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1921
1922
1923
  	cex->ec_block = block;
  	cex->ec_len = len;
  	cex->ec_start = start;
2ec0ae3ac   Theodore Ts'o   ext4: Fix race in...
1924
  	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
a86c61812   Alex Tomas   [PATCH] ext3: add...
1925
1926
1927
  }
  
  /*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
1928
1929
   * ext4_ext_put_gap_in_cache:
   * calculate boundaries of the gap that the requested block fits into
a86c61812   Alex Tomas   [PATCH] ext3: add...
1930
1931
   * and cache this gap
   */
09b882520   Avantika Mathur   [PATCH] ext4: Eli...
1932
  static void
a86c61812   Alex Tomas   [PATCH] ext3: add...
1933
  ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1934
  				ext4_lblk_t block)
a86c61812   Alex Tomas   [PATCH] ext3: add...
1935
1936
  {
  	int depth = ext_depth(inode);
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1937
1938
  	unsigned long len;
  	ext4_lblk_t lblock;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1939
1940
1941
1942
1943
1944
  	struct ext4_extent *ex;
  
  	ex = path[depth].p_ext;
  	if (ex == NULL) {
  		/* there is no extent yet, so gap is [0;-] */
  		lblock = 0;
f17722f91   Lukas Czerner   ext4: Fix max fil...
1945
  		len = EXT_MAX_BLOCKS;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1946
1947
1948
1949
  		ext_debug("cache gap(whole file):");
  	} else if (block < le32_to_cpu(ex->ee_block)) {
  		lblock = block;
  		len = le32_to_cpu(ex->ee_block) - block;
bba907433   Eric Sandeen   ext4 extents: rem...
1950
1951
1952
1953
  		ext_debug("cache gap(before): %u [%u:%u]",
  				block,
  				le32_to_cpu(ex->ee_block),
  				 ext4_ext_get_actual_len(ex));
a86c61812   Alex Tomas   [PATCH] ext3: add...
1954
  	} else if (block >= le32_to_cpu(ex->ee_block)
a2df2a634   Amit Arora   fallocate support...
1955
  			+ ext4_ext_get_actual_len(ex)) {
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1956
  		ext4_lblk_t next;
8c55e2041   Dave Kleikamp   EXT4: Fix whitespace
1957
  		lblock = le32_to_cpu(ex->ee_block)
a2df2a634   Amit Arora   fallocate support...
1958
  			+ ext4_ext_get_actual_len(ex);
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1959
1960
  
  		next = ext4_ext_next_allocated_block(path);
bba907433   Eric Sandeen   ext4 extents: rem...
1961
1962
1963
1964
  		ext_debug("cache gap(after): [%u:%u] %u",
  				le32_to_cpu(ex->ee_block),
  				ext4_ext_get_actual_len(ex),
  				block);
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
1965
1966
  		BUG_ON(next == lblock);
  		len = next - lblock;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1967
1968
1969
1970
  	} else {
  		lblock = len = 0;
  		BUG();
  	}
bba907433   Eric Sandeen   ext4 extents: rem...
1971
1972
  	ext_debug(" -> %u:%lu
  ", lblock, len);
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
1973
  	ext4_ext_put_in_cache(inode, lblock, len, 0);
a86c61812   Alex Tomas   [PATCH] ext3: add...
1974
  }
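/*
 * Illustrative sketch (not part of the original file): how the gap around
 * an unmapped block is derived from the nearest extent, mirroring the
 * three cases above (no extents at all, block before the extent, block
 * after the extent).  "next" stands in for ext4_ext_next_allocated_block()
 * and the EXT_MAX_BLOCKS limit is simplified to UINT_MAX.
 */
#if 0
#include <limits.h>
#include <stdio.h>

/* Gap [*lblock, *lblock + *len) containing "block", given the nearest
 * extent [ee_block, ee_block + ee_len) and the next allocated block. */
static void demo_gap(unsigned int block, int have_extent,
		     unsigned int ee_block, unsigned short ee_len,
		     unsigned int next,
		     unsigned int *lblock, unsigned long *len)
{
	if (!have_extent) {			/* no extents: whole file is a hole */
		*lblock = 0;
		*len = UINT_MAX;
	} else if (block < ee_block) {		/* hole before the extent */
		*lblock = block;
		*len = ee_block - block;
	} else {				/* hole after the extent */
		*lblock = ee_block + ee_len;
		*len = next - *lblock;
	}
}

int main(void)
{
	unsigned int lblock;
	unsigned long len;

	/* block 70 lies after extent [50, 60) and before the next allocation at 100 */
	demo_gap(70, 1, 50, 10, 100, &lblock, &len);
	printf("gap %u:%lu\n", lblock, len);	/* 60:40 */
	return 0;
}
#endif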
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
1975
  /*
b7ca1e8ec   Robin Dong   ext4: correct com...
1976
   * ext4_ext_check_cache()
a4bb6b64e   Allison Henderson   ext4: enable "pun...
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
   * Checks to see if the given block is in the cache.
   * If it is, the cached extent is stored in the given
   * cache extent pointer.  If the cached extent is a hole,
   * this routine should be used instead of
   * ext4_ext_in_cache if the calling function needs to
   * know the size of the hole.
   *
   * @inode: The file's inode
   * @block: The block to look for in the cache
   * @ex:    Pointer where the cached extent will be stored
   *         if it contains block
   *
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
1989
1990
   * Return 0 if cache is invalid; 1 if the cache is valid
   */
a4bb6b64e   Allison Henderson   ext4: enable "pun...
1991
1992
  static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
  	struct ext4_ext_cache *ex){
a86c61812   Alex Tomas   [PATCH] ext3: add...
1993
  	struct ext4_ext_cache *cex;
77f4135f2   Vivek Haldar   ext4: count hits/...
1994
  	struct ext4_sb_info *sbi;
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
1995
  	int ret = 0;
a86c61812   Alex Tomas   [PATCH] ext3: add...
1996

60e6679e2   Theodore Ts'o   ext4: Drop whites...
1997
  	/*
2ec0ae3ac   Theodore Ts'o   ext4: Fix race in...
1998
1999
2000
  	 * We borrow i_block_reservation_lock to protect i_cached_extent
  	 */
  	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2001
  	cex = &EXT4_I(inode)->i_cached_extent;
77f4135f2   Vivek Haldar   ext4: count hits/...
2002
  	sbi = EXT4_SB(inode->i_sb);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2003
2004
  
  	/* has cache valid data? */
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
2005
  	if (cex->ec_len == 0)
2ec0ae3ac   Theodore Ts'o   ext4: Fix race in...
2006
  		goto errout;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2007

731eb1a03   Akinobu Mita   ext4: consolidate...
2008
  	if (in_range(block, cex->ec_block, cex->ec_len)) {
a4bb6b64e   Allison Henderson   ext4: enable "pun...
2009
  		memcpy(ex, cex, sizeof(struct ext4_ext_cache));
bba907433   Eric Sandeen   ext4 extents: rem...
2010
2011
2012
2013
  		ext_debug("%u cached by %u:%u:%llu
  ",
  				block,
  				cex->ec_block, cex->ec_len, cex->ec_start);
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
2014
  		ret = 1;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2015
  	}
2ec0ae3ac   Theodore Ts'o   ext4: Fix race in...
2016
  errout:
77f4135f2   Vivek Haldar   ext4: count hits/...
2017
2018
2019
2020
  	if (!ret)
  		sbi->extent_cache_misses++;
  	else
  		sbi->extent_cache_hits++;
d8990240d   Aditya Kali   ext4: add some tr...
2021
  	trace_ext4_ext_in_cache(inode, block, ret);
2ec0ae3ac   Theodore Ts'o   ext4: Fix race in...
2022
2023
  	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
  	return ret;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2024
2025
2026
  }
  
  /*
a4bb6b64e   Allison Henderson   ext4: enable "pun...
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
   * ext4_ext_in_cache()
   * Checks to see if the given block is in the cache.
   * If it is, the cached extent is stored in the given
   * extent pointer.
   *
   * @inode: The file's inode
   * @block: The block to look for in the cache
   * @ex:    Pointer where the cached extent will be stored
   *         if it contains block
   *
   * Return 0 if cache is invalid; 1 if the cache is valid
   */
  static int
  ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
  			struct ext4_extent *ex)
  {
  	struct ext4_ext_cache cex;
  	int ret = 0;
  
  	if (ext4_ext_check_cache(inode, block, &cex)) {
  		ex->ee_block = cpu_to_le32(cex.ec_block);
  		ext4_ext_store_pblock(ex, cex.ec_start);
  		ex->ee_len = cpu_to_le16(cex.ec_len);
  		ret = 1;
  	}
  
  	return ret;
  }
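/*
 * Illustrative sketch (not part of the original file): a userspace model
 * of the single-entry extent cache used by the two helpers above.  One
 * cached range (block, len, start) is remembered; a lookup succeeds only
 * when the queried block falls inside it, and len == 0 marks the cache as
 * invalid.  The spinlock and the hit/miss counters are omitted.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct demo_cache {
	unsigned int block;		/* first cached logical block */
	unsigned int len;		/* 0 means the cache is invalid */
	unsigned long long start;	/* physical start (0 for a hole) */
};

static void demo_put(struct demo_cache *c, unsigned int block,
		     unsigned int len, unsigned long long start)
{
	c->block = block;
	c->len = len;
	c->start = start;
}

static bool demo_lookup(const struct demo_cache *c, unsigned int block,
			struct demo_cache *out)
{
	if (c->len == 0)
		return false;				/* nothing cached */
	if (block < c->block || block >= c->block + c->len)
		return false;				/* outside cached range */
	*out = *c;
	return true;
}

int main(void)
{
	struct demo_cache cache = { 0, 0, 0 }, hit;

	demo_put(&cache, 100, 16, 4096);	/* cache a mapped range */
	printf("lookup 110: %d\n", demo_lookup(&cache, 110, &hit));	/* 1 */
	printf("lookup 200: %d\n", demo_lookup(&cache, 200, &hit));	/* 0 */
	return 0;
}
#endif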
  
  
  /*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
2058
2059
   * ext4_ext_rm_idx:
   * removes index from the index block.
a86c61812   Alex Tomas   [PATCH] ext3: add...
2060
   */
1d03ec984   Aneesh Kumar K.V   ext4: Fix sparse...
2061
  static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
a86c61812   Alex Tomas   [PATCH] ext3: add...
2062
2063
  			struct ext4_ext_path *path)
  {
a86c61812   Alex Tomas   [PATCH] ext3: add...
2064
  	int err;
f65e6fba1   Alex Tomas   [PATCH] ext4: 48b...
2065
  	ext4_fsblk_t leaf;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2066
2067
2068
  
  	/* free index block */
  	path--;
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
2069
  	leaf = ext4_idx_pblock(path->p_idx);
273df556b   Frank Mayhar   ext4: Convert BUG...
2070
2071
2072
2073
  	if (unlikely(path->p_hdr->eh_entries == 0)) {
  		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
  		return -EIO;
  	}
7e0289766   Avantika Mathur   [PATCH] ext4: if ...
2074
2075
  	err = ext4_ext_get_access(handle, inode, path);
  	if (err)
a86c61812   Alex Tomas   [PATCH] ext3: add...
2076
  		return err;
0e1147b00   Robin Dong   ext4: add action ...
2077
2078
2079
2080
2081
2082
  
  	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
  		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
  		len *= sizeof(struct ext4_extent_idx);
  		memmove(path->p_idx, path->p_idx + 1, len);
  	}
e8546d061   Marcin Slusarz   ext4: le*_add_cpu...
2083
  	le16_add_cpu(&path->p_hdr->eh_entries, -1);
7e0289766   Avantika Mathur   [PATCH] ext4: if ...
2084
2085
  	err = ext4_ext_dirty(handle, inode, path);
  	if (err)
a86c61812   Alex Tomas   [PATCH] ext3: add...
2086
  		return err;
2ae021076   Mingming Cao   [PATCH] ext4: blk...
2087
2088
  	ext_debug("index is empty, remove it, free block %llu
  ", leaf);
d8990240d   Aditya Kali   ext4: add some tr...
2089
  	trace_ext4_ext_rm_idx(inode, leaf);
7dc576158   Peter Huewe   ext4: Fix sparse ...
2090
  	ext4_free_blocks(handle, inode, NULL, leaf, 1,
e6362609b   Theodore Ts'o   ext4: call ext4_f...
2091
  			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2092
2093
2094
2095
  	return err;
  }
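/*
 * Illustrative sketch (not part of the original file): the in-memory part
 * of removing an index entry, as done above before the index block itself
 * is freed.  Entries after the removed slot are shifted down with
 * memmove() and the entry count is decremented; journaling and block
 * freeing are left out.
 */
#if 0
#include <stdio.h>
#include <string.h>

struct demo_idx {
	unsigned int ei_block;		/* first logical block covered */
	unsigned long long leaf;	/* block number of the child block */
};

static void demo_rm_idx(struct demo_idx *idx, unsigned int *nr, unsigned int i)
{
	if (i + 1 < *nr)	/* not the last entry: close the gap */
		memmove(&idx[i], &idx[i + 1], (*nr - i - 1) * sizeof(idx[0]));
	(*nr)--;
}

int main(void)
{
	struct demo_idx idx[3] = { { 0, 500 }, { 100, 501 }, { 200, 502 } };
	unsigned int nr = 3;

	demo_rm_idx(idx, &nr, 1);	/* drop the middle index entry */
	printf("%u entries, second covers %u\n", nr, idx[1].ei_block);	/* 2, 200 */
	return 0;
}
#endif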
  
  /*
ee12b6306   Mingming Cao   ext4: journal cre...
2096
2097
2098
2099
2100
   * ext4_ext_calc_credits_for_single_extent:
   * This routine returns the maximum number of credits needed to insert
   * an extent into the extent tree.
   * When passing the actual path, the caller should calculate the credits
   * under i_data_sem.
a86c61812   Alex Tomas   [PATCH] ext3: add...
2101
   */
525f4ed8d   Mingming Cao   ext4: journal cre...
2102
  int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
a86c61812   Alex Tomas   [PATCH] ext3: add...
2103
2104
  						struct ext4_ext_path *path)
  {
a86c61812   Alex Tomas   [PATCH] ext3: add...
2105
  	if (path) {
ee12b6306   Mingming Cao   ext4: journal cre...
2106
  		int depth = ext_depth(inode);
f3bd1f3fa   Mingming Cao   ext4: journal cre...
2107
  		int ret = 0;
ee12b6306   Mingming Cao   ext4: journal cre...
2108

a86c61812   Alex Tomas   [PATCH] ext3: add...
2109
  		/* probably there is space in leaf? */
a86c61812   Alex Tomas   [PATCH] ext3: add...
2110
  		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
ee12b6306   Mingming Cao   ext4: journal cre...
2111
  				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
a86c61812   Alex Tomas   [PATCH] ext3: add...
2112

ee12b6306   Mingming Cao   ext4: journal cre...
2113
2114
2115
2116
2117
  			/*
  			 *  There is some space in the leaf tree, no
  			 *  need to account for the leaf block credit
  			 *
  			 *  bitmaps and block group descriptor blocks
df3ab1707   Tao Ma   ext4: fix the com...
2118
  			 *  and other metadata blocks still need to be
ee12b6306   Mingming Cao   ext4: journal cre...
2119
2120
  			 *  accounted.
  			 */
525f4ed8d   Mingming Cao   ext4: journal cre...
2121
  			/* 1 bitmap, 1 block group descriptor */
ee12b6306   Mingming Cao   ext4: journal cre...
2122
  			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
5887e98b6   Aneesh Kumar K.V   ext4: Calculate r...
2123
  			return ret;
ee12b6306   Mingming Cao   ext4: journal cre...
2124
2125
  		}
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
2126

525f4ed8d   Mingming Cao   ext4: journal cre...
2127
  	return ext4_chunk_trans_blocks(inode, nrblocks);
ee12b6306   Mingming Cao   ext4: journal cre...
2128
  }
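/*
 * Illustrative sketch (not part of the original file): the shape of the
 * credit estimate above.  When the leaf already has a free slot, only the
 * bitmap, the group descriptor and the per-sb metadata blocks are charged;
 * otherwise the full per-chunk estimate is used.  "meta_blocks" and
 * "chunk_trans_blocks" are stand-ins for EXT4_META_TRANS_BLOCKS() and
 * ext4_chunk_trans_blocks(); the numbers in main() are made up.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static int demo_single_extent_credits(bool leaf_has_room, int meta_blocks,
				      int chunk_trans_blocks)
{
	if (leaf_has_room)
		return 2 + meta_blocks;	/* 1 bitmap + 1 group descriptor + metadata */
	return chunk_trans_blocks;	/* worst case: the tree may split */
}

int main(void)
{
	printf("leaf has room: %d credits\n",
	       demo_single_extent_credits(true, 6, 20));	/* 8 */
	printf("leaf is full:  %d credits\n",
	       demo_single_extent_credits(false, 6, 20));	/* 20 */
	return 0;
}
#endif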
a86c61812   Alex Tomas   [PATCH] ext3: add...
2129

ee12b6306   Mingming Cao   ext4: journal cre...
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
  /*
   * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
   *
   * If nrblocks fit in a single extent (chunk flag is 1), then in the
   * worst case each tree level's index/leaf needs to be changed, and if
   * the tree splits due to inserting a new extent, then the old tree's
   * index/leaf blocks need to be updated too.
   *
   * If the nrblocks are discontiguous, they could cause
   * the whole tree to split more than once, but this is really rare.
   */
525f4ed8d   Mingming Cao   ext4: journal cre...
2141
  int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
ee12b6306   Mingming Cao   ext4: journal cre...
2142
2143
2144
  {
  	int index;
  	int depth = ext_depth(inode);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2145

ee12b6306   Mingming Cao   ext4: journal cre...
2146
2147
2148
2149
  	if (chunk)
  		index = depth * 2;
  	else
  		index = depth * 3;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2150

ee12b6306   Mingming Cao   ext4: journal cre...
2151
  	return index;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2152
2153
2154
  }
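/*
 * Illustrative sketch (not part of the original file): the index-credit
 * arithmetic above as a standalone helper.  Following the comment before
 * the function, a contiguous chunk (chunk != 0) is charged two blocks per
 * tree level, a discontiguous request three per level; "depth" is the
 * current extent-tree depth.
 */
#if 0
#include <stdio.h>

static int demo_index_trans_blocks(int depth, int chunk)
{
	return chunk ? depth * 2 : depth * 3;
}

int main(void)
{
	printf("depth 2, contiguous:    %d\n", demo_index_trans_blocks(2, 1));	/* 4 */
	printf("depth 2, discontiguous: %d\n", demo_index_trans_blocks(2, 0));	/* 6 */
	return 0;
}
#endif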
  
  static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
0aa060000   Theodore Ts'o   ext4: teach ext4_...
2155
2156
2157
  			      struct ext4_extent *ex,
  			      ext4_fsblk_t *partial_cluster,
  			      ext4_lblk_t from, ext4_lblk_t to)
a86c61812   Alex Tomas   [PATCH] ext3: add...
2158
  {
0aa060000   Theodore Ts'o   ext4: teach ext4_...
2159
  	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
a2df2a634   Amit Arora   fallocate support...
2160
  	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
0aa060000   Theodore Ts'o   ext4: teach ext4_...
2161
  	ext4_fsblk_t pblk;
e6362609b   Theodore Ts'o   ext4: call ext4_f...
2162
  	int flags = EXT4_FREE_BLOCKS_FORGET;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2163

c9de560de   Alex Tomas   ext4: Add multi b...
2164
  	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
e6362609b   Theodore Ts'o   ext4: call ext4_f...
2165
  		flags |= EXT4_FREE_BLOCKS_METADATA;
0aa060000   Theodore Ts'o   ext4: teach ext4_...
2166
2167
2168
2169
2170
2171
2172
2173
  	/*
  	 * For bigalloc file systems, we never free a partial cluster
  	 * at the beginning of the extent.  Instead, we make a note
  	 * that we tried freeing the cluster, and check to see if we
  	 * need to free it on a subsequent call to ext4_remove_blocks,
  	 * or at the end of the ext4_truncate() operation.
  	 */
  	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
d8990240d   Aditya Kali   ext4: add some tr...
2174
  	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
0aa060000   Theodore Ts'o   ext4: teach ext4_...
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
  	/*
  	 * If we have a partial cluster, and it's different from the
  	 * cluster of the last block, we need to explicitly free the
  	 * partial cluster here.
  	 */
  	pblk = ext4_ext_pblock(ex) + ee_len - 1;
  	if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
  		ext4_free_blocks(handle, inode, NULL,
  				 EXT4_C2B(sbi, *partial_cluster),
  				 sbi->s_cluster_ratio, flags);
  		*partial_cluster = 0;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
2187
2188
2189
  #ifdef EXTENTS_STATS
  	{
  		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
  		spin_lock(&sbi->s_ext_stats_lock);
  		sbi->s_ext_blocks += ee_len;
  		sbi->s_ext_extents++;
  		if (ee_len < sbi->s_ext_min)
  			sbi->s_ext_min = ee_len;
  		if (ee_len > sbi->s_ext_max)
  			sbi->s_ext_max = ee_len;
  		if (ext_depth(inode) > sbi->s_depth_max)
  			sbi->s_depth_max = ext_depth(inode);
  		spin_unlock(&sbi->s_ext_stats_lock);
  	}
  #endif
  	if (from >= le32_to_cpu(ex->ee_block)
a2df2a634   Amit Arora   fallocate support...
2203
  	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
a86c61812   Alex Tomas   [PATCH] ext3: add...
2204
  		/* tail removal */
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
2205
  		ext4_lblk_t num;
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
2206

a2df2a634   Amit Arora   fallocate support...
2207
  		num = le32_to_cpu(ex->ee_block) + ee_len - from;
0aa060000   Theodore Ts'o   ext4: teach ext4_...
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
  		pblk = ext4_ext_pblock(ex) + ee_len - num;
  		ext_debug("free last %u blocks starting %llu
  ", num, pblk);
  		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
  		/*
  		 * If the block range to be freed didn't start at the
  		 * beginning of a cluster, and we removed the entire
  		 * extent, save the partial cluster here, since we
  		 * might need to delete if we determine that the
  		 * truncate operation has removed all of the blocks in
  		 * the cluster.
  		 */
  		if (pblk & (sbi->s_cluster_ratio - 1) &&
  		    (ee_len == num))
  			*partial_cluster = EXT4_B2C(sbi, pblk);
  		else
  			*partial_cluster = 0;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2225
  	} else if (from == le32_to_cpu(ex->ee_block)
a2df2a634   Amit Arora   fallocate support...
2226
  		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
d583fb87a   Allison Henderson   ext4: punch out e...
2227
2228
2229
2230
2231
2232
2233
2234
2235
  		/* head removal */
  		ext4_lblk_t num;
  		ext4_fsblk_t start;
  
  		num = to - from;
  		start = ext4_ext_pblock(ex);
  
  		ext_debug("free first %u blocks starting %llu
  ", num, start);
ee90d57e2   H Hartley Sweeten   ext4: quiet spars...
2236
  		ext4_free_blocks(handle, inode, NULL, start, num, flags);
d583fb87a   Allison Henderson   ext4: punch out e...
2237

a86c61812   Alex Tomas   [PATCH] ext3: add...
2238
  	} else {
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
2239
2240
2241
2242
  		printk(KERN_INFO "strange request: removal(2) "
  				"%u-%u from %u:%u
  ",
  				from, to, le32_to_cpu(ex->ee_block), ee_len);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2243
2244
2245
  	}
  	return 0;
  }
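/*
 * Illustrative sketch (not part of the original file): the bigalloc
 * bookkeeping above, in userspace form.  With "ratio" blocks per cluster,
 * a freed tail that does not start on a cluster boundary leaves a partial
 * cluster behind; its cluster number is remembered (0 meaning "none") so a
 * later call, or the end of truncate, can decide whether to free it.  The
 * kernel uses a power-of-two mask and shift for EXT4_B2C(); plain modulo
 * and division are used here.
 */
#if 0
#include <stdio.h>

static unsigned long long demo_partial_cluster(unsigned long long first_freed_pblk,
					       int freed_whole_extent,
					       unsigned int ratio)
{
	/* remember the cluster only if the freed range starts mid-cluster
	 * and the whole extent went away, as in ext4_remove_blocks() */
	if ((first_freed_pblk % ratio) != 0 && freed_whole_extent)
		return first_freed_pblk / ratio;	/* EXT4_B2C() */
	return 0;
}

int main(void)
{
	/* cluster ratio 16: freeing a whole extent that starts at physical
	 * block 1030 leaves cluster 64 (blocks 1024..1039) partially used */
	printf("partial cluster: %llu\n", demo_partial_cluster(1030, 1, 16));	/* 64 */
	return 0;
}
#endif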
d583fb87a   Allison Henderson   ext4: punch out e...
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
  
  /*
   * ext4_ext_rm_leaf() removes the extents associated with the
   * blocks appearing between "start" and "end", and splits the extents
   * if "start" and "end" appear in the same extent.
   *
   * @handle: The journal handle
   * @inode:  The file's inode
   * @path:   The path to the leaf
   * @start:  The first block to remove
   * @end:    The last block to remove
   */
a86c61812   Alex Tomas   [PATCH] ext3: add...
2258
2259
  static int
  ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
0aa060000   Theodore Ts'o   ext4: teach ext4_...
2260
2261
  		 struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
  		 ext4_lblk_t start, ext4_lblk_t end)
a86c61812   Alex Tomas   [PATCH] ext3: add...
2262
  {
0aa060000   Theodore Ts'o   ext4: teach ext4_...
2263
  	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2264
2265
2266
  	int err = 0, correct_index = 0;
  	int depth = ext_depth(inode), credits;
  	struct ext4_extent_header *eh;
750c9c47a   Dmitry Monakhov   ext4: remove mess...
2267
  	ext4_lblk_t a, b;
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
2268
2269
  	unsigned num;
  	ext4_lblk_t ex_ee_block;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2270
  	unsigned short ex_ee_len;
a2df2a634   Amit Arora   fallocate support...
2271
  	unsigned uninitialized = 0;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2272
  	struct ext4_extent *ex;
c29c0ae7f   Alex Tomas   ext4: Make extent...
2273
  	/* the header must be checked already in ext4_ext_remove_space() */
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
2274
2275
  	ext_debug("truncate since %u in leaf
  ", start);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2276
2277
2278
  	if (!path[depth].p_hdr)
  		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
  	eh = path[depth].p_hdr;
273df556b   Frank Mayhar   ext4: Convert BUG...
2279
2280
2281
2282
  	if (unlikely(path[depth].p_hdr == NULL)) {
  		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
  		return -EIO;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
2283
2284
2285
2286
  	/* find where to start removing */
  	ex = EXT_LAST_EXTENT(eh);
  
  	ex_ee_block = le32_to_cpu(ex->ee_block);
a2df2a634   Amit Arora   fallocate support...
2287
  	ex_ee_len = ext4_ext_get_actual_len(ex);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2288

d8990240d   Aditya Kali   ext4: add some tr...
2289
  	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2290
2291
  	while (ex >= EXT_FIRST_EXTENT(eh) &&
  			ex_ee_block + ex_ee_len > start) {
a41f20716   Aneesh Kumar K.V   ext4: Avoid corru...
2292
2293
2294
2295
2296
  
  		if (ext4_ext_is_uninitialized(ex))
  			uninitialized = 1;
  		else
  			uninitialized = 0;
553f90089   Mingming   ext4: Show unwrit...
2297
2298
2299
  		ext_debug("remove ext %u:[%d]%d
  ", ex_ee_block,
  			 uninitialized, ex_ee_len);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2300
2301
2302
  		path[depth].p_ext = ex;
  
  		a = ex_ee_block > start ? ex_ee_block : start;
d583fb87a   Allison Henderson   ext4: punch out e...
2303
2304
  		b = ex_ee_block+ex_ee_len - 1 < end ?
  			ex_ee_block+ex_ee_len - 1 : end;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2305
2306
2307
  
  		ext_debug("  border %u:%u
  ", a, b);
d583fb87a   Allison Henderson   ext4: punch out e...
2308
2309
2310
2311
2312
2313
  		/* If this extent is beyond the end of the hole, skip it */
  		if (end <= ex_ee_block) {
  			ex--;
  			ex_ee_block = le32_to_cpu(ex->ee_block);
  			ex_ee_len = ext4_ext_get_actual_len(ex);
  			continue;
750c9c47a   Dmitry Monakhov   ext4: remove mess...
2314
2315
2316
2317
2318
2319
  		} else if (b != ex_ee_block + ex_ee_len - 1) {
  			EXT4_ERROR_INODE(inode,"  bad truncate %u:%u
  ",
  					 start, end);
  			err = -EIO;
  			goto out;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2320
2321
  		} else if (a != ex_ee_block) {
  			/* remove tail of the extent */
750c9c47a   Dmitry Monakhov   ext4: remove mess...
2322
  			num = a - ex_ee_block;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2323
2324
  		} else {
  			/* remove whole extent: excellent! */
a86c61812   Alex Tomas   [PATCH] ext3: add...
2325
  			num = 0;
a86c61812   Alex Tomas   [PATCH] ext3: add...
2326
  		}
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
					 a, b);
		if (err)
			goto out;

		if (num == 0)
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);

  		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);
		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
  			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we don't have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
  		} else
  			*partial_cluster = 0;

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
  	}
  
  	if (correct_index && eh->eh_entries)
  		err = ext4_ext_correct_indexes(handle, inode, path);
  	/*
	 * If there is still an entry in the leaf node, check to see if
  	 * it references the partial cluster.  This is the only place
  	 * where it could; if it doesn't, we can free the cluster.
  	 */
  	if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
  	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
  	     *partial_cluster)) {
  		int flags = EXT4_FREE_BLOCKS_FORGET;
  
  		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
  			flags |= EXT4_FREE_BLOCKS_METADATA;
  
  		ext4_free_blocks(handle, inode, NULL,
  				 EXT4_C2B(sbi, *partial_cluster),
  				 sbi->s_cluster_ratio, flags);
  		*partial_cluster = 0;
  	}
  	/* if this leaf is free, then we should
  	 * remove it from index block above */
  	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
  		err = ext4_ext_rm_idx(handle, inode, path + depth);
  
  out:
  	return err;
  }
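
/*
 * Illustrative sketch, not kernel code: the journal-credit estimate used by
 * ext4_ext_rm_leaf() above, redone in plain C so the arithmetic in the
 * "3 for leaf, sb, and inode ..." comment is easy to check.  blocks_per_group
 * and quota_credits are stand-ins for EXT4_BLOCKS_PER_GROUP() and
 * EXT4_MAXQUOTAS_TRANS_BLOCKS().
 */
static unsigned int rm_leaf_credits_sketch(unsigned int ex_len,
					   unsigned int blocks_per_group,
					   unsigned int tree_depth,
					   int first_extent_in_leaf,
					   unsigned int quota_credits)
{
	/* 3 (leaf, sb, inode) + 2 (bitmap + group desc) for two groups = 7,
	 * plus two more credits per extra block group the extent may span */
	unsigned int credits = 7 + 2 * (ex_len / blocks_per_group);

	if (first_extent_in_leaf)	/* index blocks may need correction */
		credits += tree_depth + 1;
	return credits + quota_credits;
}
/* Example: a 100000-block extent, 32768 blocks per group, depth-2 tree,
 * first extent in its leaf, 12 quota credits:
 * 7 + 2*3 + (2 + 1) + 12 = 28 credits. */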
  
  /*
   * ext4_ext_more_to_rm:
   * returns 1 if current index has to be freed (even partial)
 */
static int
  ext4_ext_more_to_rm(struct ext4_ext_path *path)
  {
  	BUG_ON(path->p_idx == NULL);
  
  	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
  		return 0;
  
  	/*
	 * if truncate on deeper level happened, it wasn't partial,
  	 * so we have to consider current index for truncation
  	 */
  	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
  		return 0;
  	return 1;
  }

static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	ext4_fsblk_t partial_cluster = 0;
	handle_t *handle;
	int i, err;

	ext_debug("truncate since %u\n", start);
  
  	/* probably first extent we're gonna free will be last in block */
  	handle = ext4_journal_start(inode, depth + 1);
  	if (IS_ERR(handle))
  		return PTR_ERR(handle);
again:
	ext4_ext_invalidate_cache(inode);

	trace_ext4_ext_remove_space(inode, start, depth);

  	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_depth = depth;
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	i = err = 0;
  
  	while (i >= 0 && err == 0) {
  		if (i == depth) {
  			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial_cluster, start,
					       EXT_MAX_BLOCKS - 1);
			/* root level has p_bh == NULL, brelse() eats this */
  			brelse(path[i].p_bh);
  			path[i].p_bh = NULL;
  			i--;
  			continue;
  		}
  
  		/* this is index block */
  		if (!path[i].p_hdr) {
  			ext_debug("initialize header
  ");
  			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
a86c61812   Alex Tomas   [PATCH] ext3: add...
2499
  		}
  		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
  			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
  			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
  			ext_debug("init index ptr: hdr 0x%p, num %d
  ",
  				  path[i].p_hdr,
  				  le16_to_cpu(path[i].p_hdr->eh_entries));
  		} else {
			/* we were already here, see at next index */
  			path[i].p_idx--;
  		}
  
  		ext_debug("level %d - index, first 0x%p, cur 0x%p
  ",
  				i, EXT_FIRST_INDEX(path[i].p_hdr),
  				path[i].p_idx);
  		if (ext4_ext_more_to_rm(path + i)) {
  			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
			if (!bh) {
  				/* should we reset i_size? */
  				err = -EIO;
  				break;
  			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
  							depth - i - 1)) {
  				err = -EIO;
  				break;
  			}
  			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
  			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
  			i++;
  		} else {
			/* we finished processing this index, go up */
  			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * leaf removal (ext4_ext_rm_leaf()) */
  				err = ext4_ext_rm_idx(handle, inode, path + i);
  			}
			/* root level has p_bh == NULL, brelse() eats this */
  			brelse(path[i].p_bh);
  			path[i].p_bh = NULL;
  			i--;
  			ext_debug("return to level %d
  ", i);
  		}
  	}
  	trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
  			path->p_hdr->eh_entries);
  	/* If we still have something in the partial cluster and we have removed
  	 * even the first extent, then we should free the blocks in the partial
  	 * cluster as well. */
  	if (partial_cluster && path->p_hdr->eh_entries == 0) {
  		int flags = EXT4_FREE_BLOCKS_FORGET;
  
  		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
  			flags |= EXT4_FREE_BLOCKS_METADATA;
  
  		ext4_free_blocks(handle, inode, NULL,
  				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
  				 EXT4_SB(sb)->s_cluster_ratio, flags);
  		partial_cluster = 0;
  	}
  	/* TODO: flexible tree reduction should be here */
  	if (path->p_hdr->eh_entries == 0) {
  		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
  			err = ext4_ext_dirty(handle, inode, path);
  		}
  	}
  out:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (err == -EAGAIN)
		goto again;
  	ext4_journal_stop(handle);
  
  	return err;
  }
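
/*
 * Illustrative sketch, not kernel code: the shape of the iterative,
 * right-to-left depth-first walk that ext4_ext_remove_space() performs with
 * path[] as an explicit stack and 'i' as the current level.  struct toy_node
 * and its fields are hypothetical; only the descend/process/pop pattern is
 * the point.
 */
struct toy_node {
	int is_leaf;
	int nchildren;
	struct toy_node *child[4];
};

static void remove_space_style_walk(struct toy_node *root,
				    void (*process_leaf)(struct toy_node *))
{
	struct toy_node *path[8];	/* assumes depth < 8, like a fixed path[] */
	int cursor[8];			/* next child to visit, per level */
	int i = 0;

	path[0] = root;
	cursor[0] = root->is_leaf ? 0 : root->nchildren - 1;
	while (i >= 0) {
		if (path[i]->is_leaf) {
			process_leaf(path[i]);		/* like ext4_ext_rm_leaf() */
			i--;				/* pop back to the index   */
		} else if (cursor[i] >= 0) {
			/* descend into the rightmost unvisited child */
			path[i + 1] = path[i]->child[cursor[i]];
			cursor[i]--;			/* this child is consumed  */
			i++;
			cursor[i] = path[i]->is_leaf ? 0
						     : path[i]->nchildren - 1;
		} else {
			i--;				/* index exhausted, pop    */
		}
	}
}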
  
  /*
   * called at mount time
   */
  void ext4_ext_init(struct super_block *sb)
  {
  	/*
  	 * possible initialization would be here
  	 */
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
  #ifdef AGGRESSIVE_TEST
  		printk(", aggressive tests");
  #endif
  #ifdef CHECK_BINSEARCH
  		printk(", check binsearch");
  #endif
  #ifdef EXTENTS_STATS
  		printk(", stats");
  #endif
  		printk("
  ");
90576c0b9   Theodore Ts'o   ext4, jbd2: Drop ...
2623
  #endif
  #ifdef EXTENTS_STATS
  		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
  		EXT4_SB(sb)->s_ext_min = 1 << 30;
  		EXT4_SB(sb)->s_ext_max = 0;
  #endif
  	}
  }
  
  /*
   * called at umount time
   */
  void ext4_ext_release(struct super_block *sb)
  {
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
  		return;
  
  #ifdef EXTENTS_STATS
  	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
  		struct ext4_sb_info *sbi = EXT4_SB(sb);
  		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)
  ",
  			sbi->s_ext_blocks, sbi->s_ext_extents,
  			sbi->s_ext_blocks / sbi->s_ext_extents);
  		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu
  ",
  			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
  	}
  #endif
  }
  /* FIXME!! we need to try to merge to left or right after zero-out  */
  static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
  {
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
  }
  /*
   * used by extent splitting.
   */
  #define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
  					due to ENOSPC */
  #define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
  #define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
  
  /*
   * ext4_split_extent_at() splits an extent at given block.
   *
   * @handle: the journal handle
   * @inode: the file inode
   * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flag: indicates whether the extent can be zeroed out if the split
 *		 fails, and the states (initialized or uninitialized) of the
 *		 two new extents.
   * @flags: flags used to insert new extent to extent tree.
   *
   *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
 * states of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed, and the extent is just marked.
   *
   * return 0 on success.
   */
  static int ext4_split_extent_at(handle_t *handle,
  			     struct inode *inode,
  			     struct ext4_ext_path *path,
  			     ext4_lblk_t split,
  			     int split_flag,
  			     int flags)
  {
  	ext4_fsblk_t newblock;
  	ext4_lblk_t ee_block;
  	struct ext4_extent *ex, newex, orig_ex;
  	struct ext4_extent *ex2 = NULL;
  	unsigned int ee_len, depth;
  	int err = 0;
  
  	ext_debug("ext4_split_extents_at: inode %lu, logical"
  		"block %llu
  ", inode->i_ino, (unsigned long long)split);
  
  	ext4_ext_show_leaf(inode, path);
  
  	depth = ext_depth(inode);
  	ex = path[depth].p_ext;
  	ee_block = le32_to_cpu(ex->ee_block);
  	ee_len = ext4_ext_get_actual_len(ex);
  	newblock = split - ee_block + ext4_ext_pblock(ex);
  
  	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
  
  	err = ext4_ext_get_access(handle, inode, path + depth);
  	if (err)
  		goto out;
  
  	if (split == ee_block) {
  		/*
  		 * case b: block @split is the block that the extent begins with
  		 * then we just change the state of the extent, and splitting
  		 * is not needed.
  		 */
  		if (split_flag & EXT4_EXT_MARK_UNINIT2)
  			ext4_ext_mark_uninitialized(ex);
  		else
  			ext4_ext_mark_initialized(ex);
  
  		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
  			ext4_ext_try_to_merge(inode, path, ex);
  
  		err = ext4_ext_dirty(handle, inode, path + depth);
  		goto out;
  	}
  
  	/* case a */
  	memcpy(&orig_ex, ex, sizeof(orig_ex));
  	ex->ee_len = cpu_to_le16(split - ee_block);
  	if (split_flag & EXT4_EXT_MARK_UNINIT1)
  		ext4_ext_mark_uninitialized(ex);
  
  	/*
  	 * path may lead to new leaf, not to original leaf any more
  	 * after ext4_ext_insert_extent() returns,
  	 */
  	err = ext4_ext_dirty(handle, inode, path + depth);
  	if (err)
  		goto fix_extent_len;
  
  	ex2 = &newex;
  	ex2->ee_block = cpu_to_le32(split);
  	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
  	ext4_ext_store_pblock(ex2, newblock);
  	if (split_flag & EXT4_EXT_MARK_UNINIT2)
  		ext4_ext_mark_uninitialized(ex2);
  
  	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
  	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
  		err = ext4_ext_zeroout(inode, &orig_ex);
  		if (err)
  			goto fix_extent_len;
  		/* update the extent length and mark as initialized */
		ex->ee_len = cpu_to_le16(ee_len);
  		ext4_ext_try_to_merge(inode, path, ex);
  		err = ext4_ext_dirty(handle, inode, path + depth);
  		goto out;
  	} else if (err)
  		goto fix_extent_len;
  
  out:
  	ext4_ext_show_leaf(inode, path);
  	return err;
  
  fix_extent_len:
  	ex->ee_len = orig_ex.ee_len;
  	ext4_ext_dirty(handle, inode, path + depth);
  	return err;
  }
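
/*
 * Illustrative sketch, not kernel code: the arithmetic ext4_split_extent_at()
 * performs when a real split is needed (case a).  Plain integers stand in for
 * the little-endian on-disk fields, and no tree insertion or zeroout fallback
 * is modelled.
 */
struct toy_extent {
	unsigned int lblk;		/* first logical block  */
	unsigned long long pblk;	/* first physical block */
	unsigned short len;		/* number of blocks     */
};

static void split_extent_at_sketch(const struct toy_extent *ex,
				   unsigned int split,
				   struct toy_extent *left,
				   struct toy_extent *right)
{
	/* caller guarantees ee_block < split < ee_block + ee_len */
	left->lblk = ex->lblk;
	left->pblk = ex->pblk;
	left->len  = (unsigned short)(split - ex->lblk);

	right->lblk = split;
	right->pblk = ex->pblk + (split - ex->lblk);	/* 'newblock' above */
	right->len  = (unsigned short)(ex->len - (split - ex->lblk));
}
/* Example: {lblk 100, pblk 5000, len 40} split at 112 yields
 * left {100, 5000, 12} and right {112, 5012, 28}. */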
  
  /*
 * ext4_split_extent() splits an extent and marks the extent which is covered
 * by @map as split_flag indicates
 *
 * It may result in splitting the extent into multiple extents (up to three)
 * There are three possibilities:
 *   a> There is no split required
 *   b> Splits in two extents: Split is happening at either end of the extent
 *   c> Splits in three extents: Someone is splitting in middle of the extent
   *
   */
  static int ext4_split_extent(handle_t *handle,
  			      struct inode *inode,
  			      struct ext4_ext_path *path,
  			      struct ext4_map_blocks *map,
  			      int split_flag,
  			      int flags)
  {
  	ext4_lblk_t ee_block;
  	struct ext4_extent *ex;
  	unsigned int ee_len, depth;
  	int err = 0;
  	int uninitialized;
  	int split_flag1, flags1;
  
  	depth = ext_depth(inode);
  	ex = path[depth].p_ext;
  	ee_block = le32_to_cpu(ex->ee_block);
  	ee_len = ext4_ext_get_actual_len(ex);
  	uninitialized = ext4_ext_is_uninitialized(ex);
  
  	if (map->m_lblk + map->m_len < ee_block + ee_len) {
  		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
  			      EXT4_EXT_MAY_ZEROOUT : 0;
  		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
  		if (uninitialized)
  			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
  				       EXT4_EXT_MARK_UNINIT2;
  		err = ext4_split_extent_at(handle, inode, path,
  				map->m_lblk + map->m_len, split_flag1, flags1);
  		if (err)
  			goto out;
  	}
  
  	ext4_ext_drop_refs(path);
  	path = ext4_ext_find_extent(inode, map->m_lblk, path);
  	if (IS_ERR(path))
  		return PTR_ERR(path);
  
  	if (map->m_lblk >= ee_block) {
  		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
  			      EXT4_EXT_MAY_ZEROOUT : 0;
  		if (uninitialized)
  			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
  		if (split_flag & EXT4_EXT_MARK_UNINIT2)
  			split_flag1 |= EXT4_EXT_MARK_UNINIT2;
  		err = ext4_split_extent_at(handle, inode, path,
  				map->m_lblk, split_flag1, flags);
  		if (err)
  			goto out;
  	}
  
  	ext4_ext_show_leaf(inode, path);
  out:
  	return err ? err : map->m_len;
  }
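
/*
 * Illustrative sketch, not kernel code: which cuts ext4_split_extent() makes
 * for a mapped range lying inside one extent.  It mirrors the two calls
 * above: first a split at the end of the mapped range (if the extent runs
 * past it), then one at its start (if the extent begins before it), so an
 * extent ends up in one, two or three pieces.
 */
static int split_extent_cuts_sketch(unsigned int ee_block, unsigned int ee_len,
				    unsigned int m_lblk, unsigned int m_len,
				    unsigned int cuts[2])
{
	int n = 0;

	if (m_lblk + m_len < ee_block + ee_len)
		cuts[n++] = m_lblk + m_len;	/* cut after the mapped tail  */
	if (m_lblk > ee_block)
		cuts[n++] = m_lblk;		/* cut before the mapped head */
	return n;				/* resulting extents: n + 1   */
}
/* Example: extent [100, 140), map [110, 120) -> cuts at 120 and 110,
 * i.e. three extents: [100,110) [110,120) [120,140). */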
  #define EXT4_EXT_ZERO_LEN 7
  /*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is uninitialized.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
 */
  static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int ee_len, depth;
	int allocated;
	int err = 0;
	int split_flag = 0;
  
  	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
  		"block %llu, max_blocks %u
  ", inode->i_ino,
e35fd6609   Theodore Ts'o   ext4: Add new abs...
2897
  		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (map->m_lblk - ee_block);

  	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
  
  	/* Pre-conditions */
  	BUG_ON(!ext4_ext_is_uninitialized(ex));
  	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
  
  	/*
  	 * Attempt to transfer newly initialized blocks from the currently
  	 * uninitialized extent to its left neighbor. This is much cheaper
  	 * than an insertion followed by a merge as those involve costly
  	 * memmove() calls. This is the common case in steady state for
  	 * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
  	 * writes.
  	 *
  	 * Limitations of the current logic:
  	 *  - L1: we only deal with writes at the start of the extent.
  	 *    The approach could be extended to writes at the end
  	 *    of the extent but this scenario was deemed less common.
  	 *  - L2: we do not deal with writes covering the whole extent.
  	 *    This would require removing the extent if the transfer
  	 *    is possible.
  	 *  - L3: we only attempt to merge with an extent stored in the
  	 *    same extent tree node.
  	 */
  	if ((map->m_lblk == ee_block) &&	/*L1*/
  		(map->m_len < ee_len) &&	/*L2*/
  		(ex > EXT_FIRST_EXTENT(eh))) {	/*L3*/
  		struct ext4_extent *prev_ex;
  		ext4_lblk_t prev_lblk;
  		ext4_fsblk_t prev_pblk, ee_pblk;
  		unsigned int prev_len, write_len;
  
  		prev_ex = ex - 1;
  		prev_lblk = le32_to_cpu(prev_ex->ee_block);
  		prev_len = ext4_ext_get_actual_len(prev_ex);
  		prev_pblk = ext4_ext_pblock(prev_ex);
  		ee_pblk = ext4_ext_pblock(ex);
  		write_len = map->m_len;
  
  		/*
  		 * A transfer of blocks from 'ex' to 'prev_ex' is allowed
  		 * upon those conditions:
  		 * - C1: prev_ex is initialized,
  		 * - C2: prev_ex is logically abutting ex,
  		 * - C3: prev_ex is physically abutting ex,
  		 * - C4: prev_ex can receive the additional blocks without
  		 *   overflowing the (initialized) length limit.
  		 */
  		if ((!ext4_ext_is_uninitialized(prev_ex)) &&		/*C1*/
  			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
  			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
  			(prev_len < (EXT_INIT_MAX_LEN - write_len))) {	/*C4*/
  			err = ext4_ext_get_access(handle, inode, path + depth);
  			if (err)
  				goto out;
  
  			trace_ext4_ext_convert_to_initialized_fastpath(inode,
  				map, ex, prev_ex);
  
  			/* Shift the start of ex by 'write_len' blocks */
  			ex->ee_block = cpu_to_le32(ee_block + write_len);
  			ext4_ext_store_pblock(ex, ee_pblk + write_len);
  			ex->ee_len = cpu_to_le16(ee_len - write_len);
  			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
  
  			/* Extend prev_ex by 'write_len' blocks */
  			prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
  
  			/* Mark the block containing both extents as dirty */
  			ext4_ext_dirty(handle, inode, path + depth);
  
  			/* Update path to point to the right extent */
  			path[depth].p_ext = prev_ex;
  
  			/* Result: number of initialized blocks past m_lblk */
  			allocated = write_len;
  			goto out;
  		}
  	}
  	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

	/* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
  	if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
  	    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
  		err = ext4_ext_zeroout(inode, ex);
		if (err)
			goto out;
  
  		err = ext4_ext_get_access(handle, inode, path + depth);
  		if (err)
  			goto out;
  		ext4_ext_mark_initialized(ex);
  		ext4_ext_try_to_merge(inode, path, ex);
  		err = ext4_ext_dirty(handle, inode, path + depth);
  		goto out;
56055d3ae   Amit Arora   write support for...
3011
  	}
  	/*
  	 * four cases:
  	 * 1. split the extent into three extents.
  	 * 2. split the extent into two extents, zeroout the first half.
  	 * 3. split the extent into two extents, zeroout the second half.
	 * 4. split the extent into two extents without zeroout.
  	 */
  	split_map.m_lblk = map->m_lblk;
  	split_map.m_len = map->m_len;
  
  	if (allocated > map->m_len) {
  		if (allocated <= EXT4_EXT_ZERO_LEN &&
  		    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
  			/* case 3 */
  			zero_ex.ee_block =
					 cpu_to_le32(map->m_lblk);
			zero_ex.ee_len = cpu_to_le16(allocated);
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			if (err)
				goto out;
  			split_map.m_lblk = map->m_lblk;
  			split_map.m_len = allocated;
  		} else if ((map->m_lblk - ee_block + map->m_len <
  			   EXT4_EXT_ZERO_LEN) &&
  			   (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
  			/* case 2 */
  			if (map->m_lblk != ee_block) {
  				zero_ex.ee_block = ex->ee_block;
  				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
  							ee_block);
  				ext4_ext_store_pblock(&zero_ex,
  						      ext4_ext_pblock(ex));
  				err = ext4_ext_zeroout(inode, &zero_ex);
  				if (err)
  					goto out;
  			}
			split_map.m_lblk = ee_block;
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
		}
	}
  
  	allocated = ext4_split_extent(handle, inode, path,
  				       &split_map, split_flag, 0);
  	if (allocated < 0)
  		err = allocated;
  out:
  	return err ? err : allocated;
  }
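
/*
 * Illustrative sketch, not kernel code: the case selection that
 * ext4_ext_convert_to_initialized() makes above before calling
 * ext4_split_extent().  The value 7 mirrors EXT4_EXT_ZERO_LEN and
 * 'may_zeroout' stands for the EXT4_EXT_MAY_ZEROOUT test on split_flag.
 */
enum convert_case_sketch {
	CONVERT_WHOLE_ZEROOUT,	/* small extent: zero it all, no split        */
	CONVERT_CASE3,		/* zero from map start to the extent's end    */
	CONVERT_CASE2,		/* zero from the extent's start to map start  */
	CONVERT_PLAIN_SPLIT	/* cases 1/4: split without any zeroout       */
};

static enum convert_case_sketch
pick_convert_case(unsigned int ee_block, unsigned int ee_len,
		  unsigned int m_lblk, unsigned int m_len,
		  unsigned int allocated, int may_zeroout)
{
	const unsigned int zero_len = 7;	/* EXT4_EXT_ZERO_LEN */

	if (ee_len <= 2 * zero_len && may_zeroout)
		return CONVERT_WHOLE_ZEROOUT;
	if (allocated > m_len && allocated <= zero_len && may_zeroout)
		return CONVERT_CASE3;
	if (allocated > m_len &&
	    m_lblk - ee_block + m_len < zero_len && may_zeroout)
		return CONVERT_CASE2;
	return CONVERT_PLAIN_SPLIT;
}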

/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO is used to write
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the uninitialized
 * extent into multiple initialized/uninitialized extents (up to three)
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be uninitialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent split. To prevent ENOSPC from occurring at IO
 * completion, we need to split the uninitialized extent before the IO is
 * submitted. The uninitialized extent passed in at this time will be split
 * into three uninitialized extents (at most). After the IO completes, the
 * part being written will be converted to initialized by the end_io callback
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of the uninitialized extent to be written on success.
 */
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path *path,
					int flags)
{
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;
  
  	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
  		"block %llu, max_blocks %u
  ", inode->i_ino,
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3101
  		(unsigned long long)map->m_lblk, map->m_len);
  
  	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
  		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
  	depth = ext_depth(inode);
  	ex = path[depth].p_ext;
  	ee_block = le32_to_cpu(ex->ee_block);
  	ee_len = ext4_ext_get_actual_len(ex);
  	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
  	split_flag |= EXT4_EXT_MARK_UNINIT2;
  	flags |= EXT4_GET_BLOCKS_PRE_IO;
  	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
  }

static int ext4_convert_unwritten_extents_endio(handle_t *handle,
					      struct inode *inode,
					      struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	int depth;
	int err = 0;
  
  	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)le32_to_cpu(ex->ee_block),
		ext4_ext_get_actual_len(ex));
  	err = ext4_ext_get_access(handle, inode, path + depth);
  	if (err)
  		goto out;
  	/* first mark the extent as initialized */
  	ext4_ext_mark_initialized(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
  	ext4_ext_try_to_merge(inode, path, ex);
  	/* Mark modified extent as dirty */
  	err = ext4_ext_dirty(handle, inode, path + depth);
  out:
  	ext4_ext_show_leaf(inode, path);
  	return err;
  }
  static void unmap_underlying_metadata_blocks(struct block_device *bdev,
  			sector_t block, int count)
  {
  	int i;
  	for (i = 0; i < count; i++)
		unmap_underlying_metadata(bdev, block + i);
  }
  /*
   * Handle EOFBLOCKS_FL flag, clearing it if necessary
   */
  static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
			      ext4_lblk_t lblk,
			      struct ext4_ext_path *path,
			      unsigned int len)
{
	int i, depth;
	struct ext4_extent_header *eh;
	struct ext4_extent *last_ex;

  
  	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
  		return 0;
  
  	depth = ext_depth(inode);
  	eh = path[depth].p_hdr;
  
  	if (unlikely(!eh->eh_entries)) {
  		EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
  				 "EOFBLOCKS_FL set");
  		return -EIO;
  	}
  	last_ex = EXT_LAST_EXTENT(eh);
  	/*
  	 * We should clear the EOFBLOCKS_FL flag if we are writing the
  	 * last block in the last extent in the file.  We test this by
  	 * first checking to see if the caller to
  	 * ext4_ext_get_blocks() was interested in the last block (or
  	 * a block beyond the last block) in the current extent.  If
  	 * this turns out to be false, we can bail out from this
  	 * function immediately.
  	 */
	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
  	    ext4_ext_get_actual_len(last_ex))
  		return 0;
  	/*
  	 * If the caller does appear to be planning to write at or
  	 * beyond the end of the current extent, we then test to see
  	 * if the current extent is the last extent in the file, by
  	 * checking to make sure it was reached via the rightmost node
  	 * at each level of the tree.
  	 */
  	for (i = depth-1; i >= 0; i--)
  		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
  			return 0;
  	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
  	return ext4_mark_inode_dirty(handle, inode);
  }
  /**
   * ext4_find_delalloc_range: find delayed allocated block in the given range.
   *
   * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
   * whether there are any buffers marked for delayed allocation. It returns '1'
   * on the first delalloc'ed buffer head found. If no buffer head in the given
   * range is marked for delalloc, it returns 0.
   * lblk_start should always be <= lblk_end.
   * search_hint_reverse is to indicate that searching in reverse from lblk_end to
   * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
   * block sooner). This is useful when blocks are truncated sequentially from
   * lblk_start towards lblk_end.
   */
  static int ext4_find_delalloc_range(struct inode *inode,
  				    ext4_lblk_t lblk_start,
  				    ext4_lblk_t lblk_end,
  				    int search_hint_reverse)
  {
  	struct address_space *mapping = inode->i_mapping;
  	struct buffer_head *head, *bh = NULL;
  	struct page *page;
  	ext4_lblk_t i, pg_lblk;
  	pgoff_t index;

	if (!test_opt(inode->i_sb, DELALLOC))
		return 0;

	/* reverse search won't work if fs block size is less than page size */
  	if (inode->i_blkbits < PAGE_CACHE_SHIFT)
  		search_hint_reverse = 0;
  
  	if (search_hint_reverse)
  		i = lblk_end;
  	else
  		i = lblk_start;
  
  	index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
  
  	while ((i >= lblk_start) && (i <= lblk_end)) {
  		page = find_get_page(mapping, index);
		if (!page)
			goto nextpage;

  		if (!page_has_buffers(page))
  			goto nextpage;
  
  		head = page_buffers(page);
  		if (!head)
  			goto nextpage;
  
  		bh = head;
  		pg_lblk = index << (PAGE_CACHE_SHIFT -
  						inode->i_blkbits);
  		do {
  			if (unlikely(pg_lblk < lblk_start)) {
  				/*
  				 * This is possible when fs block size is less
  				 * than page size and our cluster starts/ends in
  				 * middle of the page. So we need to skip the
  				 * initial few blocks till we reach the 'lblk'
  				 */
  				pg_lblk++;
  				continue;
  			}
  			/* Check if the buffer is delayed allocated and that it
  			 * is not yet mapped. (when da-buffers are mapped during
  			 * their writeout, their da_mapped bit is set.)
  			 */
  			if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
				page_cache_release(page);
  				trace_ext4_find_delalloc_range(inode,
  						lblk_start, lblk_end,
  						search_hint_reverse,
  						1, i);
  				return 1;
  			}
  			if (search_hint_reverse)
  				i--;
  			else
  				i++;
  		} while ((i >= lblk_start) && (i <= lblk_end) &&
  				((bh = bh->b_this_page) != head));
  nextpage:
  		if (page)
  			page_cache_release(page);
  		/*
  		 * Move to next page. 'i' will be the first lblk in the next
  		 * page.
  		 */
  		if (search_hint_reverse)
  			index--;
  		else
  			index++;
  		i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
  	}
  	trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
  					search_hint_reverse, 0, 0);
  	return 0;
  }
  
  int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
  			       int search_hint_reverse)
  {
  	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  	ext4_lblk_t lblk_start, lblk_end;
  	lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
  	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
  
  	return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
  					search_hint_reverse);
  }
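
/*
 * Illustrative sketch, not kernel code: the cluster rounding used just above.
 * On bigalloc file systems sbi->s_cluster_ratio (blocks per cluster) is a
 * power of two, so masking with (ratio - 1) rounds a logical block down to
 * its cluster boundary.
 */
static void cluster_bounds_sketch(unsigned int lblk, unsigned int cluster_ratio,
				  unsigned int *first, unsigned int *last)
{
	*first = lblk & ~(cluster_ratio - 1);	/* round down              */
	*last  = *first + cluster_ratio - 1;	/* last block of cluster   */
}
/* Example: lblk 37 with a 16-block cluster maps to blocks [32, 47]. */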
  
  /**
   * Determines how many complete clusters (out of those specified by the 'map')
   * are under delalloc and were reserved quota for.
   * This function is called when we are writing out the blocks that were
   * originally written with their allocation delayed, but then the space was
   * allocated using fallocate() before the delayed allocation could be resolved.
   * The cases to look for are:
 * ('=' indicates delayed allocated blocks
   *  '-' indicates non-delayed allocated blocks)
   * (a) partial clusters towards beginning and/or end outside of allocated range
   *     are not delalloc'ed.
   *	Ex:
   *	|----c---=|====c====|====c====|===-c----|
   *	         |++++++ allocated ++++++|
   *	==> 4 complete clusters in above example
   *
   * (b) partial cluster (outside of allocated range) towards either end is
   *     marked for delayed allocation. In this case, we will exclude that
   *     cluster.
   *	Ex:
   *	|----====c========|========c========|
   *	     |++++++ allocated ++++++|
   *	==> 1 complete clusters in above example
   *
   *	Ex:
   *	|================c================|
   *            |++++++ allocated ++++++|
   *	==> 0 complete clusters in above example
   *
   * The ext4_da_update_reserve_space will be called only if we
   * determine here that there were some "entire" clusters that span
   * this 'allocated' range.
   * In the non-bigalloc case, this function will just end up returning num_blks
   * without ever calling ext4_find_delalloc_range.
   */
  static unsigned int
  get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
  			   unsigned int num_blks)
  {
  	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
  	ext4_lblk_t lblk_from, lblk_to, c_offset;
  	unsigned int allocated_clusters = 0;
  
  	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
  	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
  
  	/* max possible clusters for this allocation */
  	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
  	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
  	/* Check towards left side */
  	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
  	if (c_offset) {
  		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
  		lblk_to = lblk_from + c_offset - 1;
  
  		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
  			allocated_clusters--;
  	}
  
  	/* Now check towards right. */
  	c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
  	if (allocated_clusters && c_offset) {
  		lblk_from = lblk_start + num_blks;
  		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
  
  		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
  			allocated_clusters--;
  	}
  
  	return allocated_clusters;
  }
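
/*
 * Illustrative sketch, not kernel code: the cluster counting performed by
 * get_reserved_cluster_alloc() above, with the two ext4_find_delalloc_range()
 * lookups replaced by booleans saying whether the partial cluster outside the
 * allocated range at either edge is marked for delayed allocation.  A
 * power-of-two cluster_ratio is assumed, as on bigalloc file systems.
 */
static unsigned int reserved_clusters_sketch(unsigned int lblk_start,
					     unsigned int num_blks,
					     unsigned int cluster_ratio,
					     int left_edge_is_delalloc,
					     int right_edge_is_delalloc)
{
	unsigned int first = lblk_start / cluster_ratio;
	unsigned int last = (lblk_start + num_blks - 1) / cluster_ratio;
	unsigned int clusters = last - first + 1;	/* max possible */

	/* partial cluster hanging off the start of the range */
	if ((lblk_start & (cluster_ratio - 1)) && left_edge_is_delalloc)
		clusters--;
	/* partial cluster hanging off the end of the range */
	if (((lblk_start + num_blks) & (cluster_ratio - 1)) &&
	    clusters && right_edge_is_delalloc)
		clusters--;
	return clusters;
}
/* Example: cluster_ratio 4, range [2, 14) touches clusters 0-3; if the blocks
 * before block 2 in cluster 0 are delalloc'ed, that cluster is excluded -> 3. */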
  static int
  ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			struct ext4_ext_path *path, int flags,
			unsigned int allocated, ext4_fsblk_t newblock)
{
	int ret = 0;
	int err = 0;
  	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
						    newblock);

  	/* get_block() before submit the IO, split the extent */
	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
		ret = ext4_split_unwritten_extents(handle, inode, map,
						   path, flags);
		/*
		 * Flag the inode (non-aio case) or end_io struct (aio case)
		 * that this IO needs conversion to written when the IO is
		 * completed
		 */
		if (io)
			ext4_set_io_unwritten_flag(inode, io);
		else
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
  		goto out;
  	}
	/* IO end_io complete, convert the filled extent to written */
	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
		ret = ext4_convert_unwritten_extents_endio(handle, inode,
							path);
		if (ret >= 0) {
			ext4_update_inode_fsync_trans(handle, inode, 1);
			err = check_eofblocks_fl(handle, inode, map->m_lblk,
						 path, map->m_len);
		} else
			err = ret;
  		goto out2;
  	}
  	/* buffered IO case */
  	/*
  	 * repeat fallocate creation request
  	 * we already have an unwritten extent
  	 */
  	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
  		goto map_out;
  
  	/* buffered READ or buffered write_begin() lookup */
  	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
  		/*
  		 * We have blocks reserved already.  We
  		 * return allocated blocks so that delalloc
  		 * won't do block reservation for us.  But
  		 * the buffer head will be unmapped so that
  		 * a read from the block returns 0s.
  		 */
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out1;
	}

	/* buffered write, writepage time, convert*/
	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
  out:
  	if (ret <= 0) {
  		err = ret;
  		goto out2;
  	} else
  		allocated = ret;
	map->m_flags |= EXT4_MAP_NEW;
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra block
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
	if (allocated > map->m_len) {
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
					newblock + map->m_len,
					allocated - map->m_len);
		allocated = map->m_len;
	}
  
  	/*
  	 * If we have done fallocate with the offset that is already
  	 * delayed allocated, we would have block reservation
  	 * and quota reservation done in the delayed write path.
  	 * But fallocate would have already updated quota and block
  	 * count for this offset. So cancel these reservation
  	 */
7b415bf60   Aditya Kali   ext4: Fix bigallo...
3490
3491
3492
3493
3494
3495
3496
3497
3498
  	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
  		unsigned int reserved_clusters;
  		reserved_clusters = get_reserved_cluster_alloc(inode,
  				map->m_lblk, map->m_len);
  		if (reserved_clusters)
  			ext4_da_update_reserve_space(inode,
  						     reserved_clusters,
  						     0);
  	}
5f634d064   Aneesh Kumar K.V   ext4: Fix quota a...
3499

0031462b5   Mingming Cao   ext4: Split unini...
3500
  map_out:
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3501
  	map->m_flags |= EXT4_MAP_MAPPED;
a4e5d88b1   Dmitry Monakhov   ext4: update EOFB...
3502
3503
3504
3505
3506
3507
  	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
  		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
  					 map->m_len);
  		if (err < 0)
  			goto out2;
  	}
0031462b5   Mingming Cao   ext4: Split unini...
3508
  out1:
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3509
3510
  	if (allocated > map->m_len)
  		allocated = map->m_len;
0031462b5   Mingming Cao   ext4: Split unini...
3511
  	ext4_ext_show_leaf(inode, path);
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3512
3513
  	map->m_pblk = newblock;
  	map->m_len = allocated;
0031462b5   Mingming Cao   ext4: Split unini...
3514
3515
3516
3517
3518
3519
3520
  out2:
  	if (path) {
  		ext4_ext_drop_refs(path);
  		kfree(path);
  	}
  	return err ? err : allocated;
  }
58590b06d   Theodore Ts'o   ext4: fix EOFBLOC...
3521

0031462b5   Mingming Cao   ext4: Split unini...
3522
  /*
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3523
3524
3525
   * get_implied_cluster_alloc - check to see if the requested
   * allocation (in the map structure) overlaps with a cluster already
   * allocated in an extent.
d8990240d   Aditya Kali   ext4: add some tr...
3526
   *	@sb	The filesystem superblock structure
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
   *	@map	The requested lblk->pblk mapping
   *	@ex	The extent structure which might contain an implied
   *			cluster allocation
   *
   * This function is called by ext4_ext_map_blocks() after we failed to
   * find blocks that were already in the inode's extent tree.  Hence,
   * we know that the beginning of the requested region cannot overlap
   * the extent from the inode's extent tree.  There are three cases we
   * want to catch.  The first is this case:
   *
   *		 |--- cluster # N--|
   *    |--- extent ---|	|---- requested region ---|
   *			|==========|
   *
   * The second case that we need to test for is this one:
   *
   *   |--------- cluster # N ----------------|
   *	   |--- requested region --|   |------- extent ----|
   *	   |=======================|
   *
   * The third case is when the requested region lies between two extents
   * within the same cluster:
   *          |------------- cluster # N-------------|
   * |----- ex -----|                  |---- ex_right ----|
   *                  |------ requested region ------|
   *                  |================|
   *
   * In each of the above cases, we need to set the map->m_pblk and
   * map->m_len so they correspond to the extent labelled as
   * "|====|" from cluster #N, since it is already in use for data in
   * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
   * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
   * as a new "allocated" block region.  Otherwise, we will return 0 and
   * ext4_ext_map_blocks() will then allocate one or more new clusters
   * by calling ext4_mb_new_blocks().
   */
d8990240d   Aditya Kali   ext4: add some tr...
3563
  static int get_implied_cluster_alloc(struct super_block *sb,
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3564
3565
3566
3567
  				     struct ext4_map_blocks *map,
  				     struct ext4_extent *ex,
  				     struct ext4_ext_path *path)
  {
d8990240d   Aditya Kali   ext4: add some tr...
3568
  	struct ext4_sb_info *sbi = EXT4_SB(sb);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3569
3570
  	ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
  	ext4_lblk_t ex_cluster_start, ex_cluster_end;
14d7f3efe   Curt Wohlgemuth   ext4: remove unus...
3571
  	ext4_lblk_t rr_cluster_start;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
  	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
  	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
  	unsigned short ee_len = ext4_ext_get_actual_len(ex);
  
  	/* The extent passed in that we are trying to match */
  	ex_cluster_start = EXT4_B2C(sbi, ee_block);
  	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
  
  	/* The requested region passed into ext4_map_blocks() */
  	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
  
  	if ((rr_cluster_start == ex_cluster_end) ||
  	    (rr_cluster_start == ex_cluster_start)) {
  		if (rr_cluster_start == ex_cluster_end)
  			ee_start += ee_len - 1;
  		map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
  			c_offset;
  		map->m_len = min(map->m_len,
  				 (unsigned) sbi->s_cluster_ratio - c_offset);
  		/*
  		 * Check for and handle this case:
  		 *
  		 *   |--------- cluster # N-------------|
  		 *		       |------- extent ----|
  		 *	   |--- requested region ---|
  		 *	   |===========|
  		 */
  
  		if (map->m_lblk < ee_block)
  			map->m_len = min(map->m_len, ee_block - map->m_lblk);
  
  		/*
  		 * Check for the case where there is already another allocated
  		 * block to the right of 'ex' but before the end of the cluster.
  		 *
  		 *          |------------- cluster # N-------------|
  		 * |----- ex -----|                  |---- ex_right ----|
  		 *                  |------ requested region ------|
  		 *                  |================|
  		 */
  		if (map->m_lblk > ee_block) {
  			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
  			map->m_len = min(map->m_len, next - map->m_lblk);
  		}
d8990240d   Aditya Kali   ext4: add some tr...
3616
3617
  
  		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3618
3619
  		return 1;
  	}
d8990240d   Aditya Kali   ext4: add some tr...
3620
3621
  
  	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3622
3623
3624
3625
3626
  	return 0;
  }
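
  /*
   * Illustrative user-space sketch (not part of this file): the cluster
   * arithmetic that get_implied_cluster_alloc() above relies on, reduced to
   * a standalone program.  CLUSTER_RATIO and B2C() are hypothetical
   * stand-ins for sbi->s_cluster_ratio and EXT4_B2C(); the numbers only
   * demonstrate the "requested region shares a cluster with an existing
   * extent" check (the first case in the comment above).  Wrapped in #if 0
   * so it does not affect this translation unit.
   */
  #if 0	/* build separately, e.g. gcc -o b2c b2c.c */
  #include <stdio.h>

  #define CLUSTER_RATIO	4			/* blocks per cluster (example) */
  #define B2C(b)	((b) / CLUSTER_RATIO)	/* block -> cluster, like EXT4_B2C() */

  int main(void)
  {
  	unsigned int ee_block = 10, ee_len = 4;	/* existing extent: blocks 10..13 */
  	unsigned int rr_lblk = 14;		/* requested region starts here */
  	unsigned int ex_cluster_start = B2C(ee_block);
  	unsigned int ex_cluster_end = B2C(ee_block + ee_len - 1);
  	unsigned int rr_cluster_start = B2C(rr_lblk);

  	if (rr_cluster_start == ex_cluster_start ||
  	    rr_cluster_start == ex_cluster_end)
  		printf("implied cluster alloc: reuse cluster %u\n",
  		       rr_cluster_start);
  	else
  		printf("no overlap: allocate a new cluster\n");
  	return 0;
  }
  #endif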
  
  
  /*
f5ab0d1f8   Mingming Cao   ext4: Fix BUG whe...
3627
3628
3629
   * Block allocation/map/preallocation routine for extent-based files
   *
   *
c278bfece   Aneesh Kumar K.V   ext4: Make ext4_g...
3630
   * Need to be called with
0e855ac8b   Aneesh Kumar K.V   ext4: Convert tru...
3631
3632
   * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
   * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
f5ab0d1f8   Mingming Cao   ext4: Fix BUG whe...
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
   *
   * return > 0, number of blocks already mapped/allocated
   *          if create == 0 and these are pre-allocated blocks
   *          	buffer head is unmapped
   *          otherwise blocks are mapped
   *
   * return = 0, if plain look up failed (blocks have not been allocated)
   *          buffer head is unmapped
   *
   * return < 0, error case.
c278bfece   Aneesh Kumar K.V   ext4: Make ext4_g...
3643
   */
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3644
3645
  int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
  			struct ext4_map_blocks *map, int flags)
a86c61812   Alex Tomas   [PATCH] ext3: add...
3646
3647
  {
  	struct ext4_ext_path *path = NULL;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3648
3649
  	struct ext4_extent newex, *ex, *ex2;
  	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
0562e0bad   Jiaying Zhang   ext4: add more tr...
3650
  	ext4_fsblk_t newblock = 0;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3651
3652
  	int free_on_err = 0, err = 0, depth, ret;
  	unsigned int allocated = 0, offset = 0;
81fdbb4a8   Yongqiang Yang   ext4: move variab...
3653
  	unsigned int allocated_clusters = 0;
e861304b8   Allison Henderson   ext4: add "punch ...
3654
3655
  	unsigned int punched_out = 0;
  	unsigned int result = 0;
c9de560de   Alex Tomas   ext4: Add multi b...
3656
  	struct ext4_allocation_request ar;
8d5d02e6b   Mingming Cao   ext4: async direc...
3657
  	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3658
  	ext4_lblk_t cluster_offset;
a86c61812   Alex Tomas   [PATCH] ext3: add...
3659

84fe3bef5   Mingming   ext4: Compile war...
3660
3661
  	ext_debug("blocks %u/%u requested for inode %lu
  ",
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3662
  		  map->m_lblk, map->m_len, inode->i_ino);
0562e0bad   Jiaying Zhang   ext4: add more tr...
3663
  	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
a86c61812   Alex Tomas   [PATCH] ext3: add...
3664
3665
  
  	/* check in cache */
015861bad   Robin Dong   ext4: avoid waste...
3666
3667
  	if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) &&
  		ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
3668
  		if (!newex.ee_start_lo && !newex.ee_start_hi) {
7b415bf60   Aditya Kali   ext4: Fix bigallo...
3669
3670
3671
  			if ((sbi->s_cluster_ratio > 1) &&
  			    ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
  				map->m_flags |= EXT4_MAP_FROM_CLUSTER;
c21770573   Theodore Ts'o   ext4: Define a ne...
3672
  			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
56055d3ae   Amit Arora   write support for...
3673
3674
3675
3676
  				/*
  				 * block isn't allocated yet and
  				 * user doesn't want to allocate it
  				 */
a86c61812   Alex Tomas   [PATCH] ext3: add...
3677
3678
3679
  				goto out2;
  			}
  			/* we should allocate requested block */
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
3680
  		} else {
a86c61812   Alex Tomas   [PATCH] ext3: add...
3681
  			/* block is already allocated */
7b415bf60   Aditya Kali   ext4: Fix bigallo...
3682
3683
  			if (sbi->s_cluster_ratio > 1)
  				map->m_flags |= EXT4_MAP_FROM_CLUSTER;
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3684
  			newblock = map->m_lblk
8c55e2041   Dave Kleikamp   EXT4: Fix whitespace
3685
  				   - le32_to_cpu(newex.ee_block)
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
3686
  				   + ext4_ext_pblock(&newex);
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
3687
  			/* number of remaining blocks in the extent */
b939e3766   Aneesh Kumar K.V   ext4: Use the ext...
3688
  			allocated = ext4_ext_get_actual_len(&newex) -
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3689
  				(map->m_lblk - le32_to_cpu(newex.ee_block));
a86c61812   Alex Tomas   [PATCH] ext3: add...
3690
  			goto out;
a86c61812   Alex Tomas   [PATCH] ext3: add...
3691
3692
3693
3694
  		}
  	}
  
  	/* find extent for this block */
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3695
  	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
a86c61812   Alex Tomas   [PATCH] ext3: add...
3696
3697
3698
3699
3700
3701
3702
3703
3704
  	if (IS_ERR(path)) {
  		err = PTR_ERR(path);
  		path = NULL;
  		goto out2;
  	}
  
  	depth = ext_depth(inode);
  
  	/*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
3705
3706
  	 * consistent leaf must not be empty;
  	 * this situation is possible, though, _during_ tree modification;
a86c61812   Alex Tomas   [PATCH] ext3: add...
3707
3708
  	 * this is why assert can't be put in ext4_ext_find_extent()
  	 */
273df556b   Frank Mayhar   ext4: Convert BUG...
3709
3710
  	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
  		EXT4_ERROR_INODE(inode, "bad extent address "
f70f362b4   Theodore Ts'o   ext4: Avoid crash...
3711
3712
3713
  				 "lblock: %lu, depth: %d pblock %lld",
  				 (unsigned long) map->m_lblk, depth,
  				 path[depth].p_block);
034fb4c95   Surbhi Palande   ext4: replace BUG...
3714
3715
3716
  		err = -EIO;
  		goto out2;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
3717

7e0289766   Avantika Mathur   [PATCH] ext4: if ...
3718
3719
  	ex = path[depth].p_ext;
  	if (ex) {
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
3720
  		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
3721
  		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
a2df2a634   Amit Arora   fallocate support...
3722
  		unsigned short ee_len;
471d4011a   Suparna Bhattacharya   [PATCH] ext4: uni...
3723
3724
  
  		/*
471d4011a   Suparna Bhattacharya   [PATCH] ext4: uni...
3725
  		 * Uninitialized extents are treated as holes, except that
56055d3ae   Amit Arora   write support for...
3726
  		 * we split out initialized portions during a write.
471d4011a   Suparna Bhattacharya   [PATCH] ext4: uni...
3727
  		 */
a2df2a634   Amit Arora   fallocate support...
3728
  		ee_len = ext4_ext_get_actual_len(ex);
d8990240d   Aditya Kali   ext4: add some tr...
3729
3730
  
  		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
3731
  		/* if found extent covers block, simply return it */
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3732
  		if (in_range(map->m_lblk, ee_block, ee_len)) {
81fdbb4a8   Yongqiang Yang   ext4: move variab...
3733
  			struct ext4_map_blocks punch_map;
0aa060000   Theodore Ts'o   ext4: teach ext4_...
3734
  			ext4_fsblk_t partial_cluster = 0;
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3735
  			newblock = map->m_lblk - ee_block + ee_start;
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
3736
  			/* number of remaining blocks in the extent */
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3737
3738
3739
3740
  			allocated = ee_len - (map->m_lblk - ee_block);
  			ext_debug("%u fit into %u:%d -> %llu
  ", map->m_lblk,
  				  ee_block, ee_len, newblock);
56055d3ae   Amit Arora   write support for...
3741

e861304b8   Allison Henderson   ext4: add "punch ...
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
  			if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
  				/*
  				 * Do not put uninitialized extent
  				 * in the cache
  				 */
  				if (!ext4_ext_is_uninitialized(ex)) {
  					ext4_ext_put_in_cache(inode, ee_block,
  						ee_len, ee_start);
  					goto out;
  				}
  				ret = ext4_ext_handle_uninitialized_extents(
  					handle, inode, map, path, flags,
  					allocated, newblock);
  				return ret;
56055d3ae   Amit Arora   write support for...
3756
  			}
e861304b8   Allison Henderson   ext4: add "punch ...
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
  
  			/*
  			 * Punch out the map length, but only to the
  			 * end of the extent
  			 */
  			punched_out = allocated < map->m_len ?
  				allocated : map->m_len;
  
  			/*
  			 * Since extents need to be converted to
  			 * uninitialized, they must fit in an
  			 * uninitialized extent
  			 */
  			if (punched_out > EXT_UNINIT_MAX_LEN)
  				punched_out = EXT_UNINIT_MAX_LEN;
  
  			punch_map.m_lblk = map->m_lblk;
  			punch_map.m_pblk = newblock;
  			punch_map.m_len = punched_out;
  			punch_map.m_flags = 0;
  
  			/* Check to see if the extent needs to be split */
  			if (punch_map.m_len != ee_len ||
  				punch_map.m_lblk != ee_block) {
  
  				ret = ext4_split_extent(handle, inode,
  				path, &punch_map, 0,
  				EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
  				EXT4_GET_BLOCKS_PRE_IO);
  
  				if (ret < 0) {
  					err = ret;
  					goto out2;
  				}
  				/*
  				 * find extent for the block at
  				 * the start of the hole
  				 */
  				ext4_ext_drop_refs(path);
  				kfree(path);
  
  				path = ext4_ext_find_extent(inode,
  				map->m_lblk, NULL);
  				if (IS_ERR(path)) {
  					err = PTR_ERR(path);
  					path = NULL;
  					goto out2;
  				}
  
  				depth = ext_depth(inode);
  				ex = path[depth].p_ext;
  				ee_len = ext4_ext_get_actual_len(ex);
  				ee_block = le32_to_cpu(ex->ee_block);
  				ee_start = ext4_ext_pblock(ex);
  
  			}
  
  			ext4_ext_mark_uninitialized(ex);
f7d0d3797   Allison Henderson   ext4: punch hole ...
3815
3816
3817
  			ext4_ext_invalidate_cache(inode);
  
  			err = ext4_ext_rm_leaf(handle, inode, path,
0aa060000   Theodore Ts'o   ext4: teach ext4_...
3818
3819
  					       &partial_cluster, map->m_lblk,
  					       map->m_lblk + punched_out);
f7d0d3797   Allison Henderson   ext4: punch hole ...
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
  
  			if (!err && path->p_hdr->eh_entries == 0) {
  				/*
  				 * Punch hole freed all of this sub tree,
  				 * so we need to correct eh_depth
  				 */
  				err = ext4_ext_get_access(handle, inode, path);
  				if (err == 0) {
  					ext_inode_hdr(inode)->eh_depth = 0;
  					ext_inode_hdr(inode)->eh_max =
  					cpu_to_le16(ext4_ext_space_root(
  						inode, 0));
  
  					err = ext4_ext_dirty(
  						handle, inode, path);
  				}
  			}
e861304b8   Allison Henderson   ext4: add "punch ...
3837
3838
  
  			goto out2;
a86c61812   Alex Tomas   [PATCH] ext3: add...
3839
3840
  		}
  	}
7b415bf60   Aditya Kali   ext4: Fix bigallo...
3841
3842
3843
  	if ((sbi->s_cluster_ratio > 1) &&
  	    ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
  		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
a86c61812   Alex Tomas   [PATCH] ext3: add...
3844
  	/*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
3845
  	 * requested block isn't allocated yet;
a86c61812   Alex Tomas   [PATCH] ext3: add...
3846
3847
  	 * we cannot create blocks if the create flag is zero
  	 */
c21770573   Theodore Ts'o   ext4: Define a ne...
3848
  	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
56055d3ae   Amit Arora   write support for...
3849
3850
3851
3852
  		/*
  		 * put the just-found gap into the cache to speed up
  		 * subsequent requests
  		 */
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3853
  		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
a86c61812   Alex Tomas   [PATCH] ext3: add...
3854
3855
  		goto out2;
  	}
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3856

a86c61812   Alex Tomas   [PATCH] ext3: add...
3857
  	/*
c2ea3fde6   Theodore Ts'o   ext4: Remove old ...
3858
  	 * Okay, we need to do block allocation.
63f579335   Andrew Morton   [PATCH] ext4 whit...
3859
  	 */
7b415bf60   Aditya Kali   ext4: Fix bigallo...
3860
  	map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3861
3862
3863
3864
3865
3866
3867
3868
  	newex.ee_block = cpu_to_le32(map->m_lblk);
  	cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
  
  	/*
  	 * If we are doing bigalloc, check to see if the extent returned
  	 * by ext4_ext_find_extent() implies a cluster we can use.
  	 */
  	if (cluster_offset && ex &&
d8990240d   Aditya Kali   ext4: add some tr...
3869
  	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3870
3871
  		ar.len = allocated = map->m_len;
  		newblock = map->m_pblk;
7b415bf60   Aditya Kali   ext4: Fix bigallo...
3872
  		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3873
3874
  		goto got_allocated_blocks;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
3875

c9de560de   Alex Tomas   ext4: Add multi b...
3876
  	/* find neighbour allocated blocks */
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3877
  	ar.lleft = map->m_lblk;
c9de560de   Alex Tomas   ext4: Add multi b...
3878
3879
3880
  	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
  	if (err)
  		goto out2;
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3881
  	ar.lright = map->m_lblk;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3882
3883
  	ex2 = NULL;
  	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
c9de560de   Alex Tomas   ext4: Add multi b...
3884
3885
  	if (err)
  		goto out2;
25d14f983   Amit Arora   ext4: Extent over...
3886

4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3887
3888
3889
  	/* Check if the extent after searching to the right implies a
  	 * cluster we can use. */
  	if ((sbi->s_cluster_ratio > 1) && ex2 &&
d8990240d   Aditya Kali   ext4: add some tr...
3890
  	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3891
3892
  		ar.len = allocated = map->m_len;
  		newblock = map->m_pblk;
7b415bf60   Aditya Kali   ext4: Fix bigallo...
3893
  		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3894
3895
  		goto got_allocated_blocks;
  	}
749269fac   Amit Arora   Change on-disk fo...
3896
3897
3898
3899
3900
3901
  	/*
  	 * See if request is beyond maximum number of blocks we can have in
  	 * a single extent. For an initialized extent this limit is
  	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
  	 * EXT_UNINIT_MAX_LEN.
  	 */
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3902
  	if (map->m_len > EXT_INIT_MAX_LEN &&
c21770573   Theodore Ts'o   ext4: Define a ne...
3903
  	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3904
3905
  		map->m_len = EXT_INIT_MAX_LEN;
  	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
c21770573   Theodore Ts'o   ext4: Define a ne...
3906
  		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3907
  		map->m_len = EXT_UNINIT_MAX_LEN;
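  	/*
  	 * Illustrative numbers: EXT_INIT_MAX_LEN is 32768 blocks and
  	 * EXT_UNINIT_MAX_LEN one block less, so with 4 KiB blocks a single
  	 * extent covers at most 128 MiB (initialized), or 128 MiB - 4 KiB
  	 * (uninitialized).
  	 */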
749269fac   Amit Arora   Change on-disk fo...
3908

e35fd6609   Theodore Ts'o   ext4: Add new abs...
3909
  	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3910
  	newex.ee_len = cpu_to_le16(map->m_len);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3911
  	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
25d14f983   Amit Arora   ext4: Extent over...
3912
  	if (err)
b939e3766   Aneesh Kumar K.V   ext4: Use the ext...
3913
  		allocated = ext4_ext_get_actual_len(&newex);
25d14f983   Amit Arora   ext4: Extent over...
3914
  	else
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3915
  		allocated = map->m_len;
c9de560de   Alex Tomas   ext4: Add multi b...
3916
3917
3918
  
  	/* allocate new block */
  	ar.inode = inode;
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3919
3920
  	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
  	ar.logical = map->m_lblk;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
  	/*
  	 * We calculate the offset from the beginning of the cluster
  	 * for the logical block number, since when we allocate a
  	 * physical cluster, the physical block should start at the
  	 * same offset from the beginning of the cluster.  This is
  	 * needed so that future calls to get_implied_cluster_alloc()
  	 * work correctly.
  	 */
  	offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
  	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
  	ar.goal -= offset;
  	ar.logical -= offset;
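  	/*
  	 * Worked example (illustrative numbers): with s_cluster_ratio = 16
  	 * and map->m_lblk = 35, offset = 35 & 15 = 3, so ar.goal and
  	 * ar.logical are pulled back by 3 blocks to the cluster boundary and
  	 * ar.len is rounded up to whole clusters via EXT4_NUM_B2C().
  	 */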
c9de560de   Alex Tomas   ext4: Add multi b...
3933
3934
3935
3936
3937
  	if (S_ISREG(inode->i_mode))
  		ar.flags = EXT4_MB_HINT_DATA;
  	else
  		/* disable in-core preallocation for non-regular files */
  		ar.flags = 0;
556b27abf   Vivek Haldar   ext4: do not norm...
3938
3939
  	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
  		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
c9de560de   Alex Tomas   ext4: Add multi b...
3940
  	newblock = ext4_mb_new_blocks(handle, &ar, &err);
a86c61812   Alex Tomas   [PATCH] ext3: add...
3941
3942
  	if (!newblock)
  		goto out2;
84fe3bef5   Mingming   ext4: Compile war...
3943
3944
  	ext_debug("allocate new block: goal %llu, found %llu/%u
  ",
498e5f241   Theodore Ts'o   ext4: Change unsi...
3945
  		  ar.goal, newblock, allocated);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3946
  	free_on_err = 1;
7b415bf60   Aditya Kali   ext4: Fix bigallo...
3947
  	allocated_clusters = ar.len;
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3948
3949
3950
  	ar.len = EXT4_C2B(sbi, ar.len) - offset;
  	if (ar.len > allocated)
  		ar.len = allocated;
a86c61812   Alex Tomas   [PATCH] ext3: add...
3951

4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3952
  got_allocated_blocks:
a86c61812   Alex Tomas   [PATCH] ext3: add...
3953
  	/* try to insert new extent into found leaf and return */
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3954
  	ext4_ext_store_pblock(&newex, newblock + offset);
c9de560de   Alex Tomas   ext4: Add multi b...
3955
  	newex.ee_len = cpu_to_le16(ar.len);
8d5d02e6b   Mingming Cao   ext4: async direc...
3956
3957
  	/* Mark uninitialized */
  	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
a2df2a634   Amit Arora   fallocate support...
3958
  		ext4_ext_mark_uninitialized(&newex);
8d5d02e6b   Mingming Cao   ext4: async direc...
3959
  		/*
744692dc0   Jiaying Zhang   ext4: use ext4_ge...
3960
  		 * io_end structure was created for every IO write to an
25985edce   Lucas De Marchi   Fix common misspe...
3961
  		 * uninitialized extent. To avoid unnecessary conversion,
744692dc0   Jiaying Zhang   ext4: use ext4_ge...
3962
  		 * here we flag the IO that really needs the conversion.
5f5249507   Mingming   ext4: skip conver...
3963
  		 * For the non-async direct IO case, flag the inode state
25985edce   Lucas De Marchi   Fix common misspe...
3964
  		 * that we need to perform conversion when IO is done.
8d5d02e6b   Mingming Cao   ext4: async direc...
3965
  		 */
744692dc0   Jiaying Zhang   ext4: use ext4_ge...
3966
  		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
0edeb71dc   Tao Ma   ext4: Create help...
3967
3968
3969
  			if (io)
  				ext4_set_io_unwritten_flag(inode, io);
  			else
19f5fb7ad   Theodore Ts'o   ext4: Use bitops ...
3970
3971
  				ext4_set_inode_state(inode,
  						     EXT4_STATE_DIO_UNWRITTEN);
5f5249507   Mingming   ext4: skip conver...
3972
  		}
744692dc0   Jiaying Zhang   ext4: use ext4_ge...
3973
  		if (ext4_should_dioread_nolock(inode))
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3974
  			map->m_flags |= EXT4_MAP_UNINIT;
8d5d02e6b   Mingming Cao   ext4: async direc...
3975
  	}
c8d46e41b   Jiaying Zhang   ext4: Add flag to...
3976

a4e5d88b1   Dmitry Monakhov   ext4: update EOFB...
3977
3978
3979
3980
  	err = 0;
  	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
  		err = check_eofblocks_fl(handle, inode, map->m_lblk,
  					 path, ar.len);
575a1d4bd   Jiaying Zhang   ext4: free alloca...
3981
3982
3983
  	if (!err)
  		err = ext4_ext_insert_extent(handle, inode, path,
  					     &newex, flags);
4d33b1ef1   Theodore Ts'o   ext4: teach ext4_...
3984
  	if (err && free_on_err) {
7132de744   Maxim Patlasov   ext4: fix i_block...
3985
3986
  		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
  			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
315054f02   Alex Tomas   When ext4_ext_ins...
3987
  		/* free data blocks we just allocated */
c9de560de   Alex Tomas   ext4: Add multi b...
3988
3989
  		/* not a good idea to call discard here directly,
  		 * but otherwise we'd need to call it on every free() */
c2ea3fde6   Theodore Ts'o   ext4: Remove old ...
3990
  		ext4_discard_preallocations(inode);
7dc576158   Peter Huewe   ext4: Fix sparse ...
3991
  		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
7132de744   Maxim Patlasov   ext4: fix i_block...
3992
  				 ext4_ext_get_actual_len(&newex), fb_flags);
a86c61812   Alex Tomas   [PATCH] ext3: add...
3993
  		goto out2;
315054f02   Alex Tomas   When ext4_ext_ins...
3994
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
3995

a86c61812   Alex Tomas   [PATCH] ext3: add...
3996
  	/* previous routine could use block we allocated */
bf89d16f6   Theodore Ts'o   ext4: rename {ext...
3997
  	newblock = ext4_ext_pblock(&newex);
b939e3766   Aneesh Kumar K.V   ext4: Use the ext...
3998
  	allocated = ext4_ext_get_actual_len(&newex);
e35fd6609   Theodore Ts'o   ext4: Add new abs...
3999
4000
4001
  	if (allocated > map->m_len)
  		allocated = map->m_len;
  	map->m_flags |= EXT4_MAP_NEW;
a86c61812   Alex Tomas   [PATCH] ext3: add...
4002

b436b9bef   Jan Kara   ext4: Wait for pr...
4003
  	/*
5f634d064   Aneesh Kumar K.V   ext4: Fix quota a...
4004
4005
4006
  	 * Update reserved blocks/metadata blocks after successful
  	 * block allocation which had been deferred till now.
  	 */
7b415bf60   Aditya Kali   ext4: Fix bigallo...
4007
  	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
81fdbb4a8   Yongqiang Yang   ext4: move variab...
4008
  		unsigned int reserved_clusters;
7b415bf60   Aditya Kali   ext4: Fix bigallo...
4009
  		/*
81fdbb4a8   Yongqiang Yang   ext4: move variab...
4010
  		 * Check how many clusters we had reserved for this allocated range
7b415bf60   Aditya Kali   ext4: Fix bigallo...
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
  		 */
  		reserved_clusters = get_reserved_cluster_alloc(inode,
  						map->m_lblk, allocated);
  		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
  			if (reserved_clusters) {
  				/*
  				 * We have clusters reserved for this range.
  				 * But since we are not doing actual allocation
  				 * and are simply using blocks from previously
  				 * allocated cluster, we should release the
  				 * reservation and not claim quota.
  				 */
  				ext4_da_update_reserve_space(inode,
  						reserved_clusters, 0);
  			}
  		} else {
  			BUG_ON(allocated_clusters < reserved_clusters);
  			/* We will claim quota for all newly allocated blocks. */
  			ext4_da_update_reserve_space(inode, allocated_clusters,
  							1);
  			if (reserved_clusters < allocated_clusters) {
5356f2615   Aditya Kali   ext4: attempt to ...
4032
  				struct ext4_inode_info *ei = EXT4_I(inode);
7b415bf60   Aditya Kali   ext4: Fix bigallo...
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
  				int reservation = allocated_clusters -
  						  reserved_clusters;
  				/*
  				 * It seems we claimed a few clusters outside of
  				 * the range of this allocation. We should give
  				 * them back to the reservation pool. This can
  				 * happen in the following case:
  				 *
  				 * * Suppose s_cluster_ratio is 4 (i.e., each
  				 *   cluster has 4 blocks). Thus, the clusters
  				 *   are [0-3],[4-7],[8-11]...
  				 * * First comes a delayed allocation write for
  				 *   logical blocks 10 & 11. Since there were no
  				 *   previous delayed allocated blocks in the
  				 *   range [8-11], we would reserve 1 cluster
  				 *   for this write.
  				 * * Next comes a write for logical blocks 3 to
  				 *   8. In this case, we will reserve 2 clusters
  				 *   (for [0-3] and [4-7]; not for [8-11], as
  				 *   that range already has delayed allocated
  				 *   blocks). Thus the total of reserved
  				 *   clusters now becomes 3.
  				 * * Now, during the delayed allocation writeout
  				 *   time, we will first write blocks [3-8] and
  				 *   allocate 3 clusters for writing these
  				 *   blocks. We also claim all three of these
  				 *   clusters.
  				 * * When we then come here to write out the
  				 *   blocks [10-11], we would expect to claim
  				 *   the reservation of 1 cluster we had made
  				 *   (and we would claim it, since there are no
  				 *   more delayed allocated blocks in the range
  				 *   [8-11]). But our reserved cluster count has
  				 *   already gone to 0.
  				 *
  				 * Thus, at the writeout step above, when we
  				 * determine that there are still some unwritten
  				 * delayed allocated blocks outside of our
  				 * current block range, we should increment the
  				 * reserved cluster count so that the remaining
  				 * blocks can still be claimed when they are
  				 * finally written.
  				 */
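  				/*
  				 * With the illustrative numbers above: at the
  				 * [3-8] writeout, allocated_clusters = 3 and
  				 * reserved_clusters = 2, so reservation = 1
  				 * cluster is re-added below on behalf of the
  				 * still-delayed blocks [10-11].
  				 */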
5356f2615   Aditya Kali   ext4: attempt to ...
4075
4076
4077
4078
4079
  				dquot_reserve_block(inode,
  						EXT4_C2B(sbi, reservation));
  				spin_lock(&ei->i_block_reservation_lock);
  				ei->i_reserved_data_blocks += reservation;
  				spin_unlock(&ei->i_block_reservation_lock);
7b415bf60   Aditya Kali   ext4: Fix bigallo...
4080
4081
4082
  			}
  		}
  	}
5f634d064   Aneesh Kumar K.V   ext4: Fix quota a...
4083
4084
  
  	/*
b436b9bef   Jan Kara   ext4: Wait for pr...
4085
4086
4087
4088
  	 * Cache the extent and update transaction to commit on fdatasync only
  	 * when it is _not_ an uninitialized extent.
  	 */
  	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
4089
  		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
b436b9bef   Jan Kara   ext4: Wait for pr...
4090
4091
4092
  		ext4_update_inode_fsync_trans(handle, inode, 1);
  	} else
  		ext4_update_inode_fsync_trans(handle, inode, 0);
a86c61812   Alex Tomas   [PATCH] ext3: add...
4093
  out:
e35fd6609   Theodore Ts'o   ext4: Add new abs...
4094
4095
  	if (allocated > map->m_len)
  		allocated = map->m_len;
a86c61812   Alex Tomas   [PATCH] ext3: add...
4096
  	ext4_ext_show_leaf(inode, path);
e35fd6609   Theodore Ts'o   ext4: Add new abs...
4097
4098
4099
  	map->m_flags |= EXT4_MAP_MAPPED;
  	map->m_pblk = newblock;
  	map->m_len = allocated;
a86c61812   Alex Tomas   [PATCH] ext3: add...
4100
4101
4102
4103
4104
  out2:
  	if (path) {
  		ext4_ext_drop_refs(path);
  		kfree(path);
  	}
e861304b8   Allison Henderson   ext4: add "punch ...
4105
4106
  	result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
  			punched_out : allocated;
e7b319e39   Yongqiang Yang   ext4: trace punch...
4107
4108
  	trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
  		newblock, map->m_len, err ? err : result);
e861304b8   Allison Henderson   ext4: add "punch ...
4109
  	return err ? err : result;
a86c61812   Alex Tomas   [PATCH] ext3: add...
4110
  }
cf108bca4   Jan Kara   ext4: Invert the ...
4111
  void ext4_ext_truncate(struct inode *inode)
a86c61812   Alex Tomas   [PATCH] ext3: add...
4112
4113
4114
  {
  	struct address_space *mapping = inode->i_mapping;
  	struct super_block *sb = inode->i_sb;
725d26d3f   Aneesh Kumar K.V   ext4: Introduce e...
4115
  	ext4_lblk_t last_block;
a86c61812   Alex Tomas   [PATCH] ext3: add...
4116
  	handle_t *handle;
189e868fa   Allison Henderson   ext4: fix fsx tru...
4117
  	loff_t page_len;
a86c61812   Alex Tomas   [PATCH] ext3: add...
4118
4119
4120
  	int err = 0;
  
  	/*
3889fd57e   Jiaying Zhang   ext4: flush the i...
4121
4122
4123
4124
4125
4126
  	 * finish any pending end_io work so we won't run the risk of
  	 * converting any truncated blocks to initialized later
  	 */
  	ext4_flush_completed_IO(inode);
  
  	/*
a86c61812   Alex Tomas   [PATCH] ext3: add...
4127
4128
  	 * probably the first extent we're going to free will be the last in the block
  	 */
f3bd1f3fa   Mingming Cao   ext4: journal cre...
4129
  	err = ext4_writepage_trans_blocks(inode);
a86c61812   Alex Tomas   [PATCH] ext3: add...
4130
  	handle = ext4_journal_start(inode, err);
cf108bca4   Jan Kara   ext4: Invert the ...
4131
  	if (IS_ERR(handle))
a86c61812   Alex Tomas   [PATCH] ext3: add...
4132
  		return;
a86c61812   Alex Tomas   [PATCH] ext3: add...
4133

189e868fa   Allison Henderson   ext4: fix fsx tru...
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
  	if (inode->i_size % PAGE_CACHE_SIZE != 0) {
  		page_len = PAGE_CACHE_SIZE -
  			(inode->i_size & (PAGE_CACHE_SIZE - 1));
  
  		err = ext4_discard_partial_page_buffers(handle,
  			mapping, inode->i_size, page_len, 0);
  
  		if (err)
  			goto out_stop;
  	}
a86c61812   Alex Tomas   [PATCH] ext3: add...
4144

9ddfc3dc7   Jan Kara   ext4: Fix lock in...
4145
4146
  	if (ext4_orphan_add(handle, inode))
  		goto out_stop;
0e855ac8b   Aneesh Kumar K.V   ext4: Convert tru...
4147
  	down_write(&EXT4_I(inode)->i_data_sem);
a86c61812   Alex Tomas   [PATCH] ext3: add...
4148
  	ext4_ext_invalidate_cache(inode);
c2ea3fde6   Theodore Ts'o   ext4: Remove old ...
4149
  	ext4_discard_preallocations(inode);
c9de560de   Alex Tomas   ext4: Add multi b...
4150

a86c61812   Alex Tomas   [PATCH] ext3: add...
4151
  	/*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
4152
4153
4154
  	 * TODO: optimization is possible here.
  	 * Probably we need not scan at all,
  	 * because page truncation is enough.
a86c61812   Alex Tomas   [PATCH] ext3: add...
4155
  	 */
a86c61812   Alex Tomas   [PATCH] ext3: add...
4156
4157
4158
4159
4160
4161
4162
  
  	/* we have to know where to truncate from in crash case */
  	EXT4_I(inode)->i_disksize = inode->i_size;
  	ext4_mark_inode_dirty(handle, inode);
  
  	last_block = (inode->i_size + sb->s_blocksize - 1)
  			>> EXT4_BLOCK_SIZE_BITS(sb);
c6a0371cb   Allison Henderson   ext4: remove unne...
4163
  	err = ext4_ext_remove_space(inode, last_block);
a86c61812   Alex Tomas   [PATCH] ext3: add...
4164
4165
  
  	/* In a multi-transaction truncate, we only make the final
56055d3ae   Amit Arora   write support for...
4166
4167
  	 * transaction synchronous.
  	 */
a86c61812   Alex Tomas   [PATCH] ext3: add...
4168
  	if (IS_SYNC(inode))
0390131ba   Frank Mayhar   ext4: Allow ext4 ...
4169
  		ext4_handle_sync(handle);
a86c61812   Alex Tomas   [PATCH] ext3: add...
4170

9ddfc3dc7   Jan Kara   ext4: Fix lock in...
4171
  	up_write(&EXT4_I(inode)->i_data_sem);
f6d2f6b32   Eric Gouriou   ext4: fix unbalan...
4172
4173
  
  out_stop:
a86c61812   Alex Tomas   [PATCH] ext3: add...
4174
  	/*
d0d856e8b   Randy Dunlap   [PATCH] ext4: cle...
4175
  	 * If this was a simple ftruncate() and the file will remain alive,
a86c61812   Alex Tomas   [PATCH] ext3: add...
4176
4177
4178
4179
4180
4181
4182
  	 * then we need to clear up the orphan record which we created above.
  	 * However, if this was a real unlink then we were called by
  	 * ext4_delete_inode(), and we allow that function to clean up the
  	 * orphan info for us.
  	 */
  	if (inode->i_nlink)
  		ext4_orphan_del(handle, inode);
ef7377289   Solofo Ramangalahy   ext4: update ctim...
4183
4184
  	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
  	ext4_mark_inode_dirty(handle, inode);
a86c61812   Alex Tomas   [PATCH] ext3: add...
4185
4186
  	ext4_journal_stop(handle);
  }
fd28784ad   Aneesh Kumar K.V   ext4: Fix falloca...
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
  static void ext4_falloc_update_inode(struct inode *inode,
  				int mode, loff_t new_size, int update_ctime)
  {
  	struct timespec now;
  
  	if (update_ctime) {
  		now = current_fs_time(inode->i_sb);
  		if (!timespec_equal(&inode->i_ctime, &now))
  			inode->i_ctime = now;
  	}
  	/*
  	 * Update only when preallocation was requested beyond
  	 * the file size.
  	 */
cf17fea65   Aneesh Kumar K.V   ext4: Properly up...
4201
4202
4203
4204
4205
  	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
  		if (new_size > i_size_read(inode))
  			i_size_write(inode, new_size);
  		if (new_size > EXT4_I(inode)->i_disksize)
  			ext4_update_i_disksize(inode, new_size);
c8d46e41b   Jiaying Zhang   ext4: Add flag to...
4206
4207
4208
4209
4210
4211
  	} else {
  		/*
  		 * Mark that we allocate beyond EOF so the subsequent truncate
  		 * can proceed even if the new size is the same as i_size.
  		 */
  		if (new_size > i_size_read(inode))
12e9b8920   Dmitry Monakhov   ext4: Use bitops ...
4212
  			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
fd28784ad   Aneesh Kumar K.V   ext4: Fix falloca...
4213
4214
4215
  	}
  
  }
a2df2a634   Amit Arora   fallocate support...
4216
  /*
2fe17c107   Christoph Hellwig   fallocate should ...
4217
   * preallocate space for a file. This implements ext4's fallocate file
a2df2a634   Amit Arora   fallocate support...
4218
4219
4220
4221
4222
   * operation, which gets called from sys_fallocate system call.
   * For block-mapped files, posix_fallocate should fall back to the method
   * of writing zeroes to the required new blocks (the same behavior that is
   * expected for file systems which do not support the fallocate() system call).
   */
2fe17c107   Christoph Hellwig   fallocate should ...
4223
  long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
a2df2a634   Amit Arora   fallocate support...
4224
  {
2fe17c107   Christoph Hellwig   fallocate should ...
4225
  	struct inode *inode = file->f_path.dentry->d_inode;
a2df2a634   Amit Arora   fallocate support...
4226
  	handle_t *handle;
fd28784ad   Aneesh Kumar K.V   ext4: Fix falloca...
4227
  	loff_t new_size;
498e5f241   Theodore Ts'o   ext4: Change unsi...
4228
  	unsigned int max_blocks;
a2df2a634   Amit Arora   fallocate support...
4229
4230
4231
  	int ret = 0;
  	int ret2 = 0;
  	int retries = 0;
a4e5d88b1   Dmitry Monakhov   ext4: update EOFB...
4232
  	int flags;
2ed886852   Theodore Ts'o   ext4: Convert cal...
4233
  	struct ext4_map_blocks map;
a2df2a634   Amit Arora   fallocate support...
4234
4235
4236
4237
4238
4239
  	unsigned int credits, blkbits = inode->i_blkbits;
  
  	/*
  	 * currently supporting (pre)allocate mode for extent-based
  	 * files _only_
  	 */
12e9b8920   Dmitry Monakhov   ext4: Use bitops ...
4240
  	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
a2df2a634   Amit Arora   fallocate support...
4241
  		return -EOPNOTSUPP;
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4242
4243
4244
4245
4246
4247
  	/* Return error if mode is not supported */
  	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
  		return -EOPNOTSUPP;
  
  	if (mode & FALLOC_FL_PUNCH_HOLE)
  		return ext4_punch_hole(file, offset, len);
0562e0bad   Jiaying Zhang   ext4: add more tr...
4248
  	trace_ext4_fallocate_enter(inode, offset, len, mode);
2ed886852   Theodore Ts'o   ext4: Convert cal...
4249
  	map.m_lblk = offset >> blkbits;
fd28784ad   Aneesh Kumar K.V   ext4: Fix falloca...
4250
4251
4252
4253
  	/*
  	 * We can't just convert len to max_blocks; consider, for example,
  	 * blocksize = 4096, offset = 3072 and len = 2048.
  	 */
a2df2a634   Amit Arora   fallocate support...
4254
  	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
2ed886852   Theodore Ts'o   ext4: Convert cal...
4255
  		- map.m_lblk;
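  	/*
  	 * Worked example of the rounding above: with blkbits = 12,
  	 * offset = 3072 and len = 2048, map.m_lblk = 0 and
  	 * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2, so max_blocks = 2,
  	 * whereas len >> blkbits alone would give 0.
  	 */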
a2df2a634   Amit Arora   fallocate support...
4256
  	/*
f3bd1f3fa   Mingming Cao   ext4: journal cre...
4257
  	 * credits to insert 1 extent into extent tree
a2df2a634   Amit Arora   fallocate support...
4258
  	 */
f3bd1f3fa   Mingming Cao   ext4: journal cre...
4259
  	credits = ext4_chunk_trans_blocks(inode, max_blocks);
55bd725aa   Aneesh Kumar K.V   ext4: Fix locking...
4260
  	mutex_lock(&inode->i_mutex);
6d19c42b7   Nikanth Karthikesan   ext4: Prevent cre...
4261
4262
4263
  	ret = inode_newsize_ok(inode, (len + offset));
  	if (ret) {
  		mutex_unlock(&inode->i_mutex);
0562e0bad   Jiaying Zhang   ext4: add more tr...
4264
  		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
6d19c42b7   Nikanth Karthikesan   ext4: Prevent cre...
4265
4266
  		return ret;
  	}
3c6fe7701   Greg Harm   ext4: Don't norma...
4267
  	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
a4e5d88b1   Dmitry Monakhov   ext4: update EOFB...
4268
4269
  	if (mode & FALLOC_FL_KEEP_SIZE)
  		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
3c6fe7701   Greg Harm   ext4: Don't norma...
4270
4271
4272
4273
4274
4275
4276
  	/*
  	 * Don't normalize the request if it can fit in one extent so
  	 * that it doesn't get unnecessarily split into multiple
  	 * extents.
  	 */
  	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
  		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
a2df2a634   Amit Arora   fallocate support...
4277
4278
  retry:
  	while (ret >= 0 && ret < max_blocks) {
2ed886852   Theodore Ts'o   ext4: Convert cal...
4279
4280
  		map.m_lblk = map.m_lblk + ret;
  		map.m_len = max_blocks = max_blocks - ret;
a2df2a634   Amit Arora   fallocate support...
4281
4282
4283
4284
4285
  		handle = ext4_journal_start(inode, credits);
  		if (IS_ERR(handle)) {
  			ret = PTR_ERR(handle);
  			break;
  		}
a4e5d88b1   Dmitry Monakhov   ext4: update EOFB...
4286
  		ret = ext4_map_blocks(handle, inode, &map, flags);
221879c92   Aneesh Kumar K.V   ext4: Check for t...
4287
  		if (ret <= 0) {
2c98615d3   Aneesh Kumar K.V   ext4: Don't mark ...
4288
4289
  #ifdef EXT4FS_DEBUG
  			WARN_ON(ret <= 0);
e35fd6609   Theodore Ts'o   ext4: Add new abs...
4290
  			printk(KERN_ERR "%s: ext4_ext_map_blocks "
2c98615d3   Aneesh Kumar K.V   ext4: Don't mark ...
4291
  				    "returned error inode#%lu, block=%u, "
9fd9784c9   Thadeu Lima de Souza Cascardo   ext4: Fix buildin...
4292
  				    "max_blocks=%u", __func__,
a6371b636   Kazuya Mio   ext4: fix compile...
4293
  				    inode->i_ino, map.m_lblk, max_blocks);
2c98615d3   Aneesh Kumar K.V   ext4: Don't mark ...
4294
  #endif
a2df2a634   Amit Arora   fallocate support...
4295
4296
4297
4298
  			ext4_mark_inode_dirty(handle, inode);
  			ret2 = ext4_journal_stop(handle);
  			break;
  		}
2ed886852   Theodore Ts'o   ext4: Convert cal...
4299
  		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
fd28784ad   Aneesh Kumar K.V   ext4: Fix falloca...
4300
4301
4302
  						blkbits) >> blkbits))
  			new_size = offset + len;
  		else
29ae07b70   Utako Kusaka   ext4: Fix overflo...
4303
  			new_size = ((loff_t) map.m_lblk + ret) << blkbits;
a2df2a634   Amit Arora   fallocate support...
4304

fd28784ad   Aneesh Kumar K.V   ext4: Fix falloca...
4305
  		ext4_falloc_update_inode(inode, mode, new_size,
2ed886852   Theodore Ts'o   ext4: Convert cal...
4306
  					 (map.m_flags & EXT4_MAP_NEW));
a2df2a634   Amit Arora   fallocate support...
4307
4308
4309
4310
4311
  		ext4_mark_inode_dirty(handle, inode);
  		ret2 = ext4_journal_stop(handle);
  		if (ret2)
  			break;
  	}
fd28784ad   Aneesh Kumar K.V   ext4: Fix falloca...
4312
4313
4314
  	if (ret == -ENOSPC &&
  			ext4_should_retry_alloc(inode->i_sb, &retries)) {
  		ret = 0;
a2df2a634   Amit Arora   fallocate support...
4315
  		goto retry;
a2df2a634   Amit Arora   fallocate support...
4316
  	}
55bd725aa   Aneesh Kumar K.V   ext4: Fix locking...
4317
  	mutex_unlock(&inode->i_mutex);
0562e0bad   Jiaying Zhang   ext4: add more tr...
4318
4319
  	trace_ext4_fallocate_exit(inode, offset, max_blocks,
  				ret > 0 ? ret2 : ret);
a2df2a634   Amit Arora   fallocate support...
4320
4321
  	return ret > 0 ? ret2 : ret;
  }
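
  /*
   * Hedged user-space sketch (not part of this file): how the
   * FALLOC_FL_KEEP_SIZE path handled above is typically exercised by an
   * application.  The file name and size are hypothetical; wrapped in
   * #if 0 so it does not affect this translation unit.
   */
  #if 0	/* build separately, e.g. gcc -o prealloc prealloc.c */
  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
  	int fd = open("/tmp/prealloc.dat", O_CREAT | O_WRONLY, 0644);

  	if (fd < 0) {
  		perror("open");
  		return 1;
  	}
  	/* Preallocate 16 MiB of unwritten extents without growing i_size. */
  	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) != 0)
  		perror("fallocate");
  	close(fd);
  	return 0;
  }
  #endif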
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4322
4323
  
  /*
0031462b5   Mingming Cao   ext4: Split unini...
4324
4325
4326
4327
4328
4329
4330
   * This function converts a range of blocks to written extents.
   * The caller of this function will pass the start offset and the size.
   * All unwritten extents within this range will be converted to
   * written extents.
   *
   * This function is called from the direct IO end_io callback
   * function, to convert the fallocated extents after IO is completed.
109f55651   Mingming   ext4: fix ext4_ex...
4331
   * Returns 0 on success.
0031462b5   Mingming Cao   ext4: Split unini...
4332
4333
   */
  int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
a1de02dcc   Eric Sandeen   ext4: fix async i...
4334
  				    ssize_t len)
0031462b5   Mingming Cao   ext4: Split unini...
4335
4336
  {
  	handle_t *handle;
0031462b5   Mingming Cao   ext4: Split unini...
4337
4338
4339
  	unsigned int max_blocks;
  	int ret = 0;
  	int ret2 = 0;
2ed886852   Theodore Ts'o   ext4: Convert cal...
4340
  	struct ext4_map_blocks map;
0031462b5   Mingming Cao   ext4: Split unini...
4341
  	unsigned int credits, blkbits = inode->i_blkbits;
2ed886852   Theodore Ts'o   ext4: Convert cal...
4342
  	map.m_lblk = offset >> blkbits;
0031462b5   Mingming Cao   ext4: Split unini...
4343
4344
4345
4346
  	/*
  	 * We can't just convert len to max_blocks; consider, for example,
  	 * blocksize = 4096, offset = 3072 and len = 2048.
  	 */
2ed886852   Theodore Ts'o   ext4: Convert cal...
4347
4348
  	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
  		      map.m_lblk);
0031462b5   Mingming Cao   ext4: Split unini...
4349
4350
4351
4352
4353
  	/*
  	 * credits to insert 1 extent into extent tree
  	 */
  	credits = ext4_chunk_trans_blocks(inode, max_blocks);
  	while (ret >= 0 && ret < max_blocks) {
2ed886852   Theodore Ts'o   ext4: Convert cal...
4354
4355
  		map.m_lblk += ret;
  		map.m_len = (max_blocks -= ret);
0031462b5   Mingming Cao   ext4: Split unini...
4356
4357
4358
4359
4360
  		handle = ext4_journal_start(inode, credits);
  		if (IS_ERR(handle)) {
  			ret = PTR_ERR(handle);
  			break;
  		}
2ed886852   Theodore Ts'o   ext4: Convert cal...
4361
  		ret = ext4_map_blocks(handle, inode, &map,
c7064ef13   Jiaying Zhang   ext4: mechanical ...
4362
  				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
0031462b5   Mingming Cao   ext4: Split unini...
4363
4364
  		if (ret <= 0) {
  			WARN_ON(ret <= 0);
e35fd6609   Theodore Ts'o   ext4: Add new abs...
4365
  			printk(KERN_ERR "%s: ext4_ext_map_blocks "
0031462b5   Mingming Cao   ext4: Split unini...
4366
4367
  				    "returned error inode#%lu, block=%u, "
  				    "max_blocks=%u", __func__,
2ed886852   Theodore Ts'o   ext4: Convert cal...
4368
  				    inode->i_ino, map.m_lblk, map.m_len);
0031462b5   Mingming Cao   ext4: Split unini...
4369
4370
4371
4372
4373
4374
4375
4376
  		}
  		ext4_mark_inode_dirty(handle, inode);
  		ret2 = ext4_journal_stop(handle);
  		if (ret <= 0 || ret2 )
  			break;
  	}
  	return ret > 0 ? ret2 : ret;
  }
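
  /*
   * Hedged user-space sketch (not part of this file) of the I/O pattern
   * that reaches the conversion above: an O_DIRECT write into a fallocated
   * (unwritten) region, whose completion converts the extent to written.
   * Path and sizes are hypothetical; wrapped in #if 0 so it does not
   * affect this translation unit.
   */
  #if 0	/* build separately, e.g. gcc -o dio dio.c */
  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
  	void *buf;
  	int fd = open("/tmp/dio.dat", O_CREAT | O_WRONLY | O_DIRECT, 0644);

  	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
  		return 1;
  	memset(buf, 0xab, 4096);
  	/* preallocate unwritten extents, then DIO write; end_io converts */
  	if (fallocate(fd, 0, 0, 1 << 20) != 0 || pwrite(fd, buf, 4096, 0) < 0)
  		perror("fallocate/pwrite");
  	free(buf);
  	close(fd);
  	return 0;
  }
  #endif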
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4377

0031462b5   Mingming Cao   ext4: Split unini...
4378
  /*
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4379
4380
   * Callback function called for each extent to gather FIEMAP information.
   */
c03f8aa9a   Lukas Czerner   ext4: use FIEMAP_...
4381
  static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4382
4383
4384
  		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
  		       void *data)
  {
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4385
4386
4387
4388
  	__u64	logical;
  	__u64	physical;
  	__u64	length;
  	__u32	flags = 0;
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4389
4390
4391
  	int		ret = 0;
  	struct fiemap_extent_info *fieinfo = data;
  	unsigned char blksize_bits;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4392

6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4393
4394
  	blksize_bits = inode->i_sb->s_blocksize_bits;
  	logical = (__u64)newex->ec_block << blksize_bits;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4395

b05e6ae58   Theodore Ts'o   ext4: drop ec_typ...
4396
  	if (newex->ec_start == 0) {
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
  		/*
  		 * No extent in the extent tree contains block @newex->ec_start,
  		 * so the block may lie in 1) a hole or 2) a delayed extent.
  		 *
  		 * Holes or delayed extents are processed as follows.
  		 * 1. Look up dirty pages with the specified range in the
  		 *    pagecache.  If no page is found, there is no delayed
  		 *    extent and we return with EXT_CONTINUE.
  		 * 2. Find the 1st mapped buffer.
  		 * 3. Check whether the mapped buffer is both in the request
  		 *    range and a delayed buffer. If not, there is no delayed
  		 *    extent, so return.
  		 * 4. A delayed extent is found; the extent will be collected.
  		 */
  		ext4_lblk_t	end = 0;
  		pgoff_t		last_offset;
  		pgoff_t		offset;
  		pgoff_t		index;
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4415
  		pgoff_t		start_index = 0;
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4416
  		struct page	**pages = NULL;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4417
  		struct buffer_head *bh = NULL;
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4418
4419
4420
4421
4422
4423
  		struct buffer_head *head = NULL;
  		unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
  
  		pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
  		if (pages == NULL)
  			return -ENOMEM;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4424
4425
  
  		offset = logical >> PAGE_SHIFT;
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
  repeat:
  		last_offset = offset;
  		head = NULL;
  		ret = find_get_pages_tag(inode->i_mapping, &offset,
  					PAGECACHE_TAG_DIRTY, nr_pages, pages);
  
  		if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
  			/* First time, try to find a mapped buffer. */
  			if (ret == 0) {
  out:
  				for (index = 0; index < ret; index++)
  					page_cache_release(pages[index]);
  				/* just a hole. */
  				kfree(pages);
  				return EXT_CONTINUE;
  			}
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4442
  			index = 0;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4443

b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4444
  next_page:
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4445
  			/* Try to find the 1st mapped buffer. */
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4446
  			end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4447
  				  blksize_bits;
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4448
  			if (!page_has_buffers(pages[index]))
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4449
  				goto out;
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4450
  			head = page_buffers(pages[index]);
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4451
4452
  			if (!head)
  				goto out;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4453

b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4454
  			index++;
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4455
4456
  			bh = head;
  			do {
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
  				if (end >= newex->ec_block +
  					newex->ec_len)
  					/* The buffer is out of
  					 * the request range.
  					 */
  					goto out;
  
  				if (buffer_mapped(bh) &&
  				    end >= newex->ec_block) {
  					start_index = index - 1;
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4467
  					/* get the 1st mapped buffer. */
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4468
4469
  					goto found_mapped_buffer;
  				}
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4470

6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4471
4472
4473
  				bh = bh->b_this_page;
  				end++;
  			} while (bh != head);
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4474

b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
  			/* No mapped buffer in the range found in this page;
  			 * we need to look up the next page.
  			 */
  			if (index >= ret) {
  				/* There is no page left, but we need to limit
  				 * newex->ec_len.
  				 */
  				newex->ec_len = end - newex->ec_block;
  				goto out;
  			}
  			goto next_page;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4486
  		} else {
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4487
4488
4489
4490
  			/* Find contiguous delayed buffers. */
  			if (ret > 0 && pages[0]->index == last_offset)
  				head = page_buffers(pages[0]);
  			bh = head;
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4491
4492
  			index = 1;
  			start_index = 0;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4493
  		}
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
  
  found_mapped_buffer:
  		if (bh != NULL && buffer_delay(bh)) {
  			/* 1st or contiguous delayed buffer found. */
  			if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
  				/*
  				 * 1st delayed buffer found, record
  				 * the start of extent.
  				 */
  				flags |= FIEMAP_EXTENT_DELALLOC;
  				newex->ec_block = end;
  				logical = (__u64)end << blksize_bits;
  			}
  			/* Find contiguous delayed buffers. */
  			do {
  				if (!buffer_delay(bh))
  					goto found_delayed_extent;
  				bh = bh->b_this_page;
  				end++;
  			} while (bh != head);
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4514
  			for (; index < ret; index++) {
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4515
4516
4517
4518
4519
4520
4521
4522
4523
  				if (!page_has_buffers(pages[index])) {
  					bh = NULL;
  					break;
  				}
  				head = page_buffers(pages[index]);
  				if (!head) {
  					bh = NULL;
  					break;
  				}
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4524

6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4525
  				if (pages[index]->index !=
b221349fa   Yongqiang Yang   ext4: fix ext4_ex...
4526
4527
  				    pages[start_index]->index + index
  				    - start_index) {
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
  					/* Blocks are not contiguous. */
  					bh = NULL;
  					break;
  				}
  				bh = head;
  				do {
  					if (!buffer_delay(bh))
  						/* Delayed-extent ends. */
  						goto found_delayed_extent;
  					bh = bh->b_this_page;
  					end++;
  				} while (bh != head);
  			}
  		} else if (!(flags & FIEMAP_EXTENT_DELALLOC))
  			/* A hole was found. */
  			goto out;
  
  found_delayed_extent:
  		newex->ec_len = min(end - newex->ec_block,
  						(ext4_lblk_t)EXT_INIT_MAX_LEN);
  		if (ret == nr_pages && bh != NULL &&
  			newex->ec_len < EXT_INIT_MAX_LEN &&
  			buffer_delay(bh)) {
  			/* The whole extent has not been collected yet; keep scanning. */
  			for (index = 0; index < ret; index++)
  				page_cache_release(pages[index]);
  			goto repeat;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4555
  		}
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4556
4557
4558
4559
  
  		for (index = 0; index < ret; index++)
  			page_cache_release(pages[index]);
  		kfree(pages);
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4560
4561
4562
4563
4564
4565
4566
  	}
  
  	physical = (__u64)newex->ec_start << blksize_bits;
  	length =   (__u64)newex->ec_len << blksize_bits;
  
  	if (ex && ext4_ext_is_uninitialized(ex))
  		flags |= FIEMAP_EXTENT_UNWRITTEN;
c03f8aa9a   Lukas Czerner   ext4: use FIEMAP_...
4567
  	if (next == EXT_MAX_BLOCKS)
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4568
  		flags |= FIEMAP_EXTENT_LAST;
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4569
  	ret = fiemap_fill_next_extent(fieinfo, logical, physical,
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4570
  					length, flags);
6d9c85eb7   Yongqiang Yang   ext4: make FIEMAP...
4571
4572
4573
  	if (ret < 0)
  		return ret;
  	if (ret == 1)
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4574
  		return EXT_BREAK;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4575
4576
  	return EXT_CONTINUE;
  }
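
  /*
   * The callback above hands extents to fiemap in bytes, converting
   * from filesystem blocks by shifting with blksize_bits.  A minimal
   * standalone sketch of that arithmetic, assuming a 4 KiB block size
   * and made-up extent values (not taken from this file):
   */
  #include <stdio.h>

  int main(void)
  {
  	unsigned int blksize_bits = 12;		/* assumed: 4 KiB blocks */
  	unsigned long long ec_block = 100;	/* hypothetical logical block */
  	unsigned long long ec_start = 2048;	/* hypothetical physical block */
  	unsigned long long ec_len = 8;		/* hypothetical length in blocks */

  	/* Same shifts as used for logical, physical and length above. */
  	printf("logical=%llu physical=%llu length=%llu bytes\n",
  	       ec_block << blksize_bits,
  	       ec_start << blksize_bits,
  	       ec_len << blksize_bits);
  	return 0;
  }
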
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4577
4578
  /* FIEMAP flags we can handle are specified here */
  #define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
3a06d778d   Aneesh Kumar K.V   ext4: sparse fixes
4579
4580
  static int ext4_xattr_fiemap(struct inode *inode,
  				struct fiemap_extent_info *fieinfo)
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4581
4582
4583
4584
4585
4586
4587
4588
  {
  	__u64 physical = 0;
  	__u64 length;
  	__u32 flags = FIEMAP_EXTENT_LAST;
  	int blockbits = inode->i_sb->s_blocksize_bits;
  	int error = 0;
  
  	/* in-inode? */
19f5fb7ad   Theodore Ts'o   ext4: Use bitops ...
4589
  	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
  		struct ext4_iloc iloc;
  		int offset;	/* offset of xattr in inode */
  
  		error = ext4_get_inode_loc(inode, &iloc);
  		if (error)
  			return error;
  		physical = iloc.bh->b_blocknr << blockbits;
  		offset = EXT4_GOOD_OLD_INODE_SIZE +
  				EXT4_I(inode)->i_extra_isize;
  		physical += offset;
  		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
  		flags |= FIEMAP_EXTENT_DATA_INLINE;
fd2dd9fba   Curt Wohlgemuth   ext4: Fix buffer ...
4602
  		brelse(iloc.bh);
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
  	} else { /* external block */
  		physical = EXT4_I(inode)->i_file_acl << blockbits;
  		length = inode->i_sb->s_blocksize;
  	}
  
  	if (physical)
  		error = fiemap_fill_next_extent(fieinfo, 0, physical,
  						length, flags);
  	return (error < 0 ? error : 0);
  }
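
  /*
   * A minimal userspace sketch of exercising the path above: asking for
   * the extended attribute mapping with the FIEMAP ioctl and
   * FIEMAP_FLAG_XATTR, then checking FIEMAP_EXTENT_DATA_INLINE to see
   * whether the xattrs live inside the inode.  The file name is
   * hypothetical and error handling is kept minimal.
   */
  #include <stdio.h>
  #include <stdlib.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/fs.h>
  #include <linux/fiemap.h>

  int main(void)
  {
  	/* "somefile" is a hypothetical path on an ext4 filesystem. */
  	int fd = open("somefile", O_RDONLY);
  	struct fiemap *fm;

  	if (fd < 0)
  		return 1;

  	/* Header plus room for a single extent record. */
  	fm = calloc(1, sizeof(*fm) + sizeof(struct fiemap_extent));
  	if (!fm)
  		return 1;

  	fm->fm_start = 0;
  	fm->fm_length = ~0ULL;
  	fm->fm_flags = FIEMAP_FLAG_XATTR;
  	fm->fm_extent_count = 1;

  	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0 && fm->fm_mapped_extents) {
  		struct fiemap_extent *fe = &fm->fm_extents[0];

  		printf("xattr at physical %llu, length %llu, %s\n",
  		       (unsigned long long)fe->fe_physical,
  		       (unsigned long long)fe->fe_length,
  		       (fe->fe_flags & FIEMAP_EXTENT_DATA_INLINE) ?
  				"inline in the inode" : "external block");
  	}
  	free(fm);
  	close(fd);
  	return 0;
  }
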
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
  /*
   * ext4_ext_punch_hole
   *
   * Punches a hole of "length" bytes in the file referred to by "file",
   * starting at byte "offset".
   *
   * @file:   The file to punch a hole in
   * @offset: The starting byte offset of the hole
   * @length: The length of the hole
   *
   * Returns 0 on success or a negative error code on failure.
   */
  int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
  {
  	struct inode *inode = file->f_path.dentry->d_inode;
  	struct super_block *sb = inode->i_sb;
  	struct ext4_ext_cache cache_ex;
  	ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
  	struct address_space *mapping = inode->i_mapping;
  	struct ext4_map_blocks map;
  	handle_t *handle;
ba06208a1   Allison Henderson   ext4: fix xfstest...
4634
4635
  	loff_t first_page, last_page, page_len;
  	loff_t first_page_offset, last_page_offset;
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4636
  	int ret, credits, blocks_released, err = 0;
2be4751b2   Allison Henderson   ext4: fix 2nd xfs...
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
  	/* No need to punch hole beyond i_size */
  	if (offset >= inode->i_size)
  		return 0;
  
  	/*
  	 * If the hole extends beyond i_size, set the hole
  	 * to end after the page that contains i_size
  	 */
  	if (offset + length > inode->i_size) {
  		length = inode->i_size +
  		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
  		   offset;
  	}
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4650
4651
4652
  	first_block = (offset + sb->s_blocksize - 1) >>
  		EXT4_BLOCK_SIZE_BITS(sb);
  	last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
  	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
  	last_page = (offset + length) >> PAGE_CACHE_SHIFT;
  
  	first_page_offset = first_page << PAGE_CACHE_SHIFT;
  	last_page_offset = last_page << PAGE_CACHE_SHIFT;
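  	/*
  	 * For illustration, assuming 4 KiB blocks and pages: punching
  	 * offset=5000, length=12000 gives first_block = 2, last_block = 4,
  	 * first_page = 2, last_page = 4, first_page_offset = 8192 and
  	 * last_page_offset = 16384.  Only whole blocks 2 and 3 (bytes
  	 * 8192-16383) are removed below; the partial head (bytes 5000-8191)
  	 * and tail (bytes 16384-16999) are zeroed in place instead.
  	 */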
  
  	/*
  	 * Write out all dirty pages to avoid race conditions,
  	 * then release them.
  	 */
  	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
  		err = filemap_write_and_wait_range(mapping,
2be4751b2   Allison Henderson   ext4: fix 2nd xfs...
4665
  			offset, offset + length - 1);
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4666

2be4751b2   Allison Henderson   ext4: fix 2nd xfs...
4667
4668
  		if (err)
  			return err;
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
  	}
  
  	/* Now release the pages */
  	if (last_page_offset > first_page_offset) {
  		truncate_inode_pages_range(mapping, first_page_offset,
  					   last_page_offset-1);
  	}
  
  	/* finish any pending end_io work */
  	ext4_flush_completed_IO(inode);
  
  	credits = ext4_writepage_trans_blocks(inode);
  	handle = ext4_journal_start(inode, credits);
  	if (IS_ERR(handle))
  		return PTR_ERR(handle);
  
  	err = ext4_orphan_add(handle, inode);
  	if (err)
  		goto out;
  
  	/*
ba06208a1   Allison Henderson   ext4: fix xfstest...
4690
4691
4692
4693
  	 * Now we need to zero out the non-page-aligned data in the
  	 * pages at the start and tail of the hole, and unmap the buffer
  	 * heads for the block-aligned regions of the pages that were
  	 * completely zeroed.
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4694
  	 */
ba06208a1   Allison Henderson   ext4: fix xfstest...
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
  	if (first_page > last_page) {
  		/*
  		 * If the file space being truncated is contained within a page,
  		 * just zero out and unmap the middle of that page.
  		 */
  		err = ext4_discard_partial_page_buffers(handle,
  			mapping, offset, length, 0);
  
  		if (err)
  			goto out;
  	} else {
  		/*
  		 * zero out and unmap the partial page that contains
  		 * the start of the hole
  		 */
  		page_len  = first_page_offset - offset;
  		if (page_len > 0) {
  			err = ext4_discard_partial_page_buffers(handle, mapping,
  						   offset, page_len, 0);
  			if (err)
  				goto out;
  		}
  
  		/*
  		 * zero out and unmap the partial page that contains
  		 * the end of the hole
  		 */
  		page_len = offset + length - last_page_offset;
  		if (page_len > 0) {
  			err = ext4_discard_partial_page_buffers(handle, mapping,
  					last_page_offset, page_len, 0);
  			if (err)
  				goto out;
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4728
4729
  		}
  	}
2be4751b2   Allison Henderson   ext4: fix 2nd xfs...
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747
4748
  
  	/*
  	 * If i_size is contained in the last page, we need to
  	 * unmap and zero the partial page after i_size
  	 */
  	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
  	   inode->i_size % PAGE_CACHE_SIZE != 0) {
  
  		page_len = PAGE_CACHE_SIZE -
  			(inode->i_size & (PAGE_CACHE_SIZE - 1));
  
  		if (page_len > 0) {
  			err = ext4_discard_partial_page_buffers(handle,
  			  mapping, inode->i_size, page_len, 0);
  
  			if (err)
  				goto out;
  		}
  	}
a4bb6b64e   Allison Henderson   ext4: enable "pun...
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
  	/* If there are no blocks to remove, return now */
  	if (first_block >= last_block)
  		goto out;
  
  	down_write(&EXT4_I(inode)->i_data_sem);
  	ext4_ext_invalidate_cache(inode);
  	ext4_discard_preallocations(inode);
  
  	/*
  	 * Loop over all the blocks and identify blocks
  	 * that need to be punched out
  	 */
  	iblock = first_block;
  	blocks_released = 0;
  	while (iblock < last_block) {
  		max_blocks = last_block - iblock;
  		num_blocks = 1;
  		memset(&map, 0, sizeof(map));
  		map.m_lblk = iblock;
  		map.m_len = max_blocks;
  		ret = ext4_ext_map_blocks(handle, inode, &map,
  			EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
  
  		if (ret > 0) {
  			blocks_released += ret;
  			num_blocks = ret;
  		} else if (ret == 0) {
  			/*
  			 * If ext4_ext_map_blocks() could not find the
  			 * block, then it is in a hole.  If the hole was
  			 * not already cached, map_blocks should have put
  			 * it in the cache, so we can now read the hole's
  			 * extent out of the cache.
  			 */
  			memset(&cache_ex, 0, sizeof(cache_ex));
  			if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
  				!cache_ex.ec_start) {
  
  				/* The hole is cached */
  				num_blocks = cache_ex.ec_block +
  				cache_ex.ec_len - iblock;
  
  			} else {
  				/* The block could not be identified */
  				err = -EIO;
  				break;
  			}
  		} else {
  			/* Map blocks error */
  			err = ret;
  			break;
  		}
  
  		if (num_blocks == 0) {
  			/* This condition should never happen */
  			ext_debug("Block lookup failed");
  			err = -EIO;
  			break;
  		}
  
  		iblock += num_blocks;
  	}
  
  	if (blocks_released > 0) {
  		ext4_ext_invalidate_cache(inode);
  		ext4_discard_preallocations(inode);
  	}
  
  	if (IS_SYNC(inode))
  		ext4_handle_sync(handle);
  
  	up_write(&EXT4_I(inode)->i_data_sem);
  
  out:
  	ext4_orphan_del(handle, inode);
  	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
  	ext4_mark_inode_dirty(handle, inode);
  	ext4_journal_stop(handle);
  	return err;
  }
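
  /*
   * A minimal userspace sketch of how this path is normally reached:
   * fallocate(2) with FALLOC_FL_PUNCH_HOLE (which must be paired with
   * FALLOC_FL_KEEP_SIZE) on an extent-mapped ext4 file.  The file name
   * and the 4 KiB offset/length are hypothetical.
   */
  #define _GNU_SOURCE
  #include <stdio.h>
  #include <fcntl.h>	/* open(), fallocate(), FALLOC_FL_* with _GNU_SOURCE */
  #include <unistd.h>

  int main(void)
  {
  	/* "somefile" is a hypothetical path on an ext4 filesystem. */
  	int fd = open("somefile", O_RDWR);

  	if (fd < 0)
  		return 1;

  	/*
  	 * Punch a 4 KiB hole at offset 4 KiB; KEEP_SIZE is required, so
  	 * i_size is left unchanged.  Older C libraries may need
  	 * <linux/falloc.h> for the FALLOC_FL_* flags.
  	 */
  	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
  		      4096, 4096) != 0)
  		perror("fallocate");

  	close(fd);
  	return 0;
  }
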
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4829
4830
4831
4832
  int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
  		__u64 start, __u64 len)
  {
  	ext4_lblk_t start_blk;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4833
4834
4835
  	int error = 0;
  
  	/* Fall back to generic_block_fiemap() if the inode is not extent-mapped */
12e9b8920   Dmitry Monakhov   ext4: Use bitops ...
4836
  	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4837
4838
4839
4840
4841
4842
4843
4844
4845
  		return generic_block_fiemap(inode, fieinfo, start, len,
  			ext4_get_block);
  
  	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
  		return -EBADR;
  
  	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
  		error = ext4_xattr_fiemap(inode, fieinfo);
  	} else {
aca92ff6f   Leonard Michlmayr   ext4: correctly c...
4846
4847
  		ext4_lblk_t len_blks;
  		__u64 last_blk;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4848
  		start_blk = start >> inode->i_sb->s_blocksize_bits;
aca92ff6f   Leonard Michlmayr   ext4: correctly c...
4849
  		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
f17722f91   Lukas Czerner   ext4: Fix max fil...
4850
4851
  		if (last_blk >= EXT_MAX_BLOCKS)
  			last_blk = EXT_MAX_BLOCKS-1;
aca92ff6f   Leonard Michlmayr   ext4: correctly c...
4852
  		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4853
4854
4855
4856
4857
  
  		/*
  		 * Walk the extent tree gathering extent information.
  		 * ext4_ext_fiemap_cb will push extents back to the user.
  		 */
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4858
4859
  		error = ext4_ext_walk_space(inode, start_blk, len_blks,
  					  ext4_ext_fiemap_cb, fieinfo);
6873fa0de   Eric Sandeen   Hook ext4 to the ...
4860
4861
4862
4863
  	}
  
  	return error;
  }
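
  /*
   * A minimal userspace sketch of the data path above: FS_IOC_FIEMAP
   * with FIEMAP_FLAG_SYNC, printing one batch of extents and stopping
   * at FIEMAP_EXTENT_LAST.  The file name and the 32-extent batch size
   * are arbitrary; a full walker would resubmit with fm_start advanced
   * past the last extent returned.
   */
  #include <stdio.h>
  #include <stdlib.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/fs.h>
  #include <linux/fiemap.h>

  #define NUM_EXTENTS 32	/* arbitrary batch size */

  int main(void)
  {
  	/* "somefile" is a hypothetical path on an ext4 filesystem. */
  	int fd = open("somefile", O_RDONLY);
  	struct fiemap *fm;
  	unsigned int i;

  	if (fd < 0)
  		return 1;

  	fm = calloc(1, sizeof(*fm) + NUM_EXTENTS * sizeof(struct fiemap_extent));
  	if (!fm)
  		return 1;

  	fm->fm_start = 0;
  	fm->fm_length = ~0ULL;			/* whole file */
  	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush delalloc first */
  	fm->fm_extent_count = NUM_EXTENTS;

  	if (ioctl(fd, FS_IOC_FIEMAP, fm) != 0) {
  		perror("FS_IOC_FIEMAP");
  		return 1;
  	}

  	for (i = 0; i < fm->fm_mapped_extents; i++) {
  		struct fiemap_extent *fe = &fm->fm_extents[i];

  		printf("logical %llu physical %llu length %llu flags 0x%x\n",
  		       (unsigned long long)fe->fe_logical,
  		       (unsigned long long)fe->fe_physical,
  		       (unsigned long long)fe->fe_length,
  		       fe->fe_flags);
  		if (fe->fe_flags & FIEMAP_EXTENT_LAST)
  			break;
  	}
  	free(fm);
  	close(fd);
  	return 0;
  }
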