fs/f2fs/extent_cache.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * f2fs extent cache support
   *
   * Copyright (c) 2015 Motorola Mobility
   * Copyright (c) 2015 Samsung Electronics
   * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
   *          Chao Yu <chao2.yu@samsung.com>
   */
  
  #include <linux/fs.h>
  #include <linux/f2fs_fs.h>
  
  #include "f2fs.h"
  #include "node.h"
  #include <trace/events/f2fs.h>
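
  /*
   * Helpers for rb-trees keyed by [ofs, ofs + len): __lookup_rb_tree_fast()
   * checks a cached entry first, __lookup_rb_tree_slow() walks the tree,
   * and f2fs_lookup_rb_tree() combines the two.
   */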
  static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
  							unsigned int ofs)
  {
  	if (cached_re) {
  		if (cached_re->ofs <= ofs &&
  				cached_re->ofs + cached_re->len > ofs) {
  			return cached_re;
  		}
  	}
  	return NULL;
  }
  static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
  							unsigned int ofs)
  {
  	struct rb_node *node = root->rb_root.rb_node;
  	struct rb_entry *re;
  
  	while (node) {
  		re = rb_entry(node, struct rb_entry, rb_node);
  
  		if (ofs < re->ofs)
  			node = node->rb_left;
  		else if (ofs >= re->ofs + re->len)
  			node = node->rb_right;
  		else
  			return re;
  	}
  	return NULL;
  }
  struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
  				struct rb_entry *cached_re, unsigned int ofs)
  {
  	struct rb_entry *re;
  
  	re = __lookup_rb_tree_fast(cached_re, ofs);
  	if (!re)
  		return __lookup_rb_tree_slow(root, ofs);
  
  	return re;
  }
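
  /*
   * Find the link (*p) and parent node at which a new entry covering @ofs
   * should be attached; *leftmost is cleared once the walk goes right, so
   * the caller knows whether the new node becomes the leftmost one.
   */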
  struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
  				struct rb_root_cached *root,
  				struct rb_node **parent,
  				unsigned int ofs, bool *leftmost)
  {
  	struct rb_node **p = &root->rb_root.rb_node;
  	struct rb_entry *re;
  
  	while (*p) {
  		*parent = *p;
  		re = rb_entry(*parent, struct rb_entry, rb_node);
  		if (ofs < re->ofs) {
  			p = &(*p)->rb_left;
  		} else if (ofs >= re->ofs + re->len) {
  			p = &(*p)->rb_right;
  			*leftmost = false;
  		} else {
  			f2fs_bug_on(sbi, 1);
  		}
  	}
  
  	return p;
  }
  
  /*
   * lookup rb entry in position of @ofs in rb-tree,
   * if hit, return the entry, otherwise, return NULL
   * @prev_entry: extent before ofs
   * @next_entry: extent after ofs
   * @insert_p: insert point for new extent at ofs,
   * in order to simplify the insertion afterwards.
   * tree must stay unchanged between lookup and insertion.
   */
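  /*
   * Illustrative sketch only (not verbatim from any caller): the
   * lookup/insert pairing below mirrors what f2fs_update_extent_tree_range()
   * does with an inode's extent tree.
   *
   *	re = f2fs_lookup_rb_tree_ret(&et->root,
   *			(struct rb_entry *)et->cached_en, fofs,
   *			&prev_re, &next_re, &insert_p, &insert_parent,
   *			false, &leftmost);
   *	if (!re)
   *		__insert_extent_tree(sbi, et, &ei,
   *			insert_p, insert_parent, leftmost);
   */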
  struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
  				struct rb_entry *cached_re,
  				unsigned int ofs,
  				struct rb_entry **prev_entry,
  				struct rb_entry **next_entry,
  				struct rb_node ***insert_p,
  				struct rb_node **insert_parent,
  				bool force, bool *leftmost)
  {
  	struct rb_node **pnode = &root->rb_root.rb_node;
  	struct rb_node *parent = NULL, *tmp_node;
  	struct rb_entry *re = cached_re;
  
  	*insert_p = NULL;
  	*insert_parent = NULL;
  	*prev_entry = NULL;
  	*next_entry = NULL;
  	if (RB_EMPTY_ROOT(&root->rb_root))
  		return NULL;
  
  	if (re) {
  		if (re->ofs <= ofs && re->ofs + re->len > ofs)
  			goto lookup_neighbors;
  	}
  	if (leftmost)
  		*leftmost = true;
  	while (*pnode) {
  		parent = *pnode;
  		re = rb_entry(*pnode, struct rb_entry, rb_node);
  		if (ofs < re->ofs) {
  			pnode = &(*pnode)->rb_left;
  		} else if (ofs >= re->ofs + re->len) {
  			pnode = &(*pnode)->rb_right;
  			if (leftmost)
  				*leftmost = false;
  		} else {
  			goto lookup_neighbors;
  		}
  	}
  
  	*insert_p = pnode;
  	*insert_parent = parent;
  
  	re = rb_entry(parent, struct rb_entry, rb_node);
  	tmp_node = parent;
  	if (parent && ofs > re->ofs)
  		tmp_node = rb_next(parent);
  	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
  
  	tmp_node = parent;
  	if (parent && ofs < re->ofs)
  		tmp_node = rb_prev(parent);
  	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
  	return NULL;
  
  lookup_neighbors:
  	if (ofs == re->ofs || force) {
  		/* lookup prev node for merging backward later */
  		tmp_node = rb_prev(&re->rb_node);
  		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
  	}
  	if (ofs == re->ofs + re->len - 1 || force) {
  		/* lookup next node for merging frontward later */
  		tmp_node = rb_next(&re->rb_node);
  		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
  	}
  	return re;
  }
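
  /*
   * Debug helper (CONFIG_F2FS_CHECK_FS): verify that no two entries in the
   * rb-tree overlap; logs the offending pair and returns false if they do.
   */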
  bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
  						struct rb_root_cached *root)
  {
  #ifdef CONFIG_F2FS_CHECK_FS
  	struct rb_node *cur = rb_first_cached(root), *next;
  	struct rb_entry *cur_re, *next_re;
  
  	if (!cur)
  		return true;
  
  	while (cur) {
  		next = rb_next(cur);
  		if (!next)
  			return true;
  
  		cur_re = rb_entry(cur, struct rb_entry, rb_node);
  		next_re = rb_entry(next, struct rb_entry, rb_node);
  
  		if (cur_re->ofs + cur_re->len > next_re->ofs) {
  			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
  				  cur_re->ofs, cur_re->len,
  				  next_re->ofs, next_re->len);
  			return false;
  		}
  
  		cur = next;
  	}
  #endif
  	return true;
  }
  static struct kmem_cache *extent_tree_slab;
  static struct kmem_cache *extent_node_slab;
  
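  /*
   * Allocate an extent_node for @ei and link it into @et at the position
   * described by @parent / @p, bumping the per-tree and per-sb node counts.
   */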
  static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
  				struct extent_tree *et, struct extent_info *ei,
  				struct rb_node *parent, struct rb_node **p,
  				bool leftmost)
  {
  	struct extent_node *en;
  
  	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
  	if (!en)
  		return NULL;
  
  	en->ei = *ei;
  	INIT_LIST_HEAD(&en->list);
  	en->et = et;
  
  	rb_link_node(&en->rb_node, parent, p);
  	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
  	atomic_inc(&et->node_cnt);
  	atomic_inc(&sbi->total_ext_node);
  	return en;
  }
  
  static void __detach_extent_node(struct f2fs_sb_info *sbi,
  				struct extent_tree *et, struct extent_node *en)
  {
  	rb_erase_cached(&en->rb_node, &et->root);
  	atomic_dec(&et->node_cnt);
  	atomic_dec(&sbi->total_ext_node);
  
  	if (et->cached_en == en)
  		et->cached_en = NULL;
  	kmem_cache_free(extent_node_slab, en);
  }
  
  /*
   * Flow to release an extent_node:
   * 1. list_del_init
   * 2. __detach_extent_node
   * 3. kmem_cache_free.
   */
  static void __release_extent_node(struct f2fs_sb_info *sbi,
  			struct extent_tree *et, struct extent_node *en)
  {
  	spin_lock(&sbi->extent_lock);
  	f2fs_bug_on(sbi, list_empty(&en->list));
  	list_del_init(&en->list);
  	spin_unlock(&sbi->extent_lock);
  
  	__detach_extent_node(sbi, et, en);
  }
  
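  /*
   * Return the inode's extent_tree, allocating and registering a new one in
   * sbi->extent_tree_root if needed; a reused tree is taken off the zombie
   * list.  The tree stays attached to the inode until eviction.
   */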
  static struct extent_tree *__grab_extent_tree(struct inode *inode)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et;
  	nid_t ino = inode->i_ino;
  	mutex_lock(&sbi->extent_tree_lock);
  	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
  	if (!et) {
  		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
  		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
  		memset(et, 0, sizeof(struct extent_tree));
  		et->ino = ino;
  		et->root = RB_ROOT_CACHED;
  		et->cached_en = NULL;
  		rwlock_init(&et->lock);
  		INIT_LIST_HEAD(&et->list);
  		atomic_set(&et->node_cnt, 0);
  		atomic_inc(&sbi->total_ext_tree);
  	} else {
  		atomic_dec(&sbi->total_zombie_tree);
  		list_del_init(&et->list);
  	}
  	mutex_unlock(&sbi->extent_tree_lock);
  
  	/* never dies until evict_inode */
  	F2FS_I(inode)->extent_tree = et;
  
  	return et;
  }
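
  /* Seed an empty extent tree with a single node describing @ei. */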
  static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
  				struct extent_tree *et, struct extent_info *ei)
  {
  	struct rb_node **p = &et->root.rb_root.rb_node;
  	struct extent_node *en;
  	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
  	if (!en)
  		return NULL;
  
  	et->largest = en->ei;
  	et->cached_en = en;
  	return en;
  }
  
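  /*
   * Free every extent_node attached to @et (called with et->lock held for
   * write); returns the number of nodes released.
   */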
  static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
  					struct extent_tree *et)
  {
  	struct rb_node *node, *next;
  	struct extent_node *en;
  	unsigned int count = atomic_read(&et->node_cnt);

  	node = rb_first_cached(&et->root);
  	while (node) {
  		next = rb_next(node);
  		en = rb_entry(node, struct extent_node, rb_node);
  		__release_extent_node(sbi, et, en);
  		node = next;
  	}
  	return count - atomic_read(&et->node_cnt);
  }
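
  /*
   * Invalidate et->largest if it overlaps [fofs, fofs + len) and flag
   * et->largest_updated so the caller can mark the inode dirty.
   */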
  static void __drop_largest_extent(struct extent_tree *et,
  					pgoff_t fofs, unsigned int len)
  {
  	if (fofs < et->largest.fofs + et->largest.len &&
  			fofs + len > et->largest.fofs) {
  		et->largest.len = 0;
  		et->largest_updated = true;
  	}
  }
  /* return true, if inode page is changed */
  static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et;
  	struct extent_node *en;
  	struct extent_info ei;
  	if (!f2fs_may_extent_tree(inode)) {
  		/* drop largest extent */
  		if (i_ext && i_ext->len) {
  			i_ext->len = 0;
  			return true;
  		}
  		return false;
  	}
  
  	et = __grab_extent_tree(inode);
  	if (!i_ext || !i_ext->len)
  		return false;

  	get_extent_info(&ei, i_ext);
  
  	write_lock(&et->lock);
  	if (atomic_read(&et->node_cnt))
  		goto out;
  	en = __init_extent_tree(sbi, et, &ei);
  	if (en) {
  		spin_lock(&sbi->extent_lock);
  		list_add_tail(&en->list, &sbi->extent_list);
  		spin_unlock(&sbi->extent_lock);
  	}
  out:
  	write_unlock(&et->lock);
  	return false;
  }
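
  /*
   * Wrapper around __f2fs_init_extent_tree(): additionally sets FI_NO_EXTENT
   * when no extent tree was attached.  Returns true if the inode page was
   * changed (i.e. the on-disk largest extent had to be dropped).
   */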
  bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
  {
  	bool ret = __f2fs_init_extent_tree(inode, i_ext);
  
  	if (!F2FS_I(inode)->extent_tree)
  		set_inode_flag(inode, FI_NO_EXTENT);
  
  	return ret;
  }
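
  /*
   * Look @pgofs up in the inode's extent tree: try the largest extent first,
   * then the cached node, then an rb-tree walk.  On an rb-tree hit the node
   * is moved to the tail of the global LRU list and *ei is filled in.
   */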
  static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
  							struct extent_info *ei)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
  	struct extent_node *en;
  	bool ret = false;
  
  	f2fs_bug_on(sbi, !et);
  
  	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
  
  	read_lock(&et->lock);
  
  	if (et->largest.fofs <= pgofs &&
  			et->largest.fofs + et->largest.len > pgofs) {
  		*ei = et->largest;
  		ret = true;
  		stat_inc_largest_node_hit(sbi);
  		goto out;
  	}
  	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
  				(struct rb_entry *)et->cached_en, pgofs);
  	if (!en)
  		goto out;
  
  	if (en == et->cached_en)
  		stat_inc_cached_node_hit(sbi);
  	else
  		stat_inc_rbtree_node_hit(sbi);
  
  	*ei = en->ei;
  	spin_lock(&sbi->extent_lock);
  	if (!list_empty(&en->list)) {
  		list_move_tail(&en->list, &sbi->extent_list);
  		et->cached_en = en;
  	}
  	spin_unlock(&sbi->extent_lock);
  	ret = true;
  out:
  	stat_inc_total_hit(sbi);
  	read_unlock(&et->lock);
  
  	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
  	return ret;
  }
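
  /*
   * Try to merge @ei with its neighbours: extend @prev_ex forward and/or
   * @next_ex backward; when both merge, @prev_ex is released and @next_ex
   * survives.  Returns the merged node, or NULL if nothing was mergeable.
   */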
  static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
  				struct extent_tree *et, struct extent_info *ei,
  				struct extent_node *prev_ex,
  				struct extent_node *next_ex)
  {
  	struct extent_node *en = NULL;
  
  	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
  		prev_ex->ei.len += ei->len;
  		ei = &prev_ex->ei;
  		en = prev_ex;
  	}

  	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
  		next_ex->ei.fofs = ei->fofs;
  		next_ex->ei.blk = ei->blk;
  		next_ex->ei.len += ei->len;
  		if (en)
  			__release_extent_node(sbi, et, prev_ex);
  		en = next_ex;
  	}

  	if (!en)
  		return NULL;
  	__try_update_largest_extent(et, en);
  
  	spin_lock(&sbi->extent_lock);
  	if (!list_empty(&en->list)) {
  		list_move_tail(&en->list, &sbi->extent_list);
  		et->cached_en = en;
  	}
  	spin_unlock(&sbi->extent_lock);
  	return en;
  }
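
  /*
   * Insert @ei as a new extent_node, either at a position remembered from a
   * previous lookup (@insert_p / @insert_parent) or found by a fresh tree
   * walk, then queue the node on the global LRU list.
   */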
  static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
  				struct extent_tree *et, struct extent_info *ei,
  				struct rb_node **insert_p,
  				struct rb_node *insert_parent,
  				bool leftmost)
  {
  	struct rb_node **p;
  	struct rb_node *parent = NULL;
  	struct extent_node *en = NULL;
  
  	if (insert_p && insert_parent) {
  		parent = insert_parent;
  		p = insert_p;
  		goto do_insert;
  	}
  	leftmost = true;
  
  	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
  						ei->fofs, &leftmost);
  do_insert:
  	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
  	if (!en)
  		return NULL;

  	__try_update_largest_extent(et, en);
  
  	/* update in global extent list */
  	spin_lock(&sbi->extent_lock);
  	list_add_tail(&en->list, &sbi->extent_list);
  	et->cached_en = en;
  	spin_unlock(&sbi->extent_lock);
  	return en;
  }
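
  /*
   * Fold the mapping [fofs, fofs + len) -> @blkaddr into the inode's extent
   * tree: existing nodes overlapping the range are trimmed, split or freed,
   * then the new extent is merged with its neighbours or inserted (a zero
   * @blkaddr only invalidates the range).
   */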
  static void f2fs_update_extent_tree_range(struct inode *inode,
  				pgoff_t fofs, block_t blkaddr, unsigned int len)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
  	struct extent_node *en = NULL, *en1 = NULL;
  	struct extent_node *prev_en = NULL, *next_en = NULL;
  	struct extent_info ei, dei, prev;
  	struct rb_node **insert_p = NULL, *insert_parent = NULL;
  	unsigned int end = fofs + len;
  	unsigned int pos = (unsigned int)fofs;
  	bool updated = false;
  	bool leftmost = false;
  
  	if (!et)
  		return;

  	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
  	write_lock(&et->lock);
  	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
  		write_unlock(&et->lock);
  		return;
  	}
  
  	prev = et->largest;
  	dei.len = 0;
  	/*
  	 * drop largest extent before lookup, in case it's already
  	 * been shrunk from extent tree
  	 */
  	__drop_largest_extent(et, fofs, len);

  	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
  	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
  					(struct rb_entry *)et->cached_en, fofs,
  					(struct rb_entry **)&prev_en,
  					(struct rb_entry **)&next_en,
  					&insert_p, &insert_parent, false,
  					&leftmost);
  	if (!en)
  		en = next_en;
  
  	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
  	while (en && en->ei.fofs < end) {
  		unsigned int org_end;
  		int parts = 0;	/* # of parts current extent split into */

  		next_en = en1 = NULL;
  
  		dei = en->ei;
  		org_end = dei.fofs + dei.len;
  		f2fs_bug_on(sbi, pos >= org_end);

  		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
  			en->ei.len = pos - en->ei.fofs;
  			prev_en = en;
  			parts = 1;
  		}

  		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
  			if (parts) {
  				set_extent_info(&ei, end,
  						end - dei.fofs + dei.blk,
  						org_end - end);
  				en1 = __insert_extent_tree(sbi, et, &ei,
  							NULL, NULL, true);
  				next_en = en1;
  			} else {
  				en->ei.fofs = end;
  				en->ei.blk += end - dei.fofs;
  				en->ei.len -= end - dei.fofs;
  				next_en = en;
  			}
  			parts++;
  		}
  		if (!next_en) {
  			struct rb_node *node = rb_next(&en->rb_node);

  			next_en = rb_entry_safe(node, struct extent_node,
  						rb_node);
  		}
  		if (parts)
  			__try_update_largest_extent(et, en);
  		else
  			__release_extent_node(sbi, et, en);
  
  		/*
  		 * if original extent is split into zero or two parts, extent
  		 * tree has been altered by deletion or insertion, therefore
  		 * invalidate pointers related to the tree.
  		 */
  		if (parts != 1) {
  			insert_p = NULL;
  			insert_parent = NULL;
  		}
  		en = next_en;
  	}
  	/* 3. update extent in extent cache */
  	if (blkaddr) {
  
  		set_extent_info(&ei, fofs, blkaddr, len);
  		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
  			__insert_extent_tree(sbi, et, &ei,
  					insert_p, insert_parent, leftmost);
  
  		/* give up extent_cache, if split and small updates happen */
  		if (dei.len >= 1 &&
  				prev.len < F2FS_MIN_EXTENT_LEN &&
  				et->largest.len < F2FS_MIN_EXTENT_LEN) {
  			et->largest.len = 0;
  			et->largest_updated = true;
  			set_inode_flag(inode, FI_NO_EXTENT);
  		}
  	}

  	if (is_inode_flag_set(inode, FI_NO_EXTENT))
  		__free_extent_tree(sbi, et);

  	if (et->largest_updated) {
  		et->largest_updated = false;
  		updated = true;
  	}
  	write_unlock(&et->lock);
  
  	if (updated)
  		f2fs_mark_inode_dirty_sync(inode, true);
  }
  
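  /*
   * Shrinker entry point: free whole zombie trees first, then detach nodes
   * from the head of the global LRU until about @nr_shrink objects have
   * been reclaimed.  Returns the number of trees plus nodes released.
   */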
  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
  {
  	struct extent_tree *et, *next;
  	struct extent_node *en;
  	unsigned int node_cnt = 0, tree_cnt = 0;
  	int remained;
  
  	if (!test_opt(sbi, EXTENT_CACHE))
  		return 0;
  	if (!atomic_read(&sbi->total_zombie_tree))
  		goto free_node;
  	if (!mutex_trylock(&sbi->extent_tree_lock))
  		goto out;
  
  	/* 1. remove unreferenced extent tree */
  	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
  		if (atomic_read(&et->node_cnt)) {
  			write_lock(&et->lock);
  			node_cnt += __free_extent_tree(sbi, et);
  			write_unlock(&et->lock);
  		}
  		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
  		list_del_init(&et->list);
  		radix_tree_delete(&sbi->extent_tree_root, et->ino);
  		kmem_cache_free(extent_tree_slab, et);
  		atomic_dec(&sbi->total_ext_tree);
  		atomic_dec(&sbi->total_zombie_tree);
  		tree_cnt++;

  		if (node_cnt + tree_cnt >= nr_shrink)
  			goto unlock_out;
  		cond_resched();
  	}
  	mutex_unlock(&sbi->extent_tree_lock);

  free_node:
  	/* 2. remove LRU extent entries */
  	if (!mutex_trylock(&sbi->extent_tree_lock))
  		goto out;
  
  	remained = nr_shrink - (node_cnt + tree_cnt);
  
  	spin_lock(&sbi->extent_lock);
  	for (; remained > 0; remained--) {
  		if (list_empty(&sbi->extent_list))
  			break;
  		en = list_first_entry(&sbi->extent_list,
  					struct extent_node, list);
  		et = en->et;
  		if (!write_trylock(&et->lock)) {
  			/* refresh this extent node's position in extent list */
  			list_move_tail(&en->list, &sbi->extent_list);
  			continue;
  		}

  		list_del_init(&en->list);
  		spin_unlock(&sbi->extent_lock);

  		__detach_extent_node(sbi, et, en);

  		write_unlock(&et->lock);
  		node_cnt++;
  		spin_lock(&sbi->extent_lock);
  	}
  	spin_unlock(&sbi->extent_lock);
  unlock_out:
  	mutex_unlock(&sbi->extent_tree_lock);
  out:
  	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
  
  	return node_cnt + tree_cnt;
  }
  
  unsigned int f2fs_destroy_extent_node(struct inode *inode)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
  	unsigned int node_cnt = 0;
  	if (!et || !atomic_read(&et->node_cnt))
  		return 0;
  
  	write_lock(&et->lock);
  	node_cnt = __free_extent_tree(sbi, et);
  	write_unlock(&et->lock);
  
  	return node_cnt;
  }
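
  /*
   * Drop every cached extent of the inode and set FI_NO_EXTENT so nothing
   * new is cached; the inode is marked dirty if the largest extent had to
   * be cleared.
   */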
  void f2fs_drop_extent_tree(struct inode *inode)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
  	bool updated = false;

  	if (!f2fs_may_extent_tree(inode))
  		return;
  	set_inode_flag(inode, FI_NO_EXTENT);
  
  	write_lock(&et->lock);
  	__free_extent_tree(sbi, et);
  	if (et->largest.len) {
  		et->largest.len = 0;
  		updated = true;
  	}
  	write_unlock(&et->lock);
  	if (updated)
  		f2fs_mark_inode_dirty_sync(inode, true);
  }
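
  /*
   * Tear down the inode's extent tree: a still-linked inode with cached
   * nodes only gets parked on the zombie list for the shrinker, otherwise
   * all nodes are freed and the tree is removed from the radix tree.
   */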
  void f2fs_destroy_extent_tree(struct inode *inode)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
  	unsigned int node_cnt = 0;
  
  	if (!et)
  		return;
  	if (inode->i_nlink && !is_bad_inode(inode) &&
  					atomic_read(&et->node_cnt)) {
  		mutex_lock(&sbi->extent_tree_lock);
  		list_add_tail(&et->list, &sbi->zombie_list);
  		atomic_inc(&sbi->total_zombie_tree);
  		mutex_unlock(&sbi->extent_tree_lock);
  		return;
  	}
  
  	/* free all extent info belonging to this extent tree */
  	node_cnt = f2fs_destroy_extent_node(inode);
  
  	/* delete extent tree entry in radix tree */
  	mutex_lock(&sbi->extent_tree_lock);
  	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
  	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
  	kmem_cache_free(extent_tree_slab, et);
  	atomic_dec(&sbi->total_ext_tree);
  	mutex_unlock(&sbi->extent_tree_lock);
  
  	F2FS_I(inode)->extent_tree = NULL;
  
  	trace_f2fs_destroy_extent_tree(inode, node_cnt);
  }
  
  bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
  					struct extent_info *ei)
  {
  	if (!f2fs_may_extent_tree(inode))
  		return false;
  
  	return f2fs_lookup_extent_tree(inode, pgofs, ei);
  }
  
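  /*
   * Update the cache for the single block at dn->ofs_in_node; a NEW_ADDR
   * (preallocated but unwritten) block is passed down as NULL_ADDR, which
   * only invalidates the old extent.
   */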
  void f2fs_update_extent_cache(struct dnode_of_data *dn)
  {
  	pgoff_t fofs;
  	block_t blkaddr;
  
  	if (!f2fs_may_extent_tree(dn->inode))
  		return;
  	if (dn->data_blkaddr == NEW_ADDR)
  		blkaddr = NULL_ADDR;
  	else
  		blkaddr = dn->data_blkaddr;

  	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
  								dn->ofs_in_node;
  	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
  }
  
  void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
  				pgoff_t fofs, block_t blkaddr, unsigned int len)
  {
  	if (!f2fs_may_extent_tree(dn->inode))
  		return;
  	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
  }
  void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
  {
  	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
  	mutex_init(&sbi->extent_tree_lock);
  	INIT_LIST_HEAD(&sbi->extent_list);
  	spin_lock_init(&sbi->extent_lock);
  	atomic_set(&sbi->total_ext_tree, 0);
  	INIT_LIST_HEAD(&sbi->zombie_list);
  	atomic_set(&sbi->total_zombie_tree, 0);
  	atomic_set(&sbi->total_ext_node, 0);
  }
  int __init f2fs_create_extent_cache(void)
  {
  	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
  			sizeof(struct extent_tree));
  	if (!extent_tree_slab)
  		return -ENOMEM;
  	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
  			sizeof(struct extent_node));
  	if (!extent_node_slab) {
  		kmem_cache_destroy(extent_tree_slab);
  		return -ENOMEM;
  	}
  	return 0;
  }
  void f2fs_destroy_extent_cache(void)
  {
  	kmem_cache_destroy(extent_node_slab);
  	kmem_cache_destroy(extent_tree_slab);
  }