fs/f2fs/extent_cache.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * f2fs extent cache support
   *
   * Copyright (c) 2015 Motorola Mobility
   * Copyright (c) 2015 Samsung Electronics
   * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
   *          Chao Yu <chao2.yu@samsung.com>
   */
  
  #include <linux/fs.h>
  #include <linux/f2fs_fs.h>
  
  #include "f2fs.h"
  #include "node.h"
  #include <trace/events/f2fs.h>
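
/*
 * Fast path: return @cached_re if it covers @ofs, otherwise NULL.
 */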
  static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
  							unsigned int ofs)
  {
  	if (cached_re) {
  		if (cached_re->ofs <= ofs &&
  				cached_re->ofs + cached_re->len > ofs) {
  			return cached_re;
  		}
  	}
  	return NULL;
  }

static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_root.rb_node;
  	struct rb_entry *re;
  
  	while (node) {
  		re = rb_entry(node, struct rb_entry, rb_node);
  
  		if (ofs < re->ofs)
  			node = node->rb_left;
  		else if (ofs >= re->ofs + re->len)
  			node = node->rb_right;
  		else
  			return re;
  	}
  	return NULL;
  }
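
/*
 * Look up the rb_entry covering @ofs: try @cached_re first, then fall
 * back to walking the tree.
 */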
  struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
  				struct rb_entry *cached_re, unsigned int ofs)
  {
  	struct rb_entry *re;
  
  	re = __lookup_rb_tree_fast(cached_re, ofs);
  	if (!re)
  		return __lookup_rb_tree_slow(root, ofs);
  
  	return re;
  }
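
/*
 * Find the insert position for a 64-bit @key: descend the tree, keeping
 * track of the parent and whether the new node stays leftmost.
 */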
  struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
  					struct rb_root_cached *root,
  					struct rb_node **parent,
  					unsigned long long key, bool *leftmost)
  {
  	struct rb_node **p = &root->rb_root.rb_node;
  	struct rb_entry *re;
  
  	while (*p) {
  		*parent = *p;
  		re = rb_entry(*parent, struct rb_entry, rb_node);
  
  		if (key < re->key) {
  			p = &(*p)->rb_left;
  		} else {
  			p = &(*p)->rb_right;
  			*leftmost = false;
  		}
  	}
  
  	return p;
  }
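
/*
 * Find the insert position for an extent starting at @ofs; overlapping an
 * existing entry is treated as a bug.
 */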
  struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			p = &(*p)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			p = &(*p)->rb_right;
			*leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
  	}
  
  	return p;
  }
  
/*
 * Look up the rb entry covering @ofs in the rb-tree.
 * If hit, return the entry; otherwise return NULL.
 * @prev_ex: extent before ofs
 * @next_ex: extent after ofs
 * @insert_p: insert point for a new extent at ofs,
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force, bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	if (leftmost)
		*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			pnode = &(*pnode)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			pnode = &(*pnode)->rb_right;
			if (leftmost)
				*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
  	}
  
  	*insert_p = pnode;
  	*insert_parent = parent;
  
  	re = rb_entry(parent, struct rb_entry, rb_node);
  	tmp_node = parent;
  	if (parent && ofs > re->ofs)
  		tmp_node = rb_next(parent);
  	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
  
  	tmp_node = parent;
  	if (parent && ofs < re->ofs)
  		tmp_node = rb_prev(parent);
  	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
  	return NULL;
  
  lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
  		/* lookup next node for merging frontward later */
  		tmp_node = rb_next(&re->rb_node);
  		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
  	}
  	return re;
  }
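
/*
 * Debug-only (CONFIG_F2FS_CHECK_FS) sanity check: in-order neighbours must
 * not overlap, or must have non-decreasing keys when @check_key is set.
 */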
  bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
  	struct rb_entry *cur_re, *next_re;
  
  	if (!cur)
  		return true;
  
  	while (cur) {
  		next = rb_next(cur);
  		if (!next)
  			return true;
  
  		cur_re = rb_entry(cur, struct rb_entry, rb_node);
  		next_re = rb_entry(next, struct rb_entry, rb_node);
  		if (check_key) {
  			if (cur_re->key > next_re->key) {
  				f2fs_info(sbi, "inconsistent rbtree, "
  					"cur(%llu) next(%llu)",
  					cur_re->key, next_re->key);
  				return false;
  			}
  			goto next;
  		}
		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
				  cur_re->ofs, cur_re->len,
				  next_re->ofs, next_re->len);
			return false;
		}
next:
  		cur = next;
  	}
  #endif
  	return true;
  }
  static struct kmem_cache *extent_tree_slab;
  static struct kmem_cache *extent_node_slab;
  
  static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
  				struct extent_tree *et, struct extent_info *ei,
  				struct rb_node *parent, struct rb_node **p,
  				bool leftmost)
  {
  	struct extent_node *en;
  
  	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
  	if (!en)
  		return NULL;
  
  	en->ei = *ei;
  	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
  	atomic_inc(&sbi->total_ext_node);
  	return en;
  }
  
  static void __detach_extent_node(struct f2fs_sb_info *sbi,
  				struct extent_tree *et, struct extent_node *en)
  {
	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
  	atomic_dec(&sbi->total_ext_node);
  
  	if (et->cached_en == en)
  		et->cached_en = NULL;
  	kmem_cache_free(extent_node_slab, en);
  }
  
  /*
   * Flow to release an extent_node:
   * 1. list_del_init
   * 2. __detach_extent_node
   * 3. kmem_cache_free.
   */
  static void __release_extent_node(struct f2fs_sb_info *sbi,
  			struct extent_tree *et, struct extent_node *en)
  {
  	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
  }
  
  static struct extent_tree *__grab_extent_tree(struct inode *inode)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et;
  	nid_t ino = inode->i_ino;
  	mutex_lock(&sbi->extent_tree_lock);
  	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
  	if (!et) {
  		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
  		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
  		memset(et, 0, sizeof(struct extent_tree));
  		et->ino = ino;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);
  
	/* never dies until evict_inode */
  	F2FS_I(inode)->extent_tree = et;
  
  	return et;
  }
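
/*
 * Seed an empty extent tree with the extent info read from the on-disk
 * inode, making it both the largest and the cached extent.
 */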
  static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
  				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
	if (!en)
		return NULL;

	et->largest = en->ei;
  	et->cached_en = en;
  	return en;
  }
  
  static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
  					struct extent_tree *et)
  {
  	struct rb_node *node, *next;
  	struct extent_node *en;
  	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
  	while (node) {
  		next = rb_next(node);
  		en = rb_entry(node, struct extent_node, rb_node);
  		__release_extent_node(sbi, et, en);
		node = next;
	}
	return count - atomic_read(&et->node_cnt);
  }
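
/*
 * Invalidate the cached largest extent if it overlaps [fofs, fofs + len).
 */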
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
  	if (fofs < et->largest.fofs + et->largest.len &&
  			fofs + len > et->largest.fofs) {
  		et->largest.len = 0;
  		et->largest_updated = true;
	}
}

/* Set up the inode's extent tree from the extent saved in its inode page. */
static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

  	if (!f2fs_may_extent_tree(inode)) {
  		/* drop largest extent */
  		if (i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
			return;
		}
		return;
	}

	et = __grab_extent_tree(inode);
	if (!i_ext || !i_ext->len)
		return;

	get_extent_info(&ei, i_ext);
  
  	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
  	if (en) {
  		spin_lock(&sbi->extent_lock);
  		list_add_tail(&en->list, &sbi->extent_list);
  		spin_unlock(&sbi->extent_lock);
  	}
  out:
  	write_unlock(&et->lock);
  }

void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	__f2fs_init_extent_tree(inode, ipage);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);
  }
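
/*
 * Look up @pgofs in the inode's extent cache: check the cached largest
 * extent first, then the rb-tree, updating hit statistics and the LRU list.
 */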
  static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
  							struct extent_info *ei)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
  	struct extent_node *en;
  	bool ret = false;
  
  	f2fs_bug_on(sbi, !et);
  
  	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
  
  	read_lock(&et->lock);
  
  	if (et->largest.fofs <= pgofs &&
  			et->largest.fofs + et->largest.len > pgofs) {
  		*ei = et->largest;
  		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
  				(struct rb_entry *)et->cached_en, pgofs);
  	if (!en)
  		goto out;
  
  	if (en == et->cached_en)
  		stat_inc_cached_node_hit(sbi);
  	else
  		stat_inc_rbtree_node_hit(sbi);
  
  	*ei = en->ei;
  	spin_lock(&sbi->extent_lock);
  	if (!list_empty(&en->list)) {
  		list_move_tail(&en->list, &sbi->extent_list);
  		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
  	read_unlock(&et->lock);
  
  	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
  	return ret;
  }
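
/*
 * Try to merge @ei into its neighbours: fold it into @prev_ex (back merge)
 * and/or @next_ex (front merge); return the resulting node, or NULL if
 * nothing could be merged.
 */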
  static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;
  
  	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
  		prev_ex->ei.len += ei->len;
  		ei = &prev_ex->ei;
  		en = prev_ex;
  	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);
		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);

  	return en;
  }
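
/*
 * Insert @ei as a new extent node, reusing a previously computed insert
 * position (@insert_p / @insert_parent) when it is still valid.
 */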
  static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;
  
  	if (insert_p && insert_parent) {
  		parent = insert_parent;
  		p = insert_p;
  		goto do_insert;
  	}

	leftmost = true;

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
						ei->fofs, &leftmost);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
  	return en;
  }
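
/*
 * Update the extent tree for [fofs, fofs + len): drop or split any nodes
 * that overlap the range, then insert or merge the new mapping; a zero
 * @blkaddr only invalidates the range.
 */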
  static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
  	write_lock(&et->lock);
	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
  	}
  
  	prev = et->largest;
  	dei.len = 0;
  	/*
  	 * drop largest extent before lookup, in case it's already
  	 * been shrunk from extent tree
  	 */
	__drop_largest_extent(et, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false,
					&leftmost);
	if (!en)
		en = next_en;
  
	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
  			en->ei.len = pos - en->ei.fofs;
  			prev_en = en;
  			parts = 1;
  		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
  				next_en = en1;
  			} else {
  				en->ei.fofs = end;
  				en->ei.blk += end - dei.fofs;
  				en->ei.len -= end - dei.fofs;
  				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

  			next_en = rb_entry_safe(node, struct extent_node,
  						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);
  
  		/*
		 * if the original extent is split into zero or two parts, the
		 * extent tree has been altered by deletion or insertion,
		 * therefore invalidate pointers with regard to the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

  	/* 3. update extent in extent cache */
  	if (blkaddr) {

		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);
  
  		/* give up extent_cache, if split and small updates happen */
  		if (dei.len >= 1 &&
  				prev.len < F2FS_MIN_EXTENT_LEN &&
  				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

  	if (et->largest_updated) {
  		et->largest_updated = false;
  		updated = true;
  	}
	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
  }
  
  unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
  {
	struct extent_tree *et, *next;
	struct extent_node *en;
  	unsigned int node_cnt = 0, tree_cnt = 0;
  	int remained;
  
  	if (!test_opt(sbi, EXTENT_CACHE))
  		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
  	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
  		list_del_init(&et->list);
  		radix_tree_delete(&sbi->extent_tree_root, et->ino);
  		kmem_cache_free(extent_tree_slab, et);
  		atomic_dec(&sbi->total_ext_tree);
  		atomic_dec(&sbi->total_zombie_tree);
  		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

  free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
  		goto out;
  
  	remained = nr_shrink - (node_cnt + tree_cnt);
  
  	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
  		en = list_first_entry(&sbi->extent_list,
  					struct extent_node, list);
  		et = en->et;
  		if (!write_trylock(&et->lock)) {
  			/* refresh this extent node's position in extent list */
  			list_move_tail(&en->list, &sbi->extent_list);
  			continue;
  		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

  unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
  out:
  	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
  
  	return node_cnt + tree_cnt;
  }
  
  unsigned int f2fs_destroy_extent_node(struct inode *inode)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
  	unsigned int node_cnt = 0;
  	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
  	write_unlock(&et->lock);
  
  	return node_cnt;
  }
  void f2fs_drop_extent_tree(struct inode *inode)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	bool updated = false;

	if (!f2fs_may_extent_tree(inode))
		return;

  	set_inode_flag(inode, FI_NO_EXTENT);
  
  	write_lock(&et->lock);
  	__free_extent_tree(sbi, et);
  	if (et->largest.len) {
  		et->largest.len = 0;
  		updated = true;
  	}
	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

  void f2fs_destroy_extent_tree(struct inode *inode)
  {
  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  	struct extent_tree *et = F2FS_I(inode)->extent_tree;
  	unsigned int node_cnt = 0;
  
  	if (!et)
  		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
  		return;
  	}
  
  	/* free all extent info belong to this extent tree */
  	node_cnt = f2fs_destroy_extent_node(inode);
  
  	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);
  
  	F2FS_I(inode)->extent_tree = NULL;
  
  	trace_f2fs_destroy_extent_tree(inode, node_cnt);
  }
  
  bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
  					struct extent_info *ei)
  {
  	if (!f2fs_may_extent_tree(inode))
  		return false;
  
  	return f2fs_lookup_extent_tree(inode, pgofs, ei);
  }
  
  void f2fs_update_extent_cache(struct dnode_of_data *dn)
  {
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
  }
  
  void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
  	if (!f2fs_may_extent_tree(dn->inode))
  		return;
  	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
  	atomic_set(&sbi->total_ext_node, 0);
  }
  int __init f2fs_create_extent_cache(void)
  {
  	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
  			sizeof(struct extent_tree));
  	if (!extent_tree_slab)
  		return -ENOMEM;
  	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
  			sizeof(struct extent_node));
  	if (!extent_node_slab) {
  		kmem_cache_destroy(extent_tree_slab);
  		return -ENOMEM;
  	}
  	return 0;
  }
  void f2fs_destroy_extent_cache(void)
  {
  	kmem_cache_destroy(extent_node_slab);
  	kmem_cache_destroy(extent_tree_slab);
  }