Commit 6cb968d9b0358c7e807416a85699a526e820083c
1 parent: 2af4bd6ca5
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
f2fs: avoid frequent background GC

If there are no victim segments selected by background GC, let's wait a little
bit longer to collect dirty segments. By default, let's give 5 minutes.

Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Showing 2 changed files with 9 additions and 6 deletions
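Taken together, the two hunks below park the GC thread at GC_THREAD_NOGC_SLEEP_TIME (5 minutes) whenever f2fs_gc() finds no victim, keep it parked in increase_sleep_time(), and drop it back to GC_THREAD_MAX_SLEEP_TIME in decrease_sleep_time() once dirty segments accumulate again. The following stand-alone sketch is a user-space simulation only, not part of the patch: the constants and helper bodies mirror fs/f2fs/gc.h after the change, while main() and its printf are illustrative assumptions showing how wait_ms evolves across a few iterations of gc_thread_func().

/*
 * Minimal user-space sketch of the sleep-time policy after this patch.
 * Constants and helpers mirror fs/f2fs/gc.h; main() is illustrative only.
 */
#include <stdio.h>

#define GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
#define GC_THREAD_MAX_SLEEP_TIME	60000
#define GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */

static long increase_sleep_time(long wait)
{
	if (wait == GC_THREAD_NOGC_SLEEP_TIME)
		return wait;			/* stay parked at 5 minutes */

	wait += GC_THREAD_MIN_SLEEP_TIME;
	if (wait > GC_THREAD_MAX_SLEEP_TIME)
		wait = GC_THREAD_MAX_SLEEP_TIME;
	return wait;
}

static long decrease_sleep_time(long wait)
{
	if (wait == GC_THREAD_NOGC_SLEEP_TIME)
		wait = GC_THREAD_MAX_SLEEP_TIME;	/* leave the parked state */

	wait -= GC_THREAD_MIN_SLEEP_TIME;
	if (wait <= GC_THREAD_MIN_SLEEP_TIME)
		wait = GC_THREAD_MIN_SLEEP_TIME;
	return wait;
}

int main(void)
{
	long wait_ms = GC_THREAD_MIN_SLEEP_TIME;	/* 30000, as in gc_thread_func() */

	wait_ms = increase_sleep_time(wait_ms);	/* I/O busy             -> 60000  */
	wait_ms = GC_THREAD_NOGC_SLEEP_TIME;	/* f2fs_gc(): no victim -> 300000 */
	wait_ms = increase_sleep_time(wait_ms);	/* still parked         -> 300000 */
	wait_ms = decrease_sleep_time(wait_ms);	/* dirty again          -> 30000  */

	printf("final wait_ms = %ld\n", wait_ms);
	return 0;
}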
fs/f2fs/gc.c
1 | /* | 1 | /* |
2 | * fs/f2fs/gc.c | 2 | * fs/f2fs/gc.c |
3 | * | 3 | * |
4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | 4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. |
5 | * http://www.samsung.com/ | 5 | * http://www.samsung.com/ |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/fs.h> | 11 | #include <linux/fs.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/backing-dev.h> | 13 | #include <linux/backing-dev.h> |
14 | #include <linux/proc_fs.h> | 14 | #include <linux/proc_fs.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/f2fs_fs.h> | 16 | #include <linux/f2fs_fs.h> |
17 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/freezer.h> | 19 | #include <linux/freezer.h> |
20 | #include <linux/blkdev.h> | 20 | #include <linux/blkdev.h> |
21 | 21 | ||
22 | #include "f2fs.h" | 22 | #include "f2fs.h" |
23 | #include "node.h" | 23 | #include "node.h" |
24 | #include "segment.h" | 24 | #include "segment.h" |
25 | #include "gc.h" | 25 | #include "gc.h" |
26 | #include <trace/events/f2fs.h> | 26 | #include <trace/events/f2fs.h> |
27 | 27 | ||
28 | static struct kmem_cache *winode_slab; | 28 | static struct kmem_cache *winode_slab; |
29 | 29 | ||
30 | static int gc_thread_func(void *data) | 30 | static int gc_thread_func(void *data) |
31 | { | 31 | { |
32 | struct f2fs_sb_info *sbi = data; | 32 | struct f2fs_sb_info *sbi = data; |
33 | wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; | 33 | wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; |
34 | long wait_ms; | 34 | long wait_ms; |
35 | 35 | ||
36 | wait_ms = GC_THREAD_MIN_SLEEP_TIME; | 36 | wait_ms = GC_THREAD_MIN_SLEEP_TIME; |
37 | 37 | ||
38 | do { | 38 | do { |
39 | if (try_to_freeze()) | 39 | if (try_to_freeze()) |
40 | continue; | 40 | continue; |
41 | else | 41 | else |
42 | wait_event_interruptible_timeout(*wq, | 42 | wait_event_interruptible_timeout(*wq, |
43 | kthread_should_stop(), | 43 | kthread_should_stop(), |
44 | msecs_to_jiffies(wait_ms)); | 44 | msecs_to_jiffies(wait_ms)); |
45 | if (kthread_should_stop()) | 45 | if (kthread_should_stop()) |
46 | break; | 46 | break; |
47 | 47 | ||
48 | if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { | 48 | if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { |
49 | wait_ms = GC_THREAD_MAX_SLEEP_TIME; | 49 | wait_ms = GC_THREAD_MAX_SLEEP_TIME; |
50 | continue; | 50 | continue; |
51 | } | 51 | } |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * [GC triggering condition] | 54 | * [GC triggering condition] |
55 | * 0. GC is not conducted currently. | 55 | * 0. GC is not conducted currently. |
56 | * 1. There are enough dirty segments. | 56 | * 1. There are enough dirty segments. |
57 | * 2. IO subsystem is idle by checking the # of writeback pages. | 57 | * 2. IO subsystem is idle by checking the # of writeback pages. |
58 | * 3. IO subsystem is idle by checking the # of requests in | 58 | * 3. IO subsystem is idle by checking the # of requests in |
59 | * bdev's request list. | 59 | * bdev's request list. |
60 | * | 60 | * |
61 | * Note) We have to avoid triggering GCs too much frequently. | 61 | * Note) We have to avoid triggering GCs too much frequently. |
62 | * Because it is possible that some segments can be | 62 | * Because it is possible that some segments can be |
63 | * invalidated soon after by user update or deletion. | 63 | * invalidated soon after by user update or deletion. |
64 | * So, I'd like to wait some time to collect dirty segments. | 64 | * So, I'd like to wait some time to collect dirty segments. |
65 | */ | 65 | */ |
66 | if (!mutex_trylock(&sbi->gc_mutex)) | 66 | if (!mutex_trylock(&sbi->gc_mutex)) |
67 | continue; | 67 | continue; |
68 | 68 | ||
69 | if (!is_idle(sbi)) { | 69 | if (!is_idle(sbi)) { |
70 | wait_ms = increase_sleep_time(wait_ms); | 70 | wait_ms = increase_sleep_time(wait_ms); |
71 | mutex_unlock(&sbi->gc_mutex); | 71 | mutex_unlock(&sbi->gc_mutex); |
72 | continue; | 72 | continue; |
73 | } | 73 | } |
74 | 74 | ||
75 | if (has_enough_invalid_blocks(sbi)) | 75 | if (has_enough_invalid_blocks(sbi)) |
76 | wait_ms = decrease_sleep_time(wait_ms); | 76 | wait_ms = decrease_sleep_time(wait_ms); |
77 | else | 77 | else |
78 | wait_ms = increase_sleep_time(wait_ms); | 78 | wait_ms = increase_sleep_time(wait_ms); |
79 | 79 | ||
80 | sbi->bg_gc++; | 80 | sbi->bg_gc++; |
81 | 81 | ||
		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
-		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
-			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
-
	} while (!kthread_should_stop());
89 | return 0; | 86 | return 0; |
90 | } | 87 | } |
91 | 88 | ||
92 | int start_gc_thread(struct f2fs_sb_info *sbi) | 89 | int start_gc_thread(struct f2fs_sb_info *sbi) |
93 | { | 90 | { |
94 | struct f2fs_gc_kthread *gc_th; | 91 | struct f2fs_gc_kthread *gc_th; |
95 | dev_t dev = sbi->sb->s_bdev->bd_dev; | 92 | dev_t dev = sbi->sb->s_bdev->bd_dev; |
96 | 93 | ||
97 | if (!test_opt(sbi, BG_GC)) | 94 | if (!test_opt(sbi, BG_GC)) |
98 | return 0; | 95 | return 0; |
99 | gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); | 96 | gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); |
100 | if (!gc_th) | 97 | if (!gc_th) |
101 | return -ENOMEM; | 98 | return -ENOMEM; |
102 | 99 | ||
103 | sbi->gc_thread = gc_th; | 100 | sbi->gc_thread = gc_th; |
104 | init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); | 101 | init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); |
105 | sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, | 102 | sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, |
106 | "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); | 103 | "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); |
107 | if (IS_ERR(gc_th->f2fs_gc_task)) { | 104 | if (IS_ERR(gc_th->f2fs_gc_task)) { |
108 | kfree(gc_th); | 105 | kfree(gc_th); |
109 | sbi->gc_thread = NULL; | 106 | sbi->gc_thread = NULL; |
110 | return -ENOMEM; | 107 | return -ENOMEM; |
111 | } | 108 | } |
112 | return 0; | 109 | return 0; |
113 | } | 110 | } |
114 | 111 | ||
115 | void stop_gc_thread(struct f2fs_sb_info *sbi) | 112 | void stop_gc_thread(struct f2fs_sb_info *sbi) |
116 | { | 113 | { |
117 | struct f2fs_gc_kthread *gc_th = sbi->gc_thread; | 114 | struct f2fs_gc_kthread *gc_th = sbi->gc_thread; |
118 | if (!gc_th) | 115 | if (!gc_th) |
119 | return; | 116 | return; |
120 | kthread_stop(gc_th->f2fs_gc_task); | 117 | kthread_stop(gc_th->f2fs_gc_task); |
121 | kfree(gc_th); | 118 | kfree(gc_th); |
122 | sbi->gc_thread = NULL; | 119 | sbi->gc_thread = NULL; |
123 | } | 120 | } |
124 | 121 | ||
125 | static int select_gc_type(int gc_type) | 122 | static int select_gc_type(int gc_type) |
126 | { | 123 | { |
127 | return (gc_type == BG_GC) ? GC_CB : GC_GREEDY; | 124 | return (gc_type == BG_GC) ? GC_CB : GC_GREEDY; |
128 | } | 125 | } |
129 | 126 | ||
130 | static void select_policy(struct f2fs_sb_info *sbi, int gc_type, | 127 | static void select_policy(struct f2fs_sb_info *sbi, int gc_type, |
131 | int type, struct victim_sel_policy *p) | 128 | int type, struct victim_sel_policy *p) |
132 | { | 129 | { |
133 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | 130 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); |
134 | 131 | ||
135 | if (p->alloc_mode == SSR) { | 132 | if (p->alloc_mode == SSR) { |
136 | p->gc_mode = GC_GREEDY; | 133 | p->gc_mode = GC_GREEDY; |
137 | p->dirty_segmap = dirty_i->dirty_segmap[type]; | 134 | p->dirty_segmap = dirty_i->dirty_segmap[type]; |
138 | p->ofs_unit = 1; | 135 | p->ofs_unit = 1; |
139 | } else { | 136 | } else { |
140 | p->gc_mode = select_gc_type(gc_type); | 137 | p->gc_mode = select_gc_type(gc_type); |
141 | p->dirty_segmap = dirty_i->dirty_segmap[DIRTY]; | 138 | p->dirty_segmap = dirty_i->dirty_segmap[DIRTY]; |
142 | p->ofs_unit = sbi->segs_per_sec; | 139 | p->ofs_unit = sbi->segs_per_sec; |
143 | } | 140 | } |
144 | p->offset = sbi->last_victim[p->gc_mode]; | 141 | p->offset = sbi->last_victim[p->gc_mode]; |
145 | } | 142 | } |
146 | 143 | ||
147 | static unsigned int get_max_cost(struct f2fs_sb_info *sbi, | 144 | static unsigned int get_max_cost(struct f2fs_sb_info *sbi, |
148 | struct victim_sel_policy *p) | 145 | struct victim_sel_policy *p) |
149 | { | 146 | { |
150 | /* SSR allocates in a segment unit */ | 147 | /* SSR allocates in a segment unit */ |
151 | if (p->alloc_mode == SSR) | 148 | if (p->alloc_mode == SSR) |
152 | return 1 << sbi->log_blocks_per_seg; | 149 | return 1 << sbi->log_blocks_per_seg; |
153 | if (p->gc_mode == GC_GREEDY) | 150 | if (p->gc_mode == GC_GREEDY) |
154 | return (1 << sbi->log_blocks_per_seg) * p->ofs_unit; | 151 | return (1 << sbi->log_blocks_per_seg) * p->ofs_unit; |
155 | else if (p->gc_mode == GC_CB) | 152 | else if (p->gc_mode == GC_CB) |
156 | return UINT_MAX; | 153 | return UINT_MAX; |
157 | else /* No other gc_mode */ | 154 | else /* No other gc_mode */ |
158 | return 0; | 155 | return 0; |
159 | } | 156 | } |
160 | 157 | ||
161 | static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) | 158 | static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) |
162 | { | 159 | { |
163 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | 160 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); |
164 | unsigned int hint = 0; | 161 | unsigned int hint = 0; |
165 | unsigned int secno; | 162 | unsigned int secno; |
166 | 163 | ||
167 | /* | 164 | /* |
168 | * If the gc_type is FG_GC, we can select victim segments | 165 | * If the gc_type is FG_GC, we can select victim segments |
169 | * selected by background GC before. | 166 | * selected by background GC before. |
170 | * Those segments guarantee they have small valid blocks. | 167 | * Those segments guarantee they have small valid blocks. |
171 | */ | 168 | */ |
172 | next: | 169 | next: |
173 | secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++); | 170 | secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++); |
174 | if (secno < TOTAL_SECS(sbi)) { | 171 | if (secno < TOTAL_SECS(sbi)) { |
175 | if (sec_usage_check(sbi, secno)) | 172 | if (sec_usage_check(sbi, secno)) |
176 | goto next; | 173 | goto next; |
177 | clear_bit(secno, dirty_i->victim_secmap); | 174 | clear_bit(secno, dirty_i->victim_secmap); |
178 | return secno * sbi->segs_per_sec; | 175 | return secno * sbi->segs_per_sec; |
179 | } | 176 | } |
180 | return NULL_SEGNO; | 177 | return NULL_SEGNO; |
181 | } | 178 | } |
182 | 179 | ||
183 | static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) | 180 | static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) |
184 | { | 181 | { |
185 | struct sit_info *sit_i = SIT_I(sbi); | 182 | struct sit_info *sit_i = SIT_I(sbi); |
186 | unsigned int secno = GET_SECNO(sbi, segno); | 183 | unsigned int secno = GET_SECNO(sbi, segno); |
187 | unsigned int start = secno * sbi->segs_per_sec; | 184 | unsigned int start = secno * sbi->segs_per_sec; |
188 | unsigned long long mtime = 0; | 185 | unsigned long long mtime = 0; |
189 | unsigned int vblocks; | 186 | unsigned int vblocks; |
190 | unsigned char age = 0; | 187 | unsigned char age = 0; |
191 | unsigned char u; | 188 | unsigned char u; |
192 | unsigned int i; | 189 | unsigned int i; |
193 | 190 | ||
194 | for (i = 0; i < sbi->segs_per_sec; i++) | 191 | for (i = 0; i < sbi->segs_per_sec; i++) |
195 | mtime += get_seg_entry(sbi, start + i)->mtime; | 192 | mtime += get_seg_entry(sbi, start + i)->mtime; |
196 | vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec); | 193 | vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec); |
197 | 194 | ||
198 | mtime = div_u64(mtime, sbi->segs_per_sec); | 195 | mtime = div_u64(mtime, sbi->segs_per_sec); |
199 | vblocks = div_u64(vblocks, sbi->segs_per_sec); | 196 | vblocks = div_u64(vblocks, sbi->segs_per_sec); |
200 | 197 | ||
201 | u = (vblocks * 100) >> sbi->log_blocks_per_seg; | 198 | u = (vblocks * 100) >> sbi->log_blocks_per_seg; |
202 | 199 | ||
203 | /* Handle if the system time is changed by user */ | 200 | /* Handle if the system time is changed by user */ |
204 | if (mtime < sit_i->min_mtime) | 201 | if (mtime < sit_i->min_mtime) |
205 | sit_i->min_mtime = mtime; | 202 | sit_i->min_mtime = mtime; |
206 | if (mtime > sit_i->max_mtime) | 203 | if (mtime > sit_i->max_mtime) |
207 | sit_i->max_mtime = mtime; | 204 | sit_i->max_mtime = mtime; |
208 | if (sit_i->max_mtime != sit_i->min_mtime) | 205 | if (sit_i->max_mtime != sit_i->min_mtime) |
209 | age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime), | 206 | age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime), |
210 | sit_i->max_mtime - sit_i->min_mtime); | 207 | sit_i->max_mtime - sit_i->min_mtime); |
211 | 208 | ||
212 | return UINT_MAX - ((100 * (100 - u) * age) / (100 + u)); | 209 | return UINT_MAX - ((100 * (100 - u) * age) / (100 + u)); |
213 | } | 210 | } |
214 | 211 | ||
215 | static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno, | 212 | static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno, |
216 | struct victim_sel_policy *p) | 213 | struct victim_sel_policy *p) |
217 | { | 214 | { |
218 | if (p->alloc_mode == SSR) | 215 | if (p->alloc_mode == SSR) |
219 | return get_seg_entry(sbi, segno)->ckpt_valid_blocks; | 216 | return get_seg_entry(sbi, segno)->ckpt_valid_blocks; |
220 | 217 | ||
221 | /* alloc_mode == LFS */ | 218 | /* alloc_mode == LFS */ |
222 | if (p->gc_mode == GC_GREEDY) | 219 | if (p->gc_mode == GC_GREEDY) |
223 | return get_valid_blocks(sbi, segno, sbi->segs_per_sec); | 220 | return get_valid_blocks(sbi, segno, sbi->segs_per_sec); |
224 | else | 221 | else |
225 | return get_cb_cost(sbi, segno); | 222 | return get_cb_cost(sbi, segno); |
226 | } | 223 | } |
227 | 224 | ||
228 | /* | 225 | /* |
229 | * This function is called from two paths. | 226 | * This function is called from two paths. |
230 | * One is garbage collection and the other is SSR segment selection. | 227 | * One is garbage collection and the other is SSR segment selection. |
231 | * When it is called during GC, it just gets a victim segment | 228 | * When it is called during GC, it just gets a victim segment |
232 | * and it does not remove it from dirty seglist. | 229 | * and it does not remove it from dirty seglist. |
233 | * When it is called from SSR segment selection, it finds a segment | 230 | * When it is called from SSR segment selection, it finds a segment |
234 | * which has minimum valid blocks and removes it from dirty seglist. | 231 | * which has minimum valid blocks and removes it from dirty seglist. |
235 | */ | 232 | */ |
236 | static int get_victim_by_default(struct f2fs_sb_info *sbi, | 233 | static int get_victim_by_default(struct f2fs_sb_info *sbi, |
237 | unsigned int *result, int gc_type, int type, char alloc_mode) | 234 | unsigned int *result, int gc_type, int type, char alloc_mode) |
238 | { | 235 | { |
239 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | 236 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); |
240 | struct victim_sel_policy p; | 237 | struct victim_sel_policy p; |
241 | unsigned int secno; | 238 | unsigned int secno; |
242 | int nsearched = 0; | 239 | int nsearched = 0; |
243 | 240 | ||
244 | p.alloc_mode = alloc_mode; | 241 | p.alloc_mode = alloc_mode; |
245 | select_policy(sbi, gc_type, type, &p); | 242 | select_policy(sbi, gc_type, type, &p); |
246 | 243 | ||
247 | p.min_segno = NULL_SEGNO; | 244 | p.min_segno = NULL_SEGNO; |
248 | p.min_cost = get_max_cost(sbi, &p); | 245 | p.min_cost = get_max_cost(sbi, &p); |
249 | 246 | ||
250 | mutex_lock(&dirty_i->seglist_lock); | 247 | mutex_lock(&dirty_i->seglist_lock); |
251 | 248 | ||
252 | if (p.alloc_mode == LFS && gc_type == FG_GC) { | 249 | if (p.alloc_mode == LFS && gc_type == FG_GC) { |
253 | p.min_segno = check_bg_victims(sbi); | 250 | p.min_segno = check_bg_victims(sbi); |
254 | if (p.min_segno != NULL_SEGNO) | 251 | if (p.min_segno != NULL_SEGNO) |
255 | goto got_it; | 252 | goto got_it; |
256 | } | 253 | } |
257 | 254 | ||
258 | while (1) { | 255 | while (1) { |
259 | unsigned long cost; | 256 | unsigned long cost; |
260 | unsigned int segno; | 257 | unsigned int segno; |
261 | 258 | ||
262 | segno = find_next_bit(p.dirty_segmap, | 259 | segno = find_next_bit(p.dirty_segmap, |
263 | TOTAL_SEGS(sbi), p.offset); | 260 | TOTAL_SEGS(sbi), p.offset); |
264 | if (segno >= TOTAL_SEGS(sbi)) { | 261 | if (segno >= TOTAL_SEGS(sbi)) { |
265 | if (sbi->last_victim[p.gc_mode]) { | 262 | if (sbi->last_victim[p.gc_mode]) { |
266 | sbi->last_victim[p.gc_mode] = 0; | 263 | sbi->last_victim[p.gc_mode] = 0; |
267 | p.offset = 0; | 264 | p.offset = 0; |
268 | continue; | 265 | continue; |
269 | } | 266 | } |
270 | break; | 267 | break; |
271 | } | 268 | } |
272 | p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit; | 269 | p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit; |
273 | secno = GET_SECNO(sbi, segno); | 270 | secno = GET_SECNO(sbi, segno); |
274 | 271 | ||
275 | if (sec_usage_check(sbi, secno)) | 272 | if (sec_usage_check(sbi, secno)) |
276 | continue; | 273 | continue; |
277 | if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) | 274 | if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) |
278 | continue; | 275 | continue; |
279 | 276 | ||
280 | cost = get_gc_cost(sbi, segno, &p); | 277 | cost = get_gc_cost(sbi, segno, &p); |
281 | 278 | ||
282 | if (p.min_cost > cost) { | 279 | if (p.min_cost > cost) { |
283 | p.min_segno = segno; | 280 | p.min_segno = segno; |
284 | p.min_cost = cost; | 281 | p.min_cost = cost; |
285 | } | 282 | } |
286 | 283 | ||
287 | if (cost == get_max_cost(sbi, &p)) | 284 | if (cost == get_max_cost(sbi, &p)) |
288 | continue; | 285 | continue; |
289 | 286 | ||
290 | if (nsearched++ >= MAX_VICTIM_SEARCH) { | 287 | if (nsearched++ >= MAX_VICTIM_SEARCH) { |
291 | sbi->last_victim[p.gc_mode] = segno; | 288 | sbi->last_victim[p.gc_mode] = segno; |
292 | break; | 289 | break; |
293 | } | 290 | } |
294 | } | 291 | } |
295 | got_it: | 292 | got_it: |
296 | if (p.min_segno != NULL_SEGNO) { | 293 | if (p.min_segno != NULL_SEGNO) { |
297 | if (p.alloc_mode == LFS) { | 294 | if (p.alloc_mode == LFS) { |
298 | secno = GET_SECNO(sbi, p.min_segno); | 295 | secno = GET_SECNO(sbi, p.min_segno); |
299 | if (gc_type == FG_GC) | 296 | if (gc_type == FG_GC) |
300 | sbi->cur_victim_sec = secno; | 297 | sbi->cur_victim_sec = secno; |
301 | else | 298 | else |
302 | set_bit(secno, dirty_i->victim_secmap); | 299 | set_bit(secno, dirty_i->victim_secmap); |
303 | } | 300 | } |
304 | *result = (p.min_segno / p.ofs_unit) * p.ofs_unit; | 301 | *result = (p.min_segno / p.ofs_unit) * p.ofs_unit; |
305 | 302 | ||
306 | trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, | 303 | trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, |
307 | sbi->cur_victim_sec, | 304 | sbi->cur_victim_sec, |
308 | prefree_segments(sbi), free_segments(sbi)); | 305 | prefree_segments(sbi), free_segments(sbi)); |
309 | } | 306 | } |
310 | mutex_unlock(&dirty_i->seglist_lock); | 307 | mutex_unlock(&dirty_i->seglist_lock); |
311 | 308 | ||
312 | return (p.min_segno == NULL_SEGNO) ? 0 : 1; | 309 | return (p.min_segno == NULL_SEGNO) ? 0 : 1; |
313 | } | 310 | } |
314 | 311 | ||
315 | static const struct victim_selection default_v_ops = { | 312 | static const struct victim_selection default_v_ops = { |
316 | .get_victim = get_victim_by_default, | 313 | .get_victim = get_victim_by_default, |
317 | }; | 314 | }; |
318 | 315 | ||
319 | static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist) | 316 | static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist) |
320 | { | 317 | { |
321 | struct list_head *this; | 318 | struct list_head *this; |
322 | struct inode_entry *ie; | 319 | struct inode_entry *ie; |
323 | 320 | ||
324 | list_for_each(this, ilist) { | 321 | list_for_each(this, ilist) { |
325 | ie = list_entry(this, struct inode_entry, list); | 322 | ie = list_entry(this, struct inode_entry, list); |
326 | if (ie->inode->i_ino == ino) | 323 | if (ie->inode->i_ino == ino) |
327 | return ie->inode; | 324 | return ie->inode; |
328 | } | 325 | } |
329 | return NULL; | 326 | return NULL; |
330 | } | 327 | } |
331 | 328 | ||
332 | static void add_gc_inode(struct inode *inode, struct list_head *ilist) | 329 | static void add_gc_inode(struct inode *inode, struct list_head *ilist) |
333 | { | 330 | { |
334 | struct list_head *this; | 331 | struct list_head *this; |
335 | struct inode_entry *new_ie, *ie; | 332 | struct inode_entry *new_ie, *ie; |
336 | 333 | ||
337 | list_for_each(this, ilist) { | 334 | list_for_each(this, ilist) { |
338 | ie = list_entry(this, struct inode_entry, list); | 335 | ie = list_entry(this, struct inode_entry, list); |
339 | if (ie->inode == inode) { | 336 | if (ie->inode == inode) { |
340 | iput(inode); | 337 | iput(inode); |
341 | return; | 338 | return; |
342 | } | 339 | } |
343 | } | 340 | } |
344 | repeat: | 341 | repeat: |
345 | new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS); | 342 | new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS); |
346 | if (!new_ie) { | 343 | if (!new_ie) { |
347 | cond_resched(); | 344 | cond_resched(); |
348 | goto repeat; | 345 | goto repeat; |
349 | } | 346 | } |
350 | new_ie->inode = inode; | 347 | new_ie->inode = inode; |
351 | list_add_tail(&new_ie->list, ilist); | 348 | list_add_tail(&new_ie->list, ilist); |
352 | } | 349 | } |
353 | 350 | ||
354 | static void put_gc_inode(struct list_head *ilist) | 351 | static void put_gc_inode(struct list_head *ilist) |
355 | { | 352 | { |
356 | struct inode_entry *ie, *next_ie; | 353 | struct inode_entry *ie, *next_ie; |
357 | list_for_each_entry_safe(ie, next_ie, ilist, list) { | 354 | list_for_each_entry_safe(ie, next_ie, ilist, list) { |
358 | iput(ie->inode); | 355 | iput(ie->inode); |
359 | list_del(&ie->list); | 356 | list_del(&ie->list); |
360 | kmem_cache_free(winode_slab, ie); | 357 | kmem_cache_free(winode_slab, ie); |
361 | } | 358 | } |
362 | } | 359 | } |
363 | 360 | ||
364 | static int check_valid_map(struct f2fs_sb_info *sbi, | 361 | static int check_valid_map(struct f2fs_sb_info *sbi, |
365 | unsigned int segno, int offset) | 362 | unsigned int segno, int offset) |
366 | { | 363 | { |
367 | struct sit_info *sit_i = SIT_I(sbi); | 364 | struct sit_info *sit_i = SIT_I(sbi); |
368 | struct seg_entry *sentry; | 365 | struct seg_entry *sentry; |
369 | int ret; | 366 | int ret; |
370 | 367 | ||
371 | mutex_lock(&sit_i->sentry_lock); | 368 | mutex_lock(&sit_i->sentry_lock); |
372 | sentry = get_seg_entry(sbi, segno); | 369 | sentry = get_seg_entry(sbi, segno); |
373 | ret = f2fs_test_bit(offset, sentry->cur_valid_map); | 370 | ret = f2fs_test_bit(offset, sentry->cur_valid_map); |
374 | mutex_unlock(&sit_i->sentry_lock); | 371 | mutex_unlock(&sit_i->sentry_lock); |
375 | return ret; | 372 | return ret; |
376 | } | 373 | } |
377 | 374 | ||
378 | /* | 375 | /* |
379 | * This function compares node address got in summary with that in NAT. | 376 | * This function compares node address got in summary with that in NAT. |
380 | * On validity, copy that node with cold status, otherwise (invalid node) | 377 | * On validity, copy that node with cold status, otherwise (invalid node) |
381 | * ignore that. | 378 | * ignore that. |
382 | */ | 379 | */ |
383 | static void gc_node_segment(struct f2fs_sb_info *sbi, | 380 | static void gc_node_segment(struct f2fs_sb_info *sbi, |
384 | struct f2fs_summary *sum, unsigned int segno, int gc_type) | 381 | struct f2fs_summary *sum, unsigned int segno, int gc_type) |
385 | { | 382 | { |
386 | bool initial = true; | 383 | bool initial = true; |
387 | struct f2fs_summary *entry; | 384 | struct f2fs_summary *entry; |
388 | int off; | 385 | int off; |
389 | 386 | ||
390 | next_step: | 387 | next_step: |
391 | entry = sum; | 388 | entry = sum; |
392 | for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { | 389 | for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { |
393 | nid_t nid = le32_to_cpu(entry->nid); | 390 | nid_t nid = le32_to_cpu(entry->nid); |
394 | struct page *node_page; | 391 | struct page *node_page; |
395 | 392 | ||
396 | /* stop BG_GC if there is not enough free sections. */ | 393 | /* stop BG_GC if there is not enough free sections. */ |
397 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) | 394 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) |
398 | return; | 395 | return; |
399 | 396 | ||
400 | if (check_valid_map(sbi, segno, off) == 0) | 397 | if (check_valid_map(sbi, segno, off) == 0) |
401 | continue; | 398 | continue; |
402 | 399 | ||
403 | if (initial) { | 400 | if (initial) { |
404 | ra_node_page(sbi, nid); | 401 | ra_node_page(sbi, nid); |
405 | continue; | 402 | continue; |
406 | } | 403 | } |
407 | node_page = get_node_page(sbi, nid); | 404 | node_page = get_node_page(sbi, nid); |
408 | if (IS_ERR(node_page)) | 405 | if (IS_ERR(node_page)) |
409 | continue; | 406 | continue; |
410 | 407 | ||
411 | /* set page dirty and write it */ | 408 | /* set page dirty and write it */ |
412 | if (gc_type == FG_GC) { | 409 | if (gc_type == FG_GC) { |
413 | f2fs_submit_bio(sbi, NODE, true); | 410 | f2fs_submit_bio(sbi, NODE, true); |
414 | wait_on_page_writeback(node_page); | 411 | wait_on_page_writeback(node_page); |
415 | set_page_dirty(node_page); | 412 | set_page_dirty(node_page); |
416 | } else { | 413 | } else { |
417 | if (!PageWriteback(node_page)) | 414 | if (!PageWriteback(node_page)) |
418 | set_page_dirty(node_page); | 415 | set_page_dirty(node_page); |
419 | } | 416 | } |
420 | f2fs_put_page(node_page, 1); | 417 | f2fs_put_page(node_page, 1); |
421 | stat_inc_node_blk_count(sbi, 1); | 418 | stat_inc_node_blk_count(sbi, 1); |
422 | } | 419 | } |
423 | if (initial) { | 420 | if (initial) { |
424 | initial = false; | 421 | initial = false; |
425 | goto next_step; | 422 | goto next_step; |
426 | } | 423 | } |
427 | 424 | ||
428 | if (gc_type == FG_GC) { | 425 | if (gc_type == FG_GC) { |
429 | struct writeback_control wbc = { | 426 | struct writeback_control wbc = { |
430 | .sync_mode = WB_SYNC_ALL, | 427 | .sync_mode = WB_SYNC_ALL, |
431 | .nr_to_write = LONG_MAX, | 428 | .nr_to_write = LONG_MAX, |
432 | .for_reclaim = 0, | 429 | .for_reclaim = 0, |
433 | }; | 430 | }; |
434 | sync_node_pages(sbi, 0, &wbc); | 431 | sync_node_pages(sbi, 0, &wbc); |
435 | 432 | ||
436 | /* | 433 | /* |
437 | * In the case of FG_GC, it'd be better to reclaim this victim | 434 | * In the case of FG_GC, it'd be better to reclaim this victim |
438 | * completely. | 435 | * completely. |
439 | */ | 436 | */ |
440 | if (get_valid_blocks(sbi, segno, 1) != 0) | 437 | if (get_valid_blocks(sbi, segno, 1) != 0) |
441 | goto next_step; | 438 | goto next_step; |
442 | } | 439 | } |
443 | } | 440 | } |
444 | 441 | ||
445 | /* | 442 | /* |
446 | * Calculate start block index indicating the given node offset. | 443 | * Calculate start block index indicating the given node offset. |
447 | * Be careful, caller should give this node offset only indicating direct node | 444 | * Be careful, caller should give this node offset only indicating direct node |
448 | * blocks. If any node offsets, which point the other types of node blocks such | 445 | * blocks. If any node offsets, which point the other types of node blocks such |
449 | * as indirect or double indirect node blocks, are given, it must be a caller's | 446 | * as indirect or double indirect node blocks, are given, it must be a caller's |
450 | * bug. | 447 | * bug. |
451 | */ | 448 | */ |
452 | block_t start_bidx_of_node(unsigned int node_ofs) | 449 | block_t start_bidx_of_node(unsigned int node_ofs) |
453 | { | 450 | { |
454 | unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4; | 451 | unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4; |
455 | unsigned int bidx; | 452 | unsigned int bidx; |
456 | 453 | ||
457 | if (node_ofs == 0) | 454 | if (node_ofs == 0) |
458 | return 0; | 455 | return 0; |
459 | 456 | ||
460 | if (node_ofs <= 2) { | 457 | if (node_ofs <= 2) { |
461 | bidx = node_ofs - 1; | 458 | bidx = node_ofs - 1; |
462 | } else if (node_ofs <= indirect_blks) { | 459 | } else if (node_ofs <= indirect_blks) { |
463 | int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1); | 460 | int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1); |
464 | bidx = node_ofs - 2 - dec; | 461 | bidx = node_ofs - 2 - dec; |
465 | } else { | 462 | } else { |
466 | int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1); | 463 | int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1); |
467 | bidx = node_ofs - 5 - dec; | 464 | bidx = node_ofs - 5 - dec; |
468 | } | 465 | } |
469 | return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE; | 466 | return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE; |
470 | } | 467 | } |
471 | 468 | ||
472 | static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, | 469 | static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, |
473 | struct node_info *dni, block_t blkaddr, unsigned int *nofs) | 470 | struct node_info *dni, block_t blkaddr, unsigned int *nofs) |
474 | { | 471 | { |
475 | struct page *node_page; | 472 | struct page *node_page; |
476 | nid_t nid; | 473 | nid_t nid; |
477 | unsigned int ofs_in_node; | 474 | unsigned int ofs_in_node; |
478 | block_t source_blkaddr; | 475 | block_t source_blkaddr; |
479 | 476 | ||
480 | nid = le32_to_cpu(sum->nid); | 477 | nid = le32_to_cpu(sum->nid); |
481 | ofs_in_node = le16_to_cpu(sum->ofs_in_node); | 478 | ofs_in_node = le16_to_cpu(sum->ofs_in_node); |
482 | 479 | ||
483 | node_page = get_node_page(sbi, nid); | 480 | node_page = get_node_page(sbi, nid); |
484 | if (IS_ERR(node_page)) | 481 | if (IS_ERR(node_page)) |
485 | return 0; | 482 | return 0; |
486 | 483 | ||
487 | get_node_info(sbi, nid, dni); | 484 | get_node_info(sbi, nid, dni); |
488 | 485 | ||
489 | if (sum->version != dni->version) { | 486 | if (sum->version != dni->version) { |
490 | f2fs_put_page(node_page, 1); | 487 | f2fs_put_page(node_page, 1); |
491 | return 0; | 488 | return 0; |
492 | } | 489 | } |
493 | 490 | ||
494 | *nofs = ofs_of_node(node_page); | 491 | *nofs = ofs_of_node(node_page); |
495 | source_blkaddr = datablock_addr(node_page, ofs_in_node); | 492 | source_blkaddr = datablock_addr(node_page, ofs_in_node); |
496 | f2fs_put_page(node_page, 1); | 493 | f2fs_put_page(node_page, 1); |
497 | 494 | ||
498 | if (source_blkaddr != blkaddr) | 495 | if (source_blkaddr != blkaddr) |
499 | return 0; | 496 | return 0; |
500 | return 1; | 497 | return 1; |
501 | } | 498 | } |
502 | 499 | ||
503 | static void move_data_page(struct inode *inode, struct page *page, int gc_type) | 500 | static void move_data_page(struct inode *inode, struct page *page, int gc_type) |
504 | { | 501 | { |
505 | if (gc_type == BG_GC) { | 502 | if (gc_type == BG_GC) { |
506 | if (PageWriteback(page)) | 503 | if (PageWriteback(page)) |
507 | goto out; | 504 | goto out; |
508 | set_page_dirty(page); | 505 | set_page_dirty(page); |
509 | set_cold_data(page); | 506 | set_cold_data(page); |
510 | } else { | 507 | } else { |
511 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | 508 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); |
512 | 509 | ||
513 | if (PageWriteback(page)) { | 510 | if (PageWriteback(page)) { |
514 | f2fs_submit_bio(sbi, DATA, true); | 511 | f2fs_submit_bio(sbi, DATA, true); |
515 | wait_on_page_writeback(page); | 512 | wait_on_page_writeback(page); |
516 | } | 513 | } |
517 | 514 | ||
518 | if (clear_page_dirty_for_io(page) && | 515 | if (clear_page_dirty_for_io(page) && |
519 | S_ISDIR(inode->i_mode)) { | 516 | S_ISDIR(inode->i_mode)) { |
520 | dec_page_count(sbi, F2FS_DIRTY_DENTS); | 517 | dec_page_count(sbi, F2FS_DIRTY_DENTS); |
521 | inode_dec_dirty_dents(inode); | 518 | inode_dec_dirty_dents(inode); |
522 | } | 519 | } |
523 | set_cold_data(page); | 520 | set_cold_data(page); |
524 | do_write_data_page(page); | 521 | do_write_data_page(page); |
525 | clear_cold_data(page); | 522 | clear_cold_data(page); |
526 | } | 523 | } |
527 | out: | 524 | out: |
528 | f2fs_put_page(page, 1); | 525 | f2fs_put_page(page, 1); |
529 | } | 526 | } |
530 | 527 | ||
531 | /* | 528 | /* |
532 | * This function tries to get parent node of victim data block, and identifies | 529 | * This function tries to get parent node of victim data block, and identifies |
533 | * data block validity. If the block is valid, copy that with cold status and | 530 | * data block validity. If the block is valid, copy that with cold status and |
534 | * modify parent node. | 531 | * modify parent node. |
535 | * If the parent node is not valid or the data block address is different, | 532 | * If the parent node is not valid or the data block address is different, |
536 | * the victim data block is ignored. | 533 | * the victim data block is ignored. |
537 | */ | 534 | */ |
538 | static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, | 535 | static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, |
539 | struct list_head *ilist, unsigned int segno, int gc_type) | 536 | struct list_head *ilist, unsigned int segno, int gc_type) |
540 | { | 537 | { |
541 | struct super_block *sb = sbi->sb; | 538 | struct super_block *sb = sbi->sb; |
542 | struct f2fs_summary *entry; | 539 | struct f2fs_summary *entry; |
543 | block_t start_addr; | 540 | block_t start_addr; |
544 | int off; | 541 | int off; |
545 | int phase = 0; | 542 | int phase = 0; |
546 | 543 | ||
547 | start_addr = START_BLOCK(sbi, segno); | 544 | start_addr = START_BLOCK(sbi, segno); |
548 | 545 | ||
549 | next_step: | 546 | next_step: |
550 | entry = sum; | 547 | entry = sum; |
551 | for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { | 548 | for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { |
552 | struct page *data_page; | 549 | struct page *data_page; |
553 | struct inode *inode; | 550 | struct inode *inode; |
554 | struct node_info dni; /* dnode info for the data */ | 551 | struct node_info dni; /* dnode info for the data */ |
555 | unsigned int ofs_in_node, nofs; | 552 | unsigned int ofs_in_node, nofs; |
556 | block_t start_bidx; | 553 | block_t start_bidx; |
557 | 554 | ||
558 | /* stop BG_GC if there is not enough free sections. */ | 555 | /* stop BG_GC if there is not enough free sections. */ |
559 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) | 556 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) |
560 | return; | 557 | return; |
561 | 558 | ||
562 | if (check_valid_map(sbi, segno, off) == 0) | 559 | if (check_valid_map(sbi, segno, off) == 0) |
563 | continue; | 560 | continue; |
564 | 561 | ||
565 | if (phase == 0) { | 562 | if (phase == 0) { |
566 | ra_node_page(sbi, le32_to_cpu(entry->nid)); | 563 | ra_node_page(sbi, le32_to_cpu(entry->nid)); |
567 | continue; | 564 | continue; |
568 | } | 565 | } |
569 | 566 | ||
570 | /* Get an inode by ino with checking validity */ | 567 | /* Get an inode by ino with checking validity */ |
571 | if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0) | 568 | if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0) |
572 | continue; | 569 | continue; |
573 | 570 | ||
574 | if (phase == 1) { | 571 | if (phase == 1) { |
575 | ra_node_page(sbi, dni.ino); | 572 | ra_node_page(sbi, dni.ino); |
576 | continue; | 573 | continue; |
577 | } | 574 | } |
578 | 575 | ||
579 | start_bidx = start_bidx_of_node(nofs); | 576 | start_bidx = start_bidx_of_node(nofs); |
580 | ofs_in_node = le16_to_cpu(entry->ofs_in_node); | 577 | ofs_in_node = le16_to_cpu(entry->ofs_in_node); |
581 | 578 | ||
582 | if (phase == 2) { | 579 | if (phase == 2) { |
583 | inode = f2fs_iget(sb, dni.ino); | 580 | inode = f2fs_iget(sb, dni.ino); |
584 | if (IS_ERR(inode)) | 581 | if (IS_ERR(inode)) |
585 | continue; | 582 | continue; |
586 | 583 | ||
587 | data_page = find_data_page(inode, | 584 | data_page = find_data_page(inode, |
588 | start_bidx + ofs_in_node); | 585 | start_bidx + ofs_in_node); |
589 | if (IS_ERR(data_page)) | 586 | if (IS_ERR(data_page)) |
590 | goto next_iput; | 587 | goto next_iput; |
591 | 588 | ||
592 | f2fs_put_page(data_page, 0); | 589 | f2fs_put_page(data_page, 0); |
593 | add_gc_inode(inode, ilist); | 590 | add_gc_inode(inode, ilist); |
594 | } else { | 591 | } else { |
595 | inode = find_gc_inode(dni.ino, ilist); | 592 | inode = find_gc_inode(dni.ino, ilist); |
596 | if (inode) { | 593 | if (inode) { |
597 | data_page = get_lock_data_page(inode, | 594 | data_page = get_lock_data_page(inode, |
598 | start_bidx + ofs_in_node); | 595 | start_bidx + ofs_in_node); |
599 | if (IS_ERR(data_page)) | 596 | if (IS_ERR(data_page)) |
600 | continue; | 597 | continue; |
601 | move_data_page(inode, data_page, gc_type); | 598 | move_data_page(inode, data_page, gc_type); |
602 | stat_inc_data_blk_count(sbi, 1); | 599 | stat_inc_data_blk_count(sbi, 1); |
603 | } | 600 | } |
604 | } | 601 | } |
605 | continue; | 602 | continue; |
606 | next_iput: | 603 | next_iput: |
607 | iput(inode); | 604 | iput(inode); |
608 | } | 605 | } |
609 | if (++phase < 4) | 606 | if (++phase < 4) |
610 | goto next_step; | 607 | goto next_step; |
611 | 608 | ||
612 | if (gc_type == FG_GC) { | 609 | if (gc_type == FG_GC) { |
613 | f2fs_submit_bio(sbi, DATA, true); | 610 | f2fs_submit_bio(sbi, DATA, true); |
614 | 611 | ||
615 | /* | 612 | /* |
616 | * In the case of FG_GC, it'd be better to reclaim this victim | 613 | * In the case of FG_GC, it'd be better to reclaim this victim |
617 | * completely. | 614 | * completely. |
618 | */ | 615 | */ |
619 | if (get_valid_blocks(sbi, segno, 1) != 0) { | 616 | if (get_valid_blocks(sbi, segno, 1) != 0) { |
620 | phase = 2; | 617 | phase = 2; |
621 | goto next_step; | 618 | goto next_step; |
622 | } | 619 | } |
623 | } | 620 | } |
624 | } | 621 | } |
625 | 622 | ||
626 | static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, | 623 | static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, |
627 | int gc_type, int type) | 624 | int gc_type, int type) |
628 | { | 625 | { |
629 | struct sit_info *sit_i = SIT_I(sbi); | 626 | struct sit_info *sit_i = SIT_I(sbi); |
630 | int ret; | 627 | int ret; |
631 | mutex_lock(&sit_i->sentry_lock); | 628 | mutex_lock(&sit_i->sentry_lock); |
632 | ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS); | 629 | ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS); |
633 | mutex_unlock(&sit_i->sentry_lock); | 630 | mutex_unlock(&sit_i->sentry_lock); |
634 | return ret; | 631 | return ret; |
635 | } | 632 | } |
636 | 633 | ||
637 | static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno, | 634 | static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno, |
638 | struct list_head *ilist, int gc_type) | 635 | struct list_head *ilist, int gc_type) |
639 | { | 636 | { |
640 | struct page *sum_page; | 637 | struct page *sum_page; |
641 | struct f2fs_summary_block *sum; | 638 | struct f2fs_summary_block *sum; |
642 | 639 | ||
643 | /* read segment summary of victim */ | 640 | /* read segment summary of victim */ |
644 | sum_page = get_sum_page(sbi, segno); | 641 | sum_page = get_sum_page(sbi, segno); |
645 | if (IS_ERR(sum_page)) | 642 | if (IS_ERR(sum_page)) |
646 | return; | 643 | return; |
647 | 644 | ||
648 | sum = page_address(sum_page); | 645 | sum = page_address(sum_page); |
649 | 646 | ||
650 | switch (GET_SUM_TYPE((&sum->footer))) { | 647 | switch (GET_SUM_TYPE((&sum->footer))) { |
651 | case SUM_TYPE_NODE: | 648 | case SUM_TYPE_NODE: |
652 | gc_node_segment(sbi, sum->entries, segno, gc_type); | 649 | gc_node_segment(sbi, sum->entries, segno, gc_type); |
653 | break; | 650 | break; |
654 | case SUM_TYPE_DATA: | 651 | case SUM_TYPE_DATA: |
655 | gc_data_segment(sbi, sum->entries, ilist, segno, gc_type); | 652 | gc_data_segment(sbi, sum->entries, ilist, segno, gc_type); |
656 | break; | 653 | break; |
657 | } | 654 | } |
658 | stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer))); | 655 | stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer))); |
659 | stat_inc_call_count(sbi->stat_info); | 656 | stat_inc_call_count(sbi->stat_info); |
660 | 657 | ||
661 | f2fs_put_page(sum_page, 1); | 658 | f2fs_put_page(sum_page, 1); |
662 | } | 659 | } |
663 | 660 | ||
664 | int f2fs_gc(struct f2fs_sb_info *sbi) | 661 | int f2fs_gc(struct f2fs_sb_info *sbi) |
665 | { | 662 | { |
666 | struct list_head ilist; | 663 | struct list_head ilist; |
667 | unsigned int segno, i; | 664 | unsigned int segno, i; |
668 | int gc_type = BG_GC; | 665 | int gc_type = BG_GC; |
669 | int nfree = 0; | 666 | int nfree = 0; |
670 | int ret = -1; | 667 | int ret = -1; |
671 | 668 | ||
672 | INIT_LIST_HEAD(&ilist); | 669 | INIT_LIST_HEAD(&ilist); |
673 | gc_more: | 670 | gc_more: |
674 | if (!(sbi->sb->s_flags & MS_ACTIVE)) | 671 | if (!(sbi->sb->s_flags & MS_ACTIVE)) |
675 | goto stop; | 672 | goto stop; |
676 | 673 | ||
677 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) { | 674 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) { |
678 | gc_type = FG_GC; | 675 | gc_type = FG_GC; |
679 | write_checkpoint(sbi, false); | 676 | write_checkpoint(sbi, false); |
680 | } | 677 | } |
681 | 678 | ||
682 | if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) | 679 | if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) |
683 | goto stop; | 680 | goto stop; |
684 | ret = 0; | 681 | ret = 0; |
685 | 682 | ||
686 | for (i = 0; i < sbi->segs_per_sec; i++) | 683 | for (i = 0; i < sbi->segs_per_sec; i++) |
687 | do_garbage_collect(sbi, segno + i, &ilist, gc_type); | 684 | do_garbage_collect(sbi, segno + i, &ilist, gc_type); |
688 | 685 | ||
689 | if (gc_type == FG_GC) { | 686 | if (gc_type == FG_GC) { |
690 | sbi->cur_victim_sec = NULL_SEGNO; | 687 | sbi->cur_victim_sec = NULL_SEGNO; |
691 | nfree++; | 688 | nfree++; |
692 | WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec)); | 689 | WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec)); |
693 | } | 690 | } |
694 | 691 | ||
695 | if (has_not_enough_free_secs(sbi, nfree)) | 692 | if (has_not_enough_free_secs(sbi, nfree)) |
696 | goto gc_more; | 693 | goto gc_more; |
697 | 694 | ||
698 | if (gc_type == FG_GC) | 695 | if (gc_type == FG_GC) |
699 | write_checkpoint(sbi, false); | 696 | write_checkpoint(sbi, false); |
700 | stop: | 697 | stop: |
701 | mutex_unlock(&sbi->gc_mutex); | 698 | mutex_unlock(&sbi->gc_mutex); |
702 | 699 | ||
703 | put_gc_inode(&ilist); | 700 | put_gc_inode(&ilist); |
704 | return ret; | 701 | return ret; |
705 | } | 702 | } |
706 | 703 | ||
707 | void build_gc_manager(struct f2fs_sb_info *sbi) | 704 | void build_gc_manager(struct f2fs_sb_info *sbi) |
708 | { | 705 | { |
709 | DIRTY_I(sbi)->v_ops = &default_v_ops; | 706 | DIRTY_I(sbi)->v_ops = &default_v_ops; |
710 | } | 707 | } |
711 | 708 | ||
712 | int __init create_gc_caches(void) | 709 | int __init create_gc_caches(void) |
713 | { | 710 | { |
714 | winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", | 711 | winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", |
715 | sizeof(struct inode_entry), NULL); | 712 | sizeof(struct inode_entry), NULL); |
716 | if (!winode_slab) | 713 | if (!winode_slab) |
717 | return -ENOMEM; | 714 | return -ENOMEM; |
718 | return 0; | 715 | return 0; |
719 | } | 716 | } |
720 | 717 | ||
721 | void destroy_gc_caches(void) | 718 | void destroy_gc_caches(void) |
722 | { | 719 | { |
723 | kmem_cache_destroy(winode_slab); | 720 | kmem_cache_destroy(winode_slab); |
724 | } | 721 | } |
725 | 722 |
fs/f2fs/gc.h
1 | /* | 1 | /* |
2 | * fs/f2fs/gc.h | 2 | * fs/f2fs/gc.h |
3 | * | 3 | * |
4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | 4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. |
5 | * http://www.samsung.com/ | 5 | * http://www.samsung.com/ |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define GC_THREAD_MIN_WB_PAGES 1 /* | 11 | #define GC_THREAD_MIN_WB_PAGES 1 /* |
12 | * a threshold to determine | 12 | * a threshold to determine |
13 | * whether IO subsystem is idle | 13 | * whether IO subsystem is idle |
14 | * or not | 14 | * or not |
15 | */ | 15 | */ |
-#define GC_THREAD_MIN_SLEEP_TIME	10000	/* milliseconds */
-#define GC_THREAD_MAX_SLEEP_TIME	30000
-#define GC_THREAD_NOGC_SLEEP_TIME	10000
+#define GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
+#define GC_THREAD_MAX_SLEEP_TIME	60000
+#define GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */
19 | #define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */ | 19 | #define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */ |
20 | #define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */ | 20 | #define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */ |
21 | 21 | ||
22 | /* Search max. number of dirty segments to select a victim segment */ | 22 | /* Search max. number of dirty segments to select a victim segment */ |
23 | #define MAX_VICTIM_SEARCH 20 | 23 | #define MAX_VICTIM_SEARCH 20 |
24 | 24 | ||
25 | struct f2fs_gc_kthread { | 25 | struct f2fs_gc_kthread { |
26 | struct task_struct *f2fs_gc_task; | 26 | struct task_struct *f2fs_gc_task; |
27 | wait_queue_head_t gc_wait_queue_head; | 27 | wait_queue_head_t gc_wait_queue_head; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | struct inode_entry { | 30 | struct inode_entry { |
31 | struct list_head list; | 31 | struct list_head list; |
32 | struct inode *inode; | 32 | struct inode *inode; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * inline functions | 36 | * inline functions |
37 | */ | 37 | */ |
38 | static inline block_t free_user_blocks(struct f2fs_sb_info *sbi) | 38 | static inline block_t free_user_blocks(struct f2fs_sb_info *sbi) |
39 | { | 39 | { |
40 | if (free_segments(sbi) < overprovision_segments(sbi)) | 40 | if (free_segments(sbi) < overprovision_segments(sbi)) |
41 | return 0; | 41 | return 0; |
42 | else | 42 | else |
43 | return (free_segments(sbi) - overprovision_segments(sbi)) | 43 | return (free_segments(sbi) - overprovision_segments(sbi)) |
44 | << sbi->log_blocks_per_seg; | 44 | << sbi->log_blocks_per_seg; |
45 | } | 45 | } |
46 | 46 | ||
47 | static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi) | 47 | static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi) |
48 | { | 48 | { |
49 | return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100; | 49 | return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100; |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi) | 52 | static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi) |
53 | { | 53 | { |
54 | block_t reclaimable_user_blocks = sbi->user_block_count - | 54 | block_t reclaimable_user_blocks = sbi->user_block_count - |
55 | written_block_count(sbi); | 55 | written_block_count(sbi); |
56 | return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100; | 56 | return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100; |
57 | } | 57 | } |
58 | 58 | ||
static inline long increase_sleep_time(long wait)
{
+	if (wait == GC_THREAD_NOGC_SLEEP_TIME)
+		return wait;
+
	wait += GC_THREAD_MIN_SLEEP_TIME;
	if (wait > GC_THREAD_MAX_SLEEP_TIME)
		wait = GC_THREAD_MAX_SLEEP_TIME;
	return wait;
}
66 | 69 | ||
static inline long decrease_sleep_time(long wait)
{
+	if (wait == GC_THREAD_NOGC_SLEEP_TIME)
+		wait = GC_THREAD_MAX_SLEEP_TIME;
+
	wait -= GC_THREAD_MIN_SLEEP_TIME;
	if (wait <= GC_THREAD_MIN_SLEEP_TIME)
		wait = GC_THREAD_MIN_SLEEP_TIME;
	return wait;
}
74 | 80 | ||
75 | static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi) | 81 | static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi) |
76 | { | 82 | { |
77 | block_t invalid_user_blocks = sbi->user_block_count - | 83 | block_t invalid_user_blocks = sbi->user_block_count - |
78 | written_block_count(sbi); | 84 | written_block_count(sbi); |
79 | /* | 85 | /* |
80 | * Background GC is triggered with the following condition. | 86 | * Background GC is triggered with the following condition. |
81 | * 1. There are a number of invalid blocks. | 87 | * 1. There are a number of invalid blocks. |
82 | * 2. There is not enough free space. | 88 | * 2. There is not enough free space. |
83 | */ | 89 | */ |
84 | if (invalid_user_blocks > limit_invalid_user_blocks(sbi) && | 90 | if (invalid_user_blocks > limit_invalid_user_blocks(sbi) && |
85 | free_user_blocks(sbi) < limit_free_user_blocks(sbi)) | 91 | free_user_blocks(sbi) < limit_free_user_blocks(sbi)) |
86 | return true; | 92 | return true; |
87 | return false; | 93 | return false; |
88 | } | 94 | } |
89 | 95 | ||
90 | static inline int is_idle(struct f2fs_sb_info *sbi) | 96 | static inline int is_idle(struct f2fs_sb_info *sbi) |
91 | { | 97 | { |
92 | struct block_device *bdev = sbi->sb->s_bdev; | 98 | struct block_device *bdev = sbi->sb->s_bdev; |
93 | struct request_queue *q = bdev_get_queue(bdev); | 99 | struct request_queue *q = bdev_get_queue(bdev); |
94 | struct request_list *rl = &q->root_rl; | 100 | struct request_list *rl = &q->root_rl; |
95 | return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]); | 101 | return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]); |
96 | } | 102 | } |
97 | 103 |
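For a sense of scale for the LIMIT_INVALID_BLOCK threshold used by has_enough_invalid_blocks() above, here is a small sketch with hypothetical numbers; the 1,000,000-block volume and 900,000 written blocks are made up for illustration, and the free-space half of the check (free_user_blocks vs. limit_free_user_blocks) is omitted.

/*
 * Hypothetical numbers, purely to illustrate the LIMIT_INVALID_BLOCK
 * threshold; not part of the patch.
 */
#include <stdio.h>

#define LIMIT_INVALID_BLOCK	40	/* percentage over total user space */

int main(void)
{
	long user_block_count = 1000000;	/* assumed volume size in blocks */
	long written_block_count = 900000;	/* assumed written blocks */

	long invalid_user_blocks = user_block_count - written_block_count;
	long limit_invalid = user_block_count * LIMIT_INVALID_BLOCK / 100;

	/*
	 * 100000 invalid blocks against a 400000-block limit: the invalid-block
	 * condition is not met, so the GC thread would lengthen its sleep.
	 */
	printf("invalid=%ld limit=%ld -> %s\n", invalid_user_blocks, limit_invalid,
	       invalid_user_blocks > limit_invalid ? "enough" : "not enough");
	return 0;
}

With only 10% of user blocks invalid, the 40% threshold is not reached, so background GC backs off via increase_sleep_time() rather than shortening its interval.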