Commit 434720fa98443c048df62512d3c6aaefb429bdd2

Authored by Masanari Iida
Committed by Jiri Kosina
1 parent cf2fbdd26f

f2fs: Fix typo in comments

Correct spelling typo in comments

Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Acked-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>

Showing 3 changed files with 3 additions and 3 deletions Inline Diff

1 /* 1 /*
2 * fs/f2fs/f2fs.h 2 * fs/f2fs/f2fs.h
3 * 3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/ 5 * http://www.samsung.com/
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #ifndef _LINUX_F2FS_H 11 #ifndef _LINUX_F2FS_H
12 #define _LINUX_F2FS_H 12 #define _LINUX_F2FS_H
13 13
14 #include <linux/types.h> 14 #include <linux/types.h>
15 #include <linux/page-flags.h> 15 #include <linux/page-flags.h>
16 #include <linux/buffer_head.h> 16 #include <linux/buffer_head.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/crc32.h> 18 #include <linux/crc32.h>
19 #include <linux/magic.h> 19 #include <linux/magic.h>
20 20
21 /* 21 /*
22 * For mount options 22 * For mount options
23 */ 23 */
24 #define F2FS_MOUNT_BG_GC 0x00000001 24 #define F2FS_MOUNT_BG_GC 0x00000001
25 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002 25 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
26 #define F2FS_MOUNT_DISCARD 0x00000004 26 #define F2FS_MOUNT_DISCARD 0x00000004
27 #define F2FS_MOUNT_NOHEAP 0x00000008 27 #define F2FS_MOUNT_NOHEAP 0x00000008
28 #define F2FS_MOUNT_XATTR_USER 0x00000010 28 #define F2FS_MOUNT_XATTR_USER 0x00000010
29 #define F2FS_MOUNT_POSIX_ACL 0x00000020 29 #define F2FS_MOUNT_POSIX_ACL 0x00000020
30 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040 30 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
31 31
32 #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option) 32 #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
33 #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option) 33 #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
34 #define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option) 34 #define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option)
35 35
36 #define ver_after(a, b) (typecheck(unsigned long long, a) && \ 36 #define ver_after(a, b) (typecheck(unsigned long long, a) && \
37 typecheck(unsigned long long, b) && \ 37 typecheck(unsigned long long, b) && \
38 ((long long)((a) - (b)) > 0)) 38 ((long long)((a) - (b)) > 0))
39 39
40 typedef u64 block_t; 40 typedef u64 block_t;
41 typedef u32 nid_t; 41 typedef u32 nid_t;
42 42
43 struct f2fs_mount_info { 43 struct f2fs_mount_info {
44 unsigned int opt; 44 unsigned int opt;
45 }; 45 };
46 46
47 static inline __u32 f2fs_crc32(void *buff, size_t len) 47 static inline __u32 f2fs_crc32(void *buff, size_t len)
48 { 48 {
49 return crc32_le(F2FS_SUPER_MAGIC, buff, len); 49 return crc32_le(F2FS_SUPER_MAGIC, buff, len);
50 } 50 }
51 51
52 static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size) 52 static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
53 { 53 {
54 return f2fs_crc32(buff, buff_size) == blk_crc; 54 return f2fs_crc32(buff, buff_size) == blk_crc;
55 } 55 }
56 56
57 /* 57 /*
58 * For checkpoint manager 58 * For checkpoint manager
59 */ 59 */
60 enum { 60 enum {
61 NAT_BITMAP, 61 NAT_BITMAP,
62 SIT_BITMAP 62 SIT_BITMAP
63 }; 63 };
64 64
65 /* for the list of orphan inodes */ 65 /* for the list of orphan inodes */
66 struct orphan_inode_entry { 66 struct orphan_inode_entry {
67 struct list_head list; /* list head */ 67 struct list_head list; /* list head */
68 nid_t ino; /* inode number */ 68 nid_t ino; /* inode number */
69 }; 69 };
70 70
71 /* for the list of directory inodes */ 71 /* for the list of directory inodes */
72 struct dir_inode_entry { 72 struct dir_inode_entry {
73 struct list_head list; /* list head */ 73 struct list_head list; /* list head */
74 struct inode *inode; /* vfs inode pointer */ 74 struct inode *inode; /* vfs inode pointer */
75 }; 75 };
76 76
77 /* for the list of fsync inodes, used only during recovery */ 77 /* for the list of fsync inodes, used only during recovery */
78 struct fsync_inode_entry { 78 struct fsync_inode_entry {
79 struct list_head list; /* list head */ 79 struct list_head list; /* list head */
80 struct inode *inode; /* vfs inode pointer */ 80 struct inode *inode; /* vfs inode pointer */
81 block_t blkaddr; /* block address locating the last inode */ 81 block_t blkaddr; /* block address locating the last inode */
82 }; 82 };
83 83
84 #define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats)) 84 #define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats))
85 #define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits)) 85 #define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits))
86 86
87 #define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne) 87 #define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne)
88 #define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid) 88 #define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid)
89 #define sit_in_journal(sum, i) (sum->sit_j.entries[i].se) 89 #define sit_in_journal(sum, i) (sum->sit_j.entries[i].se)
90 #define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno) 90 #define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno)
91 91
92 static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i) 92 static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
93 { 93 {
94 int before = nats_in_cursum(rs); 94 int before = nats_in_cursum(rs);
95 rs->n_nats = cpu_to_le16(before + i); 95 rs->n_nats = cpu_to_le16(before + i);
96 return before; 96 return before;
97 } 97 }
98 98
99 static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i) 99 static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
100 { 100 {
101 int before = sits_in_cursum(rs); 101 int before = sits_in_cursum(rs);
102 rs->n_sits = cpu_to_le16(before + i); 102 rs->n_sits = cpu_to_le16(before + i);
103 return before; 103 return before;
104 } 104 }
105 105
106 /* 106 /*
107 * ioctl commands 107 * ioctl commands
108 */ 108 */
109 #define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS 109 #define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS
110 #define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS 110 #define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS
111 111
112 #if defined(__KERNEL__) && defined(CONFIG_COMPAT) 112 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
113 /* 113 /*
114 * ioctl commands in 32 bit emulation 114 * ioctl commands in 32 bit emulation
115 */ 115 */
116 #define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS 116 #define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
117 #define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS 117 #define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
118 #endif 118 #endif
119 119
120 /* 120 /*
121 * For INODE and NODE manager 121 * For INODE and NODE manager
122 */ 122 */
123 #define XATTR_NODE_OFFSET (-1) /* 123 #define XATTR_NODE_OFFSET (-1) /*
124 * store xattrs to one node block per 124 * store xattrs to one node block per
125 * file keeping -1 as its node offset to 125 * file keeping -1 as its node offset to
126 * distinguish from index node blocks. 126 * distinguish from index node blocks.
127 */ 127 */
128 #define RDONLY_NODE 1 /* 128 #define RDONLY_NODE 1 /*
129 * specify a read-only mode when getting 129 * specify a read-only mode when getting
130 * a node block. 0 is read-write mode. 130 * a node block. 0 is read-write mode.
131 * used by get_dnode_of_data(). 131 * used by get_dnode_of_data().
132 */ 132 */
133 #define F2FS_LINK_MAX 32000 /* maximum link count per file */ 133 #define F2FS_LINK_MAX 32000 /* maximum link count per file */
134 134
135 /* for in-memory extent cache entry */ 135 /* for in-memory extent cache entry */
136 struct extent_info { 136 struct extent_info {
137 rwlock_t ext_lock; /* rwlock for consistency */ 137 rwlock_t ext_lock; /* rwlock for consistency */
138 unsigned int fofs; /* start offset in a file */ 138 unsigned int fofs; /* start offset in a file */
139 u32 blk_addr; /* start block address of the extent */ 139 u32 blk_addr; /* start block address of the extent */
140 unsigned int len; /* lenth of the extent */ 140 unsigned int len; /* length of the extent */
141 }; 141 };
142 142
143 /* 143 /*
144 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later. 144 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
145 */ 145 */
146 #define FADVISE_COLD_BIT 0x01 146 #define FADVISE_COLD_BIT 0x01
147 147
148 struct f2fs_inode_info { 148 struct f2fs_inode_info {
149 struct inode vfs_inode; /* serve a vfs inode */ 149 struct inode vfs_inode; /* serve a vfs inode */
150 unsigned long i_flags; /* keep an inode flags for ioctl */ 150 unsigned long i_flags; /* keep an inode flags for ioctl */
151 unsigned char i_advise; /* use to give file attribute hints */ 151 unsigned char i_advise; /* use to give file attribute hints */
152 unsigned int i_current_depth; /* use only in directory structure */ 152 unsigned int i_current_depth; /* use only in directory structure */
153 unsigned int i_pino; /* parent inode number */ 153 unsigned int i_pino; /* parent inode number */
154 umode_t i_acl_mode; /* keep file acl mode temporarily */ 154 umode_t i_acl_mode; /* keep file acl mode temporarily */
155 155
156 /* Use below internally in f2fs*/ 156 /* Use below internally in f2fs*/
157 unsigned long flags; /* use to pass per-file flags */ 157 unsigned long flags; /* use to pass per-file flags */
158 unsigned long long data_version;/* latest version of data for fsync */ 158 unsigned long long data_version;/* latest version of data for fsync */
159 atomic_t dirty_dents; /* # of dirty dentry pages */ 159 atomic_t dirty_dents; /* # of dirty dentry pages */
160 f2fs_hash_t chash; /* hash value of given file name */ 160 f2fs_hash_t chash; /* hash value of given file name */
161 unsigned int clevel; /* maximum level of given file name */ 161 unsigned int clevel; /* maximum level of given file name */
162 nid_t i_xattr_nid; /* node id that contains xattrs */ 162 nid_t i_xattr_nid; /* node id that contains xattrs */
163 struct extent_info ext; /* in-memory extent cache entry */ 163 struct extent_info ext; /* in-memory extent cache entry */
164 }; 164 };
165 165
166 static inline void get_extent_info(struct extent_info *ext, 166 static inline void get_extent_info(struct extent_info *ext,
167 struct f2fs_extent i_ext) 167 struct f2fs_extent i_ext)
168 { 168 {
169 write_lock(&ext->ext_lock); 169 write_lock(&ext->ext_lock);
170 ext->fofs = le32_to_cpu(i_ext.fofs); 170 ext->fofs = le32_to_cpu(i_ext.fofs);
171 ext->blk_addr = le32_to_cpu(i_ext.blk_addr); 171 ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
172 ext->len = le32_to_cpu(i_ext.len); 172 ext->len = le32_to_cpu(i_ext.len);
173 write_unlock(&ext->ext_lock); 173 write_unlock(&ext->ext_lock);
174 } 174 }
175 175
176 static inline void set_raw_extent(struct extent_info *ext, 176 static inline void set_raw_extent(struct extent_info *ext,
177 struct f2fs_extent *i_ext) 177 struct f2fs_extent *i_ext)
178 { 178 {
179 read_lock(&ext->ext_lock); 179 read_lock(&ext->ext_lock);
180 i_ext->fofs = cpu_to_le32(ext->fofs); 180 i_ext->fofs = cpu_to_le32(ext->fofs);
181 i_ext->blk_addr = cpu_to_le32(ext->blk_addr); 181 i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
182 i_ext->len = cpu_to_le32(ext->len); 182 i_ext->len = cpu_to_le32(ext->len);
183 read_unlock(&ext->ext_lock); 183 read_unlock(&ext->ext_lock);
184 } 184 }
185 185
186 struct f2fs_nm_info { 186 struct f2fs_nm_info {
187 block_t nat_blkaddr; /* base disk address of NAT */ 187 block_t nat_blkaddr; /* base disk address of NAT */
188 nid_t max_nid; /* maximum possible node ids */ 188 nid_t max_nid; /* maximum possible node ids */
189 nid_t init_scan_nid; /* the first nid to be scanned */ 189 nid_t init_scan_nid; /* the first nid to be scanned */
190 nid_t next_scan_nid; /* the next nid to be scanned */ 190 nid_t next_scan_nid; /* the next nid to be scanned */
191 191
192 /* NAT cache management */ 192 /* NAT cache management */
193 struct radix_tree_root nat_root;/* root of the nat entry cache */ 193 struct radix_tree_root nat_root;/* root of the nat entry cache */
194 rwlock_t nat_tree_lock; /* protect nat_tree_lock */ 194 rwlock_t nat_tree_lock; /* protect nat_tree_lock */
195 unsigned int nat_cnt; /* the # of cached nat entries */ 195 unsigned int nat_cnt; /* the # of cached nat entries */
196 struct list_head nat_entries; /* cached nat entry list (clean) */ 196 struct list_head nat_entries; /* cached nat entry list (clean) */
197 struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */ 197 struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */
198 198
199 /* free node ids management */ 199 /* free node ids management */
200 struct list_head free_nid_list; /* a list for free nids */ 200 struct list_head free_nid_list; /* a list for free nids */
201 spinlock_t free_nid_list_lock; /* protect free nid list */ 201 spinlock_t free_nid_list_lock; /* protect free nid list */
202 unsigned int fcnt; /* the number of free node id */ 202 unsigned int fcnt; /* the number of free node id */
203 struct mutex build_lock; /* lock for build free nids */ 203 struct mutex build_lock; /* lock for build free nids */
204 204
205 /* for checkpoint */ 205 /* for checkpoint */
206 char *nat_bitmap; /* NAT bitmap pointer */ 206 char *nat_bitmap; /* NAT bitmap pointer */
207 int bitmap_size; /* bitmap size */ 207 int bitmap_size; /* bitmap size */
208 }; 208 };
209 209
210 /* 210 /*
211 * this structure is used as one of function parameters. 211 * this structure is used as one of function parameters.
212 * all the information are dedicated to a given direct node block determined 212 * all the information are dedicated to a given direct node block determined
213 * by the data offset in a file. 213 * by the data offset in a file.
214 */ 214 */
215 struct dnode_of_data { 215 struct dnode_of_data {
216 struct inode *inode; /* vfs inode pointer */ 216 struct inode *inode; /* vfs inode pointer */
217 struct page *inode_page; /* its inode page, NULL is possible */ 217 struct page *inode_page; /* its inode page, NULL is possible */
218 struct page *node_page; /* cached direct node page */ 218 struct page *node_page; /* cached direct node page */
219 nid_t nid; /* node id of the direct node block */ 219 nid_t nid; /* node id of the direct node block */
220 unsigned int ofs_in_node; /* data offset in the node page */ 220 unsigned int ofs_in_node; /* data offset in the node page */
221 bool inode_page_locked; /* inode page is locked or not */ 221 bool inode_page_locked; /* inode page is locked or not */
222 block_t data_blkaddr; /* block address of the node block */ 222 block_t data_blkaddr; /* block address of the node block */
223 }; 223 };
224 224
225 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, 225 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
226 struct page *ipage, struct page *npage, nid_t nid) 226 struct page *ipage, struct page *npage, nid_t nid)
227 { 227 {
228 memset(dn, 0, sizeof(*dn)); 228 memset(dn, 0, sizeof(*dn));
229 dn->inode = inode; 229 dn->inode = inode;
230 dn->inode_page = ipage; 230 dn->inode_page = ipage;
231 dn->node_page = npage; 231 dn->node_page = npage;
232 dn->nid = nid; 232 dn->nid = nid;
233 } 233 }
234 234
235 /* 235 /*
236 * For SIT manager 236 * For SIT manager
237 * 237 *
238 * By default, there are 6 active log areas across the whole main area. 238 * By default, there are 6 active log areas across the whole main area.
239 * When considering hot and cold data separation to reduce cleaning overhead, 239 * When considering hot and cold data separation to reduce cleaning overhead,
240 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types, 240 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
241 * respectively. 241 * respectively.
242 * In the current design, you should not change the numbers intentionally. 242 * In the current design, you should not change the numbers intentionally.
243 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6 243 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
244 * logs individually according to the underlying devices. (default: 6) 244 * logs individually according to the underlying devices. (default: 6)
245 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for 245 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
246 * data and 8 for node logs. 246 * data and 8 for node logs.
247 */ 247 */
248 #define NR_CURSEG_DATA_TYPE (3) 248 #define NR_CURSEG_DATA_TYPE (3)
249 #define NR_CURSEG_NODE_TYPE (3) 249 #define NR_CURSEG_NODE_TYPE (3)
250 #define NR_CURSEG_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE) 250 #define NR_CURSEG_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
251 251
252 enum { 252 enum {
253 CURSEG_HOT_DATA = 0, /* directory entry blocks */ 253 CURSEG_HOT_DATA = 0, /* directory entry blocks */
254 CURSEG_WARM_DATA, /* data blocks */ 254 CURSEG_WARM_DATA, /* data blocks */
255 CURSEG_COLD_DATA, /* multimedia or GCed data blocks */ 255 CURSEG_COLD_DATA, /* multimedia or GCed data blocks */
256 CURSEG_HOT_NODE, /* direct node blocks of directory files */ 256 CURSEG_HOT_NODE, /* direct node blocks of directory files */
257 CURSEG_WARM_NODE, /* direct node blocks of normal files */ 257 CURSEG_WARM_NODE, /* direct node blocks of normal files */
258 CURSEG_COLD_NODE, /* indirect node blocks */ 258 CURSEG_COLD_NODE, /* indirect node blocks */
259 NO_CHECK_TYPE 259 NO_CHECK_TYPE
260 }; 260 };
261 261
262 struct f2fs_sm_info { 262 struct f2fs_sm_info {
263 struct sit_info *sit_info; /* whole segment information */ 263 struct sit_info *sit_info; /* whole segment information */
264 struct free_segmap_info *free_info; /* free segment information */ 264 struct free_segmap_info *free_info; /* free segment information */
265 struct dirty_seglist_info *dirty_info; /* dirty segment information */ 265 struct dirty_seglist_info *dirty_info; /* dirty segment information */
266 struct curseg_info *curseg_array; /* active segment information */ 266 struct curseg_info *curseg_array; /* active segment information */
267 267
268 struct list_head wblist_head; /* list of under-writeback pages */ 268 struct list_head wblist_head; /* list of under-writeback pages */
269 spinlock_t wblist_lock; /* lock for checkpoint */ 269 spinlock_t wblist_lock; /* lock for checkpoint */
270 270
271 block_t seg0_blkaddr; /* block address of 0'th segment */ 271 block_t seg0_blkaddr; /* block address of 0'th segment */
272 block_t main_blkaddr; /* start block address of main area */ 272 block_t main_blkaddr; /* start block address of main area */
273 block_t ssa_blkaddr; /* start block address of SSA area */ 273 block_t ssa_blkaddr; /* start block address of SSA area */
274 274
275 unsigned int segment_count; /* total # of segments */ 275 unsigned int segment_count; /* total # of segments */
276 unsigned int main_segments; /* # of segments in main area */ 276 unsigned int main_segments; /* # of segments in main area */
277 unsigned int reserved_segments; /* # of reserved segments */ 277 unsigned int reserved_segments; /* # of reserved segments */
278 unsigned int ovp_segments; /* # of overprovision segments */ 278 unsigned int ovp_segments; /* # of overprovision segments */
279 }; 279 };
280 280
281 /* 281 /*
282 * For directory operation 282 * For directory operation
283 */ 283 */
284 #define NODE_DIR1_BLOCK (ADDRS_PER_INODE + 1) 284 #define NODE_DIR1_BLOCK (ADDRS_PER_INODE + 1)
285 #define NODE_DIR2_BLOCK (ADDRS_PER_INODE + 2) 285 #define NODE_DIR2_BLOCK (ADDRS_PER_INODE + 2)
286 #define NODE_IND1_BLOCK (ADDRS_PER_INODE + 3) 286 #define NODE_IND1_BLOCK (ADDRS_PER_INODE + 3)
287 #define NODE_IND2_BLOCK (ADDRS_PER_INODE + 4) 287 #define NODE_IND2_BLOCK (ADDRS_PER_INODE + 4)
288 #define NODE_DIND_BLOCK (ADDRS_PER_INODE + 5) 288 #define NODE_DIND_BLOCK (ADDRS_PER_INODE + 5)
289 289
290 /* 290 /*
291 * For superblock 291 * For superblock
292 */ 292 */
293 /* 293 /*
294 * COUNT_TYPE for monitoring 294 * COUNT_TYPE for monitoring
295 * 295 *
296 * f2fs monitors the number of several block types such as on-writeback, 296 * f2fs monitors the number of several block types such as on-writeback,
297 * dirty dentry blocks, dirty node blocks, and dirty meta blocks. 297 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
298 */ 298 */
299 enum count_type { 299 enum count_type {
300 F2FS_WRITEBACK, 300 F2FS_WRITEBACK,
301 F2FS_DIRTY_DENTS, 301 F2FS_DIRTY_DENTS,
302 F2FS_DIRTY_NODES, 302 F2FS_DIRTY_NODES,
303 F2FS_DIRTY_META, 303 F2FS_DIRTY_META,
304 NR_COUNT_TYPE, 304 NR_COUNT_TYPE,
305 }; 305 };
306 306
307 /* 307 /*
308 * FS_LOCK nesting subclasses for the lock validator: 308 * FS_LOCK nesting subclasses for the lock validator:
309 * 309 *
310 * The locking order between these classes is 310 * The locking order between these classes is
311 * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW 311 * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW
312 * -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC 312 * -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC
313 */ 313 */
314 enum lock_type { 314 enum lock_type {
315 RENAME, /* for renaming operations */ 315 RENAME, /* for renaming operations */
316 DENTRY_OPS, /* for directory operations */ 316 DENTRY_OPS, /* for directory operations */
317 DATA_WRITE, /* for data write */ 317 DATA_WRITE, /* for data write */
318 DATA_NEW, /* for data allocation */ 318 DATA_NEW, /* for data allocation */
319 DATA_TRUNC, /* for data truncate */ 319 DATA_TRUNC, /* for data truncate */
320 NODE_NEW, /* for node allocation */ 320 NODE_NEW, /* for node allocation */
321 NODE_TRUNC, /* for node truncate */ 321 NODE_TRUNC, /* for node truncate */
322 NODE_WRITE, /* for node write */ 322 NODE_WRITE, /* for node write */
323 NR_LOCK_TYPE, 323 NR_LOCK_TYPE,
324 }; 324 };
325 325
326 /* 326 /*
327 * The below are the page types of bios used in submti_bio(). 327 * The below are the page types of bios used in submti_bio().
328 * The available types are: 328 * The available types are:
329 * DATA User data pages. It operates as async mode. 329 * DATA User data pages. It operates as async mode.
330 * NODE Node pages. It operates as async mode. 330 * NODE Node pages. It operates as async mode.
331 * META FS metadata pages such as SIT, NAT, CP. 331 * META FS metadata pages such as SIT, NAT, CP.
332 * NR_PAGE_TYPE The number of page types. 332 * NR_PAGE_TYPE The number of page types.
333 * META_FLUSH Make sure the previous pages are written 333 * META_FLUSH Make sure the previous pages are written
334 * with waiting the bio's completion 334 * with waiting the bio's completion
335 * ... Only can be used with META. 335 * ... Only can be used with META.
336 */ 336 */
337 enum page_type { 337 enum page_type {
338 DATA, 338 DATA,
339 NODE, 339 NODE,
340 META, 340 META,
341 NR_PAGE_TYPE, 341 NR_PAGE_TYPE,
342 META_FLUSH, 342 META_FLUSH,
343 }; 343 };
344 344
345 struct f2fs_sb_info { 345 struct f2fs_sb_info {
346 struct super_block *sb; /* pointer to VFS super block */ 346 struct super_block *sb; /* pointer to VFS super block */
347 struct buffer_head *raw_super_buf; /* buffer head of raw sb */ 347 struct buffer_head *raw_super_buf; /* buffer head of raw sb */
348 struct f2fs_super_block *raw_super; /* raw super block pointer */ 348 struct f2fs_super_block *raw_super; /* raw super block pointer */
349 int s_dirty; /* dirty flag for checkpoint */ 349 int s_dirty; /* dirty flag for checkpoint */
350 350
351 /* for node-related operations */ 351 /* for node-related operations */
352 struct f2fs_nm_info *nm_info; /* node manager */ 352 struct f2fs_nm_info *nm_info; /* node manager */
353 struct inode *node_inode; /* cache node blocks */ 353 struct inode *node_inode; /* cache node blocks */
354 354
355 /* for segment-related operations */ 355 /* for segment-related operations */
356 struct f2fs_sm_info *sm_info; /* segment manager */ 356 struct f2fs_sm_info *sm_info; /* segment manager */
357 struct bio *bio[NR_PAGE_TYPE]; /* bios to merge */ 357 struct bio *bio[NR_PAGE_TYPE]; /* bios to merge */
358 sector_t last_block_in_bio[NR_PAGE_TYPE]; /* last block number */ 358 sector_t last_block_in_bio[NR_PAGE_TYPE]; /* last block number */
359 struct rw_semaphore bio_sem; /* IO semaphore */ 359 struct rw_semaphore bio_sem; /* IO semaphore */
360 360
361 /* for checkpoint */ 361 /* for checkpoint */
362 struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ 362 struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
363 struct inode *meta_inode; /* cache meta blocks */ 363 struct inode *meta_inode; /* cache meta blocks */
364 struct mutex cp_mutex; /* for checkpoint procedure */ 364 struct mutex cp_mutex; /* for checkpoint procedure */
365 struct mutex fs_lock[NR_LOCK_TYPE]; /* for blocking FS operations */ 365 struct mutex fs_lock[NR_LOCK_TYPE]; /* for blocking FS operations */
366 struct mutex write_inode; /* mutex for write inode */ 366 struct mutex write_inode; /* mutex for write inode */
367 struct mutex writepages; /* mutex for writepages() */ 367 struct mutex writepages; /* mutex for writepages() */
368 int por_doing; /* recovery is doing or not */ 368 int por_doing; /* recovery is doing or not */
369 369
370 /* for orphan inode management */ 370 /* for orphan inode management */
371 struct list_head orphan_inode_list; /* orphan inode list */ 371 struct list_head orphan_inode_list; /* orphan inode list */
372 struct mutex orphan_inode_mutex; /* for orphan inode list */ 372 struct mutex orphan_inode_mutex; /* for orphan inode list */
373 unsigned int n_orphans; /* # of orphan inodes */ 373 unsigned int n_orphans; /* # of orphan inodes */
374 374
375 /* for directory inode management */ 375 /* for directory inode management */
376 struct list_head dir_inode_list; /* dir inode list */ 376 struct list_head dir_inode_list; /* dir inode list */
377 spinlock_t dir_inode_lock; /* for dir inode list lock */ 377 spinlock_t dir_inode_lock; /* for dir inode list lock */
378 unsigned int n_dirty_dirs; /* # of dir inodes */ 378 unsigned int n_dirty_dirs; /* # of dir inodes */
379 379
380 /* basic file system units */ 380 /* basic file system units */
381 unsigned int log_sectors_per_block; /* log2 sectors per block */ 381 unsigned int log_sectors_per_block; /* log2 sectors per block */
382 unsigned int log_blocksize; /* log2 block size */ 382 unsigned int log_blocksize; /* log2 block size */
383 unsigned int blocksize; /* block size */ 383 unsigned int blocksize; /* block size */
384 unsigned int root_ino_num; /* root inode number*/ 384 unsigned int root_ino_num; /* root inode number*/
385 unsigned int node_ino_num; /* node inode number*/ 385 unsigned int node_ino_num; /* node inode number*/
386 unsigned int meta_ino_num; /* meta inode number*/ 386 unsigned int meta_ino_num; /* meta inode number*/
387 unsigned int log_blocks_per_seg; /* log2 blocks per segment */ 387 unsigned int log_blocks_per_seg; /* log2 blocks per segment */
388 unsigned int blocks_per_seg; /* blocks per segment */ 388 unsigned int blocks_per_seg; /* blocks per segment */
389 unsigned int segs_per_sec; /* segments per section */ 389 unsigned int segs_per_sec; /* segments per section */
390 unsigned int secs_per_zone; /* sections per zone */ 390 unsigned int secs_per_zone; /* sections per zone */
391 unsigned int total_sections; /* total section count */ 391 unsigned int total_sections; /* total section count */
392 unsigned int total_node_count; /* total node block count */ 392 unsigned int total_node_count; /* total node block count */
393 unsigned int total_valid_node_count; /* valid node block count */ 393 unsigned int total_valid_node_count; /* valid node block count */
394 unsigned int total_valid_inode_count; /* valid inode count */ 394 unsigned int total_valid_inode_count; /* valid inode count */
395 int active_logs; /* # of active logs */ 395 int active_logs; /* # of active logs */
396 396
397 block_t user_block_count; /* # of user blocks */ 397 block_t user_block_count; /* # of user blocks */
398 block_t total_valid_block_count; /* # of valid blocks */ 398 block_t total_valid_block_count; /* # of valid blocks */
399 block_t alloc_valid_block_count; /* # of allocated blocks */ 399 block_t alloc_valid_block_count; /* # of allocated blocks */
400 block_t last_valid_block_count; /* for recovery */ 400 block_t last_valid_block_count; /* for recovery */
401 u32 s_next_generation; /* for NFS support */ 401 u32 s_next_generation; /* for NFS support */
402 atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */ 402 atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */
403 403
404 struct f2fs_mount_info mount_opt; /* mount options */ 404 struct f2fs_mount_info mount_opt; /* mount options */
405 405
406 /* for cleaning operations */ 406 /* for cleaning operations */
407 struct mutex gc_mutex; /* mutex for GC */ 407 struct mutex gc_mutex; /* mutex for GC */
408 struct f2fs_gc_kthread *gc_thread; /* GC thread */ 408 struct f2fs_gc_kthread *gc_thread; /* GC thread */
409 409
410 /* 410 /*
411 * for stat information. 411 * for stat information.
412 * one is for the LFS mode, and the other is for the SSR mode. 412 * one is for the LFS mode, and the other is for the SSR mode.
413 */ 413 */
414 struct f2fs_stat_info *stat_info; /* FS status information */ 414 struct f2fs_stat_info *stat_info; /* FS status information */
415 unsigned int segment_count[2]; /* # of allocated segments */ 415 unsigned int segment_count[2]; /* # of allocated segments */
416 unsigned int block_count[2]; /* # of allocated blocks */ 416 unsigned int block_count[2]; /* # of allocated blocks */
417 unsigned int last_victim[2]; /* last victim segment # */ 417 unsigned int last_victim[2]; /* last victim segment # */
418 int total_hit_ext, read_hit_ext; /* extent cache hit ratio */ 418 int total_hit_ext, read_hit_ext; /* extent cache hit ratio */
419 int bg_gc; /* background gc calls */ 419 int bg_gc; /* background gc calls */
420 spinlock_t stat_lock; /* lock for stat operations */ 420 spinlock_t stat_lock; /* lock for stat operations */
421 }; 421 };
422 422
423 /* 423 /*
424 * Inline functions 424 * Inline functions
425 */ 425 */
426 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 426 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
427 { 427 {
428 return container_of(inode, struct f2fs_inode_info, vfs_inode); 428 return container_of(inode, struct f2fs_inode_info, vfs_inode);
429 } 429 }
430 430
431 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 431 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
432 { 432 {
433 return sb->s_fs_info; 433 return sb->s_fs_info;
434 } 434 }
435 435
436 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 436 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
437 { 437 {
438 return (struct f2fs_super_block *)(sbi->raw_super); 438 return (struct f2fs_super_block *)(sbi->raw_super);
439 } 439 }
440 440
441 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 441 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
442 { 442 {
443 return (struct f2fs_checkpoint *)(sbi->ckpt); 443 return (struct f2fs_checkpoint *)(sbi->ckpt);
444 } 444 }
445 445
/* Return the node manager info of this filesystem instance. */
static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}
450 450
/* Return the segment manager info of this filesystem instance. */
static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}
455 455
/* Return the SIT (segment info table) state held by the segment manager. */
static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}
460 460
/* Return the free segment bitmap state held by the segment manager. */
static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}
465 465
/* Return the dirty segment list state held by the segment manager. */
static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}
470 470
/* Mark the filesystem as having state not yet written by a checkpoint. */
static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}
475 475
/* Clear the dirty mark, e.g. after the state has been checkpointed. */
static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}
480 480
481 static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 481 static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
482 { 482 {
483 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 483 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
484 return ckpt_flags & f; 484 return ckpt_flags & f;
485 } 485 }
486 486
487 static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 487 static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
488 { 488 {
489 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 489 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
490 ckpt_flags |= f; 490 ckpt_flags |= f;
491 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 491 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
492 } 492 }
493 493
494 static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 494 static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
495 { 495 {
496 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 496 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
497 ckpt_flags &= (~f); 497 ckpt_flags &= (~f);
498 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 498 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
499 } 499 }
500 500
/* Take the global fs lock of type @t; @t doubles as the lockdep nesting level. */
static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
	mutex_lock_nested(&sbi->fs_lock[t], t);
}
505 505
/* Release the global fs lock of type @t taken by mutex_lock_op(). */
static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
	mutex_unlock(&sbi->fs_lock[t]);
}
510 510
/*
 * Check whether the given nid is within the valid node id range.
 * An out-of-range nid crashes the kernel (BUG) rather than returning
 * an error, so callers must only pass nids from trusted metadata.
 */
static inline void check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	BUG_ON((nid >= NM_I(sbi)->max_nid));
}
518 518
519 #define F2FS_DEFAULT_ALLOCATED_BLOCKS 1 519 #define F2FS_DEFAULT_ALLOCATED_BLOCKS 1
520 520
521 /* 521 /*
522 * Check whether the inode has blocks or not 522 * Check whether the inode has blocks or not
523 */ 523 */
524 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 524 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
525 { 525 {
526 if (F2FS_I(inode)->i_xattr_nid) 526 if (F2FS_I(inode)->i_xattr_nid)
527 return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1); 527 return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
528 else 528 else
529 return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS); 529 return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
530 } 530 }
531 531
532 static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi, 532 static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
533 struct inode *inode, blkcnt_t count) 533 struct inode *inode, blkcnt_t count)
534 { 534 {
535 block_t valid_block_count; 535 block_t valid_block_count;
536 536
537 spin_lock(&sbi->stat_lock); 537 spin_lock(&sbi->stat_lock);
538 valid_block_count = 538 valid_block_count =
539 sbi->total_valid_block_count + (block_t)count; 539 sbi->total_valid_block_count + (block_t)count;
540 if (valid_block_count > sbi->user_block_count) { 540 if (valid_block_count > sbi->user_block_count) {
541 spin_unlock(&sbi->stat_lock); 541 spin_unlock(&sbi->stat_lock);
542 return false; 542 return false;
543 } 543 }
544 inode->i_blocks += count; 544 inode->i_blocks += count;
545 sbi->total_valid_block_count = valid_block_count; 545 sbi->total_valid_block_count = valid_block_count;
546 sbi->alloc_valid_block_count += (block_t)count; 546 sbi->alloc_valid_block_count += (block_t)count;
547 spin_unlock(&sbi->stat_lock); 547 spin_unlock(&sbi->stat_lock);
548 return true; 548 return true;
549 } 549 }
550 550
551 static inline int dec_valid_block_count(struct f2fs_sb_info *sbi, 551 static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
552 struct inode *inode, 552 struct inode *inode,
553 blkcnt_t count) 553 blkcnt_t count)
554 { 554 {
555 spin_lock(&sbi->stat_lock); 555 spin_lock(&sbi->stat_lock);
556 BUG_ON(sbi->total_valid_block_count < (block_t) count); 556 BUG_ON(sbi->total_valid_block_count < (block_t) count);
557 BUG_ON(inode->i_blocks < count); 557 BUG_ON(inode->i_blocks < count);
558 inode->i_blocks -= count; 558 inode->i_blocks -= count;
559 sbi->total_valid_block_count -= (block_t)count; 559 sbi->total_valid_block_count -= (block_t)count;
560 spin_unlock(&sbi->stat_lock); 560 spin_unlock(&sbi->stat_lock);
561 return 0; 561 return 0;
562 } 562 }
563 563
/* Account one more in-flight page of @count_type and mark the sb dirty. */
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}
569 569
/* Account one more dirty dentry page belonging to this inode. */
static inline void inode_inc_dirty_dents(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}
574 574
/* Account one fewer in-flight page of @count_type. */
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}
579 579
/* Account one fewer dirty dentry page belonging to this inode. */
static inline void inode_dec_dirty_dents(struct inode *inode)
{
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}
584 584
/* Snapshot of the in-flight page count for @count_type. */
static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}
589 589
590 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 590 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
591 { 591 {
592 unsigned int pages_per_sec = sbi->segs_per_sec * 592 unsigned int pages_per_sec = sbi->segs_per_sec *
593 (1 << sbi->log_blocks_per_seg); 593 (1 << sbi->log_blocks_per_seg);
594 return ((get_pages(sbi, block_type) + pages_per_sec - 1) 594 return ((get_pages(sbi, block_type) + pages_per_sec - 1)
595 >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; 595 >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
596 } 596 }
597 597
598 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 598 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
599 { 599 {
600 block_t ret; 600 block_t ret;
601 spin_lock(&sbi->stat_lock); 601 spin_lock(&sbi->stat_lock);
602 ret = sbi->total_valid_block_count; 602 ret = sbi->total_valid_block_count;
603 spin_unlock(&sbi->stat_lock); 603 spin_unlock(&sbi->stat_lock);
604 return ret; 604 return ret;
605 } 605 }
606 606
607 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 607 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
608 { 608 {
609 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 609 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
610 610
611 /* return NAT or SIT bitmap */ 611 /* return NAT or SIT bitmap */
612 if (flag == NAT_BITMAP) 612 if (flag == NAT_BITMAP)
613 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 613 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
614 else if (flag == SIT_BITMAP) 614 else if (flag == SIT_BITMAP)
615 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 615 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
616 616
617 return 0; 617 return 0;
618 } 618 }
619 619
620 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 620 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
621 { 621 {
622 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 622 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
623 int offset = (flag == NAT_BITMAP) ? 623 int offset = (flag == NAT_BITMAP) ?
624 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 624 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
625 return &ckpt->sit_nat_version_bitmap + offset; 625 return &ckpt->sit_nat_version_bitmap + offset;
626 } 626 }
627 627
/* Block address of the checkpoint pack holding the current checkpoint. */
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * An odd-numbered checkpoint lives in cp segment 0, an
	 * even-numbered one in cp segment 1.
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}
645 645
/* Offset (in blocks) of the summary blocks inside the checkpoint pack. */
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}
650 650
/*
 * Reserve @count node blocks: charge them against both the global block
 * and node budgets, and against @inode's i_blocks when an inode is
 * given.  Returns false when either budget would be exceeded.
 */
static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	block_t valid_block_count;
	unsigned int valid_node_count;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + (block_t)count;
	/*
	 * NOTE(review): alloc_valid_block_count is bumped even when the
	 * reservation fails below -- confirm this is intentional.
	 */
	sbi->alloc_valid_block_count += (block_t)count;
	valid_node_count = sbi->total_valid_node_count + count;

	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (valid_node_count > sbi->total_node_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (inode)
		inode->i_blocks += count;
	sbi->total_valid_node_count = valid_node_count;
	sbi->total_valid_block_count = valid_block_count;
	spin_unlock(&sbi->stat_lock);

	return true;
}
682 682
683 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 683 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
684 struct inode *inode, 684 struct inode *inode,
685 unsigned int count) 685 unsigned int count)
686 { 686 {
687 spin_lock(&sbi->stat_lock); 687 spin_lock(&sbi->stat_lock);
688 688
689 BUG_ON(sbi->total_valid_block_count < count); 689 BUG_ON(sbi->total_valid_block_count < count);
690 BUG_ON(sbi->total_valid_node_count < count); 690 BUG_ON(sbi->total_valid_node_count < count);
691 BUG_ON(inode->i_blocks < count); 691 BUG_ON(inode->i_blocks < count);
692 692
693 inode->i_blocks -= count; 693 inode->i_blocks -= count;
694 sbi->total_valid_node_count -= count; 694 sbi->total_valid_node_count -= count;
695 sbi->total_valid_block_count -= (block_t)count; 695 sbi->total_valid_block_count -= (block_t)count;
696 696
697 spin_unlock(&sbi->stat_lock); 697 spin_unlock(&sbi->stat_lock);
698 } 698 }
699 699
700 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 700 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
701 { 701 {
702 unsigned int ret; 702 unsigned int ret;
703 spin_lock(&sbi->stat_lock); 703 spin_lock(&sbi->stat_lock);
704 ret = sbi->total_valid_node_count; 704 ret = sbi->total_valid_node_count;
705 spin_unlock(&sbi->stat_lock); 705 spin_unlock(&sbi->stat_lock);
706 return ret; 706 return ret;
707 } 707 }
708 708
/* Bump the valid inode count; BUGs if every node slot is already an inode. */
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}
716 716
/* Drop the valid inode count (BUGs at zero).  Always returns 0. */
static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
	return 0;
}
725 725
726 static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi) 726 static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
727 { 727 {
728 unsigned int ret; 728 unsigned int ret;
729 spin_lock(&sbi->stat_lock); 729 spin_lock(&sbi->stat_lock);
730 ret = sbi->total_valid_inode_count; 730 ret = sbi->total_valid_inode_count;
731 spin_unlock(&sbi->stat_lock); 731 spin_unlock(&sbi->stat_lock);
732 return ret; 732 return ret;
733 } 733 }
734 734
/*
 * Drop a reference on @page, unlocking it first when @unlock is set.
 * NULL and ERR_PTR values are accepted and silently ignored, so callers
 * can pass the result of a failed page lookup directly.
 */
static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page || IS_ERR(page))
		return;

	if (unlock) {
		/* the caller must still hold the page lock to release it */
		BUG_ON(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}
746 746
747 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 747 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
748 { 748 {
749 if (dn->node_page) 749 if (dn->node_page)
750 f2fs_put_page(dn->node_page, 1); 750 f2fs_put_page(dn->node_page, 1);
751 if (dn->inode_page && dn->node_page != dn->inode_page) 751 if (dn->inode_page && dn->node_page != dn->inode_page)
752 f2fs_put_page(dn->inode_page, 0); 752 f2fs_put_page(dn->inode_page, 0);
753 dn->node_page = NULL; 753 dn->node_page = NULL;
754 dn->inode_page = NULL; 754 dn->inode_page = NULL;
755 } 755 }
756 756
/* Create a reclaimable slab cache for f2fs objects of @size bytes. */
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
		size_t size, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}
762 762
/* A raw node is an inode block when its footer's nid equals its ino. */
#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

/* Return whether the mapped node page holds an inode block. */
static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = (struct f2fs_node *)page_address(page);
	return RAW_IS_INODE(p);
}
770 770
/* Block address array of a raw node: i_addr for inodes, addr otherwise. */
static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}
775 775
776 static inline block_t datablock_addr(struct page *node_page, 776 static inline block_t datablock_addr(struct page *node_page,
777 unsigned int offset) 777 unsigned int offset)
778 { 778 {
779 struct f2fs_node *raw_node; 779 struct f2fs_node *raw_node;
780 __le32 *addr_array; 780 __le32 *addr_array;
781 raw_node = (struct f2fs_node *)page_address(node_page); 781 raw_node = (struct f2fs_node *)page_address(node_page);
782 addr_array = blkaddr_in_node(raw_node); 782 addr_array = blkaddr_in_node(raw_node);
783 return le32_to_cpu(addr_array[offset]); 783 return le32_to_cpu(addr_array[offset]);
784 } 784 }
785 785
/*
 * Test bit @nr in the bitmap at @addr.  f2fs bitmaps are big-endian
 * within each byte: bit 0 is the MSB of byte 0.  Returns non-zero iff
 * the bit is set.
 */
static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	char *byte = addr + (nr >> 3);
	int mask = 1 << (7 - (nr & 0x07));

	return *byte & mask;
}
794 794
/*
 * Set bit @nr in the big-endian bitmap at @addr and return its previous
 * value (non-zero iff it was already set).
 */
static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	char *byte = addr + (nr >> 3);
	int mask = 1 << (7 - (nr & 0x07));
	int old = *byte & mask;

	*byte |= mask;
	return old;
}
806 806
/*
 * Clear bit @nr in the big-endian bitmap at @addr and return its
 * previous value (non-zero iff it was set).
 */
static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	char *byte = addr + (nr >> 3);
	int mask = 1 << (7 - (nr & 0x07));
	int old = *byte & mask;

	*byte &= ~mask;
	return old;
}
818 818
/* Per-inode runtime state bits, used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_NEED_CP,		/* need to do checkpoint during fsync */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
};
827 827
/* Atomically set one of the FI_* state bits on the inode. */
static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}
832 832
/* Return non-zero if the given FI_* state bit is set on the inode. */
static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}
837 837
/* Atomically clear one of the FI_* state bits on the inode. */
static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}
842 842
/*
 * Remember the mode to apply after the ACL is set, and flag the inode
 * so the pending mode is picked up later.
 */
static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}
848 848
/*
 * Clear FI_ACL_MODE if it is set and report whether it was.
 * NOTE(review): the @flag argument is ignored; only FI_ACL_MODE is
 * tested and cleared -- confirm callers expect this.
 */
static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}
857 857
/* Cross-file prototypes, grouped by the fs/f2fs/*.c file defining them. */

/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
							struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
				struct page *, struct inode *);
void init_dent_inode(const struct qstr *, struct page *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);
897 897
898 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 898 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
899 { 899 {
900 return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name, 900 return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
901 inode); 901 inode);
902 } 902 }
903 903
/* Further cross-file prototypes, grouped by defining fs/f2fs/*.c file. */

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
int new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
			struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
					block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
					block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
					int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void reset_victim_segmap(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int check_orphan_space(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
993 void write_checkpoint(struct f2fs_sb_info *, bool); 993 void write_checkpoint(struct f2fs_sb_info *, bool);
994 void init_orphan_info(struct f2fs_sb_info *); 994 void init_orphan_info(struct f2fs_sb_info *);
995 int __init create_checkpoint_caches(void); 995 int __init create_checkpoint_caches(void);
996 void destroy_checkpoint_caches(void); 996 void destroy_checkpoint_caches(void);
997 997
998 /* 998 /*
999 * data.c 999 * data.c
1000 */ 1000 */
1001 int reserve_new_block(struct dnode_of_data *); 1001 int reserve_new_block(struct dnode_of_data *);
1002 void update_extent_cache(block_t, struct dnode_of_data *); 1002 void update_extent_cache(block_t, struct dnode_of_data *);
1003 struct page *find_data_page(struct inode *, pgoff_t); 1003 struct page *find_data_page(struct inode *, pgoff_t);
1004 struct page *get_lock_data_page(struct inode *, pgoff_t); 1004 struct page *get_lock_data_page(struct inode *, pgoff_t);
1005 struct page *get_new_data_page(struct inode *, pgoff_t, bool); 1005 struct page *get_new_data_page(struct inode *, pgoff_t, bool);
1006 int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int); 1006 int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
1007 int do_write_data_page(struct page *); 1007 int do_write_data_page(struct page *);
1008 1008
1009 /* 1009 /*
1010 * gc.c 1010 * gc.c
1011 */ 1011 */
1012 int start_gc_thread(struct f2fs_sb_info *); 1012 int start_gc_thread(struct f2fs_sb_info *);
1013 void stop_gc_thread(struct f2fs_sb_info *); 1013 void stop_gc_thread(struct f2fs_sb_info *);
1014 block_t start_bidx_of_node(unsigned int); 1014 block_t start_bidx_of_node(unsigned int);
1015 int f2fs_gc(struct f2fs_sb_info *); 1015 int f2fs_gc(struct f2fs_sb_info *);
1016 void build_gc_manager(struct f2fs_sb_info *); 1016 void build_gc_manager(struct f2fs_sb_info *);
1017 int __init create_gc_caches(void); 1017 int __init create_gc_caches(void);
1018 void destroy_gc_caches(void); 1018 void destroy_gc_caches(void);
1019 1019
1020 /* 1020 /*
1021 * recovery.c 1021 * recovery.c
1022 */ 1022 */
1023 void recover_fsync_data(struct f2fs_sb_info *); 1023 void recover_fsync_data(struct f2fs_sb_info *);
1024 bool space_for_roll_forward(struct f2fs_sb_info *); 1024 bool space_for_roll_forward(struct f2fs_sb_info *);
1025 1025
1026 /* 1026 /*
1027 * debug.c 1027 * debug.c
1028 */ 1028 */
#ifdef CONFIG_F2FS_STAT_FS
/*
 * Per-filesystem debugging statistics (maintained by debug.c).
 * These counters are best-effort diagnostics, not on-disk state.
 */
struct f2fs_stat_info {
	struct list_head stat_list;	/* link in the global stat list */
	struct f2fs_sb_info *sbi;	/* owning superblock info */
	struct mutex stat_lock;		/* serializes updates to this struct */
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;		/* extent cache hits vs. total lookups */
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc;			/* # of background GC passes performed */
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];	/* current segment no. per log type */
	int cursec[NR_CURSEG_TYPE];	/* current section no. per log type */
	int curzone[NR_CURSEG_TYPE];	/* current zone no. per log type */

	/* two-way split counters; presumably [data, node] — verify in debug.c */
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;	/* estimated memory footprints */
};

#define stat_inc_call_count(si)	((si)->call_count++)

/* Account one cleaned segment; type selects the data vs. node counter. */
#define stat_inc_seg_count(sbi, type)					\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		(si)->tot_segs++;					\
		if (type == SUM_TYPE_DATA)				\
			si->data_segs++;				\
		else							\
			si->node_segs++;				\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	(si->tot_blks += (blks))

/* Account (blks) moved data blocks; also bumps the total block count. */
#define stat_inc_data_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
	} while (0)

/* Account (blks) moved node blocks; also bumps the total block count. */
#define stat_inc_node_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
/* CONFIG_F2FS_STAT_FS disabled: every stat hook compiles away to nothing. */
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif
1103 1103
1104 extern const struct file_operations f2fs_dir_operations; 1104 extern const struct file_operations f2fs_dir_operations;
1105 extern const struct file_operations f2fs_file_operations; 1105 extern const struct file_operations f2fs_file_operations;
1106 extern const struct inode_operations f2fs_file_inode_operations; 1106 extern const struct inode_operations f2fs_file_inode_operations;
1107 extern const struct address_space_operations f2fs_dblock_aops; 1107 extern const struct address_space_operations f2fs_dblock_aops;
1108 extern const struct address_space_operations f2fs_node_aops; 1108 extern const struct address_space_operations f2fs_node_aops;
1109 extern const struct address_space_operations f2fs_meta_aops; 1109 extern const struct address_space_operations f2fs_meta_aops;
1110 extern const struct inode_operations f2fs_dir_inode_operations; 1110 extern const struct inode_operations f2fs_dir_inode_operations;
1111 extern const struct inode_operations f2fs_symlink_inode_operations; 1111 extern const struct inode_operations f2fs_symlink_inode_operations;
1112 extern const struct inode_operations f2fs_special_inode_operations; 1112 extern const struct inode_operations f2fs_special_inode_operations;
1113 #endif 1113 #endif
1114 1114
1 /* 1 /*
2 * fs/f2fs/gc.c 2 * fs/f2fs/gc.c
3 * 3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/ 5 * http://www.samsung.com/
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #include <linux/fs.h> 11 #include <linux/fs.h>
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/backing-dev.h> 13 #include <linux/backing-dev.h>
14 #include <linux/proc_fs.h> 14 #include <linux/proc_fs.h>
15 #include <linux/init.h> 15 #include <linux/init.h>
16 #include <linux/f2fs_fs.h> 16 #include <linux/f2fs_fs.h>
17 #include <linux/kthread.h> 17 #include <linux/kthread.h>
18 #include <linux/delay.h> 18 #include <linux/delay.h>
19 #include <linux/freezer.h> 19 #include <linux/freezer.h>
20 #include <linux/blkdev.h> 20 #include <linux/blkdev.h>
21 21
22 #include "f2fs.h" 22 #include "f2fs.h"
23 #include "node.h" 23 #include "node.h"
24 #include "segment.h" 24 #include "segment.h"
25 #include "gc.h" 25 #include "gc.h"
26 26
27 static struct kmem_cache *winode_slab; 27 static struct kmem_cache *winode_slab;
28 28
/*
 * Background GC kthread: wakes up periodically and tries to clean dirty
 * segments while the system looks idle, adapting its sleep interval to
 * the observed I/O activity.  Runs until kthread_stop() is called.
 */
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		/* back off while the superblock is frozen for writes */
		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too much frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			/* busy: sleep longer before the next attempt */
			wait_ms = increase_sleep_time(wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

		sbi->bg_gc++;

		/*
		 * if return value is not zero, no victim was selected
		 * NOTE(review): gc_mutex is not unlocked here on the GC path;
		 * presumably f2fs_gc() releases it — verify in gc.c/f2fs_gc.
		 */
		if (f2fs_gc(sbi))
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;

	} while (!kthread_should_stop());
	return 0;
}
90 90
91 int start_gc_thread(struct f2fs_sb_info *sbi) 91 int start_gc_thread(struct f2fs_sb_info *sbi)
92 { 92 {
93 struct f2fs_gc_kthread *gc_th; 93 struct f2fs_gc_kthread *gc_th;
94 dev_t dev = sbi->sb->s_bdev->bd_dev; 94 dev_t dev = sbi->sb->s_bdev->bd_dev;
95 95
96 if (!test_opt(sbi, BG_GC)) 96 if (!test_opt(sbi, BG_GC))
97 return 0; 97 return 0;
98 gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); 98 gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
99 if (!gc_th) 99 if (!gc_th)
100 return -ENOMEM; 100 return -ENOMEM;
101 101
102 sbi->gc_thread = gc_th; 102 sbi->gc_thread = gc_th;
103 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); 103 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
104 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, 104 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
105 "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); 105 "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
106 if (IS_ERR(gc_th->f2fs_gc_task)) { 106 if (IS_ERR(gc_th->f2fs_gc_task)) {
107 kfree(gc_th); 107 kfree(gc_th);
108 sbi->gc_thread = NULL; 108 sbi->gc_thread = NULL;
109 return -ENOMEM; 109 return -ENOMEM;
110 } 110 }
111 return 0; 111 return 0;
112 } 112 }
113 113
114 void stop_gc_thread(struct f2fs_sb_info *sbi) 114 void stop_gc_thread(struct f2fs_sb_info *sbi)
115 { 115 {
116 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; 116 struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
117 if (!gc_th) 117 if (!gc_th)
118 return; 118 return;
119 kthread_stop(gc_th->f2fs_gc_task); 119 kthread_stop(gc_th->f2fs_gc_task);
120 kfree(gc_th); 120 kfree(gc_th);
121 sbi->gc_thread = NULL; 121 sbi->gc_thread = NULL;
122 } 122 }
123 123
124 static int select_gc_type(int gc_type) 124 static int select_gc_type(int gc_type)
125 { 125 {
126 return (gc_type == BG_GC) ? GC_CB : GC_GREEDY; 126 return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
127 } 127 }
128 128
129 static void select_policy(struct f2fs_sb_info *sbi, int gc_type, 129 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
130 int type, struct victim_sel_policy *p) 130 int type, struct victim_sel_policy *p)
131 { 131 {
132 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 132 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
133 133
134 if (p->alloc_mode) { 134 if (p->alloc_mode) {
135 p->gc_mode = GC_GREEDY; 135 p->gc_mode = GC_GREEDY;
136 p->dirty_segmap = dirty_i->dirty_segmap[type]; 136 p->dirty_segmap = dirty_i->dirty_segmap[type];
137 p->ofs_unit = 1; 137 p->ofs_unit = 1;
138 } else { 138 } else {
139 p->gc_mode = select_gc_type(gc_type); 139 p->gc_mode = select_gc_type(gc_type);
140 p->dirty_segmap = dirty_i->dirty_segmap[DIRTY]; 140 p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
141 p->ofs_unit = sbi->segs_per_sec; 141 p->ofs_unit = sbi->segs_per_sec;
142 } 142 }
143 p->offset = sbi->last_victim[p->gc_mode]; 143 p->offset = sbi->last_victim[p->gc_mode];
144 } 144 }
145 145
146 static unsigned int get_max_cost(struct f2fs_sb_info *sbi, 146 static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
147 struct victim_sel_policy *p) 147 struct victim_sel_policy *p)
148 { 148 {
149 /* SSR allocates in a segment unit */ 149 /* SSR allocates in a segment unit */
150 if (p->alloc_mode == SSR) 150 if (p->alloc_mode == SSR)
151 return 1 << sbi->log_blocks_per_seg; 151 return 1 << sbi->log_blocks_per_seg;
152 if (p->gc_mode == GC_GREEDY) 152 if (p->gc_mode == GC_GREEDY)
153 return (1 << sbi->log_blocks_per_seg) * p->ofs_unit; 153 return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
154 else if (p->gc_mode == GC_CB) 154 else if (p->gc_mode == GC_CB)
155 return UINT_MAX; 155 return UINT_MAX;
156 else /* No other gc_mode */ 156 else /* No other gc_mode */
157 return 0; 157 return 0;
158 } 158 }
159 159
160 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) 160 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
161 { 161 {
162 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 162 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
163 unsigned int segno; 163 unsigned int segno;
164 164
165 /* 165 /*
166 * If the gc_type is FG_GC, we can select victim segments 166 * If the gc_type is FG_GC, we can select victim segments
167 * selected by background GC before. 167 * selected by background GC before.
168 * Those segments guarantee they have small valid blocks. 168 * Those segments guarantee they have small valid blocks.
169 */ 169 */
170 segno = find_next_bit(dirty_i->victim_segmap[BG_GC], 170 segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
171 TOTAL_SEGS(sbi), 0); 171 TOTAL_SEGS(sbi), 0);
172 if (segno < TOTAL_SEGS(sbi)) { 172 if (segno < TOTAL_SEGS(sbi)) {
173 clear_bit(segno, dirty_i->victim_segmap[BG_GC]); 173 clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
174 return segno; 174 return segno;
175 } 175 }
176 return NULL_SEGNO; 176 return NULL_SEGNO;
177 } 177 }
178 178
/*
 * Cost-benefit cost of cleaning the section containing @segno.
 *
 * Combines section utilization u (percent of one segment's blocks,
 * averaged over the section) with age (0..100, from the section's mean
 * mtime scaled between the global min/max mtime).  The cost-benefit
 * value (100 - u) * age / (100 + u) is subtracted from UINT_MAX so that
 * a smaller return value means a better victim, matching the
 * minimum-cost search in get_victim_by_default().
 */
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	/* average mtime and valid-block count over the whole section */
	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time is changed by user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	/* only compute age once the mtime window is non-degenerate */
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
210 210
211 static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno, 211 static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
212 struct victim_sel_policy *p) 212 struct victim_sel_policy *p)
213 { 213 {
214 if (p->alloc_mode == SSR) 214 if (p->alloc_mode == SSR)
215 return get_seg_entry(sbi, segno)->ckpt_valid_blocks; 215 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
216 216
217 /* alloc_mode == LFS */ 217 /* alloc_mode == LFS */
218 if (p->gc_mode == GC_GREEDY) 218 if (p->gc_mode == GC_GREEDY)
219 return get_valid_blocks(sbi, segno, sbi->segs_per_sec); 219 return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
220 else 220 else
221 return get_cb_cost(sbi, segno); 221 return get_cb_cost(sbi, segno);
222 } 222 }
223 223
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 *
 * Returns 1 and stores the chosen segment number in *result when a
 * victim was found, 0 otherwise.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int segno;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	/* seed the search with "no victim" at the worst possible cost */
	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	/* foreground GC may reuse a victim pre-selected by background GC */
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			/* wrap around once if the scan started mid-bitmap */
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		/* next probe starts at the following ofs_unit boundary */
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;

		/* skip segments already claimed as victims */
		if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
			continue;
		if (gc_type == BG_GC &&
				test_bit(segno, dirty_i->victim_segmap[BG_GC]))
			continue;
		/* never pick a section that holds a current (active) log */
		if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		/* worst-cost candidates do not count toward the search cap */
		if (cost == get_max_cost(sbi, &p))
			continue;

		if (nsearched++ >= MAX_VICTIM_SEARCH) {
			/* remember where to resume the next scan */
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
got_it:
	if (p.min_segno != NULL_SEGNO) {
		/* report the first segment of the victim's ofs_unit */
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
		if (p.alloc_mode == LFS) {
			int i;
			for (i = 0; i < p.ofs_unit; i++)
				set_bit(*result + i,
					dirty_i->victim_segmap[gc_type]);
		}
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
306 306
/* Default victim-selection operations, shared by GC and SSR selection. */
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
310 310
311 static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist) 311 static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
312 { 312 {
313 struct list_head *this; 313 struct list_head *this;
314 struct inode_entry *ie; 314 struct inode_entry *ie;
315 315
316 list_for_each(this, ilist) { 316 list_for_each(this, ilist) {
317 ie = list_entry(this, struct inode_entry, list); 317 ie = list_entry(this, struct inode_entry, list);
318 if (ie->inode->i_ino == ino) 318 if (ie->inode->i_ino == ino)
319 return ie->inode; 319 return ie->inode;
320 } 320 }
321 return NULL; 321 return NULL;
322 } 322 }
323 323
324 static void add_gc_inode(struct inode *inode, struct list_head *ilist) 324 static void add_gc_inode(struct inode *inode, struct list_head *ilist)
325 { 325 {
326 struct list_head *this; 326 struct list_head *this;
327 struct inode_entry *new_ie, *ie; 327 struct inode_entry *new_ie, *ie;
328 328
329 list_for_each(this, ilist) { 329 list_for_each(this, ilist) {
330 ie = list_entry(this, struct inode_entry, list); 330 ie = list_entry(this, struct inode_entry, list);
331 if (ie->inode == inode) { 331 if (ie->inode == inode) {
332 iput(inode); 332 iput(inode);
333 return; 333 return;
334 } 334 }
335 } 335 }
336 repeat: 336 repeat:
337 new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS); 337 new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
338 if (!new_ie) { 338 if (!new_ie) {
339 cond_resched(); 339 cond_resched();
340 goto repeat; 340 goto repeat;
341 } 341 }
342 new_ie->inode = inode; 342 new_ie->inode = inode;
343 list_add_tail(&new_ie->list, ilist); 343 list_add_tail(&new_ie->list, ilist);
344 } 344 }
345 345
346 static void put_gc_inode(struct list_head *ilist) 346 static void put_gc_inode(struct list_head *ilist)
347 { 347 {
348 struct inode_entry *ie, *next_ie; 348 struct inode_entry *ie, *next_ie;
349 list_for_each_entry_safe(ie, next_ie, ilist, list) { 349 list_for_each_entry_safe(ie, next_ie, ilist, list) {
350 iput(ie->inode); 350 iput(ie->inode);
351 list_del(&ie->list); 351 list_del(&ie->list);
352 kmem_cache_free(winode_slab, ie); 352 kmem_cache_free(winode_slab, ie);
353 } 353 }
354 } 354 }
355 355
356 static int check_valid_map(struct f2fs_sb_info *sbi, 356 static int check_valid_map(struct f2fs_sb_info *sbi,
357 unsigned int segno, int offset) 357 unsigned int segno, int offset)
358 { 358 {
359 struct sit_info *sit_i = SIT_I(sbi); 359 struct sit_info *sit_i = SIT_I(sbi);
360 struct seg_entry *sentry; 360 struct seg_entry *sentry;
361 int ret; 361 int ret;
362 362
363 mutex_lock(&sit_i->sentry_lock); 363 mutex_lock(&sit_i->sentry_lock);
364 sentry = get_seg_entry(sbi, segno); 364 sentry = get_seg_entry(sbi, segno);
365 ret = f2fs_test_bit(offset, sentry->cur_valid_map); 365 ret = f2fs_test_bit(offset, sentry->cur_valid_map);
366 mutex_unlock(&sit_i->sentry_lock); 366 mutex_unlock(&sit_i->sentry_lock);
367 return ret; 367 return ret;
368 } 368 }
369 369
/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 *
 * Makes two passes over the segment's summary entries: the first pass
 * only issues readahead for the node pages, the second re-dirties each
 * still-valid node page so writeback migrates it out of this segment.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;	/* true during the readahead pass */
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		/* skip blocks invalidated since the summary was written */
		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}
	if (initial) {
		initial = false;
		goto next_step;
	}

	/* foreground GC must flush the dirtied node pages synchronously */
	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);
	}
}
423 423
424 /* 424 /*
425 * Calculate start block index indicating the given node offset. 425 * Calculate start block index indicating the given node offset.
426 * Be careful, caller should give this node offset only indicating direct node 426 * Be careful, caller should give this node offset only indicating direct node
427 * blocks. If any node offsets, which point the other types of node blocks such 427 * blocks. If any node offsets, which point the other types of node blocks such
428 * as indirect or double indirect node blocks, are given, it must be a caller's 428 * as indirect or double indirect node blocks, are given, it must be a caller's
429 * bug. 429 * bug.
430 */ 430 */
431 block_t start_bidx_of_node(unsigned int node_ofs) 431 block_t start_bidx_of_node(unsigned int node_ofs)
432 { 432 {
433 unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4; 433 unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
434 unsigned int bidx; 434 unsigned int bidx;
435 435
436 if (node_ofs == 0) 436 if (node_ofs == 0)
437 return 0; 437 return 0;
438 438
439 if (node_ofs <= 2) { 439 if (node_ofs <= 2) {
440 bidx = node_ofs - 1; 440 bidx = node_ofs - 1;
441 } else if (node_ofs <= indirect_blks) { 441 } else if (node_ofs <= indirect_blks) {
442 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1); 442 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
443 bidx = node_ofs - 2 - dec; 443 bidx = node_ofs - 2 - dec;
444 } else { 444 } else {
445 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1); 445 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
446 bidx = node_ofs - 5 - dec; 446 bidx = node_ofs - 5 - dec;
447 } 447 }
448 return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE; 448 return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
449 } 449 }
450 450
/*
 * Check whether the summary entry still describes a live data block:
 * the node it names must be readable, its version must match the NAT
 * entry, and the ofs_in_node slot must still point at @blkaddr.
 * On success fill @dni and @nofs and return 1; otherwise return 0.
 */
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	/* stale summary: the node was reallocated since GC picked it */
	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	/* the block was rewritten elsewhere in the meantime */
	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}
481 481
/*
 * Relocate one victim data page. Background GC only marks the page
 * dirty (and cold) so regular writeback moves it later; foreground GC
 * writes it out immediately under the DATA_WRITE lock.
 * Consumes the locked page reference taken by the caller.
 */
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	/* bail out if the page was truncated or remapped meanwhile */
	if (page->mapping != inode->i_mapping)
		goto out;

	if (inode != page->mapping->host)
		goto out;

	/* someone is already writing this page back */
	if (PageWriteback(page))
		goto out;

	if (gc_type == BG_GC) {
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
		mutex_lock_op(sbi, DATA_WRITE);
		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			/* keep the dirty-dentry accounting in sync */
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		mutex_unlock_op(sbi, DATA_WRITE);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}
512 512
/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	/*
	 * Four passes over the segment:
	 *  phase 0 - readahead the node pages named in the summaries
	 *  phase 1 - validate dnodes and readahead the owning inodes
	 *  phase 2 - iget the inodes and readahead their data pages
	 *  phase 3 - lock each data page and actually move it
	 */
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		start_bidx = start_bidx_of_node(nofs);
		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			/* keep the inode alive on ilist until put_gc_inode() */
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}
	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		/* flush the data pages queued by move_data_page() */
		f2fs_submit_bio(sbi, DATA, true);
	}
}
597 597
/*
 * Pick a victim segment via the configured victim-selection policy,
 * holding the SIT lock so the selection sees a stable valid-block view.
 * Returns nonzero and sets *victim when a segment was chosen.
 */
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
608 608
/*
 * Garbage-collect one segment: read its summary block and dispatch to
 * the node- or data-segment collector according to the summary type.
 */
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

	/*
	 * CP needs to lock sum_page. In this time, we don't need
	 * to lock this page, because this summary page is not gone anywhere.
	 * Also, this page is not gonna be updated before GC is done.
	 */
	unlock_page(sum_page);
	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
}
641 641
/*
 * Main garbage-collection entry point. Collects victim sections until
 * enough free sections exist, escalating from background to foreground
 * GC as needed.
 *
 * NOTE(review): the caller appears to hold sbi->gc_mutex on entry — it
 * is unlocked here at "stop" but never taken in this function; confirm
 * against the callers.
 *
 * Returns 0 when at least one victim was collected, -1 otherwise.
 */
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	/* don't GC on an inactive (unmounting) superblock */
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	/* escalate to foreground GC while free sections remain scarce */
	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
		gc_type = FG_GC;

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	/* collect every segment of the victim section */
	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	/* the section is only freed once all its blocks were moved */
	if (gc_type == FG_GC &&
			get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
		nfree++;

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	/* release the inodes pinned by gc_data_segment() */
	put_gc_inode(&ilist);
	return ret;
}
680 680
/* Install the default victim-selection operations for this superblock. */
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}
685 685
686 int __init create_gc_caches(void) 686 int __init create_gc_caches(void)
687 { 687 {
688 winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", 688 winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
689 sizeof(struct inode_entry), NULL); 689 sizeof(struct inode_entry), NULL);
690 if (!winode_slab) 690 if (!winode_slab)
691 return -ENOMEM; 691 return -ENOMEM;
692 return 0; 692 return 0;
693 } 693 }
694 694
/* Tear down the GC inode-entry slab cache created by create_gc_caches(). */
void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}
699 699
1 /* 1 /*
2 * fs/f2fs/super.c 2 * fs/f2fs/super.c
3 * 3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/ 5 * http://www.samsung.com/
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/init.h> 12 #include <linux/init.h>
13 #include <linux/fs.h> 13 #include <linux/fs.h>
14 #include <linux/statfs.h> 14 #include <linux/statfs.h>
15 #include <linux/proc_fs.h> 15 #include <linux/proc_fs.h>
16 #include <linux/buffer_head.h> 16 #include <linux/buffer_head.h>
17 #include <linux/backing-dev.h> 17 #include <linux/backing-dev.h>
18 #include <linux/kthread.h> 18 #include <linux/kthread.h>
19 #include <linux/parser.h> 19 #include <linux/parser.h>
20 #include <linux/mount.h> 20 #include <linux/mount.h>
21 #include <linux/seq_file.h> 21 #include <linux/seq_file.h>
22 #include <linux/random.h> 22 #include <linux/random.h>
23 #include <linux/exportfs.h> 23 #include <linux/exportfs.h>
24 #include <linux/f2fs_fs.h> 24 #include <linux/f2fs_fs.h>
25 25
26 #include "f2fs.h" 26 #include "f2fs.h"
27 #include "node.h" 27 #include "node.h"
28 #include "xattr.h" 28 #include "xattr.h"
29 29
30 static struct kmem_cache *f2fs_inode_cachep; 30 static struct kmem_cache *f2fs_inode_cachep;
31 31
/* Mount option tokens, matched against the strings in f2fs_tokens below. */
enum {
	Opt_gc_background_off,
	Opt_disable_roll_forward,
	Opt_discard,
	Opt_noheap,
	Opt_nouser_xattr,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_err,
};

/* Token table consumed by match_token() in parse_options(). */
static match_table_t f2fs_tokens = {
	{Opt_gc_background_off, "background_gc_off"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_err, NULL},
};
55 55
56 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) 56 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
57 { 57 {
58 struct va_format vaf; 58 struct va_format vaf;
59 va_list args; 59 va_list args;
60 60
61 va_start(args, fmt); 61 va_start(args, fmt);
62 vaf.fmt = fmt; 62 vaf.fmt = fmt;
63 vaf.va = &args; 63 vaf.va = &args;
64 printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); 64 printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
65 va_end(args); 65 va_end(args);
66 } 66 }
67 67
/* Slab constructor: set up the embedded VFS inode exactly once per object. */
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
74 74
/*
 * Allocate a zeroed in-core f2fs inode from the slab cache and set its
 * f2fs-specific defaults. Returns the embedded VFS inode, or NULL on
 * allocation failure.
 */
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_dents, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	rwlock_init(&fi->ext.ext_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	return &fi->vfs_inode;
}
96 96
/* RCU callback: return the inode object to the slab once readers are done. */
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
102 102
/* Defer the actual free to RCU so lock-free inode lookups stay safe. */
static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
107 107
/*
 * Unmount-time teardown: stop background GC, write a final checkpoint,
 * and release every in-core structure this superblock owns.
 */
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	f2fs_destroy_stats(sbi);
	stop_gc_thread(sbi);

	/* make the on-disk image consistent before releasing anything */
	write_checkpoint(sbi, true);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	brelse(sbi->raw_super_buf);
	kfree(sbi);
}
130 130
/*
 * sync_fs handler: write a checkpoint for a synchronous sync, or just
 * trigger balancing (possible GC) for an asynchronous one. A clean
 * filesystem with no dirty node pages is a no-op.
 */
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
		return 0;

	if (sync)
		write_checkpoint(sbi, false);
	else
		f2fs_balance_fs(sbi);

	return 0;
}
145 145
146 static int f2fs_freeze(struct super_block *sb) 146 static int f2fs_freeze(struct super_block *sb)
147 { 147 {
148 int err; 148 int err;
149 149
150 if (sb->s_flags & MS_RDONLY) 150 if (sb->s_flags & MS_RDONLY)
151 return 0; 151 return 0;
152 152
153 err = f2fs_sync_fs(sb, 1); 153 err = f2fs_sync_fs(sb, 1);
154 return err; 154 return err;
155 } 155 }
156 156
/* unfreeze_fs handler: nothing to undo, freezing left no special state. */
static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
161 161
/*
 * statfs handler: report block/inode totals, discounting the blocks
 * before segment 0 and the overprovisioned segments reserved for GC.
 */
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	/* overprovision area, in blocks */
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count;
	buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);

	buf->f_namelen = F2FS_MAX_NAME_LEN;
	/* split the 64-bit device id across the two fsid words */
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
189 189
/*
 * show_options handler: emit the mount options currently in effect,
 * mirroring the tokens accepted by parse_options().
 */
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (test_opt(sbi, BG_GC))
		seq_puts(seq, ",background_gc_on");
	else
		seq_puts(seq, ",background_gc_off");
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");

	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}
223 223
/* Superblock operations handed to the VFS at mount time. */
static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
};
236 236
/*
 * NFS export helper: look up an inode by number and verify its
 * generation, returning -ESTALE for stale or reserved inode numbers.
 */
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	/* inode numbers below the root inode are reserved/meta */
	if (ino < F2FS_ROOT_INO(sbi))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
261 261
/* Decode an NFS file handle into a dentry via the generic helper. */
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
268 268
/* Decode the parent directory from an NFS file handle. */
static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
275 275
/* Export operations enabling NFS exporting of f2fs filesystems. */
static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
</export_ops>
281 281
/*
 * Parse the comma-separated mount option string and apply each
 * recognized option to the sbi option flags (set before this call to
 * their compile-time defaults).  Returns 0 on success or -EINVAL on an
 * unrecognized option or a bad argument value.
 */
static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
				char *options)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int arg = 0;

	/* no option string at all is fine: keep the defaults */
	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background_off:
			clear_opt(sbi, BG_GC);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
#else
		/* xattr support compiled out: accept the option but warn */
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		/* ACL support compiled out: accept the option but warn */
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			/* only 2, 4 or the maximum (6) active logs are valid */
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}
354 354
355 static loff_t max_file_size(unsigned bits) 355 static loff_t max_file_size(unsigned bits)
356 { 356 {
357 loff_t result = ADDRS_PER_INODE; 357 loff_t result = ADDRS_PER_INODE;
358 loff_t leaf_count = ADDRS_PER_BLOCK; 358 loff_t leaf_count = ADDRS_PER_BLOCK;
359 359
360 /* two direct node blocks */ 360 /* two direct node blocks */
361 result += (leaf_count * 2); 361 result += (leaf_count * 2);
362 362
363 /* two indirect node blocks */ 363 /* two indirect node blocks */
364 leaf_count *= NIDS_PER_BLOCK; 364 leaf_count *= NIDS_PER_BLOCK;
365 result += (leaf_count * 2); 365 result += (leaf_count * 2);
366 366
367 /* one double indirect node block */ 367 /* one double indirect node block */
368 leaf_count *= NIDS_PER_BLOCK; 368 leaf_count *= NIDS_PER_BLOCK;
369 result += leaf_count; 369 result += leaf_count;
370 370
371 result <<= bits; 371 result <<= bits;
372 return result; 372 return result;
373 } 373 }
374 374
375 static int sanity_check_raw_super(struct super_block *sb, 375 static int sanity_check_raw_super(struct super_block *sb,
376 struct f2fs_super_block *raw_super) 376 struct f2fs_super_block *raw_super)
377 { 377 {
378 unsigned int blocksize; 378 unsigned int blocksize;
379 379
380 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { 380 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
381 f2fs_msg(sb, KERN_INFO, 381 f2fs_msg(sb, KERN_INFO,
382 "Magic Mismatch, valid(0x%x) - read(0x%x)", 382 "Magic Mismatch, valid(0x%x) - read(0x%x)",
383 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); 383 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
384 return 1; 384 return 1;
385 } 385 }
386 386
387 /* Currently, support only 4KB page cache size */ 387 /* Currently, support only 4KB page cache size */
388 if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) { 388 if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
389 f2fs_msg(sb, KERN_INFO, 389 f2fs_msg(sb, KERN_INFO,
390 "Invalid page_cache_size (%lu), supports only 4KB\n", 390 "Invalid page_cache_size (%lu), supports only 4KB\n",
391 PAGE_CACHE_SIZE); 391 PAGE_CACHE_SIZE);
392 return 1; 392 return 1;
393 } 393 }
394 394
395 /* Currently, support only 4KB block size */ 395 /* Currently, support only 4KB block size */
396 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 396 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
397 if (blocksize != F2FS_BLKSIZE) { 397 if (blocksize != F2FS_BLKSIZE) {
398 f2fs_msg(sb, KERN_INFO, 398 f2fs_msg(sb, KERN_INFO,
399 "Invalid blocksize (%u), supports only 4KB\n", 399 "Invalid blocksize (%u), supports only 4KB\n",
400 blocksize); 400 blocksize);
401 return 1; 401 return 1;
402 } 402 }
403 403
404 if (le32_to_cpu(raw_super->log_sectorsize) != 404 if (le32_to_cpu(raw_super->log_sectorsize) !=
405 F2FS_LOG_SECTOR_SIZE) { 405 F2FS_LOG_SECTOR_SIZE) {
406 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize"); 406 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
407 return 1; 407 return 1;
408 } 408 }
409 if (le32_to_cpu(raw_super->log_sectors_per_block) != 409 if (le32_to_cpu(raw_super->log_sectors_per_block) !=
410 F2FS_LOG_SECTORS_PER_BLOCK) { 410 F2FS_LOG_SECTORS_PER_BLOCK) {
411 f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block"); 411 f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
412 return 1; 412 return 1;
413 } 413 }
414 return 0; 414 return 0;
415 } 415 }
416 416
417 static int sanity_check_ckpt(struct f2fs_sb_info *sbi) 417 static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
418 { 418 {
419 unsigned int total, fsmeta; 419 unsigned int total, fsmeta;
420 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 420 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
421 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 421 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
422 422
423 total = le32_to_cpu(raw_super->segment_count); 423 total = le32_to_cpu(raw_super->segment_count);
424 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); 424 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
425 fsmeta += le32_to_cpu(raw_super->segment_count_sit); 425 fsmeta += le32_to_cpu(raw_super->segment_count_sit);
426 fsmeta += le32_to_cpu(raw_super->segment_count_nat); 426 fsmeta += le32_to_cpu(raw_super->segment_count_nat);
427 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); 427 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
428 fsmeta += le32_to_cpu(raw_super->segment_count_ssa); 428 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
429 429
430 if (fsmeta >= total) 430 if (fsmeta >= total)
431 return 1; 431 return 1;
432 432
433 if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) { 433 if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
434 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); 434 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
435 return 1; 435 return 1;
436 } 436 }
437 return 0; 437 return 0;
438 } 438 }
439 439
/*
 * Copy the geometry fields of the already-validated on-disk superblock
 * into native-endian members of the in-memory sb info, and zero the
 * per-type page counters.
 */
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	/* half the NAT segments hold live entries; the rest are the mirror */
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);
}
464 464
465 static int validate_superblock(struct super_block *sb, 465 static int validate_superblock(struct super_block *sb,
466 struct f2fs_super_block **raw_super, 466 struct f2fs_super_block **raw_super,
467 struct buffer_head **raw_super_buf, sector_t block) 467 struct buffer_head **raw_super_buf, sector_t block)
468 { 468 {
469 const char *super = (block == 0 ? "first" : "second"); 469 const char *super = (block == 0 ? "first" : "second");
470 470
471 /* read f2fs raw super block */ 471 /* read f2fs raw super block */
472 *raw_super_buf = sb_bread(sb, block); 472 *raw_super_buf = sb_bread(sb, block);
473 if (!*raw_super_buf) { 473 if (!*raw_super_buf) {
474 f2fs_msg(sb, KERN_ERR, "unable to read %s superblock", 474 f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
475 super); 475 super);
476 return 1; 476 return 1;
477 } 477 }
478 478
479 *raw_super = (struct f2fs_super_block *) 479 *raw_super = (struct f2fs_super_block *)
480 ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET); 480 ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
481 481
482 /* sanity checking of raw super */ 482 /* sanity checking of raw super */
483 if (!sanity_check_raw_super(sb, *raw_super)) 483 if (!sanity_check_raw_super(sb, *raw_super))
484 return 0; 484 return 0;
485 485
486 f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem " 486 f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
487 "in %s superblock", super); 487 "in %s superblock", super);
488 return 1; 488 return 1;
489 } 489 }
490 490
/*
 * Mount-time entry point (called by mount_bdev() via f2fs_mount()):
 * read and validate a superblock, load the checkpoint, build the
 * segment/node/GC managers, recover orphans and fsync'd data, and
 * attach the root dentry.  Returns 0 on success or a negative errno;
 * all failure paths unwind through the labels at the bottom, in
 * reverse order of construction.
 */
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	/* try the first superblock copy; fall back to the second on failure */
	if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
		brelse(raw_super_buf);
		if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
			goto free_sb_buf;
	}
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	/* background GC defaults to on; mount options may clear it below */
	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	if (parse_options(sb, sbi, (char *)data))
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_fs_info = sbi;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->write_inode);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_LOCK_TYPE; i++)
		mutex_init(&sbi->fs_lock[i]);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	/* pull block/node/inode accounting out of the valid checkpoint */
	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	err = -EINVAL;
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	/* the root must be a non-empty directory */
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
		goto free_root_inode;

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
		recover_fsync_data(sbi);

	/* After POR, we can run background GC thread */
	err = start_gc_thread(sbi);
	if (err)
		goto fail;

	err = f2fs_build_stats(sbi);
	if (err)
		goto fail;

	return 0;
fail:
	/* unwind in reverse order of construction */
	stop_gc_thread(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}
676 676
/*
 * Mount callback: f2fs requires a block device, so delegate to
 * mount_bdev() with f2fs_fill_super doing the per-mount setup.
 */
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
682 682
/* filesystem type registration record for "mount -t f2fs" */
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
690 690
691 static int __init init_inodecache(void) 691 static int __init init_inodecache(void)
692 { 692 {
693 f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", 693 f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
694 sizeof(struct f2fs_inode_info), NULL); 694 sizeof(struct f2fs_inode_info), NULL);
695 if (f2fs_inode_cachep == NULL) 695 if (f2fs_inode_cachep == NULL)
696 return -ENOMEM; 696 return -ENOMEM;
697 return 0; 697 return 0;
698 } 698 }
699 699
/* Tear down the inode slab cache created by init_inodecache(). */
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
709 709
710 static int __init init_f2fs_fs(void) 710 static int __init init_f2fs_fs(void)
711 { 711 {
712 int err; 712 int err;
713 713
714 err = init_inodecache(); 714 err = init_inodecache();
715 if (err) 715 if (err)
716 goto fail; 716 goto fail;
717 err = create_node_manager_caches(); 717 err = create_node_manager_caches();
718 if (err) 718 if (err)
719 goto fail; 719 goto fail;
720 err = create_gc_caches(); 720 err = create_gc_caches();
721 if (err) 721 if (err)
722 goto fail; 722 goto fail;
723 err = create_checkpoint_caches(); 723 err = create_checkpoint_caches();
724 if (err) 724 if (err)
725 goto fail; 725 goto fail;
726 err = register_filesystem(&f2fs_fs_type); 726 err = register_filesystem(&f2fs_fs_type);
727 if (err) 727 if (err)
728 goto fail; 728 goto fail;
729 f2fs_create_root_stats(); 729 f2fs_create_root_stats();
730 fail: 730 fail:
731 return err; 731 return err;
732 } 732 }
733 733
/*
 * Module exit: unregister the filesystem first (no new mounts can
 * start), then destroy the caches in reverse order of creation.
 */
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	destroy_checkpoint_caches();
	destroy_gc_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
}
743 743
744 module_init(init_f2fs_fs) 744 module_init(init_f2fs_fs)
745 module_exit(exit_f2fs_fs) 745 module_exit(exit_f2fs_fs)
746 746
747 MODULE_AUTHOR("Samsung Electronics's Praesto Team"); 747 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
748 MODULE_DESCRIPTION("Flash Friendly File System"); 748 MODULE_DESCRIPTION("Flash Friendly File System");
749 MODULE_LICENSE("GPL"); 749 MODULE_LICENSE("GPL");
750 750