Commit 8707df38478c8e0958b706f0ea1cdf99d00a9469

Authored by Ryusuke Konishi
1 parent 79739565e1

nilfs2: separate read method of meta data files on super root block

This replaces the nilfs_mdt_read_inode_direct function with individual
read methods: nilfs_dat_read, nilfs_sufile_read, and nilfs_cpfile_read.

This provides the opportunity to initialize local variables of each
metadata file after reading the inode.

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>

Showing 7 changed files with 39 additions and 6 deletions Inline Diff

1 /* 1 /*
2 * cpfile.c - NILFS checkpoint file. 2 * cpfile.c - NILFS checkpoint file.
3 * 3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 * 19 *
20 * Written by Koji Sato <koji@osrg.net>. 20 * Written by Koji Sato <koji@osrg.net>.
21 */ 21 */
22 22
23 #include <linux/kernel.h> 23 #include <linux/kernel.h>
24 #include <linux/fs.h> 24 #include <linux/fs.h>
25 #include <linux/string.h> 25 #include <linux/string.h>
26 #include <linux/buffer_head.h> 26 #include <linux/buffer_head.h>
27 #include <linux/errno.h> 27 #include <linux/errno.h>
28 #include <linux/nilfs2_fs.h> 28 #include <linux/nilfs2_fs.h>
29 #include "mdt.h" 29 #include "mdt.h"
30 #include "cpfile.h" 30 #include "cpfile.h"
31 31
32 32
/* number of checkpoint entries that fit in one cpfile block */
static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}
38 38
39 /* block number from the beginning of the file */ 39 /* block number from the beginning of the file */
40 static unsigned long 40 static unsigned long
41 nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno) 41 nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
42 { 42 {
43 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; 43 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
44 do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); 44 do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
45 return (unsigned long)tcno; 45 return (unsigned long)tcno;
46 } 46 }
47 47
/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	/*
	 * do_div() divides tcno in place and returns the remainder,
	 * i.e. the entry index of @cno within its block.
	 */
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}
55 55
static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	/*
	 * Number of checkpoints from @curr (inclusive) that lie within
	 * the same block, capped so that the range never exceeds @max.
	 */
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}
66 66
/* nonzero if checkpoint @cno is stored in the first block of the cpfile */
static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}
72 72
73 static unsigned int 73 static unsigned int
74 nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile, 74 nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
75 struct buffer_head *bh, 75 struct buffer_head *bh,
76 void *kaddr, 76 void *kaddr,
77 unsigned int n) 77 unsigned int n)
78 { 78 {
79 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); 79 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
80 unsigned int count; 80 unsigned int count;
81 81
82 count = le32_to_cpu(cp->cp_checkpoints_count) + n; 82 count = le32_to_cpu(cp->cp_checkpoints_count) + n;
83 cp->cp_checkpoints_count = cpu_to_le32(count); 83 cp->cp_checkpoints_count = cpu_to_le32(count);
84 return count; 84 return count;
85 } 85 }
86 86
static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	/*
	 * Subtract @n from the valid-checkpoint count cached in the
	 * leading slot of the block and return the new count.
	 */
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	/* would underflow: more removals than the block holds */
	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}
101 101
/* the cpfile header object sits at the start of the first block */
static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}
109 109
/* address of checkpoint @cno inside its already-mapped block */
static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}
118 118
119 static void nilfs_cpfile_block_init(struct inode *cpfile, 119 static void nilfs_cpfile_block_init(struct inode *cpfile,
120 struct buffer_head *bh, 120 struct buffer_head *bh,
121 void *kaddr) 121 void *kaddr)
122 { 122 {
123 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); 123 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
124 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; 124 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
125 int n = nilfs_cpfile_checkpoints_per_block(cpfile); 125 int n = nilfs_cpfile_checkpoints_per_block(cpfile);
126 126
127 while (n-- > 0) { 127 while (n-- > 0) {
128 nilfs_checkpoint_set_invalid(cp); 128 nilfs_checkpoint_set_invalid(cp);
129 cp = (void *)cp + cpsz; 129 cp = (void *)cp + cpsz;
130 } 130 }
131 } 131 }
132 132
/* read (never create) the header block, i.e. block 0 of the cpfile */
static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}
138 138
static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						    __u64 cno,
						    int create,
						    struct buffer_head **bhp)
{
	/* Newly allocated blocks are initialized with all slots invalid. */
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}
148 148
/* punch out the whole block that holds checkpoint @cno */
static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
						       __u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}
155 155
/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * Only the current checkpoint number may be created; older
	 * checkpoints can only be looked up.
	 */
	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	/*
	 * Non-atomic kmap: the mapping stays live for the caller until
	 * nilfs_cpfile_put_checkpoint() unmaps it.
	 */
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		/*
		 * Non-head blocks cache a valid-checkpoint count in
		 * their leading slot; account for the new checkpoint.
		 */
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		nilfs_mdt_mark_buffer_dirty(cp_bh);

		/* bump the global checkpoint count in the header block */
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr, KM_USER0);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
241 241
/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	/* undo the kmap() and buffer reference taken in _get_checkpoint() */
	kunmap(bh->b_page);
	brelse(bh);
}
258 258
/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;	/* total checkpoints invalidated */
	int ret, ncps, nicps, count, i;

	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;

	/* walk the range block by block */
	for (cno = start; cno < end; cno += ncps) {
		/* ncps: checkpoints of this block that lie in [cno, end) */
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			/* snapshots must be unset before deletion */
			WARN_ON(nilfs_checkpoint_snapshot(cp));
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			nilfs_mdt_mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			/*
			 * Punch the block out of the file once its last
			 * valid checkpoint is gone.
			 */
			if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
			    (count = nilfs_cpfile_block_sub_valid_checkpoints(
					cpfile, cp_bh, kaddr, nicps)) == 0) {
				/* make hole */
				kunmap_atomic(kaddr, KM_USER0);
				brelse(cp_bh);
				ret = nilfs_cpfile_delete_checkpoint_block(
					cpfile, cno);
				if (ret == 0)
					continue;
				printk(KERN_ERR "%s: cannot delete block\n",
				       __func__);
				break;
			}
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		/* fold the total into the header's checkpoint count */
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr, KM_USER0);
	}

	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
367 367
/*
 * Convert an on-disk (little-endian) checkpoint entry into the
 * host-endian nilfs_cpinfo form handed out to callers.
 */
static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}
380 380
/*
 * Scan valid checkpoints starting at *cnop and copy up to @nci of them
 * into @buf as records of @cisz bytes each.  On return, *cnop is
 * advanced past the last checkpoint delivered so the caller can resume.
 * Returns the number of records stored, or a negative error code.
 */
static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			continue; /* skip hole */
		}

		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		/* copy out only valid (non-deleted) checkpoints */
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		/* resume after the last checkpoint actually copied */
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
430 430
/*
 * Walk the on-disk snapshot list starting at *cnop (0 means "start
 * from the list head in the cpfile header") and copy up to @nci
 * snapshot entries into @buf.  On return, *cnop holds the next
 * snapshot number to continue from, or ~0 as a terminator once the
 * list is exhausted.  Returns the number of entries stored, or a
 * negative error code.
 */
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		/* start of iteration: pick up the list head from the header */
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		/* terminator from a previous call: nothing left */
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		/* remap only when the next snapshot lives in another block */
		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				/* a linked snapshot should never be a hole */
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page, KM_USER0);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
508 508
/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: in/out checkpoint number cursor
 * @mode: %NILFS_CHECKPOINT to scan all checkpoints, or %NILFS_SNAPSHOT
 *	to walk only the snapshot list
 * @buf: buffer receiving the entries
 * @cisz: size of one entry in @buf
 * @nci: maximum number of entries to store
 *
 * Return Value: number of entries stored on success, or a negative
 * error code (%-EINVAL for an unknown @mode).
 */

ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}
529 529
/**
 * nilfs_cpfile_delete_checkpoint - delete one checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 *
 * Return Value: 0 on success.  %-ENOENT if the checkpoint does not
 * exist, %-EBUSY if it is a snapshot, or another negative error code.
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	/* fetch exactly one checkpoint starting at @cno to validate it */
	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY; /* snapshots must be unset before deletion */

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
551 551
552 static struct nilfs_snapshot_list * 552 static struct nilfs_snapshot_list *
553 nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile, 553 nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
554 __u64 cno, 554 __u64 cno,
555 struct buffer_head *bh, 555 struct buffer_head *bh,
556 void *kaddr) 556 void *kaddr)
557 { 557 {
558 struct nilfs_cpfile_header *header; 558 struct nilfs_cpfile_header *header;
559 struct nilfs_checkpoint *cp; 559 struct nilfs_checkpoint *cp;
560 struct nilfs_snapshot_list *list; 560 struct nilfs_snapshot_list *list;
561 561
562 if (cno != 0) { 562 if (cno != 0) {
563 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); 563 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
564 list = &cp->cp_snapshot_list; 564 list = &cp->cp_snapshot_list;
565 } else { 565 } else {
566 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); 566 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
567 list = &header->ch_snapshot_list; 567 list = &header->ch_snapshot_list;
568 } 568 }
569 return list; 569 return list;
570 } 570 }
571 571
/*
 * nilfs_cpfile_set_snapshot - turn the checkpoint @cno into a snapshot
 *
 * Snapshots are kept on a doubly linked list sorted by checkpoint number,
 * anchored in the cpfile header.  This walks the list backwards from the
 * anchor to find the insertion point for @cno, splices the checkpoint in,
 * and increments the snapshot count in the header.
 */
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		/* already a snapshot; nothing to do */
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	/* Walk backwards via ssl_prev until we find prev <= cno. */
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			/* crossed into a different block; remap it */
			kunmap_atomic(kaddr, KM_USER0);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr, KM_USER0);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		/* insertion point is right after the list anchor */
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* Splice @cno into the list between prev and curr. */
	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	/* Account the new snapshot in the header. */
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(curr_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
690 690
/*
 * nilfs_cpfile_clear_snapshot - demote the snapshot @cno to a plain
 * checkpoint
 *
 * Unlinks the checkpoint from the doubly linked snapshot list (its
 * neighbors may be other checkpoints or the anchor in the cpfile header),
 * clears its snapshot flag, and decrements the snapshot count.
 */
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		/* not a snapshot; nothing to do */
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}

	/* Remember the neighbors before dropping the mapping. */
	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		/* the list anchor in the header is the next element */
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		/* the list anchor in the header is the previous element */
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* Unlink @cno: make its neighbors point at each other. */
	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	/* Account the removed snapshot in the header. */
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(next_bh);
	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
793 793
/**
 * nilfs_cpfile_is_snapshot - test whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() reports whether the checkpoint
 * specified by @cno has been marked as a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
811 int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) 811 int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
812 { 812 {
813 struct buffer_head *bh; 813 struct buffer_head *bh;
814 struct nilfs_checkpoint *cp; 814 struct nilfs_checkpoint *cp;
815 void *kaddr; 815 void *kaddr;
816 int ret; 816 int ret;
817 817
818 /* CP number is invalid if it's zero or larger than the 818 /* CP number is invalid if it's zero or larger than the
819 largest exist one.*/ 819 largest exist one.*/
820 if (cno == 0 || cno >= nilfs_mdt_cno(cpfile)) 820 if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
821 return -ENOENT; 821 return -ENOENT;
822 down_read(&NILFS_MDT(cpfile)->mi_sem); 822 down_read(&NILFS_MDT(cpfile)->mi_sem);
823 823
824 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); 824 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
825 if (ret < 0) 825 if (ret < 0)
826 goto out; 826 goto out;
827 kaddr = kmap_atomic(bh->b_page, KM_USER0); 827 kaddr = kmap_atomic(bh->b_page, KM_USER0);
828 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); 828 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
829 if (nilfs_checkpoint_invalid(cp)) 829 if (nilfs_checkpoint_invalid(cp))
830 ret = -ENOENT; 830 ret = -ENOENT;
831 else 831 else
832 ret = nilfs_checkpoint_snapshot(cp); 832 ret = nilfs_checkpoint_snapshot(cp);
833 kunmap_atomic(kaddr, KM_USER0); 833 kunmap_atomic(kaddr, KM_USER0);
834 brelse(bh); 834 brelse(bh);
835 835
836 out: 836 out:
837 up_read(&NILFS_MDT(cpfile)->mi_sem); 837 up_read(&NILFS_MDT(cpfile)->mi_sem);
838 return ret; 838 return ret;
839 } 839 }
840 840
/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: new mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
859 int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) 859 int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
860 { 860 {
861 struct the_nilfs *nilfs; 861 struct the_nilfs *nilfs;
862 int ret; 862 int ret;
863 863
864 nilfs = NILFS_MDT(cpfile)->mi_nilfs; 864 nilfs = NILFS_MDT(cpfile)->mi_nilfs;
865 865
866 switch (mode) { 866 switch (mode) {
867 case NILFS_CHECKPOINT: 867 case NILFS_CHECKPOINT:
868 /* 868 /*
869 * Check for protecting existing snapshot mounts: 869 * Check for protecting existing snapshot mounts:
870 * ns_mount_mutex is used to make this operation atomic and 870 * ns_mount_mutex is used to make this operation atomic and
871 * exclusive with a new mount job. Though it doesn't cover 871 * exclusive with a new mount job. Though it doesn't cover
872 * umount, it's enough for the purpose. 872 * umount, it's enough for the purpose.
873 */ 873 */
874 if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) { 874 if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
875 /* Current implementation does not have to protect 875 /* Current implementation does not have to protect
876 plain read-only mounts since they are exclusive 876 plain read-only mounts since they are exclusive
877 with a read/write mount and are protected from the 877 with a read/write mount and are protected from the
878 cleaner. */ 878 cleaner. */
879 ret = -EBUSY; 879 ret = -EBUSY;
880 } else 880 } else
881 ret = nilfs_cpfile_clear_snapshot(cpfile, cno); 881 ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
882 return ret; 882 return ret;
883 case NILFS_SNAPSHOT: 883 case NILFS_SNAPSHOT:
884 return nilfs_cpfile_set_snapshot(cpfile, cno); 884 return nilfs_cpfile_set_snapshot(cpfile, cno);
885 default: 885 default:
886 return -EINVAL; 886 return -EINVAL;
887 } 887 }
888 } 888 }
889 889
/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoints information is
 * stored in the place pointed by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
905 int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) 905 int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
906 { 906 {
907 struct buffer_head *bh; 907 struct buffer_head *bh;
908 struct nilfs_cpfile_header *header; 908 struct nilfs_cpfile_header *header;
909 void *kaddr; 909 void *kaddr;
910 int ret; 910 int ret;
911 911
912 down_read(&NILFS_MDT(cpfile)->mi_sem); 912 down_read(&NILFS_MDT(cpfile)->mi_sem);
913 913
914 ret = nilfs_cpfile_get_header_block(cpfile, &bh); 914 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
915 if (ret < 0) 915 if (ret < 0)
916 goto out_sem; 916 goto out_sem;
917 kaddr = kmap_atomic(bh->b_page, KM_USER0); 917 kaddr = kmap_atomic(bh->b_page, KM_USER0);
918 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); 918 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
919 cpstat->cs_cno = nilfs_mdt_cno(cpfile); 919 cpstat->cs_cno = nilfs_mdt_cno(cpfile);
920 cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints); 920 cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
921 cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots); 921 cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
922 kunmap_atomic(kaddr, KM_USER0); 922 kunmap_atomic(kaddr, KM_USER0);
923 brelse(bh); 923 brelse(bh);
924 924
925 out_sem: 925 out_sem:
926 up_read(&NILFS_MDT(cpfile)->mi_sem); 926 up_read(&NILFS_MDT(cpfile)->mi_sem);
927 return ret; 927 return ret;
928 } 928 }
929 929
/**
 * nilfs_cpfile_read - read cpfile inode
 * @cpfile: cpfile inode
 * @raw_inode: on-disk cpfile inode
 */
int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode)
{
	/*
	 * The cpfile keeps no private in-memory state beyond the common
	 * inode fields, so the generic inode read is sufficient.
	 */
	return nilfs_read_inode_common(cpfile, raw_inode);
}
939
940 /**
931 * nilfs_cpfile_new - create cpfile 941 * nilfs_cpfile_new - create cpfile
932 * @nilfs: nilfs object 942 * @nilfs: nilfs object
933 * @cpsize: size of a checkpoint entry 943 * @cpsize: size of a checkpoint entry
934 */ 944 */
935 struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize) 945 struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize)
936 { 946 {
937 struct inode *cpfile; 947 struct inode *cpfile;
938 948
939 cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0); 949 cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0);
940 if (cpfile) 950 if (cpfile)
941 nilfs_mdt_set_entry_size(cpfile, cpsize, 951 nilfs_mdt_set_entry_size(cpfile, cpsize,
942 sizeof(struct nilfs_cpfile_header)); 952 sizeof(struct nilfs_cpfile_header));
943 return cpfile; 953 return cpfile;
944 } 954 }
945 955
1 /* 1 /*
2 * cpfile.h - NILFS checkpoint file. 2 * cpfile.h - NILFS checkpoint file.
3 * 3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 * 19 *
20 * Written by Koji Sato <koji@osrg.net>. 20 * Written by Koji Sato <koji@osrg.net>.
21 */ 21 */
22 22
23 #ifndef _NILFS_CPFILE_H 23 #ifndef _NILFS_CPFILE_H
24 #define _NILFS_CPFILE_H 24 #define _NILFS_CPFILE_H
25 25
26 #include <linux/fs.h> 26 #include <linux/fs.h>
27 #include <linux/buffer_head.h> 27 #include <linux/buffer_head.h>
28 #include <linux/nilfs2_fs.h> 28 #include <linux/nilfs2_fs.h>
29 29
30 30
31 int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int, 31 int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int,
32 struct nilfs_checkpoint **, 32 struct nilfs_checkpoint **,
33 struct buffer_head **); 33 struct buffer_head **);
34 void nilfs_cpfile_put_checkpoint(struct inode *, __u64, struct buffer_head *); 34 void nilfs_cpfile_put_checkpoint(struct inode *, __u64, struct buffer_head *);
35 int nilfs_cpfile_delete_checkpoints(struct inode *, __u64, __u64); 35 int nilfs_cpfile_delete_checkpoints(struct inode *, __u64, __u64);
36 int nilfs_cpfile_delete_checkpoint(struct inode *, __u64); 36 int nilfs_cpfile_delete_checkpoint(struct inode *, __u64);
37 int nilfs_cpfile_change_cpmode(struct inode *, __u64, int); 37 int nilfs_cpfile_change_cpmode(struct inode *, __u64, int);
38 int nilfs_cpfile_is_snapshot(struct inode *, __u64); 38 int nilfs_cpfile_is_snapshot(struct inode *, __u64);
39 int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *); 39 int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *);
40 ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned, 40 ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned,
41 size_t); 41 size_t);
42 42
43 int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode);
43 struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize); 44 struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize);
44 45
45 #endif /* _NILFS_CPFILE_H */ 46 #endif /* _NILFS_CPFILE_H */
46 47
1 /* 1 /*
2 * dat.c - NILFS disk address translation. 2 * dat.c - NILFS disk address translation.
3 * 3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 * 19 *
20 * Written by Koji Sato <koji@osrg.net>. 20 * Written by Koji Sato <koji@osrg.net>.
21 */ 21 */
22 22
23 #include <linux/types.h> 23 #include <linux/types.h>
24 #include <linux/buffer_head.h> 24 #include <linux/buffer_head.h>
25 #include <linux/string.h> 25 #include <linux/string.h>
26 #include <linux/errno.h> 26 #include <linux/errno.h>
27 #include "nilfs.h" 27 #include "nilfs.h"
28 #include "mdt.h" 28 #include "mdt.h"
29 #include "alloc.h" 29 #include "alloc.h"
30 #include "dat.h" 30 #include "dat.h"
31 31
32 32
33 #define NILFS_CNO_MIN ((__u64)1) 33 #define NILFS_CNO_MIN ((__u64)1)
34 #define NILFS_CNO_MAX (~(__u64)0) 34 #define NILFS_CNO_MAX (~(__u64)0)
35 35
/* Load (optionally creating) the DAT entry block for req->pr_entry_nr. */
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}
42 42
/*
 * Commit a modified DAT entry: mark its block and the DAT inode dirty,
 * then drop the buffer reference taken by nilfs_dat_prepare_entry().
 */
static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}
50 50
/* Undo nilfs_dat_prepare_entry(): just release the buffer reference. */
static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}
56 56
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int err;

	/* Reserve a virtual block number in the persistent allocator. */
	err = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (err < 0)
		return err;

	/* Make the entry block available; roll back the reservation on
	   failure. */
	err = nilfs_dat_prepare_entry(dat, req, 1);
	if (err < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return err;
}
71 71
/*
 * Finish allocating a DAT entry: initialize it to the allocated-but-
 * unassigned state (lifetime [NILFS_CNO_MIN, NILFS_CNO_MAX), block
 * number 0), then commit both the allocator state and the entry block.
 */
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}
88 88
/*
 * nilfs_dat_abort_alloc - cancel a prepared DAT entry allocation
 *
 * Undoes nilfs_dat_prepare_alloc() in reverse order: release the
 * entry buffer first, then roll back the allocator reservation.
 */
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}
94 94
/*
 * nilfs_dat_commit_free - commit deallocation of a DAT entry
 *
 * Resets the entry to an empty lifetime (start == end == CNO_MIN,
 * no block number), then commits the entry block and returns the
 * entry to the allocator's free pool.
 */
void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}
111 111
/*
 * nilfs_dat_prepare_start - prepare to assign a block to a DAT entry
 *
 * The entry must already exist; -ENOENT here indicates a bug and is
 * flagged with WARN_ON.
 */
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}
120 120
/*
 * nilfs_dat_commit_start - commit assignment of a block to a DAT entry
 * @blocknr: block number to record in the entry
 *
 * Stamps the entry's start with the current checkpoint number and
 * stores @blocknr, then commits the entry block.
 */
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
}
136 136
137 int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req) 137 int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
138 { 138 {
139 struct nilfs_dat_entry *entry; 139 struct nilfs_dat_entry *entry;
140 __u64 start; 140 __u64 start;
141 sector_t blocknr; 141 sector_t blocknr;
142 void *kaddr; 142 void *kaddr;
143 int ret; 143 int ret;
144 144
145 ret = nilfs_dat_prepare_entry(dat, req, 0); 145 ret = nilfs_dat_prepare_entry(dat, req, 0);
146 if (ret < 0) { 146 if (ret < 0) {
147 WARN_ON(ret == -ENOENT); 147 WARN_ON(ret == -ENOENT);
148 return ret; 148 return ret;
149 } 149 }
150 150
151 kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); 151 kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
152 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, 152 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
153 req->pr_entry_bh, kaddr); 153 req->pr_entry_bh, kaddr);
154 start = le64_to_cpu(entry->de_start); 154 start = le64_to_cpu(entry->de_start);
155 blocknr = le64_to_cpu(entry->de_blocknr); 155 blocknr = le64_to_cpu(entry->de_blocknr);
156 kunmap_atomic(kaddr, KM_USER0); 156 kunmap_atomic(kaddr, KM_USER0);
157 157
158 if (blocknr == 0) { 158 if (blocknr == 0) {
159 ret = nilfs_palloc_prepare_free_entry(dat, req); 159 ret = nilfs_palloc_prepare_free_entry(dat, req);
160 if (ret < 0) { 160 if (ret < 0) {
161 nilfs_dat_abort_entry(dat, req); 161 nilfs_dat_abort_entry(dat, req);
162 return ret; 162 return ret;
163 } 163 }
164 } 164 }
165 165
166 return 0; 166 return 0;
167 } 167 }
168 168
/*
 * nilfs_dat_commit_end - commit ending the lifetime of a DAT entry
 * @dead: nonzero if the entry dies immediately (end stays at its start)
 *
 * When the entry is not dead, its end is set to the current checkpoint
 * number; a start later than end indicates corruption and is warned.
 * Entries without an assigned block are freed outright, otherwise only
 * the entry block change is committed.
 */
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}
194 194
/*
 * nilfs_dat_abort_end - cancel a prepared end-of-lifetime operation
 *
 * If nilfs_dat_prepare_end() also prepared freeing of the entry
 * (recognizable by a fresh start == current cno and no block number),
 * roll that back too before releasing the entry buffer.
 */
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}
213 213
/*
 * nilfs_dat_prepare_update - prepare moving a DAT entry to a new one
 * @dat: DAT file inode
 * @oldreq: request for the entry being ended
 * @newreq: request for the replacement entry
 *
 * Prepares ending @oldreq's entry and allocating @newreq's entry as a
 * single two-phase step; on failure of the allocation the prepared end
 * is rolled back so the caller sees an all-or-nothing result.
 */
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int err;

	err = nilfs_dat_prepare_end(dat, oldreq);
	if (err)
		return err;

	err = nilfs_dat_prepare_alloc(dat, newreq);
	if (err < 0)
		nilfs_dat_abort_end(dat, oldreq);

	return err;
}
228 228
/*
 * nilfs_dat_commit_update - commit a prepared DAT entry update
 *
 * Commits the end of the old entry and the allocation of the new one.
 */
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}
236 236
/*
 * nilfs_dat_abort_update - cancel a prepared DAT entry update
 *
 * Rolls back both halves prepared by nilfs_dat_prepare_update().
 */
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
244 244
245 /** 245 /**
246 * nilfs_dat_mark_dirty - 246 * nilfs_dat_mark_dirty -
247 * @dat: DAT file inode 247 * @dat: DAT file inode
248 * @vblocknr: virtual block number 248 * @vblocknr: virtual block number
249 * 249 *
250 * Description: 250 * Description:
251 * 251 *
252 * Return Value: On success, 0 is returned. On error, one of the following 252 * Return Value: On success, 0 is returned. On error, one of the following
253 * negative error codes is returned. 253 * negative error codes is returned.
254 * 254 *
255 * %-EIO - I/O error. 255 * %-EIO - I/O error.
256 * 256 *
257 * %-ENOMEM - Insufficient amount of memory available. 257 * %-ENOMEM - Insufficient amount of memory available.
258 */ 258 */
259 int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr) 259 int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
260 { 260 {
261 struct nilfs_palloc_req req; 261 struct nilfs_palloc_req req;
262 int ret; 262 int ret;
263 263
264 req.pr_entry_nr = vblocknr; 264 req.pr_entry_nr = vblocknr;
265 ret = nilfs_dat_prepare_entry(dat, &req, 0); 265 ret = nilfs_dat_prepare_entry(dat, &req, 0);
266 if (ret == 0) 266 if (ret == 0)
267 nilfs_dat_commit_entry(dat, &req); 267 nilfs_dat_commit_entry(dat, &req);
268 return ret; 268 return ret;
269 } 269 }
270 270
/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block number have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	/* thin wrapper: the persistent object allocator does the work */
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
293 293
/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		/* moving an unassigned entry indicates on-disk corruption */
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}
342 342
/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		/* no physical block assigned yet */
		ret = -ENOENT;
		goto out;
	}
	if (blocknrp != NULL)
		*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}
389 389
/*
 * nilfs_dat_get_vinfo - fetch lifetime info for a batch of virtual blocks
 * @dat: DAT file inode
 * @buf: array of struct nilfs_vinfo records (vi_vblocknr pre-filled,
 *       assumed sorted so entries in the same block are consecutive)
 * @visz: size of one record in @buf
 * @nvi: number of records
 *
 * Each iteration of the outer loop reads one entry block and fills in
 * every consecutive record whose vblocknr falls inside that block,
 * minimizing block reads.  Returns @nvi on success or a negative
 * error code.
 */
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* last virtual block number in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}

	return nvi;
}
428 428
/**
 * nilfs_dat_read - read dat inode
 * @dat: dat inode
 * @raw_inode: on-disk dat inode
 *
 * Per-metadata-file read method; currently just fills @dat from the
 * on-disk inode via the common reader.
 */
int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
{
	return nilfs_read_inode_common(dat, raw_inode);
}
438
/**
 * nilfs_dat_new - create dat file
 * @nilfs: nilfs object
 * @entry_size: size of a dat entry
 *
 * Returns the new DAT inode, or NULL on allocation/initialization
 * failure.
 */
struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
{
	/* distinct lockdep class: the DAT is locked under other mdt files */
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	int err;

	dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, 0);
	if (dat) {
		err = nilfs_palloc_init_blockgroup(dat, entry_size);
		if (unlikely(err)) {
			nilfs_mdt_destroy(dat);
			return NULL;
		}
		lockdep_set_class(&NILFS_MDT(dat)->mi_sem, &dat_lock_key);
	}
	return dat;
}
451 461
/*
 * dat.h - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#ifndef _NILFS_DAT_H
#define _NILFS_DAT_H

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>


struct nilfs_palloc_req;

/* lookup */
int nilfs_dat_translate(struct inode *, __u64, sector_t *);

/* two-phase (prepare/commit/abort) entry lifetime operations */
int nilfs_dat_prepare_alloc(struct inode *, struct nilfs_palloc_req *);
void nilfs_dat_commit_alloc(struct inode *, struct nilfs_palloc_req *);
void nilfs_dat_abort_alloc(struct inode *, struct nilfs_palloc_req *);
int nilfs_dat_prepare_start(struct inode *, struct nilfs_palloc_req *);
void nilfs_dat_commit_start(struct inode *, struct nilfs_palloc_req *,
			    sector_t);
int nilfs_dat_prepare_end(struct inode *, struct nilfs_palloc_req *);
void nilfs_dat_commit_end(struct inode *, struct nilfs_palloc_req *, int);
void nilfs_dat_abort_end(struct inode *, struct nilfs_palloc_req *);
int nilfs_dat_prepare_update(struct inode *, struct nilfs_palloc_req *,
			     struct nilfs_palloc_req *);
void nilfs_dat_commit_update(struct inode *, struct nilfs_palloc_req *,
			     struct nilfs_palloc_req *, int);
void nilfs_dat_abort_update(struct inode *, struct nilfs_palloc_req *,
			    struct nilfs_palloc_req *);

/* bulk / maintenance operations */
int nilfs_dat_mark_dirty(struct inode *, __u64);
int nilfs_dat_freev(struct inode *, __u64 *, size_t);
int nilfs_dat_move(struct inode *, __u64, sector_t);
ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t);

/* construction */
int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode);
struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size);

#endif	/* _NILFS_DAT_H */
59 60
1 /* 1 /*
2 * sufile.c - NILFS segment usage file. 2 * sufile.c - NILFS segment usage file.
3 * 3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 * 19 *
20 * Written by Koji Sato <koji@osrg.net>. 20 * Written by Koji Sato <koji@osrg.net>.
21 * Rivised by Ryusuke Konishi <ryusuke@osrg.net>. 21 * Rivised by Ryusuke Konishi <ryusuke@osrg.net>.
22 */ 22 */
23 23
24 #include <linux/kernel.h> 24 #include <linux/kernel.h>
25 #include <linux/fs.h> 25 #include <linux/fs.h>
26 #include <linux/string.h> 26 #include <linux/string.h>
27 #include <linux/buffer_head.h> 27 #include <linux/buffer_head.h>
28 #include <linux/errno.h> 28 #include <linux/errno.h>
29 #include <linux/nilfs2_fs.h> 29 #include <linux/nilfs2_fs.h>
30 #include "mdt.h" 30 #include "mdt.h"
31 #include "sufile.h" 31 #include "sufile.h"
32 32
33 33
/* Number of segment usage entries stored in one sufile block. */
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}
39 39
/* Block offset within the sufile that holds the usage entry of @segnum. */
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	/* do_div() divides t in place; the quotient is the block offset */
	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}
47 47
/* Entry index of @segnum within its sufile block. */
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	/* do_div() returns the remainder, i.e. the in-block index */
	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}
54 54
/*
 * Number of usage entries reachable from @curr without leaving its
 * block or exceeding @max (inclusive upper bound on segment numbers).
 */
static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}
64 64
/* Locate the sufile header inside a mapped header block. */
static inline struct nilfs_sufile_header *
nilfs_sufile_block_get_header(const struct inode *sufile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}
72 72
/* Locate the usage entry of @segnum inside a mapped entry block. */
static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}
81 81
/* Read the sufile header block (always block 0); never creates it. */
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}
87 87
88 static inline int 88 static inline int
89 nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum, 89 nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
90 int create, struct buffer_head **bhp) 90 int create, struct buffer_head **bhp)
91 { 91 {
92 return nilfs_mdt_get_block(sufile, 92 return nilfs_mdt_get_block(sufile,
93 nilfs_sufile_get_blkoff(sufile, segnum), 93 nilfs_sufile_get_blkoff(sufile, segnum),
94 create, NULL, bhp); 94 create, NULL, bhp);
95 } 95 }
96 96
97 static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, 97 static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
98 u64 ncleanadd, u64 ndirtyadd) 98 u64 ncleanadd, u64 ndirtyadd)
99 { 99 {
100 struct nilfs_sufile_header *header; 100 struct nilfs_sufile_header *header;
101 void *kaddr; 101 void *kaddr;
102 102
103 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 103 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
104 header = kaddr + bh_offset(header_bh); 104 header = kaddr + bh_offset(header_bh);
105 le64_add_cpu(&header->sh_ncleansegs, ncleanadd); 105 le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
106 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd); 106 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
107 kunmap_atomic(kaddr, KM_USER0); 107 kunmap_atomic(kaddr, KM_USER0);
108 108
109 nilfs_mdt_mark_buffer_dirty(header_bh); 109 nilfs_mdt_mark_buffer_dirty(header_bh);
110 } 110 }
111 111
112 /** 112 /**
113 * nilfs_sufile_updatev - modify multiple segment usages at a time 113 * nilfs_sufile_updatev - modify multiple segment usages at a time
114 * @sufile: inode of segment usage file 114 * @sufile: inode of segment usage file
115 * @segnumv: array of segment numbers 115 * @segnumv: array of segment numbers
116 * @nsegs: size of @segnumv array 116 * @nsegs: size of @segnumv array
117 * @create: creation flag 117 * @create: creation flag
118 * @ndone: place to store number of modified segments on @segnumv 118 * @ndone: place to store number of modified segments on @segnumv
119 * @dofunc: primitive operation for the update 119 * @dofunc: primitive operation for the update
120 * 120 *
121 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc 121 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
122 * against the given array of segments. The @dofunc is called with 122 * against the given array of segments. The @dofunc is called with
123 * buffers of a header block and the sufile block in which the target 123 * buffers of a header block and the sufile block in which the target
124 * segment usage entry is contained. If @ndone is given, the number 124 * segment usage entry is contained. If @ndone is given, the number
125 * of successfully modified segments from the head is stored in the 125 * of successfully modified segments from the head is stored in the
126 * place @ndone points to. 126 * place @ndone points to.
127 * 127 *
128 * Return Value: On success, zero is returned. On error, one of the 128 * Return Value: On success, zero is returned. On error, one of the
129 * following negative error codes is returned. 129 * following negative error codes is returned.
130 * 130 *
131 * %-EIO - I/O error. 131 * %-EIO - I/O error.
132 * 132 *
133 * %-ENOMEM - Insufficient amount of memory available. 133 * %-ENOMEM - Insufficient amount of memory available.
134 * 134 *
135 * %-ENOENT - Given segment usage is in hole block (may be returned if 135 * %-ENOENT - Given segment usage is in hole block (may be returned if
136 * @create is zero) 136 * @create is zero)
137 * 137 *
138 * %-EINVAL - Invalid segment usage number 138 * %-EINVAL - Invalid segment usage number
139 */ 139 */
140 int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, 140 int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
141 int create, size_t *ndone, 141 int create, size_t *ndone,
142 void (*dofunc)(struct inode *, __u64, 142 void (*dofunc)(struct inode *, __u64,
143 struct buffer_head *, 143 struct buffer_head *,
144 struct buffer_head *)) 144 struct buffer_head *))
145 { 145 {
146 struct buffer_head *header_bh, *bh; 146 struct buffer_head *header_bh, *bh;
147 unsigned long blkoff, prev_blkoff; 147 unsigned long blkoff, prev_blkoff;
148 __u64 *seg; 148 __u64 *seg;
149 size_t nerr = 0, n = 0; 149 size_t nerr = 0, n = 0;
150 int ret = 0; 150 int ret = 0;
151 151
152 if (unlikely(nsegs == 0)) 152 if (unlikely(nsegs == 0))
153 goto out; 153 goto out;
154 154
155 down_write(&NILFS_MDT(sufile)->mi_sem); 155 down_write(&NILFS_MDT(sufile)->mi_sem);
156 for (seg = segnumv; seg < segnumv + nsegs; seg++) { 156 for (seg = segnumv; seg < segnumv + nsegs; seg++) {
157 if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) { 157 if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
158 printk(KERN_WARNING 158 printk(KERN_WARNING
159 "%s: invalid segment number: %llu\n", __func__, 159 "%s: invalid segment number: %llu\n", __func__,
160 (unsigned long long)*seg); 160 (unsigned long long)*seg);
161 nerr++; 161 nerr++;
162 } 162 }
163 } 163 }
164 if (nerr > 0) { 164 if (nerr > 0) {
165 ret = -EINVAL; 165 ret = -EINVAL;
166 goto out_sem; 166 goto out_sem;
167 } 167 }
168 168
169 ret = nilfs_sufile_get_header_block(sufile, &header_bh); 169 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
170 if (ret < 0) 170 if (ret < 0)
171 goto out_sem; 171 goto out_sem;
172 172
173 seg = segnumv; 173 seg = segnumv;
174 blkoff = nilfs_sufile_get_blkoff(sufile, *seg); 174 blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
175 ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh); 175 ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
176 if (ret < 0) 176 if (ret < 0)
177 goto out_header; 177 goto out_header;
178 178
179 for (;;) { 179 for (;;) {
180 dofunc(sufile, *seg, header_bh, bh); 180 dofunc(sufile, *seg, header_bh, bh);
181 181
182 if (++seg >= segnumv + nsegs) 182 if (++seg >= segnumv + nsegs)
183 break; 183 break;
184 prev_blkoff = blkoff; 184 prev_blkoff = blkoff;
185 blkoff = nilfs_sufile_get_blkoff(sufile, *seg); 185 blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
186 if (blkoff == prev_blkoff) 186 if (blkoff == prev_blkoff)
187 continue; 187 continue;
188 188
189 /* get different block */ 189 /* get different block */
190 brelse(bh); 190 brelse(bh);
191 ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh); 191 ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
192 if (unlikely(ret < 0)) 192 if (unlikely(ret < 0))
193 goto out_header; 193 goto out_header;
194 } 194 }
195 brelse(bh); 195 brelse(bh);
196 196
197 out_header: 197 out_header:
198 n = seg - segnumv; 198 n = seg - segnumv;
199 brelse(header_bh); 199 brelse(header_bh);
200 out_sem: 200 out_sem:
201 up_write(&NILFS_MDT(sufile)->mi_sem); 201 up_write(&NILFS_MDT(sufile)->mi_sem);
202 out: 202 out:
203 if (ndone) 203 if (ndone)
204 *ndone = n; 204 *ndone = n;
205 return ret; 205 return ret;
206 } 206 }
207 207
/*
 * nilfs_sufile_update - apply @dofunc to the usage entry of one segment
 *
 * Validates @segnum, then calls @dofunc under mi_sem with the header
 * block buffer and the buffer of the block holding the entry.  Returns
 * 0 on success or a negative error code (-EINVAL for a bad segment
 * number, or whatever the block lookups return).
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
238 238
239 /** 239 /**
240 * nilfs_sufile_alloc - allocate a segment 240 * nilfs_sufile_alloc - allocate a segment
241 * @sufile: inode of segment usage file 241 * @sufile: inode of segment usage file
242 * @segnump: pointer to segment number 242 * @segnump: pointer to segment number
243 * 243 *
244 * Description: nilfs_sufile_alloc() allocates a clean segment. 244 * Description: nilfs_sufile_alloc() allocates a clean segment.
245 * 245 *
246 * Return Value: On success, 0 is returned and the segment number of the 246 * Return Value: On success, 0 is returned and the segment number of the
247 * allocated segment is stored in the place pointed by @segnump. On error, one 247 * allocated segment is stored in the place pointed by @segnump. On error, one
248 * of the following negative error codes is returned. 248 * of the following negative error codes is returned.
249 * 249 *
250 * %-EIO - I/O error. 250 * %-EIO - I/O error.
251 * 251 *
252 * %-ENOMEM - Insufficient amount of memory available. 252 * %-ENOMEM - Insufficient amount of memory available.
253 * 253 *
254 * %-ENOSPC - No clean segment left. 254 * %-ENOSPC - No clean segment left.
255 */ 255 */
256 int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) 256 int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
257 { 257 {
258 struct buffer_head *header_bh, *su_bh; 258 struct buffer_head *header_bh, *su_bh;
259 struct nilfs_sufile_header *header; 259 struct nilfs_sufile_header *header;
260 struct nilfs_segment_usage *su; 260 struct nilfs_segment_usage *su;
261 size_t susz = NILFS_MDT(sufile)->mi_entry_size; 261 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
262 __u64 segnum, maxsegnum, last_alloc; 262 __u64 segnum, maxsegnum, last_alloc;
263 void *kaddr; 263 void *kaddr;
264 unsigned long nsegments, ncleansegs, nsus; 264 unsigned long nsegments, ncleansegs, nsus;
265 int ret, i, j; 265 int ret, i, j;
266 266
267 down_write(&NILFS_MDT(sufile)->mi_sem); 267 down_write(&NILFS_MDT(sufile)->mi_sem);
268 268
269 ret = nilfs_sufile_get_header_block(sufile, &header_bh); 269 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
270 if (ret < 0) 270 if (ret < 0)
271 goto out_sem; 271 goto out_sem;
272 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 272 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
273 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr); 273 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
274 ncleansegs = le64_to_cpu(header->sh_ncleansegs); 274 ncleansegs = le64_to_cpu(header->sh_ncleansegs);
275 last_alloc = le64_to_cpu(header->sh_last_alloc); 275 last_alloc = le64_to_cpu(header->sh_last_alloc);
276 kunmap_atomic(kaddr, KM_USER0); 276 kunmap_atomic(kaddr, KM_USER0);
277 277
278 nsegments = nilfs_sufile_get_nsegments(sufile); 278 nsegments = nilfs_sufile_get_nsegments(sufile);
279 segnum = last_alloc + 1; 279 segnum = last_alloc + 1;
280 maxsegnum = nsegments - 1; 280 maxsegnum = nsegments - 1;
281 for (i = 0; i < nsegments; i += nsus) { 281 for (i = 0; i < nsegments; i += nsus) {
282 if (segnum >= nsegments) { 282 if (segnum >= nsegments) {
283 /* wrap around */ 283 /* wrap around */
284 segnum = 0; 284 segnum = 0;
285 maxsegnum = last_alloc; 285 maxsegnum = last_alloc;
286 } 286 }
287 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, 287 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
288 &su_bh); 288 &su_bh);
289 if (ret < 0) 289 if (ret < 0)
290 goto out_header; 290 goto out_header;
291 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 291 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
292 su = nilfs_sufile_block_get_segment_usage( 292 su = nilfs_sufile_block_get_segment_usage(
293 sufile, segnum, su_bh, kaddr); 293 sufile, segnum, su_bh, kaddr);
294 294
295 nsus = nilfs_sufile_segment_usages_in_block( 295 nsus = nilfs_sufile_segment_usages_in_block(
296 sufile, segnum, maxsegnum); 296 sufile, segnum, maxsegnum);
297 for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) { 297 for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
298 if (!nilfs_segment_usage_clean(su)) 298 if (!nilfs_segment_usage_clean(su))
299 continue; 299 continue;
300 /* found a clean segment */ 300 /* found a clean segment */
301 nilfs_segment_usage_set_dirty(su); 301 nilfs_segment_usage_set_dirty(su);
302 kunmap_atomic(kaddr, KM_USER0); 302 kunmap_atomic(kaddr, KM_USER0);
303 303
304 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 304 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
305 header = nilfs_sufile_block_get_header( 305 header = nilfs_sufile_block_get_header(
306 sufile, header_bh, kaddr); 306 sufile, header_bh, kaddr);
307 le64_add_cpu(&header->sh_ncleansegs, -1); 307 le64_add_cpu(&header->sh_ncleansegs, -1);
308 le64_add_cpu(&header->sh_ndirtysegs, 1); 308 le64_add_cpu(&header->sh_ndirtysegs, 1);
309 header->sh_last_alloc = cpu_to_le64(segnum); 309 header->sh_last_alloc = cpu_to_le64(segnum);
310 kunmap_atomic(kaddr, KM_USER0); 310 kunmap_atomic(kaddr, KM_USER0);
311 311
312 nilfs_mdt_mark_buffer_dirty(header_bh); 312 nilfs_mdt_mark_buffer_dirty(header_bh);
313 nilfs_mdt_mark_buffer_dirty(su_bh); 313 nilfs_mdt_mark_buffer_dirty(su_bh);
314 nilfs_mdt_mark_dirty(sufile); 314 nilfs_mdt_mark_dirty(sufile);
315 brelse(su_bh); 315 brelse(su_bh);
316 *segnump = segnum; 316 *segnump = segnum;
317 goto out_header; 317 goto out_header;
318 } 318 }
319 319
320 kunmap_atomic(kaddr, KM_USER0); 320 kunmap_atomic(kaddr, KM_USER0);
321 brelse(su_bh); 321 brelse(su_bh);
322 } 322 }
323 323
324 /* no segments left */ 324 /* no segments left */
325 ret = -ENOSPC; 325 ret = -ENOSPC;
326 326
327 out_header: 327 out_header:
328 brelse(header_bh); 328 brelse(header_bh);
329 329
330 out_sem: 330 out_sem:
331 up_write(&NILFS_MDT(sufile)->mi_sem); 331 up_write(&NILFS_MDT(sufile)->mi_sem);
332 return ret; 332 return ret;
333 } 333 }
334 334
/*
 * nilfs_sufile_do_cancel_free - dofunc: take back a clean segment
 *
 * Marks the (expectedly clean) segment dirty again and moves one count
 * from the clean to the dirty counter.  A non-clean segment is only
 * warned about; the entry is left untouched in that case.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	/* one fewer clean segment, one more dirty */
	nilfs_sufile_mod_counter(header_bh, -1, 1);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
357 357
/*
 * nilfs_sufile_do_scrap - dofunc: turn a segment into garbage
 *
 * Resets the usage entry (lastmod, nblocks cleared, DIRTY flag only)
 * and adjusts the counters based on the segment's previous state.
 * If the entry is already in the scrapped state, nothing is done.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		/* already scrapped; nothing to update */
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* sample the old state before rewriting the entry */
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
386 386
387 void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, 387 void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
388 struct buffer_head *header_bh, 388 struct buffer_head *header_bh,
389 struct buffer_head *su_bh) 389 struct buffer_head *su_bh)
390 { 390 {
391 struct nilfs_segment_usage *su; 391 struct nilfs_segment_usage *su;
392 void *kaddr; 392 void *kaddr;
393 int sudirty; 393 int sudirty;
394 394
395 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 395 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
396 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); 396 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
397 if (nilfs_segment_usage_clean(su)) { 397 if (nilfs_segment_usage_clean(su)) {
398 printk(KERN_WARNING "%s: segment %llu is already clean\n", 398 printk(KERN_WARNING "%s: segment %llu is already clean\n",
399 __func__, (unsigned long long)segnum); 399 __func__, (unsigned long long)segnum);
400 kunmap_atomic(kaddr, KM_USER0); 400 kunmap_atomic(kaddr, KM_USER0);
401 return; 401 return;
402 } 402 }
403 WARN_ON(nilfs_segment_usage_error(su)); 403 WARN_ON(nilfs_segment_usage_error(su));
404 WARN_ON(!nilfs_segment_usage_dirty(su)); 404 WARN_ON(!nilfs_segment_usage_dirty(su));
405 405
406 sudirty = nilfs_segment_usage_dirty(su); 406 sudirty = nilfs_segment_usage_dirty(su);
407 nilfs_segment_usage_set_clean(su); 407 nilfs_segment_usage_set_clean(su);
408 kunmap_atomic(kaddr, KM_USER0); 408 kunmap_atomic(kaddr, KM_USER0);
409 nilfs_mdt_mark_buffer_dirty(su_bh); 409 nilfs_mdt_mark_buffer_dirty(su_bh);
410 410
411 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0); 411 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
412 nilfs_mdt_mark_dirty(sufile); 412 nilfs_mdt_mark_dirty(sufile);
413 } 413 }
414 414
415 /** 415 /**
416 * nilfs_sufile_get_segment_usage - get a segment usage 416 * nilfs_sufile_get_segment_usage - get a segment usage
417 * @sufile: inode of segment usage file 417 * @sufile: inode of segment usage file
418 * @segnum: segment number 418 * @segnum: segment number
419 * @sup: pointer to segment usage 419 * @sup: pointer to segment usage
420 * @bhp: pointer to buffer head 420 * @bhp: pointer to buffer head
421 * 421 *
422 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage 422 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
423 * specified by @segnum. 423 * specified by @segnum.
424 * 424 *
425 * Return Value: On success, 0 is returned, and the segment usage and the 425 * Return Value: On success, 0 is returned, and the segment usage and the
426 * buffer head of the buffer on which the segment usage is located are stored 426 * buffer head of the buffer on which the segment usage is located are stored
427 * in the place pointed by @sup and @bhp, respectively. On error, one of the 427 * in the place pointed by @sup and @bhp, respectively. On error, one of the
428 * following negative error codes is returned. 428 * following negative error codes is returned.
429 * 429 *
430 * %-EIO - I/O error. 430 * %-EIO - I/O error.
431 * 431 *
432 * %-ENOMEM - Insufficient amount of memory available. 432 * %-ENOMEM - Insufficient amount of memory available.
433 * 433 *
434 * %-EINVAL - Invalid segment usage number. 434 * %-EINVAL - Invalid segment usage number.
435 */ 435 */
436 int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum, 436 int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
437 struct nilfs_segment_usage **sup, 437 struct nilfs_segment_usage **sup,
438 struct buffer_head **bhp) 438 struct buffer_head **bhp)
439 { 439 {
440 struct buffer_head *bh; 440 struct buffer_head *bh;
441 struct nilfs_segment_usage *su; 441 struct nilfs_segment_usage *su;
442 void *kaddr; 442 void *kaddr;
443 int ret; 443 int ret;
444 444
445 /* segnum is 0 origin */ 445 /* segnum is 0 origin */
446 if (segnum >= nilfs_sufile_get_nsegments(sufile)) 446 if (segnum >= nilfs_sufile_get_nsegments(sufile))
447 return -EINVAL; 447 return -EINVAL;
448 down_write(&NILFS_MDT(sufile)->mi_sem); 448 down_write(&NILFS_MDT(sufile)->mi_sem);
449 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh); 449 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
450 if (ret < 0) 450 if (ret < 0)
451 goto out_sem; 451 goto out_sem;
452 kaddr = kmap(bh->b_page); 452 kaddr = kmap(bh->b_page);
453 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); 453 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
454 if (nilfs_segment_usage_error(su)) { 454 if (nilfs_segment_usage_error(su)) {
455 kunmap(bh->b_page); 455 kunmap(bh->b_page);
456 brelse(bh); 456 brelse(bh);
457 ret = -EINVAL; 457 ret = -EINVAL;
458 goto out_sem; 458 goto out_sem;
459 } 459 }
460 460
461 if (sup != NULL) 461 if (sup != NULL)
462 *sup = su; 462 *sup = su;
463 *bhp = bh; 463 *bhp = bh;
464 464
465 out_sem: 465 out_sem:
466 up_write(&NILFS_MDT(sufile)->mi_sem); 466 up_write(&NILFS_MDT(sufile)->mi_sem);
467 return ret; 467 return ret;
468 } 468 }
469 469
470 /** 470 /**
471 * nilfs_sufile_put_segment_usage - put a segment usage 471 * nilfs_sufile_put_segment_usage - put a segment usage
472 * @sufile: inode of segment usage file 472 * @sufile: inode of segment usage file
473 * @segnum: segment number 473 * @segnum: segment number
474 * @bh: buffer head 474 * @bh: buffer head
475 * 475 *
476 * Description: nilfs_sufile_put_segment_usage() releases the segment usage 476 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
477 * specified by @segnum. @bh must be the buffer head which have been returned 477 * specified by @segnum. @bh must be the buffer head which have been returned
478 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum. 478 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
479 */ 479 */
480 void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum, 480 void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
481 struct buffer_head *bh) 481 struct buffer_head *bh)
482 { 482 {
483 kunmap(bh->b_page); 483 kunmap(bh->b_page);
484 brelse(bh); 484 brelse(bh);
485 } 485 }
486 486
487 /** 487 /**
488 * nilfs_sufile_get_stat - get segment usage statistics 488 * nilfs_sufile_get_stat - get segment usage statistics
489 * @sufile: inode of segment usage file 489 * @sufile: inode of segment usage file
490 * @stat: pointer to a structure of segment usage statistics 490 * @stat: pointer to a structure of segment usage statistics
491 * 491 *
492 * Description: nilfs_sufile_get_stat() returns information about segment 492 * Description: nilfs_sufile_get_stat() returns information about segment
493 * usage. 493 * usage.
494 * 494 *
495 * Return Value: On success, 0 is returned, and segment usage information is 495 * Return Value: On success, 0 is returned, and segment usage information is
496 * stored in the place pointed by @stat. On error, one of the following 496 * stored in the place pointed by @stat. On error, one of the following
497 * negative error codes is returned. 497 * negative error codes is returned.
498 * 498 *
499 * %-EIO - I/O error. 499 * %-EIO - I/O error.
500 * 500 *
501 * %-ENOMEM - Insufficient amount of memory available. 501 * %-ENOMEM - Insufficient amount of memory available.
502 */ 502 */
503 int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) 503 int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
504 { 504 {
505 struct buffer_head *header_bh; 505 struct buffer_head *header_bh;
506 struct nilfs_sufile_header *header; 506 struct nilfs_sufile_header *header;
507 struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs; 507 struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
508 void *kaddr; 508 void *kaddr;
509 int ret; 509 int ret;
510 510
511 down_read(&NILFS_MDT(sufile)->mi_sem); 511 down_read(&NILFS_MDT(sufile)->mi_sem);
512 512
513 ret = nilfs_sufile_get_header_block(sufile, &header_bh); 513 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
514 if (ret < 0) 514 if (ret < 0)
515 goto out_sem; 515 goto out_sem;
516 516
517 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 517 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
518 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr); 518 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
519 sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile); 519 sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
520 sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs); 520 sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
521 sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs); 521 sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
522 sustat->ss_ctime = nilfs->ns_ctime; 522 sustat->ss_ctime = nilfs->ns_ctime;
523 sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime; 523 sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
524 spin_lock(&nilfs->ns_last_segment_lock); 524 spin_lock(&nilfs->ns_last_segment_lock);
525 sustat->ss_prot_seq = nilfs->ns_prot_seq; 525 sustat->ss_prot_seq = nilfs->ns_prot_seq;
526 spin_unlock(&nilfs->ns_last_segment_lock); 526 spin_unlock(&nilfs->ns_last_segment_lock);
527 kunmap_atomic(kaddr, KM_USER0); 527 kunmap_atomic(kaddr, KM_USER0);
528 brelse(header_bh); 528 brelse(header_bh);
529 529
530 out_sem: 530 out_sem:
531 up_read(&NILFS_MDT(sufile)->mi_sem); 531 up_read(&NILFS_MDT(sufile)->mi_sem);
532 return ret; 532 return ret;
533 } 533 }
534 534
535 /** 535 /**
536 * nilfs_sufile_get_ncleansegs - get the number of clean segments 536 * nilfs_sufile_get_ncleansegs - get the number of clean segments
537 * @sufile: inode of segment usage file 537 * @sufile: inode of segment usage file
538 * @nsegsp: pointer to the number of clean segments 538 * @nsegsp: pointer to the number of clean segments
539 * 539 *
540 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean 540 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
541 * segments. 541 * segments.
542 * 542 *
543 * Return Value: On success, 0 is returned and the number of clean segments is 543 * Return Value: On success, 0 is returned and the number of clean segments is
544 * stored in the place pointed by @nsegsp. On error, one of the following 544 * stored in the place pointed by @nsegsp. On error, one of the following
545 * negative error codes is returned. 545 * negative error codes is returned.
546 * 546 *
547 * %-EIO - I/O error. 547 * %-EIO - I/O error.
548 * 548 *
549 * %-ENOMEM - Insufficient amount of memory available. 549 * %-ENOMEM - Insufficient amount of memory available.
550 */ 550 */
551 int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp) 551 int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
552 { 552 {
553 struct nilfs_sustat sustat; 553 struct nilfs_sustat sustat;
554 int ret; 554 int ret;
555 555
556 ret = nilfs_sufile_get_stat(sufile, &sustat); 556 ret = nilfs_sufile_get_stat(sufile, &sustat);
557 if (ret == 0) 557 if (ret == 0)
558 *nsegsp = sustat.ss_ncleansegs; 558 *nsegsp = sustat.ss_ncleansegs;
559 return ret; 559 return ret;
560 } 560 }
561 561
562 void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, 562 void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
563 struct buffer_head *header_bh, 563 struct buffer_head *header_bh,
564 struct buffer_head *su_bh) 564 struct buffer_head *su_bh)
565 { 565 {
566 struct nilfs_segment_usage *su; 566 struct nilfs_segment_usage *su;
567 void *kaddr; 567 void *kaddr;
568 int suclean; 568 int suclean;
569 569
570 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 570 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
571 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); 571 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
572 if (nilfs_segment_usage_error(su)) { 572 if (nilfs_segment_usage_error(su)) {
573 kunmap_atomic(kaddr, KM_USER0); 573 kunmap_atomic(kaddr, KM_USER0);
574 return; 574 return;
575 } 575 }
576 suclean = nilfs_segment_usage_clean(su); 576 suclean = nilfs_segment_usage_clean(su);
577 nilfs_segment_usage_set_error(su); 577 nilfs_segment_usage_set_error(su);
578 kunmap_atomic(kaddr, KM_USER0); 578 kunmap_atomic(kaddr, KM_USER0);
579 579
580 if (suclean) 580 if (suclean)
581 nilfs_sufile_mod_counter(header_bh, -1, 0); 581 nilfs_sufile_mod_counter(header_bh, -1, 0);
582 nilfs_mdt_mark_buffer_dirty(su_bh); 582 nilfs_mdt_mark_buffer_dirty(su_bh);
583 nilfs_mdt_mark_dirty(sufile); 583 nilfs_mdt_mark_dirty(sufile);
584 } 584 }
585 585
/**
 * nilfs_sufile_get_suinfo - get an array of segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: copies up to @nsi segment usage entries starting at
 * @segnum into @buf, converting each on-disk entry to nilfs_suinfo
 * format and folding in the in-core "active" state of the segment.
 * Unmapped (hole) blocks yield zero-filled entries.
 *
 * Return Value: On success, the number of entries stored in @buf is
 * returned. On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/*
	 * NOTE(review): if @segnum exceeds the segment count, the unsigned
	 * subtraction below wraps around; callers are presumably expected
	 * to pass a valid starting segment number -- confirm at call sites.
	 */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		/* n = entries obtainable from the current sufile block */
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole: report zeroed entries for unmapped blocks */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		/* step @su by on-disk entry size, @si by caller's sisz */
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/*
			 * The active flag is maintained in core rather than
			 * on disk: mask out the stored bit and recompute it
			 * from the current segment state.
			 */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
660 660
/**
 * nilfs_sufile_read - read sufile inode
 * @sufile: sufile inode
 * @raw_inode: on-disk sufile inode
 *
 * Fills the in-core inode from the on-disk image kept in the super
 * root block.  The sufile currently has no private state to set up
 * after the read, so this simply delegates to the common reader; the
 * per-file read method exists so such initialization can be added
 * without touching the super-root loading code.
 *
 * Return Value: 0 on success, or a negative error code from
 * nilfs_read_inode_common().
 */
int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
{
	return nilfs_read_inode_common(sufile, raw_inode);
}
670
/**
 * nilfs_sufile_new - create sufile
 * @nilfs: nilfs object
 * @susize: size of a segment usage entry
 *
 * Allocates the sufile metadata inode and records its entry and
 * header sizes.
 *
 * Return Value: pointer to the new sufile inode, or NULL if the
 * allocation failed.
 */
struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
{
	struct inode *sufile;

	sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO, 0);
	if (sufile)
		nilfs_mdt_set_entry_size(sufile, susize,
					 sizeof(struct nilfs_sufile_header));
	return sufile;
}
676 686
1 /* 1 /*
2 * sufile.h - NILFS segment usage file. 2 * sufile.h - NILFS segment usage file.
3 * 3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 * 19 *
20 * Written by Koji Sato <koji@osrg.net>. 20 * Written by Koji Sato <koji@osrg.net>.
21 */ 21 */
22 22
23 #ifndef _NILFS_SUFILE_H 23 #ifndef _NILFS_SUFILE_H
24 #define _NILFS_SUFILE_H 24 #define _NILFS_SUFILE_H
25 25
26 #include <linux/fs.h> 26 #include <linux/fs.h>
27 #include <linux/buffer_head.h> 27 #include <linux/buffer_head.h>
28 #include <linux/nilfs2_fs.h> 28 #include <linux/nilfs2_fs.h>
29 #include "mdt.h" 29 #include "mdt.h"
30 30
31 31
32 static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile) 32 static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
33 { 33 {
34 return NILFS_MDT(sufile)->mi_nilfs->ns_nsegments; 34 return NILFS_MDT(sufile)->mi_nilfs->ns_nsegments;
35 } 35 }
36 36
37 int nilfs_sufile_alloc(struct inode *, __u64 *); 37 int nilfs_sufile_alloc(struct inode *, __u64 *);
38 int nilfs_sufile_get_segment_usage(struct inode *, __u64, 38 int nilfs_sufile_get_segment_usage(struct inode *, __u64,
39 struct nilfs_segment_usage **, 39 struct nilfs_segment_usage **,
40 struct buffer_head **); 40 struct buffer_head **);
41 void nilfs_sufile_put_segment_usage(struct inode *, __u64, 41 void nilfs_sufile_put_segment_usage(struct inode *, __u64,
42 struct buffer_head *); 42 struct buffer_head *);
43 int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); 43 int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
44 int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *); 44 int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *);
45 ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned, 45 ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned,
46 size_t); 46 size_t);
47 47
48 int nilfs_sufile_updatev(struct inode *, __u64 *, size_t, int, size_t *, 48 int nilfs_sufile_updatev(struct inode *, __u64 *, size_t, int, size_t *,
49 void (*dofunc)(struct inode *, __u64, 49 void (*dofunc)(struct inode *, __u64,
50 struct buffer_head *, 50 struct buffer_head *,
51 struct buffer_head *)); 51 struct buffer_head *));
52 int nilfs_sufile_update(struct inode *, __u64, int, 52 int nilfs_sufile_update(struct inode *, __u64, int,
53 void (*dofunc)(struct inode *, __u64, 53 void (*dofunc)(struct inode *, __u64,
54 struct buffer_head *, 54 struct buffer_head *,
55 struct buffer_head *)); 55 struct buffer_head *));
56 void nilfs_sufile_do_scrap(struct inode *, __u64, struct buffer_head *, 56 void nilfs_sufile_do_scrap(struct inode *, __u64, struct buffer_head *,
57 struct buffer_head *); 57 struct buffer_head *);
58 void nilfs_sufile_do_free(struct inode *, __u64, struct buffer_head *, 58 void nilfs_sufile_do_free(struct inode *, __u64, struct buffer_head *,
59 struct buffer_head *); 59 struct buffer_head *);
60 void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *, 60 void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
61 struct buffer_head *); 61 struct buffer_head *);
62 void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *, 62 void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
63 struct buffer_head *); 63 struct buffer_head *);
64 64
65 int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode);
65 struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize); 66 struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize);
66 67
67 /** 68 /**
68 * nilfs_sufile_scrap - make a segment garbage 69 * nilfs_sufile_scrap - make a segment garbage
69 * @sufile: inode of segment usage file 70 * @sufile: inode of segment usage file
70 * @segnum: segment number to be freed 71 * @segnum: segment number to be freed
71 */ 72 */
72 static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum) 73 static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
73 { 74 {
74 return nilfs_sufile_update(sufile, segnum, 1, nilfs_sufile_do_scrap); 75 return nilfs_sufile_update(sufile, segnum, 1, nilfs_sufile_do_scrap);
75 } 76 }
76 77
77 /** 78 /**
78 * nilfs_sufile_free - free segment 79 * nilfs_sufile_free - free segment
79 * @sufile: inode of segment usage file 80 * @sufile: inode of segment usage file
80 * @segnum: segment number to be freed 81 * @segnum: segment number to be freed
81 */ 82 */
82 static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum) 83 static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
83 { 84 {
84 return nilfs_sufile_update(sufile, segnum, 0, nilfs_sufile_do_free); 85 return nilfs_sufile_update(sufile, segnum, 0, nilfs_sufile_do_free);
85 } 86 }
86 87
87 /** 88 /**
88 * nilfs_sufile_freev - free segments 89 * nilfs_sufile_freev - free segments
89 * @sufile: inode of segment usage file 90 * @sufile: inode of segment usage file
90 * @segnumv: array of segment numbers 91 * @segnumv: array of segment numbers
91 * @nsegs: size of @segnumv array 92 * @nsegs: size of @segnumv array
92 * @ndone: place to store the number of freed segments 93 * @ndone: place to store the number of freed segments
93 */ 94 */
94 static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv, 95 static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv,
95 size_t nsegs, size_t *ndone) 96 size_t nsegs, size_t *ndone)
96 { 97 {
97 return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone, 98 return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone,
98 nilfs_sufile_do_free); 99 nilfs_sufile_do_free);
99 } 100 }
100 101
101 /** 102 /**
102 * nilfs_sufile_cancel_freev - reallocate freeing segments 103 * nilfs_sufile_cancel_freev - reallocate freeing segments
103 * @sufile: inode of segment usage file 104 * @sufile: inode of segment usage file
104 * @segnumv: array of segment numbers 105 * @segnumv: array of segment numbers
105 * @nsegs: size of @segnumv array 106 * @nsegs: size of @segnumv array
106 * @ndone: place to store the number of cancelled segments 107 * @ndone: place to store the number of cancelled segments
107 * 108 *
108 * Return Value: On success, 0 is returned. On error, a negative error codes 109 * Return Value: On success, 0 is returned. On error, a negative error codes
109 * is returned. 110 * is returned.
110 */ 111 */
111 static inline int nilfs_sufile_cancel_freev(struct inode *sufile, 112 static inline int nilfs_sufile_cancel_freev(struct inode *sufile,
112 __u64 *segnumv, size_t nsegs, 113 __u64 *segnumv, size_t nsegs,
113 size_t *ndone) 114 size_t *ndone)
114 { 115 {
115 return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone, 116 return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone,
116 nilfs_sufile_do_cancel_free); 117 nilfs_sufile_do_cancel_free);
117 } 118 }
118 119
119 /** 120 /**
120 * nilfs_sufile_set_error - mark a segment as erroneous 121 * nilfs_sufile_set_error - mark a segment as erroneous
121 * @sufile: inode of segment usage file 122 * @sufile: inode of segment usage file
122 * @segnum: segment number 123 * @segnum: segment number
123 * 124 *
124 * Description: nilfs_sufile_set_error() marks the segment specified by 125 * Description: nilfs_sufile_set_error() marks the segment specified by
125 * @segnum as erroneous. The error segment will never be used again. 126 * @segnum as erroneous. The error segment will never be used again.
126 * 127 *
127 * Return Value: On success, 0 is returned. On error, one of the following 128 * Return Value: On success, 0 is returned. On error, one of the following
128 * negative error codes is returned. 129 * negative error codes is returned.
129 * 130 *
130 * %-EIO - I/O error. 131 * %-EIO - I/O error.
131 * 132 *
132 * %-ENOMEM - Insufficient amount of memory available. 133 * %-ENOMEM - Insufficient amount of memory available.
133 * 134 *
134 * %-EINVAL - Invalid segment usage number. 135 * %-EINVAL - Invalid segment usage number.
135 */ 136 */
136 static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum) 137 static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
137 { 138 {
138 return nilfs_sufile_update(sufile, segnum, 0, 139 return nilfs_sufile_update(sufile, segnum, 0,
139 nilfs_sufile_do_set_error); 140 nilfs_sufile_do_set_error);
140 } 141 }
141 142
142 #endif /* _NILFS_SUFILE_H */ 143 #endif /* _NILFS_SUFILE_H */
143 144
fs/nilfs2/the_nilfs.c
1 /* 1 /*
2 * the_nilfs.c - the_nilfs shared structure. 2 * the_nilfs.c - the_nilfs shared structure.
3 * 3 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 * 19 *
20 * Written by Ryusuke Konishi <ryusuke@osrg.net> 20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
21 * 21 *
22 */ 22 */
23 23
24 #include <linux/buffer_head.h> 24 #include <linux/buffer_head.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/blkdev.h> 26 #include <linux/blkdev.h>
27 #include <linux/backing-dev.h> 27 #include <linux/backing-dev.h>
28 #include <linux/crc32.h> 28 #include <linux/crc32.h>
29 #include "nilfs.h" 29 #include "nilfs.h"
30 #include "segment.h" 30 #include "segment.h"
31 #include "alloc.h" 31 #include "alloc.h"
32 #include "cpfile.h" 32 #include "cpfile.h"
33 #include "sufile.h" 33 #include "sufile.h"
34 #include "dat.h" 34 #include "dat.h"
35 #include "segbuf.h" 35 #include "segbuf.h"
36 36
37 37
38 static LIST_HEAD(nilfs_objects); 38 static LIST_HEAD(nilfs_objects);
39 static DEFINE_SPINLOCK(nilfs_lock); 39 static DEFINE_SPINLOCK(nilfs_lock);
40 40
/**
 * nilfs_set_last_segment - record position of the latest segment
 * @nilfs: the_nilfs object
 * @start_blocknr: start block number of the latest partial segment
 * @seq: sequence number of the latest log
 * @cno: checkpoint number of the latest checkpoint
 *
 * Updates the three "last segment" fields atomically with respect to
 * readers; ns_last_segment_lock keeps them mutually consistent.
 */
void nilfs_set_last_segment(struct the_nilfs *nilfs,
			    sector_t start_blocknr, u64 seq, __u64 cno)
{
	spin_lock(&nilfs->ns_last_segment_lock);
	nilfs->ns_last_pseg = start_blocknr;
	nilfs->ns_last_seq = seq;
	nilfs->ns_last_cno = cno;
	spin_unlock(&nilfs->ns_last_segment_lock);
}
50 50
/**
 * alloc_nilfs - allocate the_nilfs structure
 * @bdev: block device to which the_nilfs is related
 *
 * alloc_nilfs() allocates memory for the_nilfs and
 * initializes its reference count and locks.
 *
 * Return Value: On success, pointer to the_nilfs is returned.
 * On error, NULL is returned.
 */
static struct the_nilfs *alloc_nilfs(struct block_device *bdev)
{
	struct the_nilfs *nilfs;

	/* kzalloc: every field not touched below starts out zero/NULL */
	nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL);
	if (!nilfs)
		return NULL;

	nilfs->ns_bdev = bdev;
	nilfs->ns_gc_inodes_h = NULL;
	/* the caller owns the initial reference */
	atomic_set(&nilfs->ns_count, 1);
	atomic_set(&nilfs->ns_ndirtyblks, 0);
	init_rwsem(&nilfs->ns_sem);
	init_rwsem(&nilfs->ns_super_sem);
	mutex_init(&nilfs->ns_mount_mutex);
	init_rwsem(&nilfs->ns_writer_sem);
	INIT_LIST_HEAD(&nilfs->ns_list);
	INIT_LIST_HEAD(&nilfs->ns_supers);
	spin_lock_init(&nilfs->ns_last_segment_lock);
	init_rwsem(&nilfs->ns_segctor_sem);

	return nilfs;
}
84 84
/**
 * find_or_create_nilfs - find or create nilfs object
 * @bdev: block device to which the_nilfs is related
 *
 * find_or_create_nilfs() looks up an existent nilfs object created on
 * the device and gets the reference count of the object. If no nilfs
 * object is found on the device, a new nilfs object is allocated.
 *
 * Return Value: On success, pointer to the nilfs object is returned.
 * On error, NULL is returned.
 */
struct the_nilfs *find_or_create_nilfs(struct block_device *bdev)
{
	struct the_nilfs *nilfs, *new = NULL;

	/*
	 * Allocation cannot happen under nilfs_lock, so the lock is
	 * dropped for alloc_nilfs() and the lookup is retried; a racing
	 * creator may have inserted an object in the meantime, in which
	 * case the speculative allocation @new is released.
	 */
 retry:
	spin_lock(&nilfs_lock);
	list_for_each_entry(nilfs, &nilfs_objects, ns_list) {
		if (nilfs->ns_bdev == bdev) {
			get_nilfs(nilfs);
			spin_unlock(&nilfs_lock);
			if (new)
				put_nilfs(new);
			return nilfs; /* existing object */
		}
	}
	if (new) {
		list_add_tail(&new->ns_list, &nilfs_objects);
		spin_unlock(&nilfs_lock);
		return new; /* new object */
	}
	spin_unlock(&nilfs_lock);

	new = alloc_nilfs(bdev);
	if (new)
		goto retry;
	return NULL; /* insufficient memory */
}
123 123
/**
 * put_nilfs - release a reference to the_nilfs
 * @nilfs: the_nilfs structure to be released
 *
 * put_nilfs() decrements a reference counter of the_nilfs.
 * If the reference count reaches zero, the_nilfs is freed.
 */
void put_nilfs(struct the_nilfs *nilfs)
{
	/*
	 * The drop-to-zero test and the list removal must happen under
	 * nilfs_lock so find_or_create_nilfs() cannot hand out a
	 * reference to an object that is being torn down.
	 */
	spin_lock(&nilfs_lock);
	if (!atomic_dec_and_test(&nilfs->ns_count)) {
		spin_unlock(&nilfs_lock);
		return;
	}
	list_del_init(&nilfs->ns_list);
	spin_unlock(&nilfs_lock);

	/*
	 * Increment of ns_count never occurs below because the caller
	 * of get_nilfs() holds at least one reference to the_nilfs.
	 * Thus its exclusion control is not required here.
	 */

	might_sleep();
	if (nilfs_loaded(nilfs)) {
		/* metadata files set up by nilfs_load_super_root() */
		nilfs_mdt_clear(nilfs->ns_sufile);
		nilfs_mdt_destroy(nilfs->ns_sufile);
		nilfs_mdt_clear(nilfs->ns_cpfile);
		nilfs_mdt_destroy(nilfs->ns_cpfile);
		nilfs_mdt_clear(nilfs->ns_dat);
		nilfs_mdt_destroy(nilfs->ns_dat);
		/* XXX: how and when to clear nilfs->ns_gc_dat? */
		nilfs_mdt_destroy(nilfs->ns_gc_dat);
	}
	if (nilfs_init(nilfs)) {
		nilfs_destroy_gccache(nilfs);
		brelse(nilfs->ns_sbh[0]);
		brelse(nilfs->ns_sbh[1]);
	}
	kfree(nilfs);
}
165 165
/**
 * nilfs_load_super_root - load metadata files from a super root block
 * @nilfs: the_nilfs object
 * @sbi: nilfs_sb_info supplying the VFS super block
 * @sr_block: disk block number of the super root
 *
 * Reads the super root block, creates the DAT, GC-DAT, cpfile and
 * sufile metadata inodes, and initializes each from its on-disk inode
 * image embedded in the super root via the per-file read methods
 * (nilfs_dat_read/nilfs_cpfile_read/nilfs_sufile_read).
 *
 * Return Value: 0 on success, or a negative error code (-ENOMEM on
 * allocation failure, or the error from the block/inode reads).
 */
static int nilfs_load_super_root(struct the_nilfs *nilfs,
				 struct nilfs_sb_info *sbi, sector_t sr_block)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	unsigned dat_entry_size, segment_usage_size, checkpoint_size;
	unsigned inode_size;
	int err;

	err = nilfs_read_super_root_block(sbi->s_super, sr_block, &bh_sr, 1);
	if (unlikely(err))
		return err;

	/* entry sizes come from the super block, read under ns_sem */
	down_read(&nilfs->ns_sem);
	dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size);
	checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size);
	segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size);
	up_read(&nilfs->ns_sem);

	inode_size = nilfs->ns_inode_size;

	err = -ENOMEM;
	nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size);
	if (unlikely(!nilfs->ns_dat))
		goto failed;

	nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size);
	if (unlikely(!nilfs->ns_gc_dat))
		goto failed_dat;

	nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size);
	if (unlikely(!nilfs->ns_cpfile))
		goto failed_gc_dat;

	nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size);
	if (unlikely(!nilfs->ns_sufile))
		goto failed_cpfile;

	nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);

	/*
	 * All four inodes exist from here on, so any read failure
	 * unwinds through failed_sufile, destroying every file via the
	 * fall-through label cascade below.
	 */
	err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data +
			     NILFS_SR_DAT_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

	err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data +
				NILFS_SR_CPFILE_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

	err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data +
				NILFS_SR_SUFILE_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);

 failed:
	brelse(bh_sr);
	return err;

 failed_sufile:
	nilfs_mdt_destroy(nilfs->ns_sufile);

 failed_cpfile:
	nilfs_mdt_destroy(nilfs->ns_cpfile);

 failed_gc_dat:
	nilfs_mdt_destroy(nilfs->ns_gc_dat);

 failed_dat:
	nilfs_mdt_destroy(nilfs->ns_dat);
	goto failed;
}
242 242
/* Zero the recovery info and set up its segment list head.  The
 * memset must come first or it would wipe the list pointers. */
static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri)
{
	memset(ri, 0, sizeof(*ri));
	INIT_LIST_HEAD(&ri->ri_used_segments);
}
248 248
/* Release the segment list accumulated during recovery. */
static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
{
	nilfs_dispose_segment_list(&ri->ri_used_segments);
}
253 253
/**
 * load_nilfs - load and recover the nilfs
 * @nilfs: the_nilfs structure to be released
 * @sbi: nilfs_sb_info used to recover past segment
 *
 * load_nilfs() searches and load the latest super root,
 * attaches the last segment, and does recovery if needed.
 * The caller must call this exclusively for simultaneous mounts.
 *
 * Return Value: 0 on success, or a negative error code (-EROFS when
 * recovery is needed but the device is read-only, or the error from
 * super-root search/load or log recovery).
 */
int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
{
	struct nilfs_recovery_info ri;
	/* saved so any temporary clearing of MS_RDONLY can be undone */
	unsigned int s_flags = sbi->s_super->s_flags;
	int really_read_only = bdev_read_only(nilfs->ns_bdev);
	unsigned valid_fs;
	int err = 0;

	nilfs_init_recovery_info(&ri);

	down_write(&nilfs->ns_sem);
	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
	up_write(&nilfs->ns_sem);

	/*
	 * An unclean filesystem needs log recovery, which writes to the
	 * device; a read-only mount is allowed only if the underlying
	 * block device itself permits writing.
	 */
	if (!valid_fs && (s_flags & MS_RDONLY)) {
		printk(KERN_INFO "NILFS: INFO: recovery "
		       "required for readonly filesystem.\n");
		if (really_read_only) {
			printk(KERN_ERR "NILFS: write access "
			       "unavailable, cannot proceed.\n");
			err = -EROFS;
			goto failed;
		}
		printk(KERN_INFO "NILFS: write access will "
		       "be enabled during recovery.\n");
		sbi->s_super->s_flags &= ~MS_RDONLY;
	}

	err = nilfs_search_super_root(nilfs, sbi, &ri);
	if (unlikely(err)) {
		printk(KERN_ERR "NILFS: error searching super root.\n");
		goto failed;
	}

	err = nilfs_load_super_root(nilfs, sbi, ri.ri_super_root);
	if (unlikely(err)) {
		printk(KERN_ERR "NILFS: error loading super root.\n");
		goto failed;
	}

	if (!valid_fs) {
		err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
		if (unlikely(err)) {
			/* drop the metadata files created above */
			nilfs_mdt_destroy(nilfs->ns_cpfile);
			nilfs_mdt_destroy(nilfs->ns_sufile);
			nilfs_mdt_destroy(nilfs->ns_dat);
			goto failed;
		}
		if (ri.ri_need_recovery == NILFS_RECOVERY_SR_UPDATED)
			sbi->s_super->s_dirt = 1;
	}

	set_nilfs_loaded(nilfs);

 failed:
	nilfs_clear_recovery_info(&ri);
	/* restore the caller's mount flags (may re-set MS_RDONLY) */
	sbi->s_super->s_flags = s_flags;
	return err;
}
322 322
323 static unsigned long long nilfs_max_size(unsigned int blkbits) 323 static unsigned long long nilfs_max_size(unsigned int blkbits)
324 { 324 {
325 unsigned int max_bits; 325 unsigned int max_bits;
326 unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */ 326 unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */
327 327
328 max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */ 328 max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */
329 if (max_bits < 64) 329 if (max_bits < 64)
330 res = min_t(unsigned long long, res, (1ULL << max_bits) - 1); 330 res = min_t(unsigned long long, res, (1ULL << max_bits) - 1);
331 return res; 331 return res;
332 } 332 }
333 333
/*
 * nilfs_store_disk_layout - read disk layout parameters from a super block
 * @nilfs: nilfs object
 * @sbp: super block to take the parameters from
 *
 * Validates the revision, size, and segment-length fields of @sbp and
 * copies the layout constants into the shared fields of @nilfs.
 *
 * Return Value: 0 on success, or -EINVAL when the super block content
 * is unusable.
 */
static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
				   struct nilfs_super_block *sbp)
{
	/* only the current on-disk format revision is supported */
	if (le32_to_cpu(sbp->s_rev_level) != NILFS_CURRENT_REV) {
		printk(KERN_ERR "NILFS: revision mismatch "
		       "(superblock rev.=%d.%d, current rev.=%d.%d). "
		       "Please check the version of mkfs.nilfs.\n",
		       le32_to_cpu(sbp->s_rev_level),
		       le16_to_cpu(sbp->s_minor_rev_level),
		       NILFS_CURRENT_REV, NILFS_MINOR_REV);
		return -EINVAL;
	}
	nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
	if (nilfs->ns_sbsize > BLOCK_SIZE)
		return -EINVAL;

	nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);

	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
		printk(KERN_ERR "NILFS: too short segment. \n");
		return -EINVAL;
	}

	nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
	nilfs->ns_nsegments = le64_to_cpu(sbp->s_nsegments);
	nilfs->ns_r_segments_percentage =
		le32_to_cpu(sbp->s_r_segments_percentage);
	/* reserved segment count: percentage of all segments, rounded up,
	   with NILFS_MIN_NRSVSEGS as a lower bound */
	nilfs->ns_nrsvsegs =
		max_t(unsigned long, NILFS_MIN_NRSVSEGS,
		      DIV_ROUND_UP(nilfs->ns_nsegments *
				   nilfs->ns_r_segments_percentage, 100));
	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
	return 0;
}
370 370
371 static int nilfs_valid_sb(struct nilfs_super_block *sbp) 371 static int nilfs_valid_sb(struct nilfs_super_block *sbp)
372 { 372 {
373 static unsigned char sum[4]; 373 static unsigned char sum[4];
374 const int sumoff = offsetof(struct nilfs_super_block, s_sum); 374 const int sumoff = offsetof(struct nilfs_super_block, s_sum);
375 size_t bytes; 375 size_t bytes;
376 u32 crc; 376 u32 crc;
377 377
378 if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) 378 if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
379 return 0; 379 return 0;
380 bytes = le16_to_cpu(sbp->s_bytes); 380 bytes = le16_to_cpu(sbp->s_bytes);
381 if (bytes > BLOCK_SIZE) 381 if (bytes > BLOCK_SIZE)
382 return 0; 382 return 0;
383 crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, 383 crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
384 sumoff); 384 sumoff);
385 crc = crc32_le(crc, sum, 4); 385 crc = crc32_le(crc, sum, 4);
386 crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4, 386 crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4,
387 bytes - sumoff - 4); 387 bytes - sumoff - 4);
388 return crc == le32_to_cpu(sbp->s_sum); 388 return crc == le32_to_cpu(sbp->s_sum);
389 } 389 }
390 390
391 static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) 391 static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
392 { 392 {
393 return offset < ((le64_to_cpu(sbp->s_nsegments) * 393 return offset < ((le64_to_cpu(sbp->s_nsegments) *
394 le32_to_cpu(sbp->s_blocks_per_segment)) << 394 le32_to_cpu(sbp->s_blocks_per_segment)) <<
395 (le32_to_cpu(sbp->s_log_block_size) + 10)); 395 (le32_to_cpu(sbp->s_log_block_size) + 10));
396 } 396 }
397 397
398 static void nilfs_release_super_block(struct the_nilfs *nilfs) 398 static void nilfs_release_super_block(struct the_nilfs *nilfs)
399 { 399 {
400 int i; 400 int i;
401 401
402 for (i = 0; i < 2; i++) { 402 for (i = 0; i < 2; i++) {
403 if (nilfs->ns_sbp[i]) { 403 if (nilfs->ns_sbp[i]) {
404 brelse(nilfs->ns_sbh[i]); 404 brelse(nilfs->ns_sbh[i]);
405 nilfs->ns_sbh[i] = NULL; 405 nilfs->ns_sbh[i] = NULL;
406 nilfs->ns_sbp[i] = NULL; 406 nilfs->ns_sbp[i] = NULL;
407 } 407 }
408 } 408 }
409 } 409 }
410 410
411 void nilfs_fall_back_super_block(struct the_nilfs *nilfs) 411 void nilfs_fall_back_super_block(struct the_nilfs *nilfs)
412 { 412 {
413 brelse(nilfs->ns_sbh[0]); 413 brelse(nilfs->ns_sbh[0]);
414 nilfs->ns_sbh[0] = nilfs->ns_sbh[1]; 414 nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
415 nilfs->ns_sbp[0] = nilfs->ns_sbp[1]; 415 nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
416 nilfs->ns_sbh[1] = NULL; 416 nilfs->ns_sbh[1] = NULL;
417 nilfs->ns_sbp[1] = NULL; 417 nilfs->ns_sbp[1] = NULL;
418 } 418 }
419 419
420 void nilfs_swap_super_block(struct the_nilfs *nilfs) 420 void nilfs_swap_super_block(struct the_nilfs *nilfs)
421 { 421 {
422 struct buffer_head *tsbh = nilfs->ns_sbh[0]; 422 struct buffer_head *tsbh = nilfs->ns_sbh[0];
423 struct nilfs_super_block *tsbp = nilfs->ns_sbp[0]; 423 struct nilfs_super_block *tsbp = nilfs->ns_sbp[0];
424 424
425 nilfs->ns_sbh[0] = nilfs->ns_sbh[1]; 425 nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
426 nilfs->ns_sbp[0] = nilfs->ns_sbp[1]; 426 nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
427 nilfs->ns_sbh[1] = tsbh; 427 nilfs->ns_sbh[1] = tsbh;
428 nilfs->ns_sbp[1] = tsbp; 428 nilfs->ns_sbp[1] = tsbp;
429 } 429 }
430 430
/*
 * nilfs_load_super_block - read and select a valid super block copy
 * @nilfs: nilfs object
 * @sb: super block instance
 * @blocksize: block size used for reading
 * @sbpp: place to store the pointer to the selected super block
 *
 * Reads both the primary and the secondary (spare) super block, checks
 * their checksums, and keeps the newer valid copy in slot 0 (swapping
 * if necessary).
 *
 * Return Value: 0 on success, -EIO when neither copy could be read, or
 * -EINVAL when no valid copy was found.
 */
static int nilfs_load_super_block(struct the_nilfs *nilfs,
				  struct super_block *sb, int blocksize,
				  struct nilfs_super_block **sbpp)
{
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	struct buffer_head **sbh = nilfs->ns_sbh;
	u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
	int valid[2], swp = 0;

	sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
					&sbh[0]);
	sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);

	/* a single unreadable copy is survivable; both unreadable is fatal */
	if (!sbp[0]) {
		if (!sbp[1]) {
			printk(KERN_ERR "NILFS: unable to read superblock\n");
			return -EIO;
		}
		printk(KERN_WARNING
		       "NILFS warning: unable to read primary superblock\n");
	} else if (!sbp[1])
		printk(KERN_WARNING
		       "NILFS warning: unable to read secondary superblock\n");

	valid[0] = nilfs_valid_sb(sbp[0]);
	valid[1] = nilfs_valid_sb(sbp[1]);
	/* swp != 0 means the secondary copy is valid and strictly newer
	   (or the only valid one) */
	swp = valid[1] &&
		(!valid[0] ||
		 le64_to_cpu(sbp[1]->s_wtime) > le64_to_cpu(sbp[0]->s_wtime));

	/*
	 * Discard the secondary copy when it is not positioned where the
	 * geometry recorded in the selected super block says it should be
	 * (presumably guards against a resized device -- confirm).
	 */
	if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
		brelse(sbh[1]);
		sbh[1] = NULL;
		sbp[1] = NULL;
		swp = 0;
	}
	if (!valid[swp]) {
		nilfs_release_super_block(nilfs);
		printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n",
		       sb->s_id);
		return -EINVAL;
	}

	/* make slot 0 hold the copy we decided to use */
	if (swp) {
		printk(KERN_WARNING "NILFS warning: broken superblock. "
		       "using spare superblock.\n");
		nilfs_swap_super_block(nilfs);
	}

	nilfs->ns_sbwtime[0] = le64_to_cpu(sbp[0]->s_wtime);
	nilfs->ns_sbwtime[1] = valid[!swp] ? le64_to_cpu(sbp[1]->s_wtime) : 0;
	nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
	*sbpp = sbp[0];
	return 0;
}
486 486
/**
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sbi: nilfs_sb_info
 * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
 * shared fields in the_nilfs). It takes on some portion of the jobs
 * typically done by a fill_super() routine. This division arises from
 * the nature that multiple NILFS instances may be simultaneously
 * mounted on a device.
 * For multiple mounts on the same device, only the first mount
 * invokes these tasks.
 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
 */
int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
{
	struct super_block *sb = sbi->s_super;
	struct nilfs_super_block *sbp;
	struct backing_dev_info *bdi;
	int blocksize;
	int err;

	down_write(&nilfs->ns_sem);
	if (nilfs_init(nilfs)) {
		/* Load values from existing the_nilfs: a later mount only
		   parses its options and adopts the established block size */
		sbp = nilfs->ns_sbp[0];
		err = nilfs_store_magic_and_option(sb, sbp, data);
		if (err)
			goto out;

		blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
		if (sb->s_blocksize != blocksize &&
		    !sb_set_blocksize(sb, blocksize)) {
			printk(KERN_ERR "NILFS: blocksize %d unfit to device\n",
			       blocksize);
			err = -EINVAL;
		}
		sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);
		goto out;
	}

	/* first mount: read the super block with a provisional block size */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		printk(KERN_ERR "NILFS: unable to set blocksize\n");
		err = -EINVAL;
		goto out;
	}
	err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
	if (err)
		goto out;

	err = nilfs_store_magic_and_option(sb, sbp, data);
	if (err)
		goto failed_sbh;

	/* switch to the block size recorded on disk, then re-read */
	blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
	if (sb->s_blocksize != blocksize) {
		int hw_blocksize = bdev_logical_block_size(sb->s_bdev);

		if (blocksize < hw_blocksize) {
			printk(KERN_ERR
			       "NILFS: blocksize %d too small for device "
			       "(sector-size = %d).\n",
			       blocksize, hw_blocksize);
			err = -EINVAL;
			goto failed_sbh;
		}
		nilfs_release_super_block(nilfs);
		sb_set_blocksize(sb, blocksize);

		err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
		if (err)
			goto out;
		/* not failed_sbh; sbh is released automatically
		   when reloading fails. */
	}
	nilfs->ns_blocksize_bits = sb->s_blocksize_bits;

	err = nilfs_store_disk_layout(nilfs, sbp);
	if (err)
		goto failed_sbh;

	sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);

	nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);

	bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
	nilfs->ns_bdi = bdi ? : &default_backing_dev_info;

	/* Finding last segment */
	nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
	nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
	nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);

	nilfs->ns_seg_seq = nilfs->ns_last_seq;
	nilfs->ns_segnum =
		nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
	nilfs->ns_cno = nilfs->ns_last_cno + 1;
	if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
		printk(KERN_ERR "NILFS invalid last segment number.\n");
		err = -EINVAL;
		goto failed_sbh;
	}
	/* Dummy values */
	nilfs->ns_free_segments_count =
		nilfs->ns_nsegments - (nilfs->ns_segnum + 1);

	/* Initialize gcinode cache */
	err = nilfs_init_gccache(nilfs);
	if (err)
		goto failed_sbh;

	set_nilfs_init(nilfs);
	err = 0;
 out:
	up_write(&nilfs->ns_sem);
	return err;

 failed_sbh:
	nilfs_release_super_block(nilfs);
	goto out;
}
614 614
615 int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) 615 int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
616 { 616 {
617 struct inode *dat = nilfs_dat_inode(nilfs); 617 struct inode *dat = nilfs_dat_inode(nilfs);
618 unsigned long ncleansegs; 618 unsigned long ncleansegs;
619 int err; 619 int err;
620 620
621 down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 621 down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */
622 err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs); 622 err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs);
623 up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 623 up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */
624 if (likely(!err)) 624 if (likely(!err))
625 *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment; 625 *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
626 return err; 626 return err;
627 } 627 }
628 628
629 int nilfs_near_disk_full(struct the_nilfs *nilfs) 629 int nilfs_near_disk_full(struct the_nilfs *nilfs)
630 { 630 {
631 struct inode *sufile = nilfs->ns_sufile; 631 struct inode *sufile = nilfs->ns_sufile;
632 unsigned long ncleansegs, nincsegs; 632 unsigned long ncleansegs, nincsegs;
633 int ret; 633 int ret;
634 634
635 ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs); 635 ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
636 if (likely(!ret)) { 636 if (likely(!ret)) {
637 nincsegs = atomic_read(&nilfs->ns_ndirtyblks) / 637 nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
638 nilfs->ns_blocks_per_segment + 1; 638 nilfs->ns_blocks_per_segment + 1;
639 if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs) 639 if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs)
640 ret++; 640 ret++;
641 } 641 }
642 return ret; 642 return ret;
643 } 643 }
644 644
/**
 * nilfs_find_sbinfo - find existing nilfs_sb_info structure
 * @nilfs: nilfs object
 * @rw_mount: mount type (non-zero value for read/write mount)
 * @cno: checkpoint number (zero for read-only mount)
 *
 * nilfs_find_sbinfo() returns the nilfs_sb_info structure which
 * @rw_mount and @cno (in case of snapshots) matched. If no instance
 * was found, NULL is returned. Although the super block instance can
 * be unmounted after this function returns, the nilfs_sb_info struct
 * is kept on memory until nilfs_put_sbinfo() is called.
 */
struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *nilfs,
					int rw_mount, __u64 cno)
{
	struct nilfs_sb_info *sbi;

	down_read(&nilfs->ns_super_sem);
	/*
	 * The SNAPSHOT flag and sb->s_flags are supposed to be
	 * protected with nilfs->ns_super_sem.
	 */
	sbi = nilfs->ns_current;
	if (rw_mount) {
		/* only ns_current can be the read/write mount */
		if (sbi && !(sbi->s_super->s_flags & MS_RDONLY))
			goto found; /* read/write mount */
		else
			goto out;
	} else if (cno == 0) {
		if (sbi && (sbi->s_super->s_flags & MS_RDONLY))
			goto found; /* read-only mount */
		else
			goto out;
	}

	/* cno != 0: look for a snapshot mount of that checkpoint */
	list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
		if (nilfs_test_opt(sbi, SNAPSHOT) &&
		    sbi->s_snapshot_cno == cno)
			goto found; /* snapshot mount */
	}
 out:
	up_read(&nilfs->ns_super_sem);
	return NULL;

 found:
	/* take a reference before dropping ns_super_sem; the caller
	   releases it with nilfs_put_sbinfo() */
	atomic_inc(&sbi->s_count);
	up_read(&nilfs->ns_super_sem);
	return sbi;
}
694 694
/*
 * nilfs_checkpoint_is_mounted - check if a checkpoint is in use
 * @nilfs: nilfs object
 * @cno: checkpoint number to test
 * @snapshot_mount: when non-zero, only count snapshot mounts as matches
 *
 * Returns a non-zero value when @cno is mounted (per the ns_supers
 * list) or is at least as recent as the last checkpoint; 0 otherwise.
 * An out-of-range @cno (zero or beyond ns_cno) always yields 0.
 */
int nilfs_checkpoint_is_mounted(struct the_nilfs *nilfs, __u64 cno,
				int snapshot_mount)
{
	struct nilfs_sb_info *sbi;
	int ret = 0;

	down_read(&nilfs->ns_super_sem);
	if (cno == 0 || cno > nilfs->ns_cno)
		goto out_unlock;

	list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
		if (sbi->s_snapshot_cno == cno &&
		    (!snapshot_mount || nilfs_test_opt(sbi, SNAPSHOT))) {
					/* exclude read-only mounts */
			ret++;
			break;
		}
	}
	/* for protecting recent checkpoints */
	if (cno >= nilfs_last_cno(nilfs))
		ret++;

 out_unlock:
	up_read(&nilfs->ns_super_sem);
	return ret;
}
721 721