Commit 5ee581483246e832ca5a9fc1ac669bba0b6ec468
Committed by Ryusuke Konishi
1 parent 6b7b284958
Exists in master and in 4 other branches
nilfs2: trivial coding style fix
This is a trivial style fix patch to mend errors/warnings reported by "checkpatch.pl --file".

Signed-off-by: Jiro SEKIBA <jir@unicus.jp>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Showing 3 changed files with 29 additions and 23 deletions
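The two style patterns mended in the hunks below appear to be an empty loop body folded into the for-loop's iterator expression (in nilfs_bmap_data_get_key() in bmap.c) and an assignment embedded in an if condition (in nilfs_cpfile_delete_checkpoints() in cpfile.c); both are constructs checkpatch.pl reports, though the exact message text varies by version. A minimal stand-alone sketch of the preferred form of each, using hypothetical names rather than code from the patch:

#include <stdio.h>

/* Hypothetical singly linked list node, for illustration only. */
struct node {
        int value;
        struct node *next;
};

/*
 * checkpatch.pl complains when the loop body is squeezed into the
 * iterator expression and terminated with a bare ';'. The preferred
 * form keeps the work in an explicit body, as the patch does in
 * nilfs_bmap_data_get_key().
 */
static int count_until(const struct node *head, const struct node *stop)
{
        const struct node *p;
        int key = 0;

        for (p = head; p != stop; p = p->next)
                key++;
        return key;
}

/*
 * checkpatch.pl also rejects assignments inside an if condition; the
 * assignment is hoisted out so the condition only tests the result,
 * mirroring the restructuring in nilfs_cpfile_delete_checkpoints().
 */
static int report(const struct node *head, const struct node *stop)
{
        int count;

        count = count_until(head, stop);
        if (count == 0)
                printf("empty range\n");
        return count;
}

int main(void)
{
        struct node b = { .value = 2, .next = NULL };
        struct node a = { .value = 1, .next = &b };

        return report(&a, NULL) == 2 ? 0 : 1;
}

Note that hoisting the assignment out of the condition adds one level of nesting, which is why the continuation lines in the new cpfile.c hunk are re-wrapped.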
fs/nilfs2/bmap.c
1 | /* | 1 | /* |
2 | * bmap.c - NILFS block mapping. | 2 | * bmap.c - NILFS block mapping. |
3 | * | 3 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
19 | * | 19 | * |
20 | * Written by Koji Sato <koji@osrg.net>. | 20 | * Written by Koji Sato <koji@osrg.net>. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include "nilfs.h" | 26 | #include "nilfs.h" |
27 | #include "bmap.h" | 27 | #include "bmap.h" |
28 | #include "sb.h" | 28 | #include "sb.h" |
29 | #include "btnode.h" | 29 | #include "btnode.h" |
30 | #include "mdt.h" | 30 | #include "mdt.h" |
31 | #include "dat.h" | 31 | #include "dat.h" |
32 | #include "alloc.h" | 32 | #include "alloc.h" |
33 | 33 | ||
34 | struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap) | 34 | struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap) |
35 | { | 35 | { |
36 | return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode)); | 36 | return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode)); |
37 | } | 37 | } |
38 | 38 | ||
39 | /** | 39 | /** |
40 | * nilfs_bmap_lookup_at_level - find a data block or node block | 40 | * nilfs_bmap_lookup_at_level - find a data block or node block |
41 | * @bmap: bmap | 41 | * @bmap: bmap |
42 | * @key: key | 42 | * @key: key |
43 | * @level: level | 43 | * @level: level |
44 | * @ptrp: place to store the value associated to @key | 44 | * @ptrp: place to store the value associated to @key |
45 | * | 45 | * |
46 | * Description: nilfs_bmap_lookup_at_level() finds a record whose key | 46 | * Description: nilfs_bmap_lookup_at_level() finds a record whose key |
47 | * matches @key in the block at @level of the bmap. | 47 | * matches @key in the block at @level of the bmap. |
48 | * | 48 | * |
49 | * Return Value: On success, 0 is returned and the record associated with @key | 49 | * Return Value: On success, 0 is returned and the record associated with @key |
50 | * is stored in the place pointed by @ptrp. On error, one of the following | 50 | * is stored in the place pointed by @ptrp. On error, one of the following |
51 | * negative error codes is returned. | 51 | * negative error codes is returned. |
52 | * | 52 | * |
53 | * %-EIO - I/O error. | 53 | * %-EIO - I/O error. |
54 | * | 54 | * |
55 | * %-ENOMEM - Insufficient amount of memory available. | 55 | * %-ENOMEM - Insufficient amount of memory available. |
56 | * | 56 | * |
57 | * %-ENOENT - A record associated with @key does not exist. | 57 | * %-ENOENT - A record associated with @key does not exist. |
58 | */ | 58 | */ |
59 | int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, | 59 | int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, |
60 | __u64 *ptrp) | 60 | __u64 *ptrp) |
61 | { | 61 | { |
62 | sector_t blocknr; | 62 | sector_t blocknr; |
63 | int ret; | 63 | int ret; |
64 | 64 | ||
65 | down_read(&bmap->b_sem); | 65 | down_read(&bmap->b_sem); |
66 | ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp); | 66 | ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp); |
67 | if (ret < 0) | 67 | if (ret < 0) |
68 | goto out; | 68 | goto out; |
69 | if (NILFS_BMAP_USE_VBN(bmap)) { | 69 | if (NILFS_BMAP_USE_VBN(bmap)) { |
70 | ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp, | 70 | ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp, |
71 | &blocknr); | 71 | &blocknr); |
72 | if (!ret) | 72 | if (!ret) |
73 | *ptrp = blocknr; | 73 | *ptrp = blocknr; |
74 | } | 74 | } |
75 | 75 | ||
76 | out: | 76 | out: |
77 | up_read(&bmap->b_sem); | 77 | up_read(&bmap->b_sem); |
78 | return ret; | 78 | return ret; |
79 | } | 79 | } |
80 | 80 | ||
81 | int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp, | 81 | int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp, |
82 | unsigned maxblocks) | 82 | unsigned maxblocks) |
83 | { | 83 | { |
84 | int ret; | 84 | int ret; |
85 | 85 | ||
86 | down_read(&bmap->b_sem); | 86 | down_read(&bmap->b_sem); |
87 | ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks); | 87 | ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks); |
88 | up_read(&bmap->b_sem); | 88 | up_read(&bmap->b_sem); |
89 | return ret; | 89 | return ret; |
90 | } | 90 | } |
91 | 91 | ||
92 | static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) | 92 | static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) |
93 | { | 93 | { |
94 | __u64 keys[NILFS_BMAP_SMALL_HIGH + 1]; | 94 | __u64 keys[NILFS_BMAP_SMALL_HIGH + 1]; |
95 | __u64 ptrs[NILFS_BMAP_SMALL_HIGH + 1]; | 95 | __u64 ptrs[NILFS_BMAP_SMALL_HIGH + 1]; |
96 | int ret, n; | 96 | int ret, n; |
97 | 97 | ||
98 | if (bmap->b_ops->bop_check_insert != NULL) { | 98 | if (bmap->b_ops->bop_check_insert != NULL) { |
99 | ret = bmap->b_ops->bop_check_insert(bmap, key); | 99 | ret = bmap->b_ops->bop_check_insert(bmap, key); |
100 | if (ret > 0) { | 100 | if (ret > 0) { |
101 | n = bmap->b_ops->bop_gather_data( | 101 | n = bmap->b_ops->bop_gather_data( |
102 | bmap, keys, ptrs, NILFS_BMAP_SMALL_HIGH + 1); | 102 | bmap, keys, ptrs, NILFS_BMAP_SMALL_HIGH + 1); |
103 | if (n < 0) | 103 | if (n < 0) |
104 | return n; | 104 | return n; |
105 | ret = nilfs_btree_convert_and_insert( | 105 | ret = nilfs_btree_convert_and_insert( |
106 | bmap, key, ptr, keys, ptrs, n); | 106 | bmap, key, ptr, keys, ptrs, n); |
107 | if (ret == 0) | 107 | if (ret == 0) |
108 | bmap->b_u.u_flags |= NILFS_BMAP_LARGE; | 108 | bmap->b_u.u_flags |= NILFS_BMAP_LARGE; |
109 | 109 | ||
110 | return ret; | 110 | return ret; |
111 | } else if (ret < 0) | 111 | } else if (ret < 0) |
112 | return ret; | 112 | return ret; |
113 | } | 113 | } |
114 | 114 | ||
115 | return bmap->b_ops->bop_insert(bmap, key, ptr); | 115 | return bmap->b_ops->bop_insert(bmap, key, ptr); |
116 | } | 116 | } |
117 | 117 | ||
118 | /** | 118 | /** |
119 | * nilfs_bmap_insert - insert a new key-record pair into a bmap | 119 | * nilfs_bmap_insert - insert a new key-record pair into a bmap |
120 | * @bmap: bmap | 120 | * @bmap: bmap |
121 | * @key: key | 121 | * @key: key |
122 | * @rec: record | 122 | * @rec: record |
123 | * | 123 | * |
124 | * Description: nilfs_bmap_insert() inserts the new key-record pair specified | 124 | * Description: nilfs_bmap_insert() inserts the new key-record pair specified |
125 | * by @key and @rec into @bmap. | 125 | * by @key and @rec into @bmap. |
126 | * | 126 | * |
127 | * Return Value: On success, 0 is returned. On error, one of the following | 127 | * Return Value: On success, 0 is returned. On error, one of the following |
128 | * negative error codes is returned. | 128 | * negative error codes is returned. |
129 | * | 129 | * |
130 | * %-EIO - I/O error. | 130 | * %-EIO - I/O error. |
131 | * | 131 | * |
132 | * %-ENOMEM - Insufficient amount of memory available. | 132 | * %-ENOMEM - Insufficient amount of memory available. |
133 | * | 133 | * |
134 | * %-EEXIST - A record associated with @key already exist. | 134 | * %-EEXIST - A record associated with @key already exist. |
135 | */ | 135 | */ |
136 | int nilfs_bmap_insert(struct nilfs_bmap *bmap, | 136 | int nilfs_bmap_insert(struct nilfs_bmap *bmap, |
137 | unsigned long key, | 137 | unsigned long key, |
138 | unsigned long rec) | 138 | unsigned long rec) |
139 | { | 139 | { |
140 | int ret; | 140 | int ret; |
141 | 141 | ||
142 | down_write(&bmap->b_sem); | 142 | down_write(&bmap->b_sem); |
143 | ret = nilfs_bmap_do_insert(bmap, key, rec); | 143 | ret = nilfs_bmap_do_insert(bmap, key, rec); |
144 | up_write(&bmap->b_sem); | 144 | up_write(&bmap->b_sem); |
145 | return ret; | 145 | return ret; |
146 | } | 146 | } |
147 | 147 | ||
148 | static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) | 148 | static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) |
149 | { | 149 | { |
150 | __u64 keys[NILFS_BMAP_LARGE_LOW + 1]; | 150 | __u64 keys[NILFS_BMAP_LARGE_LOW + 1]; |
151 | __u64 ptrs[NILFS_BMAP_LARGE_LOW + 1]; | 151 | __u64 ptrs[NILFS_BMAP_LARGE_LOW + 1]; |
152 | int ret, n; | 152 | int ret, n; |
153 | 153 | ||
154 | if (bmap->b_ops->bop_check_delete != NULL) { | 154 | if (bmap->b_ops->bop_check_delete != NULL) { |
155 | ret = bmap->b_ops->bop_check_delete(bmap, key); | 155 | ret = bmap->b_ops->bop_check_delete(bmap, key); |
156 | if (ret > 0) { | 156 | if (ret > 0) { |
157 | n = bmap->b_ops->bop_gather_data( | 157 | n = bmap->b_ops->bop_gather_data( |
158 | bmap, keys, ptrs, NILFS_BMAP_LARGE_LOW + 1); | 158 | bmap, keys, ptrs, NILFS_BMAP_LARGE_LOW + 1); |
159 | if (n < 0) | 159 | if (n < 0) |
160 | return n; | 160 | return n; |
161 | ret = nilfs_direct_delete_and_convert( | 161 | ret = nilfs_direct_delete_and_convert( |
162 | bmap, key, keys, ptrs, n); | 162 | bmap, key, keys, ptrs, n); |
163 | if (ret == 0) | 163 | if (ret == 0) |
164 | bmap->b_u.u_flags &= ~NILFS_BMAP_LARGE; | 164 | bmap->b_u.u_flags &= ~NILFS_BMAP_LARGE; |
165 | 165 | ||
166 | return ret; | 166 | return ret; |
167 | } else if (ret < 0) | 167 | } else if (ret < 0) |
168 | return ret; | 168 | return ret; |
169 | } | 169 | } |
170 | 170 | ||
171 | return bmap->b_ops->bop_delete(bmap, key); | 171 | return bmap->b_ops->bop_delete(bmap, key); |
172 | } | 172 | } |
173 | 173 | ||
174 | int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key) | 174 | int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key) |
175 | { | 175 | { |
176 | __u64 lastkey; | 176 | __u64 lastkey; |
177 | int ret; | 177 | int ret; |
178 | 178 | ||
179 | down_read(&bmap->b_sem); | 179 | down_read(&bmap->b_sem); |
180 | ret = bmap->b_ops->bop_last_key(bmap, &lastkey); | 180 | ret = bmap->b_ops->bop_last_key(bmap, &lastkey); |
181 | if (!ret) | 181 | if (!ret) |
182 | *key = lastkey; | 182 | *key = lastkey; |
183 | up_read(&bmap->b_sem); | 183 | up_read(&bmap->b_sem); |
184 | return ret; | 184 | return ret; |
185 | } | 185 | } |
186 | 186 | ||
187 | /** | 187 | /** |
188 | * nilfs_bmap_delete - delete a key-record pair from a bmap | 188 | * nilfs_bmap_delete - delete a key-record pair from a bmap |
189 | * @bmap: bmap | 189 | * @bmap: bmap |
190 | * @key: key | 190 | * @key: key |
191 | * | 191 | * |
192 | * Description: nilfs_bmap_delete() deletes the key-record pair specified by | 192 | * Description: nilfs_bmap_delete() deletes the key-record pair specified by |
193 | * @key from @bmap. | 193 | * @key from @bmap. |
194 | * | 194 | * |
195 | * Return Value: On success, 0 is returned. On error, one of the following | 195 | * Return Value: On success, 0 is returned. On error, one of the following |
196 | * negative error codes is returned. | 196 | * negative error codes is returned. |
197 | * | 197 | * |
198 | * %-EIO - I/O error. | 198 | * %-EIO - I/O error. |
199 | * | 199 | * |
200 | * %-ENOMEM - Insufficient amount of memory available. | 200 | * %-ENOMEM - Insufficient amount of memory available. |
201 | * | 201 | * |
202 | * %-ENOENT - A record associated with @key does not exist. | 202 | * %-ENOENT - A record associated with @key does not exist. |
203 | */ | 203 | */ |
204 | int nilfs_bmap_delete(struct nilfs_bmap *bmap, unsigned long key) | 204 | int nilfs_bmap_delete(struct nilfs_bmap *bmap, unsigned long key) |
205 | { | 205 | { |
206 | int ret; | 206 | int ret; |
207 | 207 | ||
208 | down_write(&bmap->b_sem); | 208 | down_write(&bmap->b_sem); |
209 | ret = nilfs_bmap_do_delete(bmap, key); | 209 | ret = nilfs_bmap_do_delete(bmap, key); |
210 | up_write(&bmap->b_sem); | 210 | up_write(&bmap->b_sem); |
211 | return ret; | 211 | return ret; |
212 | } | 212 | } |
213 | 213 | ||
214 | static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key) | 214 | static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key) |
215 | { | 215 | { |
216 | __u64 lastkey; | 216 | __u64 lastkey; |
217 | int ret; | 217 | int ret; |
218 | 218 | ||
219 | ret = bmap->b_ops->bop_last_key(bmap, &lastkey); | 219 | ret = bmap->b_ops->bop_last_key(bmap, &lastkey); |
220 | if (ret < 0) { | 220 | if (ret < 0) { |
221 | if (ret == -ENOENT) | 221 | if (ret == -ENOENT) |
222 | ret = 0; | 222 | ret = 0; |
223 | return ret; | 223 | return ret; |
224 | } | 224 | } |
225 | 225 | ||
226 | while (key <= lastkey) { | 226 | while (key <= lastkey) { |
227 | ret = nilfs_bmap_do_delete(bmap, lastkey); | 227 | ret = nilfs_bmap_do_delete(bmap, lastkey); |
228 | if (ret < 0) | 228 | if (ret < 0) |
229 | return ret; | 229 | return ret; |
230 | ret = bmap->b_ops->bop_last_key(bmap, &lastkey); | 230 | ret = bmap->b_ops->bop_last_key(bmap, &lastkey); |
231 | if (ret < 0) { | 231 | if (ret < 0) { |
232 | if (ret == -ENOENT) | 232 | if (ret == -ENOENT) |
233 | ret = 0; | 233 | ret = 0; |
234 | return ret; | 234 | return ret; |
235 | } | 235 | } |
236 | } | 236 | } |
237 | return 0; | 237 | return 0; |
238 | } | 238 | } |
239 | 239 | ||
240 | /** | 240 | /** |
241 | * nilfs_bmap_truncate - truncate a bmap to a specified key | 241 | * nilfs_bmap_truncate - truncate a bmap to a specified key |
242 | * @bmap: bmap | 242 | * @bmap: bmap |
243 | * @key: key | 243 | * @key: key |
244 | * | 244 | * |
245 | * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are | 245 | * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are |
246 | * greater than or equal to @key from @bmap. | 246 | * greater than or equal to @key from @bmap. |
247 | * | 247 | * |
248 | * Return Value: On success, 0 is returned. On error, one of the following | 248 | * Return Value: On success, 0 is returned. On error, one of the following |
249 | * negative error codes is returned. | 249 | * negative error codes is returned. |
250 | * | 250 | * |
251 | * %-EIO - I/O error. | 251 | * %-EIO - I/O error. |
252 | * | 252 | * |
253 | * %-ENOMEM - Insufficient amount of memory available. | 253 | * %-ENOMEM - Insufficient amount of memory available. |
254 | */ | 254 | */ |
255 | int nilfs_bmap_truncate(struct nilfs_bmap *bmap, unsigned long key) | 255 | int nilfs_bmap_truncate(struct nilfs_bmap *bmap, unsigned long key) |
256 | { | 256 | { |
257 | int ret; | 257 | int ret; |
258 | 258 | ||
259 | down_write(&bmap->b_sem); | 259 | down_write(&bmap->b_sem); |
260 | ret = nilfs_bmap_do_truncate(bmap, key); | 260 | ret = nilfs_bmap_do_truncate(bmap, key); |
261 | up_write(&bmap->b_sem); | 261 | up_write(&bmap->b_sem); |
262 | return ret; | 262 | return ret; |
263 | } | 263 | } |
264 | 264 | ||
265 | /** | 265 | /** |
266 | * nilfs_bmap_clear - free resources a bmap holds | 266 | * nilfs_bmap_clear - free resources a bmap holds |
267 | * @bmap: bmap | 267 | * @bmap: bmap |
268 | * | 268 | * |
269 | * Description: nilfs_bmap_clear() frees resources associated with @bmap. | 269 | * Description: nilfs_bmap_clear() frees resources associated with @bmap. |
270 | */ | 270 | */ |
271 | void nilfs_bmap_clear(struct nilfs_bmap *bmap) | 271 | void nilfs_bmap_clear(struct nilfs_bmap *bmap) |
272 | { | 272 | { |
273 | down_write(&bmap->b_sem); | 273 | down_write(&bmap->b_sem); |
274 | if (bmap->b_ops->bop_clear != NULL) | 274 | if (bmap->b_ops->bop_clear != NULL) |
275 | bmap->b_ops->bop_clear(bmap); | 275 | bmap->b_ops->bop_clear(bmap); |
276 | up_write(&bmap->b_sem); | 276 | up_write(&bmap->b_sem); |
277 | } | 277 | } |
278 | 278 | ||
279 | /** | 279 | /** |
280 | * nilfs_bmap_propagate - propagate dirty state | 280 | * nilfs_bmap_propagate - propagate dirty state |
281 | * @bmap: bmap | 281 | * @bmap: bmap |
282 | * @bh: buffer head | 282 | * @bh: buffer head |
283 | * | 283 | * |
284 | * Description: nilfs_bmap_propagate() marks the buffers that directly or | 284 | * Description: nilfs_bmap_propagate() marks the buffers that directly or |
285 | * indirectly refer to the block specified by @bh dirty. | 285 | * indirectly refer to the block specified by @bh dirty. |
286 | * | 286 | * |
287 | * Return Value: On success, 0 is returned. On error, one of the following | 287 | * Return Value: On success, 0 is returned. On error, one of the following |
288 | * negative error codes is returned. | 288 | * negative error codes is returned. |
289 | * | 289 | * |
290 | * %-EIO - I/O error. | 290 | * %-EIO - I/O error. |
291 | * | 291 | * |
292 | * %-ENOMEM - Insufficient amount of memory available. | 292 | * %-ENOMEM - Insufficient amount of memory available. |
293 | */ | 293 | */ |
294 | int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) | 294 | int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) |
295 | { | 295 | { |
296 | int ret; | 296 | int ret; |
297 | 297 | ||
298 | down_write(&bmap->b_sem); | 298 | down_write(&bmap->b_sem); |
299 | ret = bmap->b_ops->bop_propagate(bmap, bh); | 299 | ret = bmap->b_ops->bop_propagate(bmap, bh); |
300 | up_write(&bmap->b_sem); | 300 | up_write(&bmap->b_sem); |
301 | return ret; | 301 | return ret; |
302 | } | 302 | } |
303 | 303 | ||
304 | /** | 304 | /** |
305 | * nilfs_bmap_lookup_dirty_buffers - | 305 | * nilfs_bmap_lookup_dirty_buffers - |
306 | * @bmap: bmap | 306 | * @bmap: bmap |
307 | * @listp: pointer to buffer head list | 307 | * @listp: pointer to buffer head list |
308 | */ | 308 | */ |
309 | void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap, | 309 | void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap, |
310 | struct list_head *listp) | 310 | struct list_head *listp) |
311 | { | 311 | { |
312 | if (bmap->b_ops->bop_lookup_dirty_buffers != NULL) | 312 | if (bmap->b_ops->bop_lookup_dirty_buffers != NULL) |
313 | bmap->b_ops->bop_lookup_dirty_buffers(bmap, listp); | 313 | bmap->b_ops->bop_lookup_dirty_buffers(bmap, listp); |
314 | } | 314 | } |
315 | 315 | ||
316 | /** | 316 | /** |
317 | * nilfs_bmap_assign - assign a new block number to a block | 317 | * nilfs_bmap_assign - assign a new block number to a block |
318 | * @bmap: bmap | 318 | * @bmap: bmap |
319 | * @bhp: pointer to buffer head | 319 | * @bhp: pointer to buffer head |
320 | * @blocknr: block number | 320 | * @blocknr: block number |
321 | * @binfo: block information | 321 | * @binfo: block information |
322 | * | 322 | * |
323 | * Description: nilfs_bmap_assign() assigns the block number @blocknr to the | 323 | * Description: nilfs_bmap_assign() assigns the block number @blocknr to the |
324 | * buffer specified by @bh. | 324 | * buffer specified by @bh. |
325 | * | 325 | * |
326 | * Return Value: On success, 0 is returned and the buffer head of a newly | 326 | * Return Value: On success, 0 is returned and the buffer head of a newly |
327 | * create buffer and the block information associated with the buffer are | 327 | * create buffer and the block information associated with the buffer are |
328 | * stored in the place pointed by @bh and @binfo, respectively. On error, one | 328 | * stored in the place pointed by @bh and @binfo, respectively. On error, one |
329 | * of the following negative error codes is returned. | 329 | * of the following negative error codes is returned. |
330 | * | 330 | * |
331 | * %-EIO - I/O error. | 331 | * %-EIO - I/O error. |
332 | * | 332 | * |
333 | * %-ENOMEM - Insufficient amount of memory available. | 333 | * %-ENOMEM - Insufficient amount of memory available. |
334 | */ | 334 | */ |
335 | int nilfs_bmap_assign(struct nilfs_bmap *bmap, | 335 | int nilfs_bmap_assign(struct nilfs_bmap *bmap, |
336 | struct buffer_head **bh, | 336 | struct buffer_head **bh, |
337 | unsigned long blocknr, | 337 | unsigned long blocknr, |
338 | union nilfs_binfo *binfo) | 338 | union nilfs_binfo *binfo) |
339 | { | 339 | { |
340 | int ret; | 340 | int ret; |
341 | 341 | ||
342 | down_write(&bmap->b_sem); | 342 | down_write(&bmap->b_sem); |
343 | ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo); | 343 | ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo); |
344 | up_write(&bmap->b_sem); | 344 | up_write(&bmap->b_sem); |
345 | return ret; | 345 | return ret; |
346 | } | 346 | } |
347 | 347 | ||
348 | /** | 348 | /** |
349 | * nilfs_bmap_mark - mark block dirty | 349 | * nilfs_bmap_mark - mark block dirty |
350 | * @bmap: bmap | 350 | * @bmap: bmap |
351 | * @key: key | 351 | * @key: key |
352 | * @level: level | 352 | * @level: level |
353 | * | 353 | * |
354 | * Description: nilfs_bmap_mark() marks the block specified by @key and @level | 354 | * Description: nilfs_bmap_mark() marks the block specified by @key and @level |
355 | * as dirty. | 355 | * as dirty. |
356 | * | 356 | * |
357 | * Return Value: On success, 0 is returned. On error, one of the following | 357 | * Return Value: On success, 0 is returned. On error, one of the following |
358 | * negative error codes is returned. | 358 | * negative error codes is returned. |
359 | * | 359 | * |
360 | * %-EIO - I/O error. | 360 | * %-EIO - I/O error. |
361 | * | 361 | * |
362 | * %-ENOMEM - Insufficient amount of memory available. | 362 | * %-ENOMEM - Insufficient amount of memory available. |
363 | */ | 363 | */ |
364 | int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) | 364 | int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) |
365 | { | 365 | { |
366 | int ret; | 366 | int ret; |
367 | 367 | ||
368 | if (bmap->b_ops->bop_mark == NULL) | 368 | if (bmap->b_ops->bop_mark == NULL) |
369 | return 0; | 369 | return 0; |
370 | 370 | ||
371 | down_write(&bmap->b_sem); | 371 | down_write(&bmap->b_sem); |
372 | ret = bmap->b_ops->bop_mark(bmap, key, level); | 372 | ret = bmap->b_ops->bop_mark(bmap, key, level); |
373 | up_write(&bmap->b_sem); | 373 | up_write(&bmap->b_sem); |
374 | return ret; | 374 | return ret; |
375 | } | 375 | } |
376 | 376 | ||
377 | /** | 377 | /** |
378 | * nilfs_bmap_test_and_clear_dirty - test and clear a bmap dirty state | 378 | * nilfs_bmap_test_and_clear_dirty - test and clear a bmap dirty state |
379 | * @bmap: bmap | 379 | * @bmap: bmap |
380 | * | 380 | * |
381 | * Description: nilfs_test_and_clear() is the atomic operation to test and | 381 | * Description: nilfs_test_and_clear() is the atomic operation to test and |
382 | * clear the dirty state of @bmap. | 382 | * clear the dirty state of @bmap. |
383 | * | 383 | * |
384 | * Return Value: 1 is returned if @bmap is dirty, or 0 if clear. | 384 | * Return Value: 1 is returned if @bmap is dirty, or 0 if clear. |
385 | */ | 385 | */ |
386 | int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap) | 386 | int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap) |
387 | { | 387 | { |
388 | int ret; | 388 | int ret; |
389 | 389 | ||
390 | down_write(&bmap->b_sem); | 390 | down_write(&bmap->b_sem); |
391 | ret = nilfs_bmap_dirty(bmap); | 391 | ret = nilfs_bmap_dirty(bmap); |
392 | nilfs_bmap_clear_dirty(bmap); | 392 | nilfs_bmap_clear_dirty(bmap); |
393 | up_write(&bmap->b_sem); | 393 | up_write(&bmap->b_sem); |
394 | return ret; | 394 | return ret; |
395 | } | 395 | } |
396 | 396 | ||
397 | 397 | ||
398 | /* | 398 | /* |
399 | * Internal use only | 399 | * Internal use only |
400 | */ | 400 | */ |
401 | 401 | ||
402 | void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n) | 402 | void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n) |
403 | { | 403 | { |
404 | inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); | 404 | inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); |
405 | } | 405 | } |
406 | 406 | ||
407 | void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n) | 407 | void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n) |
408 | { | 408 | { |
409 | inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); | 409 | inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); |
410 | } | 410 | } |
411 | 411 | ||
412 | __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap, | 412 | __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap, |
413 | const struct buffer_head *bh) | 413 | const struct buffer_head *bh) |
414 | { | 414 | { |
415 | struct buffer_head *pbh; | 415 | struct buffer_head *pbh; |
416 | __u64 key; | 416 | __u64 key; |
417 | 417 | ||
418 | key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT - | 418 | key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT - |
419 | bmap->b_inode->i_blkbits); | 419 | bmap->b_inode->i_blkbits); |
420 | for (pbh = page_buffers(bh->b_page); pbh != bh; | 420 | for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page) |
421 | pbh = pbh->b_this_page, key++); | 421 | key++; |
422 | 422 | ||
423 | return key; | 423 | return key; |
424 | } | 424 | } |
425 | 425 | ||
426 | __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key) | 426 | __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key) |
427 | { | 427 | { |
428 | __s64 diff; | 428 | __s64 diff; |
429 | 429 | ||
430 | diff = key - bmap->b_last_allocated_key; | 430 | diff = key - bmap->b_last_allocated_key; |
431 | if ((nilfs_bmap_keydiff_abs(diff) < NILFS_INODE_BMAP_SIZE) && | 431 | if ((nilfs_bmap_keydiff_abs(diff) < NILFS_INODE_BMAP_SIZE) && |
432 | (bmap->b_last_allocated_ptr != NILFS_BMAP_INVALID_PTR) && | 432 | (bmap->b_last_allocated_ptr != NILFS_BMAP_INVALID_PTR) && |
433 | (bmap->b_last_allocated_ptr + diff > 0)) | 433 | (bmap->b_last_allocated_ptr + diff > 0)) |
434 | return bmap->b_last_allocated_ptr + diff; | 434 | return bmap->b_last_allocated_ptr + diff; |
435 | else | 435 | else |
436 | return NILFS_BMAP_INVALID_PTR; | 436 | return NILFS_BMAP_INVALID_PTR; |
437 | } | 437 | } |
438 | 438 | ||
439 | #define NILFS_BMAP_GROUP_DIV 8 | 439 | #define NILFS_BMAP_GROUP_DIV 8 |
440 | __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap) | 440 | __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap) |
441 | { | 441 | { |
442 | struct inode *dat = nilfs_bmap_get_dat(bmap); | 442 | struct inode *dat = nilfs_bmap_get_dat(bmap); |
443 | unsigned long entries_per_group = nilfs_palloc_entries_per_group(dat); | 443 | unsigned long entries_per_group = nilfs_palloc_entries_per_group(dat); |
444 | unsigned long group = bmap->b_inode->i_ino / entries_per_group; | 444 | unsigned long group = bmap->b_inode->i_ino / entries_per_group; |
445 | 445 | ||
446 | return group * entries_per_group + | 446 | return group * entries_per_group + |
447 | (bmap->b_inode->i_ino % NILFS_BMAP_GROUP_DIV) * | 447 | (bmap->b_inode->i_ino % NILFS_BMAP_GROUP_DIV) * |
448 | (entries_per_group / NILFS_BMAP_GROUP_DIV); | 448 | (entries_per_group / NILFS_BMAP_GROUP_DIV); |
449 | } | 449 | } |
450 | 450 | ||
451 | static struct lock_class_key nilfs_bmap_dat_lock_key; | 451 | static struct lock_class_key nilfs_bmap_dat_lock_key; |
452 | static struct lock_class_key nilfs_bmap_mdt_lock_key; | 452 | static struct lock_class_key nilfs_bmap_mdt_lock_key; |
453 | 453 | ||
454 | /** | 454 | /** |
455 | * nilfs_bmap_read - read a bmap from an inode | 455 | * nilfs_bmap_read - read a bmap from an inode |
456 | * @bmap: bmap | 456 | * @bmap: bmap |
457 | * @raw_inode: on-disk inode | 457 | * @raw_inode: on-disk inode |
458 | * | 458 | * |
459 | * Description: nilfs_bmap_read() initializes the bmap @bmap. | 459 | * Description: nilfs_bmap_read() initializes the bmap @bmap. |
460 | * | 460 | * |
461 | * Return Value: On success, 0 is returned. On error, the following negative | 461 | * Return Value: On success, 0 is returned. On error, the following negative |
462 | * error code is returned. | 462 | * error code is returned. |
463 | * | 463 | * |
464 | * %-ENOMEM - Insufficient amount of memory available. | 464 | * %-ENOMEM - Insufficient amount of memory available. |
465 | */ | 465 | */ |
466 | int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) | 466 | int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) |
467 | { | 467 | { |
468 | if (raw_inode == NULL) | 468 | if (raw_inode == NULL) |
469 | memset(bmap->b_u.u_data, 0, NILFS_BMAP_SIZE); | 469 | memset(bmap->b_u.u_data, 0, NILFS_BMAP_SIZE); |
470 | else | 470 | else |
471 | memcpy(bmap->b_u.u_data, raw_inode->i_bmap, NILFS_BMAP_SIZE); | 471 | memcpy(bmap->b_u.u_data, raw_inode->i_bmap, NILFS_BMAP_SIZE); |
472 | 472 | ||
473 | init_rwsem(&bmap->b_sem); | 473 | init_rwsem(&bmap->b_sem); |
474 | bmap->b_state = 0; | 474 | bmap->b_state = 0; |
475 | bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; | 475 | bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; |
476 | switch (bmap->b_inode->i_ino) { | 476 | switch (bmap->b_inode->i_ino) { |
477 | case NILFS_DAT_INO: | 477 | case NILFS_DAT_INO: |
478 | bmap->b_ptr_type = NILFS_BMAP_PTR_P; | 478 | bmap->b_ptr_type = NILFS_BMAP_PTR_P; |
479 | bmap->b_last_allocated_key = 0; | 479 | bmap->b_last_allocated_key = 0; |
480 | bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; | 480 | bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; |
481 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); | 481 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); |
482 | break; | 482 | break; |
483 | case NILFS_CPFILE_INO: | 483 | case NILFS_CPFILE_INO: |
484 | case NILFS_SUFILE_INO: | 484 | case NILFS_SUFILE_INO: |
485 | bmap->b_ptr_type = NILFS_BMAP_PTR_VS; | 485 | bmap->b_ptr_type = NILFS_BMAP_PTR_VS; |
486 | bmap->b_last_allocated_key = 0; | 486 | bmap->b_last_allocated_key = 0; |
487 | bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; | 487 | bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; |
488 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); | 488 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); |
489 | break; | 489 | break; |
490 | case NILFS_IFILE_INO: | 490 | case NILFS_IFILE_INO: |
491 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); | 491 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); |
492 | /* Fall through */ | 492 | /* Fall through */ |
493 | default: | 493 | default: |
494 | bmap->b_ptr_type = NILFS_BMAP_PTR_VM; | 494 | bmap->b_ptr_type = NILFS_BMAP_PTR_VM; |
495 | bmap->b_last_allocated_key = 0; | 495 | bmap->b_last_allocated_key = 0; |
496 | bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; | 496 | bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; |
497 | break; | 497 | break; |
498 | } | 498 | } |
499 | 499 | ||
500 | return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ? | 500 | return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ? |
501 | nilfs_btree_init(bmap) : nilfs_direct_init(bmap); | 501 | nilfs_btree_init(bmap) : nilfs_direct_init(bmap); |
502 | } | 502 | } |
503 | 503 | ||
504 | /** | 504 | /** |
505 | * nilfs_bmap_write - write back a bmap to an inode | 505 | * nilfs_bmap_write - write back a bmap to an inode |
506 | * @bmap: bmap | 506 | * @bmap: bmap |
507 | * @raw_inode: on-disk inode | 507 | * @raw_inode: on-disk inode |
508 | * | 508 | * |
509 | * Description: nilfs_bmap_write() stores @bmap in @raw_inode. | 509 | * Description: nilfs_bmap_write() stores @bmap in @raw_inode. |
510 | */ | 510 | */ |
511 | void nilfs_bmap_write(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) | 511 | void nilfs_bmap_write(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) |
512 | { | 512 | { |
513 | down_write(&bmap->b_sem); | 513 | down_write(&bmap->b_sem); |
514 | memcpy(raw_inode->i_bmap, bmap->b_u.u_data, | 514 | memcpy(raw_inode->i_bmap, bmap->b_u.u_data, |
515 | NILFS_INODE_BMAP_SIZE * sizeof(__le64)); | 515 | NILFS_INODE_BMAP_SIZE * sizeof(__le64)); |
516 | if (bmap->b_inode->i_ino == NILFS_DAT_INO) | 516 | if (bmap->b_inode->i_ino == NILFS_DAT_INO) |
517 | bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; | 517 | bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; |
518 | 518 | ||
519 | up_write(&bmap->b_sem); | 519 | up_write(&bmap->b_sem); |
520 | } | 520 | } |
521 | 521 | ||
522 | void nilfs_bmap_init_gc(struct nilfs_bmap *bmap) | 522 | void nilfs_bmap_init_gc(struct nilfs_bmap *bmap) |
523 | { | 523 | { |
524 | memset(&bmap->b_u, 0, NILFS_BMAP_SIZE); | 524 | memset(&bmap->b_u, 0, NILFS_BMAP_SIZE); |
525 | init_rwsem(&bmap->b_sem); | 525 | init_rwsem(&bmap->b_sem); |
526 | bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; | 526 | bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; |
527 | bmap->b_ptr_type = NILFS_BMAP_PTR_U; | 527 | bmap->b_ptr_type = NILFS_BMAP_PTR_U; |
528 | bmap->b_last_allocated_key = 0; | 528 | bmap->b_last_allocated_key = 0; |
529 | bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; | 529 | bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; |
530 | bmap->b_state = 0; | 530 | bmap->b_state = 0; |
531 | nilfs_btree_init_gc(bmap); | 531 | nilfs_btree_init_gc(bmap); |
532 | } | 532 | } |
533 | 533 | ||
534 | void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap) | 534 | void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap) |
535 | { | 535 | { |
536 | memcpy(gcbmap, bmap, sizeof(union nilfs_bmap_union)); | 536 | memcpy(gcbmap, bmap, sizeof(union nilfs_bmap_union)); |
537 | init_rwsem(&gcbmap->b_sem); | 537 | init_rwsem(&gcbmap->b_sem); |
538 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); | 538 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); |
539 | gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode; | 539 | gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode; |
540 | } | 540 | } |
541 | 541 | ||
542 | void nilfs_bmap_commit_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap) | 542 | void nilfs_bmap_commit_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap) |
543 | { | 543 | { |
544 | memcpy(bmap, gcbmap, sizeof(union nilfs_bmap_union)); | 544 | memcpy(bmap, gcbmap, sizeof(union nilfs_bmap_union)); |
545 | init_rwsem(&bmap->b_sem); | 545 | init_rwsem(&bmap->b_sem); |
546 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); | 546 | lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); |
547 | bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; | 547 | bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; |
548 | } | 548 | } |
549 | 549 |
fs/nilfs2/cpfile.c
1 | /* | 1 | /* |
2 | * cpfile.c - NILFS checkpoint file. | 2 | * cpfile.c - NILFS checkpoint file. |
3 | * | 3 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
19 | * | 19 | * |
20 | * Written by Koji Sato <koji@osrg.net>. | 20 | * Written by Koji Sato <koji@osrg.net>. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <linux/string.h> | 25 | #include <linux/string.h> |
26 | #include <linux/buffer_head.h> | 26 | #include <linux/buffer_head.h> |
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
28 | #include <linux/nilfs2_fs.h> | 28 | #include <linux/nilfs2_fs.h> |
29 | #include "mdt.h" | 29 | #include "mdt.h" |
30 | #include "cpfile.h" | 30 | #include "cpfile.h" |
31 | 31 | ||
32 | 32 | ||
33 | static inline unsigned long | 33 | static inline unsigned long |
34 | nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile) | 34 | nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile) |
35 | { | 35 | { |
36 | return NILFS_MDT(cpfile)->mi_entries_per_block; | 36 | return NILFS_MDT(cpfile)->mi_entries_per_block; |
37 | } | 37 | } |
38 | 38 | ||
39 | /* block number from the beginning of the file */ | 39 | /* block number from the beginning of the file */ |
40 | static unsigned long | 40 | static unsigned long |
41 | nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno) | 41 | nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno) |
42 | { | 42 | { |
43 | __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; | 43 | __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; |
44 | do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); | 44 | do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); |
45 | return (unsigned long)tcno; | 45 | return (unsigned long)tcno; |
46 | } | 46 | } |
47 | 47 | ||
48 | /* offset in block */ | 48 | /* offset in block */ |
49 | static unsigned long | 49 | static unsigned long |
50 | nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno) | 50 | nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno) |
51 | { | 51 | { |
52 | __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; | 52 | __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; |
53 | return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); | 53 | return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); |
54 | } | 54 | } |
55 | 55 | ||
56 | static unsigned long | 56 | static unsigned long |
57 | nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile, | 57 | nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile, |
58 | __u64 curr, | 58 | __u64 curr, |
59 | __u64 max) | 59 | __u64 max) |
60 | { | 60 | { |
61 | return min_t(__u64, | 61 | return min_t(__u64, |
62 | nilfs_cpfile_checkpoints_per_block(cpfile) - | 62 | nilfs_cpfile_checkpoints_per_block(cpfile) - |
63 | nilfs_cpfile_get_offset(cpfile, curr), | 63 | nilfs_cpfile_get_offset(cpfile, curr), |
64 | max - curr); | 64 | max - curr); |
65 | } | 65 | } |
66 | 66 | ||
67 | static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile, | 67 | static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile, |
68 | __u64 cno) | 68 | __u64 cno) |
69 | { | 69 | { |
70 | return nilfs_cpfile_get_blkoff(cpfile, cno) == 0; | 70 | return nilfs_cpfile_get_blkoff(cpfile, cno) == 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | static unsigned int | 73 | static unsigned int |
74 | nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile, | 74 | nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile, |
75 | struct buffer_head *bh, | 75 | struct buffer_head *bh, |
76 | void *kaddr, | 76 | void *kaddr, |
77 | unsigned int n) | 77 | unsigned int n) |
78 | { | 78 | { |
79 | struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); | 79 | struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); |
80 | unsigned int count; | 80 | unsigned int count; |
81 | 81 | ||
82 | count = le32_to_cpu(cp->cp_checkpoints_count) + n; | 82 | count = le32_to_cpu(cp->cp_checkpoints_count) + n; |
83 | cp->cp_checkpoints_count = cpu_to_le32(count); | 83 | cp->cp_checkpoints_count = cpu_to_le32(count); |
84 | return count; | 84 | return count; |
85 | } | 85 | } |
86 | 86 | ||
87 | static unsigned int | 87 | static unsigned int |
88 | nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile, | 88 | nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile, |
89 | struct buffer_head *bh, | 89 | struct buffer_head *bh, |
90 | void *kaddr, | 90 | void *kaddr, |
91 | unsigned int n) | 91 | unsigned int n) |
92 | { | 92 | { |
93 | struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); | 93 | struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); |
94 | unsigned int count; | 94 | unsigned int count; |
95 | 95 | ||
96 | WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n); | 96 | WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n); |
97 | count = le32_to_cpu(cp->cp_checkpoints_count) - n; | 97 | count = le32_to_cpu(cp->cp_checkpoints_count) - n; |
98 | cp->cp_checkpoints_count = cpu_to_le32(count); | 98 | cp->cp_checkpoints_count = cpu_to_le32(count); |
99 | return count; | 99 | return count; |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline struct nilfs_cpfile_header * | 102 | static inline struct nilfs_cpfile_header * |
103 | nilfs_cpfile_block_get_header(const struct inode *cpfile, | 103 | nilfs_cpfile_block_get_header(const struct inode *cpfile, |
104 | struct buffer_head *bh, | 104 | struct buffer_head *bh, |
105 | void *kaddr) | 105 | void *kaddr) |
106 | { | 106 | { |
107 | return kaddr + bh_offset(bh); | 107 | return kaddr + bh_offset(bh); |
108 | } | 108 | } |
109 | 109 | ||
110 | static struct nilfs_checkpoint * | 110 | static struct nilfs_checkpoint * |
111 | nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno, | 111 | nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno, |
112 | struct buffer_head *bh, | 112 | struct buffer_head *bh, |
113 | void *kaddr) | 113 | void *kaddr) |
114 | { | 114 | { |
115 | return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) * | 115 | return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) * |
116 | NILFS_MDT(cpfile)->mi_entry_size; | 116 | NILFS_MDT(cpfile)->mi_entry_size; |
117 | } | 117 | } |
118 | 118 | ||
119 | static void nilfs_cpfile_block_init(struct inode *cpfile, | 119 | static void nilfs_cpfile_block_init(struct inode *cpfile, |
120 | struct buffer_head *bh, | 120 | struct buffer_head *bh, |
121 | void *kaddr) | 121 | void *kaddr) |
122 | { | 122 | { |
123 | struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); | 123 | struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); |
124 | size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; | 124 | size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; |
125 | int n = nilfs_cpfile_checkpoints_per_block(cpfile); | 125 | int n = nilfs_cpfile_checkpoints_per_block(cpfile); |
126 | 126 | ||
127 | while (n-- > 0) { | 127 | while (n-- > 0) { |
128 | nilfs_checkpoint_set_invalid(cp); | 128 | nilfs_checkpoint_set_invalid(cp); |
129 | cp = (void *)cp + cpsz; | 129 | cp = (void *)cp + cpsz; |
130 | } | 130 | } |
131 | } | 131 | } |
132 | 132 | ||
133 | static inline int nilfs_cpfile_get_header_block(struct inode *cpfile, | 133 | static inline int nilfs_cpfile_get_header_block(struct inode *cpfile, |
134 | struct buffer_head **bhp) | 134 | struct buffer_head **bhp) |
135 | { | 135 | { |
136 | return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp); | 136 | return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp); |
137 | } | 137 | } |
138 | 138 | ||
139 | static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile, | 139 | static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile, |
140 | __u64 cno, | 140 | __u64 cno, |
141 | int create, | 141 | int create, |
142 | struct buffer_head **bhp) | 142 | struct buffer_head **bhp) |
143 | { | 143 | { |
144 | return nilfs_mdt_get_block(cpfile, | 144 | return nilfs_mdt_get_block(cpfile, |
145 | nilfs_cpfile_get_blkoff(cpfile, cno), | 145 | nilfs_cpfile_get_blkoff(cpfile, cno), |
146 | create, nilfs_cpfile_block_init, bhp); | 146 | create, nilfs_cpfile_block_init, bhp); |
147 | } | 147 | } |
148 | 148 | ||
149 | static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile, | 149 | static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile, |
150 | __u64 cno) | 150 | __u64 cno) |
151 | { | 151 | { |
152 | return nilfs_mdt_delete_block(cpfile, | 152 | return nilfs_mdt_delete_block(cpfile, |
153 | nilfs_cpfile_get_blkoff(cpfile, cno)); | 153 | nilfs_cpfile_get_blkoff(cpfile, cno)); |
154 | } | 154 | } |
155 | 155 | ||
156 | /** | 156 | /** |
157 | * nilfs_cpfile_get_checkpoint - get a checkpoint | 157 | * nilfs_cpfile_get_checkpoint - get a checkpoint |
158 | * @cpfile: inode of checkpoint file | 158 | * @cpfile: inode of checkpoint file |
159 | * @cno: checkpoint number | 159 | * @cno: checkpoint number |
160 | * @create: create flag | 160 | * @create: create flag |
161 | * @cpp: pointer to a checkpoint | 161 | * @cpp: pointer to a checkpoint |
162 | * @bhp: pointer to a buffer head | 162 | * @bhp: pointer to a buffer head |
163 | * | 163 | * |
164 | * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint | 164 | * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint |
165 | * specified by @cno. A new checkpoint will be created if @cno is the current | 165 | * specified by @cno. A new checkpoint will be created if @cno is the current |
166 | * checkpoint number and @create is nonzero. | 166 | * checkpoint number and @create is nonzero. |
167 | * | 167 | * |
168 | * Return Value: On success, 0 is returned, and the checkpoint and the | 168 | * Return Value: On success, 0 is returned, and the checkpoint and the |
169 | * buffer head of the buffer on which the checkpoint is located are stored in | 169 | * buffer head of the buffer on which the checkpoint is located are stored in |
170 | * the place pointed by @cpp and @bhp, respectively. On error, one of the | 170 | * the place pointed by @cpp and @bhp, respectively. On error, one of the |
171 | * following negative error codes is returned. | 171 | * following negative error codes is returned. |
172 | * | 172 | * |
173 | * %-EIO - I/O error. | 173 | * %-EIO - I/O error. |
174 | * | 174 | * |
175 | * %-ENOMEM - Insufficient amount of memory available. | 175 | * %-ENOMEM - Insufficient amount of memory available. |
176 | * | 176 | * |
177 | * %-ENOENT - No such checkpoint. | 177 | * %-ENOENT - No such checkpoint. |
178 | * | 178 | * |
179 | * %-EINVAL - invalid checkpoint. | 179 | * %-EINVAL - invalid checkpoint. |
180 | */ | 180 | */ |
181 | int nilfs_cpfile_get_checkpoint(struct inode *cpfile, | 181 | int nilfs_cpfile_get_checkpoint(struct inode *cpfile, |
182 | __u64 cno, | 182 | __u64 cno, |
183 | int create, | 183 | int create, |
184 | struct nilfs_checkpoint **cpp, | 184 | struct nilfs_checkpoint **cpp, |
185 | struct buffer_head **bhp) | 185 | struct buffer_head **bhp) |
186 | { | 186 | { |
187 | struct buffer_head *header_bh, *cp_bh; | 187 | struct buffer_head *header_bh, *cp_bh; |
188 | struct nilfs_cpfile_header *header; | 188 | struct nilfs_cpfile_header *header; |
189 | struct nilfs_checkpoint *cp; | 189 | struct nilfs_checkpoint *cp; |
190 | void *kaddr; | 190 | void *kaddr; |
191 | int ret; | 191 | int ret; |
192 | 192 | ||
193 | if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) || | 193 | if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) || |
194 | (cno < nilfs_mdt_cno(cpfile) && create))) | 194 | (cno < nilfs_mdt_cno(cpfile) && create))) |
195 | return -EINVAL; | 195 | return -EINVAL; |
196 | 196 | ||
197 | down_write(&NILFS_MDT(cpfile)->mi_sem); | 197 | down_write(&NILFS_MDT(cpfile)->mi_sem); |
198 | 198 | ||
199 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); | 199 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); |
200 | if (ret < 0) | 200 | if (ret < 0) |
201 | goto out_sem; | 201 | goto out_sem; |
202 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh); | 202 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh); |
203 | if (ret < 0) | 203 | if (ret < 0) |
204 | goto out_header; | 204 | goto out_header; |
205 | kaddr = kmap(cp_bh->b_page); | 205 | kaddr = kmap(cp_bh->b_page); |
206 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); | 206 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); |
207 | if (nilfs_checkpoint_invalid(cp)) { | 207 | if (nilfs_checkpoint_invalid(cp)) { |
208 | if (!create) { | 208 | if (!create) { |
209 | kunmap(cp_bh->b_page); | 209 | kunmap(cp_bh->b_page); |
210 | brelse(cp_bh); | 210 | brelse(cp_bh); |
211 | ret = -ENOENT; | 211 | ret = -ENOENT; |
212 | goto out_header; | 212 | goto out_header; |
213 | } | 213 | } |
214 | /* a newly-created checkpoint */ | 214 | /* a newly-created checkpoint */ |
215 | nilfs_checkpoint_clear_invalid(cp); | 215 | nilfs_checkpoint_clear_invalid(cp); |
216 | if (!nilfs_cpfile_is_in_first(cpfile, cno)) | 216 | if (!nilfs_cpfile_is_in_first(cpfile, cno)) |
217 | nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh, | 217 | nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh, |
218 | kaddr, 1); | 218 | kaddr, 1); |
219 | nilfs_mdt_mark_buffer_dirty(cp_bh); | 219 | nilfs_mdt_mark_buffer_dirty(cp_bh); |
220 | 220 | ||
221 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); | 221 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); |
222 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, | 222 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, |
223 | kaddr); | 223 | kaddr); |
224 | le64_add_cpu(&header->ch_ncheckpoints, 1); | 224 | le64_add_cpu(&header->ch_ncheckpoints, 1); |
225 | kunmap_atomic(kaddr, KM_USER0); | 225 | kunmap_atomic(kaddr, KM_USER0); |
226 | nilfs_mdt_mark_buffer_dirty(header_bh); | 226 | nilfs_mdt_mark_buffer_dirty(header_bh); |
227 | nilfs_mdt_mark_dirty(cpfile); | 227 | nilfs_mdt_mark_dirty(cpfile); |
228 | } | 228 | } |
229 | 229 | ||
230 | if (cpp != NULL) | 230 | if (cpp != NULL) |
231 | *cpp = cp; | 231 | *cpp = cp; |
232 | *bhp = cp_bh; | 232 | *bhp = cp_bh; |
233 | 233 | ||
234 | out_header: | 234 | out_header: |
235 | brelse(header_bh); | 235 | brelse(header_bh); |
236 | 236 | ||
237 | out_sem: | 237 | out_sem: |
238 | up_write(&NILFS_MDT(cpfile)->mi_sem); | 238 | up_write(&NILFS_MDT(cpfile)->mi_sem); |
239 | return ret; | 239 | return ret; |
240 | } | 240 | } |
241 | 241 | ||
242 | /** | 242 | /** |
243 | * nilfs_cpfile_put_checkpoint - put a checkpoint | 243 | * nilfs_cpfile_put_checkpoint - put a checkpoint |
244 | * @cpfile: inode of checkpoint file | 244 | * @cpfile: inode of checkpoint file |
245 | * @cno: checkpoint number | 245 | * @cno: checkpoint number |
246 | * @bh: buffer head | 246 | * @bh: buffer head |
247 | * | 247 | * |
248 | * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint | 248 | * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint |
249 | * specified by @cno. @bh must be the buffer head which has been returned by | 249 | * specified by @cno. @bh must be the buffer head which has been returned by |
250 | * a previous call to nilfs_cpfile_get_checkpoint() with @cno. | 250 | * a previous call to nilfs_cpfile_get_checkpoint() with @cno. |
251 | */ | 251 | */ |
252 | void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno, | 252 | void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno, |
253 | struct buffer_head *bh) | 253 | struct buffer_head *bh) |
254 | { | 254 | { |
255 | kunmap(bh->b_page); | 255 | kunmap(bh->b_page); |
256 | brelse(bh); | 256 | brelse(bh); |
257 | } | 257 | } |
258 | 258 | ||
259 | /** | 259 | /** |
260 | * nilfs_cpfile_delete_checkpoints - delete checkpoints | 260 | * nilfs_cpfile_delete_checkpoints - delete checkpoints |
261 | * @cpfile: inode of checkpoint file | 261 | * @cpfile: inode of checkpoint file |
262 | * @start: start checkpoint number | 262 | * @start: start checkpoint number |
263 | * @end: end checkpoint numer | 263 | * @end: end checkpoint numer |
264 | * | 264 | * |
265 | * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in | 265 | * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in |
266 | * the period from @start to @end, excluding @end itself. The checkpoints | 266 | * the period from @start to @end, excluding @end itself. The checkpoints |
267 | * which have been already deleted are ignored. | 267 | * which have been already deleted are ignored. |
268 | * | 268 | * |
269 | * Return Value: On success, 0 is returned. On error, one of the following | 269 | * Return Value: On success, 0 is returned. On error, one of the following |
270 | * negative error codes is returned. | 270 | * negative error codes is returned. |
271 | * | 271 | * |
272 | * %-EIO - I/O error. | 272 | * %-EIO - I/O error. |
273 | * | 273 | * |
274 | * %-ENOMEM - Insufficient amount of memory available. | 274 | * %-ENOMEM - Insufficient amount of memory available. |
275 | * | 275 | * |
276 | * %-EINVAL - invalid checkpoints. | 276 | * %-EINVAL - invalid checkpoints. |
277 | */ | 277 | */ |
278 | int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, | 278 | int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, |
279 | __u64 start, | 279 | __u64 start, |
280 | __u64 end) | 280 | __u64 end) |
281 | { | 281 | { |
282 | struct buffer_head *header_bh, *cp_bh; | 282 | struct buffer_head *header_bh, *cp_bh; |
283 | struct nilfs_cpfile_header *header; | 283 | struct nilfs_cpfile_header *header; |
284 | struct nilfs_checkpoint *cp; | 284 | struct nilfs_checkpoint *cp; |
285 | size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; | 285 | size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; |
286 | __u64 cno; | 286 | __u64 cno; |
287 | void *kaddr; | 287 | void *kaddr; |
288 | unsigned long tnicps; | 288 | unsigned long tnicps; |
289 | int ret, ncps, nicps, count, i; | 289 | int ret, ncps, nicps, count, i; |
290 | 290 | ||
291 | if (unlikely(start == 0 || start > end)) { | 291 | if (unlikely(start == 0 || start > end)) { |
292 | printk(KERN_ERR "%s: invalid range of checkpoint numbers: " | 292 | printk(KERN_ERR "%s: invalid range of checkpoint numbers: " |
293 | "[%llu, %llu)\n", __func__, | 293 | "[%llu, %llu)\n", __func__, |
294 | (unsigned long long)start, (unsigned long long)end); | 294 | (unsigned long long)start, (unsigned long long)end); |
295 | return -EINVAL; | 295 | return -EINVAL; |
296 | } | 296 | } |
297 | 297 | ||
298 | down_write(&NILFS_MDT(cpfile)->mi_sem); | 298 | down_write(&NILFS_MDT(cpfile)->mi_sem); |
299 | 299 | ||
300 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); | 300 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); |
301 | if (ret < 0) | 301 | if (ret < 0) |
302 | goto out_sem; | 302 | goto out_sem; |
303 | tnicps = 0; | 303 | tnicps = 0; |
304 | 304 | ||
305 | for (cno = start; cno < end; cno += ncps) { | 305 | for (cno = start; cno < end; cno += ncps) { |
306 | ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end); | 306 | ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end); |
307 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); | 307 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); |
308 | if (ret < 0) { | 308 | if (ret < 0) { |
309 | if (ret != -ENOENT) | 309 | if (ret != -ENOENT) |
310 | break; | 310 | break; |
311 | /* skip hole */ | 311 | /* skip hole */ |
312 | ret = 0; | 312 | ret = 0; |
313 | continue; | 313 | continue; |
314 | } | 314 | } |
315 | 315 | ||
316 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); | 316 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); |
317 | cp = nilfs_cpfile_block_get_checkpoint( | 317 | cp = nilfs_cpfile_block_get_checkpoint( |
318 | cpfile, cno, cp_bh, kaddr); | 318 | cpfile, cno, cp_bh, kaddr); |
319 | nicps = 0; | 319 | nicps = 0; |
320 | for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) { | 320 | for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) { |
321 | WARN_ON(nilfs_checkpoint_snapshot(cp)); | 321 | WARN_ON(nilfs_checkpoint_snapshot(cp)); |
322 | if (!nilfs_checkpoint_invalid(cp)) { | 322 | if (!nilfs_checkpoint_invalid(cp)) { |
323 | nilfs_checkpoint_set_invalid(cp); | 323 | nilfs_checkpoint_set_invalid(cp); |
324 | nicps++; | 324 | nicps++; |
325 | } | 325 | } |
326 | } | 326 | } |
327 | if (nicps > 0) { | 327 | if (nicps > 0) { |
328 | tnicps += nicps; | 328 | tnicps += nicps; |
329 | nilfs_mdt_mark_buffer_dirty(cp_bh); | 329 | nilfs_mdt_mark_buffer_dirty(cp_bh); |
330 | nilfs_mdt_mark_dirty(cpfile); | 330 | nilfs_mdt_mark_dirty(cpfile); |
331 | if (!nilfs_cpfile_is_in_first(cpfile, cno) && | 331 | if (!nilfs_cpfile_is_in_first(cpfile, cno)) { |
332 | (count = nilfs_cpfile_block_sub_valid_checkpoints( | 332 | count = |
333 | cpfile, cp_bh, kaddr, nicps)) == 0) { | 333 | nilfs_cpfile_block_sub_valid_checkpoints( |
334 | /* make hole */ | 334 | cpfile, cp_bh, kaddr, nicps); |
335 | kunmap_atomic(kaddr, KM_USER0); | 335 | if (count == 0) { |
336 | brelse(cp_bh); | 336 | /* make hole */ |
337 | ret = nilfs_cpfile_delete_checkpoint_block( | 337 | kunmap_atomic(kaddr, KM_USER0); |
338 | cpfile, cno); | 338 | brelse(cp_bh); |
339 | if (ret == 0) | 339 | ret = |
340 | continue; | 340 | nilfs_cpfile_delete_checkpoint_block( |
341 | printk(KERN_ERR "%s: cannot delete block\n", | 341 | cpfile, cno); |
342 | __func__); | 342 | if (ret == 0) |
343 | break; | 343 | continue; |
344 | printk(KERN_ERR | ||
345 | "%s: cannot delete block\n", | ||
346 | __func__); | ||
347 | break; | ||
348 | } | ||
344 | } | 349 | } |
345 | } | 350 | } |
346 | 351 | ||
347 | kunmap_atomic(kaddr, KM_USER0); | 352 | kunmap_atomic(kaddr, KM_USER0); |
348 | brelse(cp_bh); | 353 | brelse(cp_bh); |
349 | } | 354 | } |
350 | 355 | ||
351 | if (tnicps > 0) { | 356 | if (tnicps > 0) { |
352 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); | 357 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); |
353 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, | 358 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, |
354 | kaddr); | 359 | kaddr); |
355 | le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps); | 360 | le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps); |
356 | nilfs_mdt_mark_buffer_dirty(header_bh); | 361 | nilfs_mdt_mark_buffer_dirty(header_bh); |
357 | nilfs_mdt_mark_dirty(cpfile); | 362 | nilfs_mdt_mark_dirty(cpfile); |
358 | kunmap_atomic(kaddr, KM_USER0); | 363 | kunmap_atomic(kaddr, KM_USER0); |
359 | } | 364 | } |
360 | 365 | ||
361 | brelse(header_bh); | 366 | brelse(header_bh); |
362 | 367 | ||
363 | out_sem: | 368 | out_sem: |
364 | up_write(&NILFS_MDT(cpfile)->mi_sem); | 369 | up_write(&NILFS_MDT(cpfile)->mi_sem); |
365 | return ret; | 370 | return ret; |
366 | } | 371 | } |
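
Editor's note: as a quick illustration of the half-open range described in the kernel-doc above, here is a stand-alone user-space sketch of the [start, end) semantics, where @end itself is kept and already-invalid entries are skipped. It is not part of the kernel sources; the array, sizes and function names are invented for the example.

#include <stdio.h>

/* Toy model of nilfs_cpfile_delete_checkpoints() range handling:
 * entries in [start, end) are invalidated, @end itself is kept, and
 * entries that are already invalid are simply skipped. */
static unsigned long toy_delete_range(int *valid, unsigned long nmax,
				      unsigned long start, unsigned long end)
{
	unsigned long cno, deleted = 0;

	for (cno = start; cno < end && cno < nmax; cno++) {
		if (valid[cno]) {
			valid[cno] = 0;	/* like nilfs_checkpoint_set_invalid() */
			deleted++;
		}
	}
	return deleted;	/* analogous to the tnicps counter above */
}

int main(void)
{
	int valid[8] = { 0, 1, 1, 0, 1, 1, 1, 1 };	/* cno 0 is never valid */
	unsigned long n = toy_delete_range(valid, 8, 2, 6);

	printf("deleted %lu checkpoints; cno 6 still valid: %d\n", n, valid[6]);
	return 0;
}
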
367 | 372 | ||
368 | static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile, | 373 | static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile, |
369 | struct nilfs_checkpoint *cp, | 374 | struct nilfs_checkpoint *cp, |
370 | struct nilfs_cpinfo *ci) | 375 | struct nilfs_cpinfo *ci) |
371 | { | 376 | { |
372 | ci->ci_flags = le32_to_cpu(cp->cp_flags); | 377 | ci->ci_flags = le32_to_cpu(cp->cp_flags); |
373 | ci->ci_cno = le64_to_cpu(cp->cp_cno); | 378 | ci->ci_cno = le64_to_cpu(cp->cp_cno); |
374 | ci->ci_create = le64_to_cpu(cp->cp_create); | 379 | ci->ci_create = le64_to_cpu(cp->cp_create); |
375 | ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc); | 380 | ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc); |
376 | ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count); | 381 | ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count); |
377 | ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count); | 382 | ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count); |
378 | ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); | 383 | ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); |
379 | } | 384 | } |
380 | 385 | ||
381 | static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, | 386 | static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, |
382 | void *buf, unsigned cisz, size_t nci) | 387 | void *buf, unsigned cisz, size_t nci) |
383 | { | 388 | { |
384 | struct nilfs_checkpoint *cp; | 389 | struct nilfs_checkpoint *cp; |
385 | struct nilfs_cpinfo *ci = buf; | 390 | struct nilfs_cpinfo *ci = buf; |
386 | struct buffer_head *bh; | 391 | struct buffer_head *bh; |
387 | size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; | 392 | size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; |
388 | __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop; | 393 | __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop; |
389 | void *kaddr; | 394 | void *kaddr; |
390 | int n, ret; | 395 | int n, ret; |
391 | int ncps, i; | 396 | int ncps, i; |
392 | 397 | ||
393 | if (cno == 0) | 398 | if (cno == 0) |
394 | return -ENOENT; /* checkpoint number 0 is invalid */ | 399 | return -ENOENT; /* checkpoint number 0 is invalid */ |
395 | down_read(&NILFS_MDT(cpfile)->mi_sem); | 400 | down_read(&NILFS_MDT(cpfile)->mi_sem); |
396 | 401 | ||
397 | for (n = 0; cno < cur_cno && n < nci; cno += ncps) { | 402 | for (n = 0; cno < cur_cno && n < nci; cno += ncps) { |
398 | ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno); | 403 | ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno); |
399 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); | 404 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); |
400 | if (ret < 0) { | 405 | if (ret < 0) { |
401 | if (ret != -ENOENT) | 406 | if (ret != -ENOENT) |
402 | goto out; | 407 | goto out; |
403 | continue; /* skip hole */ | 408 | continue; /* skip hole */ |
404 | } | 409 | } |
405 | 410 | ||
406 | kaddr = kmap_atomic(bh->b_page, KM_USER0); | 411 | kaddr = kmap_atomic(bh->b_page, KM_USER0); |
407 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); | 412 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); |
408 | for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { | 413 | for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { |
409 | if (!nilfs_checkpoint_invalid(cp)) { | 414 | if (!nilfs_checkpoint_invalid(cp)) { |
410 | nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, | 415 | nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, |
411 | ci); | 416 | ci); |
412 | ci = (void *)ci + cisz; | 417 | ci = (void *)ci + cisz; |
413 | n++; | 418 | n++; |
414 | } | 419 | } |
415 | } | 420 | } |
416 | kunmap_atomic(kaddr, KM_USER0); | 421 | kunmap_atomic(kaddr, KM_USER0); |
417 | brelse(bh); | 422 | brelse(bh); |
418 | } | 423 | } |
419 | 424 | ||
420 | ret = n; | 425 | ret = n; |
421 | if (n > 0) { | 426 | if (n > 0) { |
422 | ci = (void *)ci - cisz; | 427 | ci = (void *)ci - cisz; |
423 | *cnop = ci->ci_cno + 1; | 428 | *cnop = ci->ci_cno + 1; |
424 | } | 429 | } |
425 | 430 | ||
426 | out: | 431 | out: |
427 | up_read(&NILFS_MDT(cpfile)->mi_sem); | 432 | up_read(&NILFS_MDT(cpfile)->mi_sem); |
428 | return ret; | 433 | return ret; |
429 | } | 434 | } |
430 | 435 | ||
431 | static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, | 436 | static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, |
432 | void *buf, unsigned cisz, size_t nci) | 437 | void *buf, unsigned cisz, size_t nci) |
433 | { | 438 | { |
434 | struct buffer_head *bh; | 439 | struct buffer_head *bh; |
435 | struct nilfs_cpfile_header *header; | 440 | struct nilfs_cpfile_header *header; |
436 | struct nilfs_checkpoint *cp; | 441 | struct nilfs_checkpoint *cp; |
437 | struct nilfs_cpinfo *ci = buf; | 442 | struct nilfs_cpinfo *ci = buf; |
438 | __u64 curr = *cnop, next; | 443 | __u64 curr = *cnop, next; |
439 | unsigned long curr_blkoff, next_blkoff; | 444 | unsigned long curr_blkoff, next_blkoff; |
440 | void *kaddr; | 445 | void *kaddr; |
441 | int n = 0, ret; | 446 | int n = 0, ret; |
442 | 447 | ||
443 | down_read(&NILFS_MDT(cpfile)->mi_sem); | 448 | down_read(&NILFS_MDT(cpfile)->mi_sem); |
444 | 449 | ||
445 | if (curr == 0) { | 450 | if (curr == 0) { |
446 | ret = nilfs_cpfile_get_header_block(cpfile, &bh); | 451 | ret = nilfs_cpfile_get_header_block(cpfile, &bh); |
447 | if (ret < 0) | 452 | if (ret < 0) |
448 | goto out; | 453 | goto out; |
449 | kaddr = kmap_atomic(bh->b_page, KM_USER0); | 454 | kaddr = kmap_atomic(bh->b_page, KM_USER0); |
450 | header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); | 455 | header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); |
451 | curr = le64_to_cpu(header->ch_snapshot_list.ssl_next); | 456 | curr = le64_to_cpu(header->ch_snapshot_list.ssl_next); |
452 | kunmap_atomic(kaddr, KM_USER0); | 457 | kunmap_atomic(kaddr, KM_USER0); |
453 | brelse(bh); | 458 | brelse(bh); |
454 | if (curr == 0) { | 459 | if (curr == 0) { |
455 | ret = 0; | 460 | ret = 0; |
456 | goto out; | 461 | goto out; |
457 | } | 462 | } |
458 | } else if (unlikely(curr == ~(__u64)0)) { | 463 | } else if (unlikely(curr == ~(__u64)0)) { |
459 | ret = 0; | 464 | ret = 0; |
460 | goto out; | 465 | goto out; |
461 | } | 466 | } |
462 | 467 | ||
463 | curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr); | 468 | curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr); |
464 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh); | 469 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh); |
465 | if (unlikely(ret < 0)) { | 470 | if (unlikely(ret < 0)) { |
466 | if (ret == -ENOENT) | 471 | if (ret == -ENOENT) |
467 | ret = 0; /* No snapshots (started from a hole block) */ | 472 | ret = 0; /* No snapshots (started from a hole block) */ |
468 | goto out; | 473 | goto out; |
469 | } | 474 | } |
470 | kaddr = kmap_atomic(bh->b_page, KM_USER0); | 475 | kaddr = kmap_atomic(bh->b_page, KM_USER0); |
471 | while (n < nci) { | 476 | while (n < nci) { |
472 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr); | 477 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr); |
473 | curr = ~(__u64)0; /* Terminator */ | 478 | curr = ~(__u64)0; /* Terminator */ |
474 | if (unlikely(nilfs_checkpoint_invalid(cp) || | 479 | if (unlikely(nilfs_checkpoint_invalid(cp) || |
475 | !nilfs_checkpoint_snapshot(cp))) | 480 | !nilfs_checkpoint_snapshot(cp))) |
476 | break; | 481 | break; |
477 | nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci); | 482 | nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci); |
478 | ci = (void *)ci + cisz; | 483 | ci = (void *)ci + cisz; |
479 | n++; | 484 | n++; |
480 | next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); | 485 | next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); |
481 | if (next == 0) | 486 | if (next == 0) |
482 | break; /* reach end of the snapshot list */ | 487 | break; /* reach end of the snapshot list */ |
483 | 488 | ||
484 | next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next); | 489 | next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next); |
485 | if (curr_blkoff != next_blkoff) { | 490 | if (curr_blkoff != next_blkoff) { |
486 | kunmap_atomic(kaddr, KM_USER0); | 491 | kunmap_atomic(kaddr, KM_USER0); |
487 | brelse(bh); | 492 | brelse(bh); |
488 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, | 493 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, |
489 | 0, &bh); | 494 | 0, &bh); |
490 | if (unlikely(ret < 0)) { | 495 | if (unlikely(ret < 0)) { |
491 | WARN_ON(ret == -ENOENT); | 496 | WARN_ON(ret == -ENOENT); |
492 | goto out; | 497 | goto out; |
493 | } | 498 | } |
494 | kaddr = kmap_atomic(bh->b_page, KM_USER0); | 499 | kaddr = kmap_atomic(bh->b_page, KM_USER0); |
495 | } | 500 | } |
496 | curr = next; | 501 | curr = next; |
497 | curr_blkoff = next_blkoff; | 502 | curr_blkoff = next_blkoff; |
498 | } | 503 | } |
499 | kunmap_atomic(kaddr, KM_USER0); | 504 | kunmap_atomic(kaddr, KM_USER0); |
500 | brelse(bh); | 505 | brelse(bh); |
501 | *cnop = curr; | 506 | *cnop = curr; |
502 | ret = n; | 507 | ret = n; |
503 | 508 | ||
504 | out: | 509 | out: |
505 | up_read(&NILFS_MDT(cpfile)->mi_sem); | 510 | up_read(&NILFS_MDT(cpfile)->mi_sem); |
506 | return ret; | 511 | return ret; |
507 | } | 512 | } |
508 | 513 | ||
509 | /** | 514 | /** |
510 | * nilfs_cpfile_get_cpinfo - get information on checkpoints | 515 | * nilfs_cpfile_get_cpinfo - get information on checkpoints |
511 | * @cpfile: inode of checkpoint file | 516 | * @cpfile: inode of checkpoint file |
512 | * @cnop: place to pass a start checkpoint number and receive the next one | 517 | * @cnop: place to pass a start checkpoint number and receive the next one |
513 | * @buf: buffer to receive checkpoint info entries | 518 | * @buf: buffer to receive checkpoint info entries |
514 | * @nci: maximum number of entries to store in @buf | 519 | * @nci: maximum number of entries to store in @buf |
515 | */ | 520 | */ |
516 | 521 | ||
517 | ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, | 522 | ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, |
518 | void *buf, unsigned cisz, size_t nci) | 523 | void *buf, unsigned cisz, size_t nci) |
519 | { | 524 | { |
520 | switch (mode) { | 525 | switch (mode) { |
521 | case NILFS_CHECKPOINT: | 526 | case NILFS_CHECKPOINT: |
522 | return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci); | 527 | return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci); |
523 | case NILFS_SNAPSHOT: | 528 | case NILFS_SNAPSHOT: |
524 | return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci); | 529 | return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci); |
525 | default: | 530 | default: |
526 | return -EINVAL; | 531 | return -EINVAL; |
527 | } | 532 | } |
528 | } | 533 | } |
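
Editor's note: nilfs_cpfile_do_get_cpinfo() above behaves like a cursor-based batch read; the caller passes a starting checkpoint number through *cnop, receives up to @nci entries, and gets *cnop advanced past the last entry returned so the next call resumes there. The hedged sketch below shows how a kernel-side caller might drain all ordinary checkpoints through nilfs_cpfile_get_cpinfo(). It is illustrative only, assumes kernel context with a valid cpfile inode, and the helper name, batch size and bail-out policy are invented for the example.

/* Illustrative only: iterate over every ordinary checkpoint in batches. */
static int example_walk_checkpoints(struct inode *cpfile)
{
	struct nilfs_cpinfo info[16];
	__u64 cno = 1;		/* checkpoint numbers start at 1; 0 is invalid */
	ssize_t n;

	do {
		n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
					    info, sizeof(info[0]), 16);
		if (n < 0)
			return n;	/* -EIO, -ENOMEM, ... */
		/* info[0..n-1] are valid here; cno now points past them */
	} while (n > 0);
	return 0;
}
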
529 | 534 | ||
530 | /** | 535 | /** |
531 | * nilfs_cpfile_delete_checkpoint - delete a checkpoint | 536 | * nilfs_cpfile_delete_checkpoint - delete a checkpoint |
532 | * @cpfile: inode of checkpoint file | 537 | * @cpfile: inode of checkpoint file |
533 | * @cno: checkpoint number to be deleted | 538 | * @cno: checkpoint number to be deleted |
534 | */ | 539 | */ |
535 | int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno) | 540 | int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno) |
536 | { | 541 | { |
537 | struct nilfs_cpinfo ci; | 542 | struct nilfs_cpinfo ci; |
538 | __u64 tcno = cno; | 543 | __u64 tcno = cno; |
539 | ssize_t nci; | 544 | ssize_t nci; |
540 | 545 | ||
541 | nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1); | 546 | nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1); |
542 | if (nci < 0) | 547 | if (nci < 0) |
543 | return nci; | 548 | return nci; |
544 | else if (nci == 0 || ci.ci_cno != cno) | 549 | else if (nci == 0 || ci.ci_cno != cno) |
545 | return -ENOENT; | 550 | return -ENOENT; |
546 | else if (nilfs_cpinfo_snapshot(&ci)) | 551 | else if (nilfs_cpinfo_snapshot(&ci)) |
547 | return -EBUSY; | 552 | return -EBUSY; |
548 | 553 | ||
549 | return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1); | 554 | return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1); |
550 | } | 555 | } |
551 | 556 | ||
552 | static struct nilfs_snapshot_list * | 557 | static struct nilfs_snapshot_list * |
553 | nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile, | 558 | nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile, |
554 | __u64 cno, | 559 | __u64 cno, |
555 | struct buffer_head *bh, | 560 | struct buffer_head *bh, |
556 | void *kaddr) | 561 | void *kaddr) |
557 | { | 562 | { |
558 | struct nilfs_cpfile_header *header; | 563 | struct nilfs_cpfile_header *header; |
559 | struct nilfs_checkpoint *cp; | 564 | struct nilfs_checkpoint *cp; |
560 | struct nilfs_snapshot_list *list; | 565 | struct nilfs_snapshot_list *list; |
561 | 566 | ||
562 | if (cno != 0) { | 567 | if (cno != 0) { |
563 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); | 568 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); |
564 | list = &cp->cp_snapshot_list; | 569 | list = &cp->cp_snapshot_list; |
565 | } else { | 570 | } else { |
566 | header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); | 571 | header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); |
567 | list = &header->ch_snapshot_list; | 572 | list = &header->ch_snapshot_list; |
568 | } | 573 | } |
569 | return list; | 574 | return list; |
570 | } | 575 | } |
571 | 576 | ||
572 | static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno) | 577 | static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno) |
573 | { | 578 | { |
574 | struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh; | 579 | struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh; |
575 | struct nilfs_cpfile_header *header; | 580 | struct nilfs_cpfile_header *header; |
576 | struct nilfs_checkpoint *cp; | 581 | struct nilfs_checkpoint *cp; |
577 | struct nilfs_snapshot_list *list; | 582 | struct nilfs_snapshot_list *list; |
578 | __u64 curr, prev; | 583 | __u64 curr, prev; |
579 | unsigned long curr_blkoff, prev_blkoff; | 584 | unsigned long curr_blkoff, prev_blkoff; |
580 | void *kaddr; | 585 | void *kaddr; |
581 | int ret; | 586 | int ret; |
582 | 587 | ||
583 | if (cno == 0) | 588 | if (cno == 0) |
584 | return -ENOENT; /* checkpoint number 0 is invalid */ | 589 | return -ENOENT; /* checkpoint number 0 is invalid */ |
585 | down_write(&NILFS_MDT(cpfile)->mi_sem); | 590 | down_write(&NILFS_MDT(cpfile)->mi_sem); |
586 | 591 | ||
587 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); | 592 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); |
588 | if (ret < 0) | 593 | if (ret < 0) |
589 | goto out_sem; | 594 | goto out_sem; |
590 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); | 595 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); |
591 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); | 596 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); |
592 | if (nilfs_checkpoint_invalid(cp)) { | 597 | if (nilfs_checkpoint_invalid(cp)) { |
593 | ret = -ENOENT; | 598 | ret = -ENOENT; |
594 | kunmap_atomic(kaddr, KM_USER0); | 599 | kunmap_atomic(kaddr, KM_USER0); |
595 | goto out_cp; | 600 | goto out_cp; |
596 | } | 601 | } |
597 | if (nilfs_checkpoint_snapshot(cp)) { | 602 | if (nilfs_checkpoint_snapshot(cp)) { |
598 | ret = 0; | 603 | ret = 0; |
599 | kunmap_atomic(kaddr, KM_USER0); | 604 | kunmap_atomic(kaddr, KM_USER0); |
600 | goto out_cp; | 605 | goto out_cp; |
601 | } | 606 | } |
602 | kunmap_atomic(kaddr, KM_USER0); | 607 | kunmap_atomic(kaddr, KM_USER0); |
603 | 608 | ||
604 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); | 609 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); |
605 | if (ret < 0) | 610 | if (ret < 0) |
606 | goto out_cp; | 611 | goto out_cp; |
607 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); | 612 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); |
608 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); | 613 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); |
609 | list = &header->ch_snapshot_list; | 614 | list = &header->ch_snapshot_list; |
610 | curr_bh = header_bh; | 615 | curr_bh = header_bh; |
611 | get_bh(curr_bh); | 616 | get_bh(curr_bh); |
612 | curr = 0; | 617 | curr = 0; |
613 | curr_blkoff = 0; | 618 | curr_blkoff = 0; |
614 | prev = le64_to_cpu(list->ssl_prev); | 619 | prev = le64_to_cpu(list->ssl_prev); |
615 | while (prev > cno) { | 620 | while (prev > cno) { |
616 | prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev); | 621 | prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev); |
617 | curr = prev; | 622 | curr = prev; |
618 | if (curr_blkoff != prev_blkoff) { | 623 | if (curr_blkoff != prev_blkoff) { |
619 | kunmap_atomic(kaddr, KM_USER0); | 624 | kunmap_atomic(kaddr, KM_USER0); |
620 | brelse(curr_bh); | 625 | brelse(curr_bh); |
621 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, | 626 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, |
622 | 0, &curr_bh); | 627 | 0, &curr_bh); |
623 | if (ret < 0) | 628 | if (ret < 0) |
624 | goto out_header; | 629 | goto out_header; |
625 | kaddr = kmap_atomic(curr_bh->b_page, KM_USER0); | 630 | kaddr = kmap_atomic(curr_bh->b_page, KM_USER0); |
626 | } | 631 | } |
627 | curr_blkoff = prev_blkoff; | 632 | curr_blkoff = prev_blkoff; |
628 | cp = nilfs_cpfile_block_get_checkpoint( | 633 | cp = nilfs_cpfile_block_get_checkpoint( |
629 | cpfile, curr, curr_bh, kaddr); | 634 | cpfile, curr, curr_bh, kaddr); |
630 | list = &cp->cp_snapshot_list; | 635 | list = &cp->cp_snapshot_list; |
631 | prev = le64_to_cpu(list->ssl_prev); | 636 | prev = le64_to_cpu(list->ssl_prev); |
632 | } | 637 | } |
633 | kunmap_atomic(kaddr, KM_USER0); | 638 | kunmap_atomic(kaddr, KM_USER0); |
634 | 639 | ||
635 | if (prev != 0) { | 640 | if (prev != 0) { |
636 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0, | 641 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0, |
637 | &prev_bh); | 642 | &prev_bh); |
638 | if (ret < 0) | 643 | if (ret < 0) |
639 | goto out_curr; | 644 | goto out_curr; |
640 | } else { | 645 | } else { |
641 | prev_bh = header_bh; | 646 | prev_bh = header_bh; |
642 | get_bh(prev_bh); | 647 | get_bh(prev_bh); |
643 | } | 648 | } |
644 | 649 | ||
645 | kaddr = kmap_atomic(curr_bh->b_page, KM_USER0); | 650 | kaddr = kmap_atomic(curr_bh->b_page, KM_USER0); |
646 | list = nilfs_cpfile_block_get_snapshot_list( | 651 | list = nilfs_cpfile_block_get_snapshot_list( |
647 | cpfile, curr, curr_bh, kaddr); | 652 | cpfile, curr, curr_bh, kaddr); |
648 | list->ssl_prev = cpu_to_le64(cno); | 653 | list->ssl_prev = cpu_to_le64(cno); |
649 | kunmap_atomic(kaddr, KM_USER0); | 654 | kunmap_atomic(kaddr, KM_USER0); |
650 | 655 | ||
651 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); | 656 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); |
652 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); | 657 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); |
653 | cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr); | 658 | cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr); |
654 | cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev); | 659 | cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev); |
655 | nilfs_checkpoint_set_snapshot(cp); | 660 | nilfs_checkpoint_set_snapshot(cp); |
656 | kunmap_atomic(kaddr, KM_USER0); | 661 | kunmap_atomic(kaddr, KM_USER0); |
657 | 662 | ||
658 | kaddr = kmap_atomic(prev_bh->b_page, KM_USER0); | 663 | kaddr = kmap_atomic(prev_bh->b_page, KM_USER0); |
659 | list = nilfs_cpfile_block_get_snapshot_list( | 664 | list = nilfs_cpfile_block_get_snapshot_list( |
660 | cpfile, prev, prev_bh, kaddr); | 665 | cpfile, prev, prev_bh, kaddr); |
661 | list->ssl_next = cpu_to_le64(cno); | 666 | list->ssl_next = cpu_to_le64(cno); |
662 | kunmap_atomic(kaddr, KM_USER0); | 667 | kunmap_atomic(kaddr, KM_USER0); |
663 | 668 | ||
664 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); | 669 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); |
665 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); | 670 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); |
666 | le64_add_cpu(&header->ch_nsnapshots, 1); | 671 | le64_add_cpu(&header->ch_nsnapshots, 1); |
667 | kunmap_atomic(kaddr, KM_USER0); | 672 | kunmap_atomic(kaddr, KM_USER0); |
668 | 673 | ||
669 | nilfs_mdt_mark_buffer_dirty(prev_bh); | 674 | nilfs_mdt_mark_buffer_dirty(prev_bh); |
670 | nilfs_mdt_mark_buffer_dirty(curr_bh); | 675 | nilfs_mdt_mark_buffer_dirty(curr_bh); |
671 | nilfs_mdt_mark_buffer_dirty(cp_bh); | 676 | nilfs_mdt_mark_buffer_dirty(cp_bh); |
672 | nilfs_mdt_mark_buffer_dirty(header_bh); | 677 | nilfs_mdt_mark_buffer_dirty(header_bh); |
673 | nilfs_mdt_mark_dirty(cpfile); | 678 | nilfs_mdt_mark_dirty(cpfile); |
674 | 679 | ||
675 | brelse(prev_bh); | 680 | brelse(prev_bh); |
676 | 681 | ||
677 | out_curr: | 682 | out_curr: |
678 | brelse(curr_bh); | 683 | brelse(curr_bh); |
679 | 684 | ||
680 | out_header: | 685 | out_header: |
681 | brelse(header_bh); | 686 | brelse(header_bh); |
682 | 687 | ||
683 | out_cp: | 688 | out_cp: |
684 | brelse(cp_bh); | 689 | brelse(cp_bh); |
685 | 690 | ||
686 | out_sem: | 691 | out_sem: |
687 | up_write(&NILFS_MDT(cpfile)->mi_sem); | 692 | up_write(&NILFS_MDT(cpfile)->mi_sem); |
688 | return ret; | 693 | return ret; |
689 | } | 694 | } |
690 | 695 | ||
691 | static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) | 696 | static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) |
692 | { | 697 | { |
693 | struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh; | 698 | struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh; |
694 | struct nilfs_cpfile_header *header; | 699 | struct nilfs_cpfile_header *header; |
695 | struct nilfs_checkpoint *cp; | 700 | struct nilfs_checkpoint *cp; |
696 | struct nilfs_snapshot_list *list; | 701 | struct nilfs_snapshot_list *list; |
697 | __u64 next, prev; | 702 | __u64 next, prev; |
698 | void *kaddr; | 703 | void *kaddr; |
699 | int ret; | 704 | int ret; |
700 | 705 | ||
701 | if (cno == 0) | 706 | if (cno == 0) |
702 | return -ENOENT; /* checkpoint number 0 is invalid */ | 707 | return -ENOENT; /* checkpoint number 0 is invalid */ |
703 | down_write(&NILFS_MDT(cpfile)->mi_sem); | 708 | down_write(&NILFS_MDT(cpfile)->mi_sem); |
704 | 709 | ||
705 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); | 710 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); |
706 | if (ret < 0) | 711 | if (ret < 0) |
707 | goto out_sem; | 712 | goto out_sem; |
708 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); | 713 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); |
709 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); | 714 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); |
710 | if (nilfs_checkpoint_invalid(cp)) { | 715 | if (nilfs_checkpoint_invalid(cp)) { |
711 | ret = -ENOENT; | 716 | ret = -ENOENT; |
712 | kunmap_atomic(kaddr, KM_USER0); | 717 | kunmap_atomic(kaddr, KM_USER0); |
713 | goto out_cp; | 718 | goto out_cp; |
714 | } | 719 | } |
715 | if (!nilfs_checkpoint_snapshot(cp)) { | 720 | if (!nilfs_checkpoint_snapshot(cp)) { |
716 | ret = 0; | 721 | ret = 0; |
717 | kunmap_atomic(kaddr, KM_USER0); | 722 | kunmap_atomic(kaddr, KM_USER0); |
718 | goto out_cp; | 723 | goto out_cp; |
719 | } | 724 | } |
720 | 725 | ||
721 | list = &cp->cp_snapshot_list; | 726 | list = &cp->cp_snapshot_list; |
722 | next = le64_to_cpu(list->ssl_next); | 727 | next = le64_to_cpu(list->ssl_next); |
723 | prev = le64_to_cpu(list->ssl_prev); | 728 | prev = le64_to_cpu(list->ssl_prev); |
724 | kunmap_atomic(kaddr, KM_USER0); | 729 | kunmap_atomic(kaddr, KM_USER0); |
725 | 730 | ||
726 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); | 731 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); |
727 | if (ret < 0) | 732 | if (ret < 0) |
728 | goto out_cp; | 733 | goto out_cp; |
729 | if (next != 0) { | 734 | if (next != 0) { |
730 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0, | 735 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0, |
731 | &next_bh); | 736 | &next_bh); |
732 | if (ret < 0) | 737 | if (ret < 0) |
733 | goto out_header; | 738 | goto out_header; |
734 | } else { | 739 | } else { |
735 | next_bh = header_bh; | 740 | next_bh = header_bh; |
736 | get_bh(next_bh); | 741 | get_bh(next_bh); |
737 | } | 742 | } |
738 | if (prev != 0) { | 743 | if (prev != 0) { |
739 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0, | 744 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0, |
740 | &prev_bh); | 745 | &prev_bh); |
741 | if (ret < 0) | 746 | if (ret < 0) |
742 | goto out_next; | 747 | goto out_next; |
743 | } else { | 748 | } else { |
744 | prev_bh = header_bh; | 749 | prev_bh = header_bh; |
745 | get_bh(prev_bh); | 750 | get_bh(prev_bh); |
746 | } | 751 | } |
747 | 752 | ||
748 | kaddr = kmap_atomic(next_bh->b_page, KM_USER0); | 753 | kaddr = kmap_atomic(next_bh->b_page, KM_USER0); |
749 | list = nilfs_cpfile_block_get_snapshot_list( | 754 | list = nilfs_cpfile_block_get_snapshot_list( |
750 | cpfile, next, next_bh, kaddr); | 755 | cpfile, next, next_bh, kaddr); |
751 | list->ssl_prev = cpu_to_le64(prev); | 756 | list->ssl_prev = cpu_to_le64(prev); |
752 | kunmap_atomic(kaddr, KM_USER0); | 757 | kunmap_atomic(kaddr, KM_USER0); |
753 | 758 | ||
754 | kaddr = kmap_atomic(prev_bh->b_page, KM_USER0); | 759 | kaddr = kmap_atomic(prev_bh->b_page, KM_USER0); |
755 | list = nilfs_cpfile_block_get_snapshot_list( | 760 | list = nilfs_cpfile_block_get_snapshot_list( |
756 | cpfile, prev, prev_bh, kaddr); | 761 | cpfile, prev, prev_bh, kaddr); |
757 | list->ssl_next = cpu_to_le64(next); | 762 | list->ssl_next = cpu_to_le64(next); |
758 | kunmap_atomic(kaddr, KM_USER0); | 763 | kunmap_atomic(kaddr, KM_USER0); |
759 | 764 | ||
760 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); | 765 | kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); |
761 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); | 766 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); |
762 | cp->cp_snapshot_list.ssl_next = cpu_to_le64(0); | 767 | cp->cp_snapshot_list.ssl_next = cpu_to_le64(0); |
763 | cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0); | 768 | cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0); |
764 | nilfs_checkpoint_clear_snapshot(cp); | 769 | nilfs_checkpoint_clear_snapshot(cp); |
765 | kunmap_atomic(kaddr, KM_USER0); | 770 | kunmap_atomic(kaddr, KM_USER0); |
766 | 771 | ||
767 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); | 772 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); |
768 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); | 773 | header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); |
769 | le64_add_cpu(&header->ch_nsnapshots, -1); | 774 | le64_add_cpu(&header->ch_nsnapshots, -1); |
770 | kunmap_atomic(kaddr, KM_USER0); | 775 | kunmap_atomic(kaddr, KM_USER0); |
771 | 776 | ||
772 | nilfs_mdt_mark_buffer_dirty(next_bh); | 777 | nilfs_mdt_mark_buffer_dirty(next_bh); |
773 | nilfs_mdt_mark_buffer_dirty(prev_bh); | 778 | nilfs_mdt_mark_buffer_dirty(prev_bh); |
774 | nilfs_mdt_mark_buffer_dirty(cp_bh); | 779 | nilfs_mdt_mark_buffer_dirty(cp_bh); |
775 | nilfs_mdt_mark_buffer_dirty(header_bh); | 780 | nilfs_mdt_mark_buffer_dirty(header_bh); |
776 | nilfs_mdt_mark_dirty(cpfile); | 781 | nilfs_mdt_mark_dirty(cpfile); |
777 | 782 | ||
778 | brelse(prev_bh); | 783 | brelse(prev_bh); |
779 | 784 | ||
780 | out_next: | 785 | out_next: |
781 | brelse(next_bh); | 786 | brelse(next_bh); |
782 | 787 | ||
783 | out_header: | 788 | out_header: |
784 | brelse(header_bh); | 789 | brelse(header_bh); |
785 | 790 | ||
786 | out_cp: | 791 | out_cp: |
787 | brelse(cp_bh); | 792 | brelse(cp_bh); |
788 | 793 | ||
789 | out_sem: | 794 | out_sem: |
790 | up_write(&NILFS_MDT(cpfile)->mi_sem); | 795 | up_write(&NILFS_MDT(cpfile)->mi_sem); |
791 | return ret; | 796 | return ret; |
792 | } | 797 | } |
793 | 798 | ||
794 | /** | 799 | /** |
795 | * nilfs_cpfile_is_snapshot - determine whether a checkpoint is a snapshot | 800 | * nilfs_cpfile_is_snapshot - determine whether a checkpoint is a snapshot |
796 | * @cpfile: inode of checkpoint file | 801 | * @cpfile: inode of checkpoint file |
797 | * @cno: checkpoint number | 802 | * @cno: checkpoint number |
798 | * | 803 | * |
799 | * Description: nilfs_cpfile_is_snapshot() tests whether the checkpoint specified by @cno is a snapshot. | 804 | * Description: nilfs_cpfile_is_snapshot() tests whether the checkpoint specified by @cno is a snapshot. |
800 | * | 805 | * |
801 | * Return Value: On success, 1 is returned if the checkpoint specified by | 806 | * Return Value: On success, 1 is returned if the checkpoint specified by |
802 | * @cno is a snapshot, or 0 if not. On error, one of the following negative | 807 | * @cno is a snapshot, or 0 if not. On error, one of the following negative |
803 | * error codes is returned. | 808 | * error codes is returned. |
804 | * | 809 | * |
805 | * %-EIO - I/O error. | 810 | * %-EIO - I/O error. |
806 | * | 811 | * |
807 | * %-ENOMEM - Insufficient amount of memory available. | 812 | * %-ENOMEM - Insufficient amount of memory available. |
808 | * | 813 | * |
809 | * %-ENOENT - No such checkpoint. | 814 | * %-ENOENT - No such checkpoint. |
810 | */ | 815 | */ |
811 | int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) | 816 | int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) |
812 | { | 817 | { |
813 | struct buffer_head *bh; | 818 | struct buffer_head *bh; |
814 | struct nilfs_checkpoint *cp; | 819 | struct nilfs_checkpoint *cp; |
815 | void *kaddr; | 820 | void *kaddr; |
816 | int ret; | 821 | int ret; |
817 | 822 | ||
818 | /* CP number is invalid if it's zero or larger than the | 823 | /* CP number is invalid if it's zero or larger than the |
819 | largest existing one. */ | 824 | largest existing one. */ |
820 | if (cno == 0 || cno >= nilfs_mdt_cno(cpfile)) | 825 | if (cno == 0 || cno >= nilfs_mdt_cno(cpfile)) |
821 | return -ENOENT; | 826 | return -ENOENT; |
822 | down_read(&NILFS_MDT(cpfile)->mi_sem); | 827 | down_read(&NILFS_MDT(cpfile)->mi_sem); |
823 | 828 | ||
824 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); | 829 | ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); |
825 | if (ret < 0) | 830 | if (ret < 0) |
826 | goto out; | 831 | goto out; |
827 | kaddr = kmap_atomic(bh->b_page, KM_USER0); | 832 | kaddr = kmap_atomic(bh->b_page, KM_USER0); |
828 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); | 833 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); |
829 | if (nilfs_checkpoint_invalid(cp)) | 834 | if (nilfs_checkpoint_invalid(cp)) |
830 | ret = -ENOENT; | 835 | ret = -ENOENT; |
831 | else | 836 | else |
832 | ret = nilfs_checkpoint_snapshot(cp); | 837 | ret = nilfs_checkpoint_snapshot(cp); |
833 | kunmap_atomic(kaddr, KM_USER0); | 838 | kunmap_atomic(kaddr, KM_USER0); |
834 | brelse(bh); | 839 | brelse(bh); |
835 | 840 | ||
836 | out: | 841 | out: |
837 | up_read(&NILFS_MDT(cpfile)->mi_sem); | 842 | up_read(&NILFS_MDT(cpfile)->mi_sem); |
838 | return ret; | 843 | return ret; |
839 | } | 844 | } |
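
Editor's note: the return convention above is tri-state (negative errno, 0 for a plain checkpoint, 1 for a snapshot), so the result should not be tested as a plain boolean. A minimal hedged sketch, assuming kernel context and a valid cpfile inode; the helper name is invented:

/* Illustrative only: distinguish "not a snapshot" from a real error. */
static int example_check_snapshot(struct inode *cpfile, __u64 cno)
{
	int ret = nilfs_cpfile_is_snapshot(cpfile, cno);

	if (ret < 0)
		return ret;		/* -ENOENT, -EIO or -ENOMEM */
	/* ret == 1: snapshot, ret == 0: ordinary checkpoint */
	return 0;
}
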
840 | 845 | ||
841 | /** | 846 | /** |
842 | * nilfs_cpfile_change_cpmode - change checkpoint mode | 847 | * nilfs_cpfile_change_cpmode - change checkpoint mode |
843 | * @cpfile: inode of checkpoint file | 848 | * @cpfile: inode of checkpoint file |
844 | * @cno: checkpoint number | 849 | * @cno: checkpoint number |
845 | * @mode: mode of checkpoint | 850 | * @mode: mode of checkpoint |
846 | * | 851 | * |
847 | * Description: nilfs_cpfile_change_cpmode() changes the mode of the checkpoint | 852 | * Description: nilfs_cpfile_change_cpmode() changes the mode of the checkpoint |
848 | * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT. | 853 | * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT. |
849 | * | 854 | * |
850 | * Return Value: On success, 0 is returned. On error, one of the following | 855 | * Return Value: On success, 0 is returned. On error, one of the following |
851 | * negative error codes is returned. | 856 | * negative error codes is returned. |
852 | * | 857 | * |
853 | * %-EIO - I/O error. | 858 | * %-EIO - I/O error. |
854 | * | 859 | * |
855 | * %-ENOMEM - Insufficient amount of memory available. | 860 | * %-ENOMEM - Insufficient amount of memory available. |
856 | * | 861 | * |
857 | * %-ENOENT - No such checkpoint. | 862 | * %-ENOENT - No such checkpoint. |
858 | */ | 863 | */ |
859 | int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) | 864 | int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) |
860 | { | 865 | { |
861 | struct the_nilfs *nilfs; | 866 | struct the_nilfs *nilfs; |
862 | int ret; | 867 | int ret; |
863 | 868 | ||
864 | nilfs = NILFS_MDT(cpfile)->mi_nilfs; | 869 | nilfs = NILFS_MDT(cpfile)->mi_nilfs; |
865 | 870 | ||
866 | switch (mode) { | 871 | switch (mode) { |
867 | case NILFS_CHECKPOINT: | 872 | case NILFS_CHECKPOINT: |
868 | /* | 873 | /* |
869 | * Check for protecting existing snapshot mounts: | 874 | * Check for protecting existing snapshot mounts: |
870 | * ns_mount_mutex is used to make this operation atomic and | 875 | * ns_mount_mutex is used to make this operation atomic and |
871 | * exclusive with a new mount job. Though it doesn't cover | 876 | * exclusive with a new mount job. Though it doesn't cover |
872 | * umount, it's enough for the purpose. | 877 | * umount, it's enough for the purpose. |
873 | */ | 878 | */ |
874 | if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) { | 879 | if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) { |
875 | /* Current implementation does not have to protect | 880 | /* Current implementation does not have to protect |
876 | plain read-only mounts since they are exclusive | 881 | plain read-only mounts since they are exclusive |
877 | with a read/write mount and are protected from the | 882 | with a read/write mount and are protected from the |
878 | cleaner. */ | 883 | cleaner. */ |
879 | ret = -EBUSY; | 884 | ret = -EBUSY; |
880 | } else | 885 | } else |
881 | ret = nilfs_cpfile_clear_snapshot(cpfile, cno); | 886 | ret = nilfs_cpfile_clear_snapshot(cpfile, cno); |
882 | return ret; | 887 | return ret; |
883 | case NILFS_SNAPSHOT: | 888 | case NILFS_SNAPSHOT: |
884 | return nilfs_cpfile_set_snapshot(cpfile, cno); | 889 | return nilfs_cpfile_set_snapshot(cpfile, cno); |
885 | default: | 890 | default: |
886 | return -EINVAL; | 891 | return -EINVAL; |
887 | } | 892 | } |
888 | } | 893 | } |
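
Editor's note: the mode switch above promotes a checkpoint to a snapshot (NILFS_SNAPSHOT) or demotes it back (NILFS_CHECKPOINT), returning -EBUSY while a snapshot mount still uses it. The sketch below is a hedged, illustrative call sequence only; it assumes kernel context and a valid cpfile inode, and the helper name and minimal error handling are invented for the example.

/* Illustrative only: promote a checkpoint to a snapshot and demote it again. */
static int example_toggle_snapshot(struct inode *cpfile, __u64 cno)
{
	int ret;

	ret = nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_SNAPSHOT);
	if (ret < 0)
		return ret;		/* e.g. -ENOENT if @cno was deleted */

	/* ... the snapshot can now be mounted read-only ... */

	return nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_CHECKPOINT);
	/* returns -EBUSY while the snapshot is still mounted */
}
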
889 | 894 | ||
890 | /** | 895 | /** |
891 | * nilfs_cpfile_get_stat - get checkpoint statistics | 896 | * nilfs_cpfile_get_stat - get checkpoint statistics |
892 | * @cpfile: inode of checkpoint file | 897 | * @cpfile: inode of checkpoint file |
893 | * @cpstat: pointer to a structure of checkpoint statistics | 898 | * @cpstat: pointer to a structure of checkpoint statistics |
894 | * | 899 | * |
895 | * Description: nilfs_cpfile_get_stat() returns information about checkpoints. | 900 | * Description: nilfs_cpfile_get_stat() returns information about checkpoints. |
896 | * | 901 | * |
897 | * Return Value: On success, 0 is returned, and checkpoints information is | 902 | * Return Value: On success, 0 is returned, and checkpoints information is |
898 | * stored in the place pointed to by @cpstat. On error, one of the following | 903 | * stored in the place pointed to by @cpstat. On error, one of the following |
899 | * negative error codes is returned. | 904 | * negative error codes is returned. |
900 | * | 905 | * |
901 | * %-EIO - I/O error. | 906 | * %-EIO - I/O error. |
902 | * | 907 | * |
903 | * %-ENOMEM - Insufficient amount of memory available. | 908 | * %-ENOMEM - Insufficient amount of memory available. |
904 | */ | 909 | */ |
905 | int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) | 910 | int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) |
906 | { | 911 | { |
907 | struct buffer_head *bh; | 912 | struct buffer_head *bh; |
908 | struct nilfs_cpfile_header *header; | 913 | struct nilfs_cpfile_header *header; |
909 | void *kaddr; | 914 | void *kaddr; |
910 | int ret; | 915 | int ret; |
911 | 916 | ||
912 | down_read(&NILFS_MDT(cpfile)->mi_sem); | 917 | down_read(&NILFS_MDT(cpfile)->mi_sem); |
913 | 918 | ||
914 | ret = nilfs_cpfile_get_header_block(cpfile, &bh); | 919 | ret = nilfs_cpfile_get_header_block(cpfile, &bh); |
915 | if (ret < 0) | 920 | if (ret < 0) |
916 | goto out_sem; | 921 | goto out_sem; |
917 | kaddr = kmap_atomic(bh->b_page, KM_USER0); | 922 | kaddr = kmap_atomic(bh->b_page, KM_USER0); |
918 | header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); | 923 | header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); |
919 | cpstat->cs_cno = nilfs_mdt_cno(cpfile); | 924 | cpstat->cs_cno = nilfs_mdt_cno(cpfile); |
920 | cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints); | 925 | cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints); |
921 | cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots); | 926 | cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots); |
922 | kunmap_atomic(kaddr, KM_USER0); | 927 | kunmap_atomic(kaddr, KM_USER0); |
923 | brelse(bh); | 928 | brelse(bh); |
924 | 929 | ||
925 | out_sem: | 930 | out_sem: |
926 | up_read(&NILFS_MDT(cpfile)->mi_sem); | 931 | up_read(&NILFS_MDT(cpfile)->mi_sem); |
927 | return ret; | 932 | return ret; |
928 | } | 933 | } |
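
Editor's note: nilfs_cpfile_get_stat() fills a struct nilfs_cpstat with the next checkpoint number (cs_cno), the checkpoint count (cs_ncps) and the snapshot count (cs_nsss), as read from the header block above. A hedged caller sketch follows; it assumes kernel context and a valid cpfile inode, and the helper name and log message are invented for the example.

/* Illustrative only: read the checkpoint statistics and log them. */
static int example_print_cpstat(struct inode *cpfile)
{
	struct nilfs_cpstat stat;
	int ret = nilfs_cpfile_get_stat(cpfile, &stat);

	if (ret < 0)
		return ret;
	printk(KERN_INFO "next cno=%llu, %llu checkpoints, %llu snapshots\n",
	       (unsigned long long)stat.cs_cno,
	       (unsigned long long)stat.cs_ncps,
	       (unsigned long long)stat.cs_nsss);
	return 0;
}
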
929 | 934 | ||
930 | /** | 935 | /** |
931 | * nilfs_cpfile_read - read cpfile inode | 936 | * nilfs_cpfile_read - read cpfile inode |
932 | * @cpfile: cpfile inode | 937 | * @cpfile: cpfile inode |
933 | * @raw_inode: on-disk cpfile inode | 938 | * @raw_inode: on-disk cpfile inode |
934 | */ | 939 | */ |
935 | int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode) | 940 | int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode) |
936 | { | 941 | { |
937 | return nilfs_read_inode_common(cpfile, raw_inode); | 942 | return nilfs_read_inode_common(cpfile, raw_inode); |
938 | } | 943 | } |
939 | 944 | ||
940 | /** | 945 | /** |
941 | * nilfs_cpfile_new - create cpfile | 946 | * nilfs_cpfile_new - create cpfile |
942 | * @nilfs: nilfs object | 947 | * @nilfs: nilfs object |
943 | * @cpsize: size of a checkpoint entry | 948 | * @cpsize: size of a checkpoint entry |
944 | */ | 949 | */ |
945 | struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize) | 950 | struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize) |
946 | { | 951 | { |
947 | struct inode *cpfile; | 952 | struct inode *cpfile; |
948 | 953 | ||
949 | cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0); | 954 | cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0); |
950 | if (cpfile) | 955 | if (cpfile) |
951 | nilfs_mdt_set_entry_size(cpfile, cpsize, | 956 | nilfs_mdt_set_entry_size(cpfile, cpsize, |
952 | sizeof(struct nilfs_cpfile_header)); | 957 | sizeof(struct nilfs_cpfile_header)); |
953 | return cpfile; | 958 | return cpfile; |
954 | } | 959 | } |
955 | 960 |
fs/nilfs2/direct.c
1 | /* | 1 | /* |
2 | * direct.c - NILFS direct block pointer. | 2 | * direct.c - NILFS direct block pointer. |
3 | * | 3 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
19 | * | 19 | * |
20 | * Written by Koji Sato <koji@osrg.net>. | 20 | * Written by Koji Sato <koji@osrg.net>. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include "nilfs.h" | 24 | #include "nilfs.h" |
25 | #include "page.h" | 25 | #include "page.h" |
26 | #include "direct.h" | 26 | #include "direct.h" |
27 | #include "alloc.h" | 27 | #include "alloc.h" |
28 | #include "dat.h" | 28 | #include "dat.h" |
29 | 29 | ||
30 | static inline __le64 *nilfs_direct_dptrs(const struct nilfs_direct *direct) | 30 | static inline __le64 *nilfs_direct_dptrs(const struct nilfs_direct *direct) |
31 | { | 31 | { |
32 | return (__le64 *) | 32 | return (__le64 *) |
33 | ((struct nilfs_direct_node *)direct->d_bmap.b_u.u_data + 1); | 33 | ((struct nilfs_direct_node *)direct->d_bmap.b_u.u_data + 1); |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline __u64 | 36 | static inline __u64 |
37 | nilfs_direct_get_ptr(const struct nilfs_direct *direct, __u64 key) | 37 | nilfs_direct_get_ptr(const struct nilfs_direct *direct, __u64 key) |
38 | { | 38 | { |
39 | return nilfs_bmap_dptr_to_ptr(*(nilfs_direct_dptrs(direct) + key)); | 39 | return nilfs_bmap_dptr_to_ptr(*(nilfs_direct_dptrs(direct) + key)); |
40 | } | 40 | } |
41 | 41 | ||
42 | static inline void nilfs_direct_set_ptr(struct nilfs_direct *direct, | 42 | static inline void nilfs_direct_set_ptr(struct nilfs_direct *direct, |
43 | __u64 key, __u64 ptr) | 43 | __u64 key, __u64 ptr) |
44 | { | 44 | { |
45 | *(nilfs_direct_dptrs(direct) + key) = nilfs_bmap_ptr_to_dptr(ptr); | 45 | *(nilfs_direct_dptrs(direct) + key) = nilfs_bmap_ptr_to_dptr(ptr); |
46 | } | 46 | } |
47 | 47 | ||
48 | static int nilfs_direct_lookup(const struct nilfs_bmap *bmap, | 48 | static int nilfs_direct_lookup(const struct nilfs_bmap *bmap, |
49 | __u64 key, int level, __u64 *ptrp) | 49 | __u64 key, int level, __u64 *ptrp) |
50 | { | 50 | { |
51 | struct nilfs_direct *direct; | 51 | struct nilfs_direct *direct; |
52 | __u64 ptr; | 52 | __u64 ptr; |
53 | 53 | ||
54 | direct = (struct nilfs_direct *)bmap; | 54 | direct = (struct nilfs_direct *)bmap; /* XXX: use macro for level 1 */ |
55 | if ((key > NILFS_DIRECT_KEY_MAX) || | 55 | if (key > NILFS_DIRECT_KEY_MAX || level != 1) |
56 | (level != 1) || /* XXX: use macro for level 1 */ | ||
57 | ((ptr = nilfs_direct_get_ptr(direct, key)) == | ||
58 | NILFS_BMAP_INVALID_PTR)) | ||
59 | return -ENOENT; | 56 | return -ENOENT; |
57 | ptr = nilfs_direct_get_ptr(direct, key); | ||
58 | if (ptr == NILFS_BMAP_INVALID_PTR) | ||
59 | return -ENOENT; | ||
60 | 60 | ||
61 | if (ptrp != NULL) | 61 | if (ptrp != NULL) |
62 | *ptrp = ptr; | 62 | *ptrp = ptr; |
63 | return 0; | 63 | return 0; |
64 | } | 64 | } |
65 | 65 | ||
66 | static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap, | 66 | static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap, |
67 | __u64 key, __u64 *ptrp, | 67 | __u64 key, __u64 *ptrp, |
68 | unsigned maxblocks) | 68 | unsigned maxblocks) |
69 | { | 69 | { |
70 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; | 70 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; |
71 | struct inode *dat = NULL; | 71 | struct inode *dat = NULL; |
72 | __u64 ptr, ptr2; | 72 | __u64 ptr, ptr2; |
73 | sector_t blocknr; | 73 | sector_t blocknr; |
74 | int ret, cnt; | 74 | int ret, cnt; |
75 | 75 | ||
76 | if (key > NILFS_DIRECT_KEY_MAX || | 76 | if (key > NILFS_DIRECT_KEY_MAX) |
77 | (ptr = nilfs_direct_get_ptr(direct, key)) == | 77 | return -ENOENT; |
78 | NILFS_BMAP_INVALID_PTR) | 78 | ptr = nilfs_direct_get_ptr(direct, key); |
79 | if (ptr == NILFS_BMAP_INVALID_PTR) | ||
79 | return -ENOENT; | 80 | return -ENOENT; |
80 | 81 | ||
81 | if (NILFS_BMAP_USE_VBN(bmap)) { | 82 | if (NILFS_BMAP_USE_VBN(bmap)) { |
82 | dat = nilfs_bmap_get_dat(bmap); | 83 | dat = nilfs_bmap_get_dat(bmap); |
83 | ret = nilfs_dat_translate(dat, ptr, &blocknr); | 84 | ret = nilfs_dat_translate(dat, ptr, &blocknr); |
84 | if (ret < 0) | 85 | if (ret < 0) |
85 | return ret; | 86 | return ret; |
86 | ptr = blocknr; | 87 | ptr = blocknr; |
87 | } | 88 | } |
88 | 89 | ||
89 | maxblocks = min_t(unsigned, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1); | 90 | maxblocks = min_t(unsigned, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1); |
90 | for (cnt = 1; cnt < maxblocks && | 91 | for (cnt = 1; cnt < maxblocks && |
91 | (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) != | 92 | (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) != |
92 | NILFS_BMAP_INVALID_PTR; | 93 | NILFS_BMAP_INVALID_PTR; |
93 | cnt++) { | 94 | cnt++) { |
94 | if (dat) { | 95 | if (dat) { |
95 | ret = nilfs_dat_translate(dat, ptr2, &blocknr); | 96 | ret = nilfs_dat_translate(dat, ptr2, &blocknr); |
96 | if (ret < 0) | 97 | if (ret < 0) |
97 | return ret; | 98 | return ret; |
98 | ptr2 = blocknr; | 99 | ptr2 = blocknr; |
99 | } | 100 | } |
100 | if (ptr2 != ptr + cnt) | 101 | if (ptr2 != ptr + cnt) |
101 | break; | 102 | break; |
102 | } | 103 | } |
103 | *ptrp = ptr; | 104 | *ptrp = ptr; |
104 | return cnt; | 105 | return cnt; |
105 | } | 106 | } |
106 | 107 | ||
107 | static __u64 | 108 | static __u64 |
108 | nilfs_direct_find_target_v(const struct nilfs_direct *direct, __u64 key) | 109 | nilfs_direct_find_target_v(const struct nilfs_direct *direct, __u64 key) |
109 | { | 110 | { |
110 | __u64 ptr; | 111 | __u64 ptr; |
111 | 112 | ||
112 | ptr = nilfs_bmap_find_target_seq(&direct->d_bmap, key); | 113 | ptr = nilfs_bmap_find_target_seq(&direct->d_bmap, key); |
113 | if (ptr != NILFS_BMAP_INVALID_PTR) | 114 | if (ptr != NILFS_BMAP_INVALID_PTR) |
114 | /* sequential access */ | 115 | /* sequential access */ |
115 | return ptr; | 116 | return ptr; |
116 | else | 117 | else |
117 | /* block group */ | 118 | /* block group */ |
118 | return nilfs_bmap_find_target_in_group(&direct->d_bmap); | 119 | return nilfs_bmap_find_target_in_group(&direct->d_bmap); |
119 | } | 120 | } |
120 | 121 | ||
121 | static void nilfs_direct_set_target_v(struct nilfs_direct *direct, | 122 | static void nilfs_direct_set_target_v(struct nilfs_direct *direct, |
122 | __u64 key, __u64 ptr) | 123 | __u64 key, __u64 ptr) |
123 | { | 124 | { |
124 | direct->d_bmap.b_last_allocated_key = key; | 125 | direct->d_bmap.b_last_allocated_key = key; |
125 | direct->d_bmap.b_last_allocated_ptr = ptr; | 126 | direct->d_bmap.b_last_allocated_ptr = ptr; |
126 | } | 127 | } |
127 | 128 | ||
128 | static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) | 129 | static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) |
129 | { | 130 | { |
130 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; | 131 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; |
131 | union nilfs_bmap_ptr_req req; | 132 | union nilfs_bmap_ptr_req req; |
132 | struct inode *dat = NULL; | 133 | struct inode *dat = NULL; |
133 | struct buffer_head *bh; | 134 | struct buffer_head *bh; |
134 | int ret; | 135 | int ret; |
135 | 136 | ||
136 | if (key > NILFS_DIRECT_KEY_MAX) | 137 | if (key > NILFS_DIRECT_KEY_MAX) |
137 | return -ENOENT; | 138 | return -ENOENT; |
138 | if (nilfs_direct_get_ptr(direct, key) != NILFS_BMAP_INVALID_PTR) | 139 | if (nilfs_direct_get_ptr(direct, key) != NILFS_BMAP_INVALID_PTR) |
139 | return -EEXIST; | 140 | return -EEXIST; |
140 | 141 | ||
141 | if (NILFS_BMAP_USE_VBN(bmap)) { | 142 | if (NILFS_BMAP_USE_VBN(bmap)) { |
142 | req.bpr_ptr = nilfs_direct_find_target_v(direct, key); | 143 | req.bpr_ptr = nilfs_direct_find_target_v(direct, key); |
143 | dat = nilfs_bmap_get_dat(bmap); | 144 | dat = nilfs_bmap_get_dat(bmap); |
144 | } | 145 | } |
145 | ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat); | 146 | ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat); |
146 | if (!ret) { | 147 | if (!ret) { |
147 | /* ptr must be a pointer to a buffer head. */ | 148 | /* ptr must be a pointer to a buffer head. */ |
148 | bh = (struct buffer_head *)((unsigned long)ptr); | 149 | bh = (struct buffer_head *)((unsigned long)ptr); |
149 | set_buffer_nilfs_volatile(bh); | 150 | set_buffer_nilfs_volatile(bh); |
150 | 151 | ||
151 | nilfs_bmap_commit_alloc_ptr(bmap, &req, dat); | 152 | nilfs_bmap_commit_alloc_ptr(bmap, &req, dat); |
152 | nilfs_direct_set_ptr(direct, key, req.bpr_ptr); | 153 | nilfs_direct_set_ptr(direct, key, req.bpr_ptr); |
153 | 154 | ||
154 | if (!nilfs_bmap_dirty(bmap)) | 155 | if (!nilfs_bmap_dirty(bmap)) |
155 | nilfs_bmap_set_dirty(bmap); | 156 | nilfs_bmap_set_dirty(bmap); |
156 | 157 | ||
157 | if (NILFS_BMAP_USE_VBN(bmap)) | 158 | if (NILFS_BMAP_USE_VBN(bmap)) |
158 | nilfs_direct_set_target_v(direct, key, req.bpr_ptr); | 159 | nilfs_direct_set_target_v(direct, key, req.bpr_ptr); |
159 | 160 | ||
160 | nilfs_bmap_add_blocks(bmap, 1); | 161 | nilfs_bmap_add_blocks(bmap, 1); |
161 | } | 162 | } |
162 | return ret; | 163 | return ret; |
163 | } | 164 | } |
164 | 165 | ||
165 | static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key) | 166 | static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key) |
166 | { | 167 | { |
167 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; | 168 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; |
168 | union nilfs_bmap_ptr_req req; | 169 | union nilfs_bmap_ptr_req req; |
169 | struct inode *dat; | 170 | struct inode *dat; |
170 | int ret; | 171 | int ret; |
171 | 172 | ||
172 | if (key > NILFS_DIRECT_KEY_MAX || | 173 | if (key > NILFS_DIRECT_KEY_MAX || |
173 | nilfs_direct_get_ptr(direct, key) == NILFS_BMAP_INVALID_PTR) | 174 | nilfs_direct_get_ptr(direct, key) == NILFS_BMAP_INVALID_PTR) |
174 | return -ENOENT; | 175 | return -ENOENT; |
175 | 176 | ||
176 | dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL; | 177 | dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL; |
177 | req.bpr_ptr = nilfs_direct_get_ptr(direct, key); | 178 | req.bpr_ptr = nilfs_direct_get_ptr(direct, key); |
178 | 179 | ||
179 | ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat); | 180 | ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat); |
180 | if (!ret) { | 181 | if (!ret) { |
181 | nilfs_bmap_commit_end_ptr(bmap, &req, dat); | 182 | nilfs_bmap_commit_end_ptr(bmap, &req, dat); |
182 | nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR); | 183 | nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR); |
183 | nilfs_bmap_sub_blocks(bmap, 1); | 184 | nilfs_bmap_sub_blocks(bmap, 1); |
184 | } | 185 | } |
185 | return ret; | 186 | return ret; |
186 | } | 187 | } |
187 | 188 | ||
188 | static int nilfs_direct_last_key(const struct nilfs_bmap *bmap, __u64 *keyp) | 189 | static int nilfs_direct_last_key(const struct nilfs_bmap *bmap, __u64 *keyp) |
189 | { | 190 | { |
190 | struct nilfs_direct *direct; | 191 | struct nilfs_direct *direct; |
191 | __u64 key, lastkey; | 192 | __u64 key, lastkey; |
192 | 193 | ||
193 | direct = (struct nilfs_direct *)bmap; | 194 | direct = (struct nilfs_direct *)bmap; |
194 | lastkey = NILFS_DIRECT_KEY_MAX + 1; | 195 | lastkey = NILFS_DIRECT_KEY_MAX + 1; |
195 | for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++) | 196 | for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++) |
196 | if (nilfs_direct_get_ptr(direct, key) != | 197 | if (nilfs_direct_get_ptr(direct, key) != |
197 | NILFS_BMAP_INVALID_PTR) | 198 | NILFS_BMAP_INVALID_PTR) |
198 | lastkey = key; | 199 | lastkey = key; |
199 | 200 | ||
200 | if (lastkey == NILFS_DIRECT_KEY_MAX + 1) | 201 | if (lastkey == NILFS_DIRECT_KEY_MAX + 1) |
201 | return -ENOENT; | 202 | return -ENOENT; |
202 | 203 | ||
203 | *keyp = lastkey; | 204 | *keyp = lastkey; |
204 | 205 | ||
205 | return 0; | 206 | return 0; |
206 | } | 207 | } |
207 | 208 | ||
208 | static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key) | 209 | static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key) |
209 | { | 210 | { |
210 | return key > NILFS_DIRECT_KEY_MAX; | 211 | return key > NILFS_DIRECT_KEY_MAX; |
211 | } | 212 | } |
212 | 213 | ||
213 | static int nilfs_direct_gather_data(struct nilfs_bmap *bmap, | 214 | static int nilfs_direct_gather_data(struct nilfs_bmap *bmap, |
214 | __u64 *keys, __u64 *ptrs, int nitems) | 215 | __u64 *keys, __u64 *ptrs, int nitems) |
215 | { | 216 | { |
216 | struct nilfs_direct *direct; | 217 | struct nilfs_direct *direct; |
217 | __u64 key; | 218 | __u64 key; |
218 | __u64 ptr; | 219 | __u64 ptr; |
219 | int n; | 220 | int n; |
220 | 221 | ||
221 | direct = (struct nilfs_direct *)bmap; | 222 | direct = (struct nilfs_direct *)bmap; |
222 | if (nitems > NILFS_DIRECT_NBLOCKS) | 223 | if (nitems > NILFS_DIRECT_NBLOCKS) |
223 | nitems = NILFS_DIRECT_NBLOCKS; | 224 | nitems = NILFS_DIRECT_NBLOCKS; |
224 | n = 0; | 225 | n = 0; |
225 | for (key = 0; key < nitems; key++) { | 226 | for (key = 0; key < nitems; key++) { |
226 | ptr = nilfs_direct_get_ptr(direct, key); | 227 | ptr = nilfs_direct_get_ptr(direct, key); |
227 | if (ptr != NILFS_BMAP_INVALID_PTR) { | 228 | if (ptr != NILFS_BMAP_INVALID_PTR) { |
228 | keys[n] = key; | 229 | keys[n] = key; |
229 | ptrs[n] = ptr; | 230 | ptrs[n] = ptr; |
230 | n++; | 231 | n++; |
231 | } | 232 | } |
232 | } | 233 | } |
233 | return n; | 234 | return n; |
234 | } | 235 | } |
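nilfs_direct_gather_data copies every valid (key, pointer) pair into the caller-supplied keys[] and ptrs[] arrays, capping nitems at NILFS_DIRECT_NBLOCKS; the conversion code uses this to snapshot the direct mapping. A simplified sketch under the same hypothetical demo_* assumptions as the earlier sketches:

/* Sketch of gathering valid (key, pointer) pairs; hypothetical names. */
#include <stdint.h>
#include <stddef.h>

#define DEMO_INVALID_PTR UINT64_MAX

static int demo_gather(const uint64_t *ptrs, size_t nblocks,
		       uint64_t *keys, uint64_t *vals, size_t nitems)
{
	size_t key;
	int n = 0;

	if (nitems > nblocks)
		nitems = nblocks;          /* never scan past the table */

	for (key = 0; key < nitems; key++) {
		if (ptrs[key] != DEMO_INVALID_PTR) {
			keys[n] = key;     /* record which slot was valid */
			vals[n] = ptrs[key];
			n++;
		}
	}
	return n;                          /* number of pairs copied out */
}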
235 | 236 | ||
236 | int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, | 237 | int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, |
237 | __u64 key, __u64 *keys, __u64 *ptrs, int n) | 238 | __u64 key, __u64 *keys, __u64 *ptrs, int n) |
238 | { | 239 | { |
239 | struct nilfs_direct *direct; | 240 | struct nilfs_direct *direct; |
240 | __le64 *dptrs; | 241 | __le64 *dptrs; |
241 | int ret, i, j; | 242 | int ret, i, j; |
242 | 243 | ||
243 | /* no need to allocate any resource for conversion */ | 244 | /* no need to allocate any resource for conversion */ |
244 | 245 | ||
245 | /* delete */ | 246 | /* delete */ |
246 | ret = bmap->b_ops->bop_delete(bmap, key); | 247 | ret = bmap->b_ops->bop_delete(bmap, key); |
247 | if (ret < 0) | 248 | if (ret < 0) |
248 | return ret; | 249 | return ret; |
249 | 250 | ||
250 | /* free resources */ | 251 | /* free resources */ |
251 | if (bmap->b_ops->bop_clear != NULL) | 252 | if (bmap->b_ops->bop_clear != NULL) |
252 | bmap->b_ops->bop_clear(bmap); | 253 | bmap->b_ops->bop_clear(bmap); |
253 | 254 | ||
254 | /* convert */ | 255 | /* convert */ |
255 | direct = (struct nilfs_direct *)bmap; | 256 | direct = (struct nilfs_direct *)bmap; |
256 | dptrs = nilfs_direct_dptrs(direct); | 257 | dptrs = nilfs_direct_dptrs(direct); |
257 | for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) { | 258 | for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) { |
258 | if ((j < n) && (i == keys[j])) { | 259 | if ((j < n) && (i == keys[j])) { |
259 | dptrs[i] = (i != key) ? | 260 | dptrs[i] = (i != key) ? |
260 | nilfs_bmap_ptr_to_dptr(ptrs[j]) : | 261 | nilfs_bmap_ptr_to_dptr(ptrs[j]) : |
261 | NILFS_BMAP_INVALID_PTR; | 262 | NILFS_BMAP_INVALID_PTR; |
262 | j++; | 263 | j++; |
263 | } else | 264 | } else |
264 | dptrs[i] = NILFS_BMAP_INVALID_PTR; | 265 | dptrs[i] = NILFS_BMAP_INVALID_PTR; |
265 | } | 266 | } |
266 | 267 | ||
267 | nilfs_direct_init(bmap); | 268 | nilfs_direct_init(bmap); |
268 | return 0; | 269 | return 0; |
269 | } | 270 | } |
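nilfs_direct_delete_and_convert rebuilds the on-inode direct table from pairs previously gathered out of a btree: slots listed in keys[] get their pointer back (except the key being deleted), every other slot is reset to NILFS_BMAP_INVALID_PTR, and nilfs_direct_init reinstalls the direct operations. The rebuild loop can be sketched in isolation as follows (hypothetical demo_* names):

/*
 * Sketch of the rebuild loop: restore gathered pairs into a fresh table,
 * dropping the key that is being deleted. Hypothetical demo_* names.
 */
#include <stdint.h>
#include <stddef.h>

#define DEMO_INVALID_PTR UINT64_MAX

static void demo_rebuild_direct(uint64_t *dptrs, size_t nblocks,
				const uint64_t *keys, const uint64_t *vals,
				int n, uint64_t deleted_key)
{
	size_t i;
	int j = 0;

	for (i = 0; i < nblocks; i++) {
		if (j < n && i == keys[j]) {
			/* slot was valid in the old mapping */
			dptrs[i] = (i != deleted_key) ? vals[j]
						      : DEMO_INVALID_PTR;
			j++;
		} else {
			dptrs[i] = DEMO_INVALID_PTR;
		}
	}
}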
270 | 271 | ||
271 | static int nilfs_direct_propagate(const struct nilfs_bmap *bmap, | 272 | static int nilfs_direct_propagate(const struct nilfs_bmap *bmap, |
272 | struct buffer_head *bh) | 273 | struct buffer_head *bh) |
273 | { | 274 | { |
274 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; | 275 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; |
275 | struct nilfs_palloc_req oldreq, newreq; | 276 | struct nilfs_palloc_req oldreq, newreq; |
276 | struct inode *dat; | 277 | struct inode *dat; |
277 | __u64 key; | 278 | __u64 key; |
278 | __u64 ptr; | 279 | __u64 ptr; |
279 | int ret; | 280 | int ret; |
280 | 281 | ||
281 | if (!NILFS_BMAP_USE_VBN(bmap)) | 282 | if (!NILFS_BMAP_USE_VBN(bmap)) |
282 | return 0; | 283 | return 0; |
283 | 284 | ||
284 | dat = nilfs_bmap_get_dat(bmap); | 285 | dat = nilfs_bmap_get_dat(bmap); |
285 | key = nilfs_bmap_data_get_key(bmap, bh); | 286 | key = nilfs_bmap_data_get_key(bmap, bh); |
286 | ptr = nilfs_direct_get_ptr(direct, key); | 287 | ptr = nilfs_direct_get_ptr(direct, key); |
287 | if (!buffer_nilfs_volatile(bh)) { | 288 | if (!buffer_nilfs_volatile(bh)) { |
288 | oldreq.pr_entry_nr = ptr; | 289 | oldreq.pr_entry_nr = ptr; |
289 | newreq.pr_entry_nr = ptr; | 290 | newreq.pr_entry_nr = ptr; |
290 | ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq); | 291 | ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq); |
291 | if (ret < 0) | 292 | if (ret < 0) |
292 | return ret; | 293 | return ret; |
293 | nilfs_dat_commit_update(dat, &oldreq, &newreq, | 294 | nilfs_dat_commit_update(dat, &oldreq, &newreq, |
294 | bmap->b_ptr_type == NILFS_BMAP_PTR_VS); | 295 | bmap->b_ptr_type == NILFS_BMAP_PTR_VS); |
295 | set_buffer_nilfs_volatile(bh); | 296 | set_buffer_nilfs_volatile(bh); |
296 | nilfs_direct_set_ptr(direct, key, newreq.pr_entry_nr); | 297 | nilfs_direct_set_ptr(direct, key, newreq.pr_entry_nr); |
297 | } else | 298 | } else |
298 | ret = nilfs_dat_mark_dirty(dat, ptr); | 299 | ret = nilfs_dat_mark_dirty(dat, ptr); |
299 | 300 | ||
300 | return ret; | 301 | return ret; |
301 | } | 302 | } |
302 | 303 | ||
303 | static int nilfs_direct_assign_v(struct nilfs_direct *direct, | 304 | static int nilfs_direct_assign_v(struct nilfs_direct *direct, |
304 | __u64 key, __u64 ptr, | 305 | __u64 key, __u64 ptr, |
305 | struct buffer_head **bh, | 306 | struct buffer_head **bh, |
306 | sector_t blocknr, | 307 | sector_t blocknr, |
307 | union nilfs_binfo *binfo) | 308 | union nilfs_binfo *binfo) |
308 | { | 309 | { |
309 | struct inode *dat = nilfs_bmap_get_dat(&direct->d_bmap); | 310 | struct inode *dat = nilfs_bmap_get_dat(&direct->d_bmap); |
310 | union nilfs_bmap_ptr_req req; | 311 | union nilfs_bmap_ptr_req req; |
311 | int ret; | 312 | int ret; |
312 | 313 | ||
313 | req.bpr_ptr = ptr; | 314 | req.bpr_ptr = ptr; |
314 | ret = nilfs_dat_prepare_start(dat, &req.bpr_req); | 315 | ret = nilfs_dat_prepare_start(dat, &req.bpr_req); |
315 | if (!ret) { | 316 | if (!ret) { |
316 | nilfs_dat_commit_start(dat, &req.bpr_req, blocknr); | 317 | nilfs_dat_commit_start(dat, &req.bpr_req, blocknr); |
317 | binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr); | 318 | binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr); |
318 | binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key); | 319 | binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key); |
319 | } | 320 | } |
320 | return ret; | 321 | return ret; |
321 | } | 322 | } |
322 | 323 | ||
323 | static int nilfs_direct_assign_p(struct nilfs_direct *direct, | 324 | static int nilfs_direct_assign_p(struct nilfs_direct *direct, |
324 | __u64 key, __u64 ptr, | 325 | __u64 key, __u64 ptr, |
325 | struct buffer_head **bh, | 326 | struct buffer_head **bh, |
326 | sector_t blocknr, | 327 | sector_t blocknr, |
327 | union nilfs_binfo *binfo) | 328 | union nilfs_binfo *binfo) |
328 | { | 329 | { |
329 | nilfs_direct_set_ptr(direct, key, blocknr); | 330 | nilfs_direct_set_ptr(direct, key, blocknr); |
330 | 331 | ||
331 | binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key); | 332 | binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key); |
332 | binfo->bi_dat.bi_level = 0; | 333 | binfo->bi_dat.bi_level = 0; |
333 | 334 | ||
334 | return 0; | 335 | return 0; |
335 | } | 336 | } |
336 | 337 | ||
337 | static int nilfs_direct_assign(struct nilfs_bmap *bmap, | 338 | static int nilfs_direct_assign(struct nilfs_bmap *bmap, |
338 | struct buffer_head **bh, | 339 | struct buffer_head **bh, |
339 | sector_t blocknr, | 340 | sector_t blocknr, |
340 | union nilfs_binfo *binfo) | 341 | union nilfs_binfo *binfo) |
341 | { | 342 | { |
342 | struct nilfs_direct *direct; | 343 | struct nilfs_direct *direct; |
343 | __u64 key; | 344 | __u64 key; |
344 | __u64 ptr; | 345 | __u64 ptr; |
345 | 346 | ||
346 | direct = (struct nilfs_direct *)bmap; | 347 | direct = (struct nilfs_direct *)bmap; |
347 | key = nilfs_bmap_data_get_key(bmap, *bh); | 348 | key = nilfs_bmap_data_get_key(bmap, *bh); |
348 | if (unlikely(key > NILFS_DIRECT_KEY_MAX)) { | 349 | if (unlikely(key > NILFS_DIRECT_KEY_MAX)) { |
349 | printk(KERN_CRIT "%s: invalid key: %llu\n", __func__, | 350 | printk(KERN_CRIT "%s: invalid key: %llu\n", __func__, |
350 | (unsigned long long)key); | 351 | (unsigned long long)key); |
351 | return -EINVAL; | 352 | return -EINVAL; |
352 | } | 353 | } |
353 | ptr = nilfs_direct_get_ptr(direct, key); | 354 | ptr = nilfs_direct_get_ptr(direct, key); |
354 | if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) { | 355 | if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) { |
355 | printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__, | 356 | printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__, |
356 | (unsigned long long)ptr); | 357 | (unsigned long long)ptr); |
357 | return -EINVAL; | 358 | return -EINVAL; |
358 | } | 359 | } |
359 | 360 | ||
360 | return NILFS_BMAP_USE_VBN(bmap) ? | 361 | return NILFS_BMAP_USE_VBN(bmap) ? |
361 | nilfs_direct_assign_v(direct, key, ptr, bh, blocknr, binfo) : | 362 | nilfs_direct_assign_v(direct, key, ptr, bh, blocknr, binfo) : |
362 | nilfs_direct_assign_p(direct, key, ptr, bh, blocknr, binfo); | 363 | nilfs_direct_assign_p(direct, key, ptr, bh, blocknr, binfo); |
363 | } | 364 | } |
364 | 365 | ||
365 | static const struct nilfs_bmap_operations nilfs_direct_ops = { | 366 | static const struct nilfs_bmap_operations nilfs_direct_ops = { |
366 | .bop_lookup = nilfs_direct_lookup, | 367 | .bop_lookup = nilfs_direct_lookup, |
367 | .bop_lookup_contig = nilfs_direct_lookup_contig, | 368 | .bop_lookup_contig = nilfs_direct_lookup_contig, |
368 | .bop_insert = nilfs_direct_insert, | 369 | .bop_insert = nilfs_direct_insert, |
369 | .bop_delete = nilfs_direct_delete, | 370 | .bop_delete = nilfs_direct_delete, |
370 | .bop_clear = NULL, | 371 | .bop_clear = NULL, |
371 | 372 | ||
372 | .bop_propagate = nilfs_direct_propagate, | 373 | .bop_propagate = nilfs_direct_propagate, |
373 | 374 | ||
374 | .bop_lookup_dirty_buffers = NULL, | 375 | .bop_lookup_dirty_buffers = NULL, |
375 | 376 | ||
376 | .bop_assign = nilfs_direct_assign, | 377 | .bop_assign = nilfs_direct_assign, |
377 | .bop_mark = NULL, | 378 | .bop_mark = NULL, |
378 | 379 | ||
379 | .bop_last_key = nilfs_direct_last_key, | 380 | .bop_last_key = nilfs_direct_last_key, |
380 | .bop_check_insert = nilfs_direct_check_insert, | 381 | .bop_check_insert = nilfs_direct_check_insert, |
381 | .bop_check_delete = NULL, | 382 | .bop_check_delete = NULL, |
382 | .bop_gather_data = nilfs_direct_gather_data, | 383 | .bop_gather_data = nilfs_direct_gather_data, |
383 | }; | 384 | }; |
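nilfs_direct_ops is the function-pointer table that nilfs_direct_init (below) installs into bmap->b_ops; optional hooks such as bop_clear, bop_mark and bop_check_delete are simply left NULL, and callers test for that before dispatching (as the bop_clear check earlier in this diff shows). A generic sketch of this operations-table pattern, with hypothetical demo_* names:

/*
 * Generic sketch of the operations-table pattern: behaviour is selected
 * through a const table of function pointers, and optional hooks stay
 * NULL. demo_* names are hypothetical, not kernel symbols.
 */
#include <stddef.h>

struct demo_map;

struct demo_map_operations {
	int  (*op_lookup)(const struct demo_map *map, unsigned long key);
	int  (*op_delete)(struct demo_map *map, unsigned long key);
	void (*op_clear)(struct demo_map *map);    /* optional, may be NULL */
};

struct demo_map {
	const struct demo_map_operations *ops;
};

static void demo_map_clear(struct demo_map *map)
{
	if (map->ops->op_clear != NULL)            /* honour optional hooks */
		map->ops->op_clear(map);
}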
384 | 385 | ||
385 | 386 | ||
386 | int nilfs_direct_init(struct nilfs_bmap *bmap) | 387 | int nilfs_direct_init(struct nilfs_bmap *bmap) |
387 | { | 388 | { |
388 | bmap->b_ops = &nilfs_direct_ops; | 389 | bmap->b_ops = &nilfs_direct_ops; |