Commit e2bc322bf05936ec7160d62bc3fd45cbf4aa405a
1 parent: 697fa9721c
Exists in master and in 39 other branches
[JFFS2] Add erase_checking_list to hold blocks being marked.
Just to keep the debug code happy when it's adding all the blocks up. Otherwise, they disappear for a while, while the locks are dropped to check them and write the cleanmarker.

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Showing 4 changed files with 25 additions and 4 deletions
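The interesting part of the change is the locking pattern in the erase path; the erase.c hunk is one of the two changed files not shown in this excerpt. Below is a minimal sketch of the idea, not the actual hunk: the eraseblock is assumed to be parked on the new erase_checking_list while erase_completion_lock is dropped to verify the erase and write the cleanmarker, so the debug accounting walk in debug.c (further down) still finds it on some list.

/* Sketch only -- hypothetical function name, not the code from this commit.
 * It illustrates the pattern the commit message describes: while the lock is
 * dropped, the block stays on *some* superblock list instead of vanishing,
 * so __jffs2_dbg_superblock_counts() can still add it up as erasing space. */
static void jffs2_mark_erased_block_sketch(struct jffs2_sb_info *c,
                                           struct jffs2_eraseblock *jeb)
{
        spin_lock(&c->erase_completion_lock);
        /* Park the block on the new list rather than removing it outright. */
        list_move(&jeb->list, &c->erase_checking_list);
        spin_unlock(&c->erase_completion_lock);

        /* Locks dropped here: read the block back to confirm it is all 0xFF
         * and write the cleanmarker; both operations can sleep. */

        spin_lock(&c->erase_completion_lock);
        /* Checked and marked: hand the block on (the real code moves it to
         * the free list and updates the space accounting). */
        list_move(&jeb->list, &c->free_list);
        spin_unlock(&c->erase_completion_lock);
}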
fs/jffs2/build.c
1 | /* | 1 | /* |
2 | * JFFS2 -- Journalling Flash File System, Version 2. | 2 | * JFFS2 -- Journalling Flash File System, Version 2. |
3 | * | 3 | * |
4 | * Copyright © 2001-2007 Red Hat, Inc. | 4 | * Copyright © 2001-2007 Red Hat, Inc. |
5 | * | 5 | * |
6 | * Created by David Woodhouse <dwmw2@infradead.org> | 6 | * Created by David Woodhouse <dwmw2@infradead.org> |
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/vmalloc.h> | 15 | #include <linux/vmalloc.h> |
16 | #include <linux/mtd/mtd.h> | 16 | #include <linux/mtd/mtd.h> |
17 | #include "nodelist.h" | 17 | #include "nodelist.h" |
18 | 18 | ||
19 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, | 19 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, |
20 | struct jffs2_inode_cache *, struct jffs2_full_dirent **); | 20 | struct jffs2_inode_cache *, struct jffs2_full_dirent **); |
21 | 21 | ||
22 | static inline struct jffs2_inode_cache * | 22 | static inline struct jffs2_inode_cache * |
23 | first_inode_chain(int *i, struct jffs2_sb_info *c) | 23 | first_inode_chain(int *i, struct jffs2_sb_info *c) |
24 | { | 24 | { |
25 | for (; *i < INOCACHE_HASHSIZE; (*i)++) { | 25 | for (; *i < INOCACHE_HASHSIZE; (*i)++) { |
26 | if (c->inocache_list[*i]) | 26 | if (c->inocache_list[*i]) |
27 | return c->inocache_list[*i]; | 27 | return c->inocache_list[*i]; |
28 | } | 28 | } |
29 | return NULL; | 29 | return NULL; |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline struct jffs2_inode_cache * | 32 | static inline struct jffs2_inode_cache * |
33 | next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) | 33 | next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) |
34 | { | 34 | { |
35 | /* More in this chain? */ | 35 | /* More in this chain? */ |
36 | if (ic->next) | 36 | if (ic->next) |
37 | return ic->next; | 37 | return ic->next; |
38 | (*i)++; | 38 | (*i)++; |
39 | return first_inode_chain(i, c); | 39 | return first_inode_chain(i, c); |
40 | } | 40 | } |
41 | 41 | ||
42 | #define for_each_inode(i, c, ic) \ | 42 | #define for_each_inode(i, c, ic) \ |
43 | for (i = 0, ic = first_inode_chain(&i, (c)); \ | 43 | for (i = 0, ic = first_inode_chain(&i, (c)); \ |
44 | ic; \ | 44 | ic; \ |
45 | ic = next_inode(&i, ic, (c))) | 45 | ic = next_inode(&i, ic, (c))) |
46 | 46 | ||
47 | 47 | ||
48 | static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, | 48 | static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, |
49 | struct jffs2_inode_cache *ic) | 49 | struct jffs2_inode_cache *ic) |
50 | { | 50 | { |
51 | struct jffs2_full_dirent *fd; | 51 | struct jffs2_full_dirent *fd; |
52 | 52 | ||
53 | dbg_fsbuild("building directory inode #%u\n", ic->ino); | 53 | dbg_fsbuild("building directory inode #%u\n", ic->ino); |
54 | 54 | ||
55 | /* For each child, increase nlink */ | 55 | /* For each child, increase nlink */ |
56 | for(fd = ic->scan_dents; fd; fd = fd->next) { | 56 | for(fd = ic->scan_dents; fd; fd = fd->next) { |
57 | struct jffs2_inode_cache *child_ic; | 57 | struct jffs2_inode_cache *child_ic; |
58 | if (!fd->ino) | 58 | if (!fd->ino) |
59 | continue; | 59 | continue; |
60 | 60 | ||
61 | /* we can get high latency here with huge directories */ | 61 | /* we can get high latency here with huge directories */ |
62 | 62 | ||
63 | child_ic = jffs2_get_ino_cache(c, fd->ino); | 63 | child_ic = jffs2_get_ino_cache(c, fd->ino); |
64 | if (!child_ic) { | 64 | if (!child_ic) { |
65 | dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", | 65 | dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", |
66 | fd->name, fd->ino, ic->ino); | 66 | fd->name, fd->ino, ic->ino); |
67 | jffs2_mark_node_obsolete(c, fd->raw); | 67 | jffs2_mark_node_obsolete(c, fd->raw); |
68 | continue; | 68 | continue; |
69 | } | 69 | } |
70 | 70 | ||
71 | if (child_ic->nlink++ && fd->type == DT_DIR) { | 71 | if (child_ic->nlink++ && fd->type == DT_DIR) { |
72 | JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", | 72 | JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", |
73 | fd->name, fd->ino, ic->ino); | 73 | fd->name, fd->ino, ic->ino); |
74 | /* TODO: What do we do about it? */ | 74 | /* TODO: What do we do about it? */ |
75 | } | 75 | } |
76 | dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); | 76 | dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); |
77 | /* Can't free scan_dents so far. We might need them in pass 2 */ | 77 | /* Can't free scan_dents so far. We might need them in pass 2 */ |
78 | } | 78 | } |
79 | } | 79 | } |
80 | 80 | ||
81 | /* Scan plan: | 81 | /* Scan plan: |
82 | - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go | 82 | - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go |
83 | - Scan directory tree from top down, setting nlink in inocaches | 83 | - Scan directory tree from top down, setting nlink in inocaches |
84 | - Scan inocaches for inodes with nlink==0 | 84 | - Scan inocaches for inodes with nlink==0 |
85 | */ | 85 | */ |
86 | static int jffs2_build_filesystem(struct jffs2_sb_info *c) | 86 | static int jffs2_build_filesystem(struct jffs2_sb_info *c) |
87 | { | 87 | { |
88 | int ret; | 88 | int ret; |
89 | int i; | 89 | int i; |
90 | struct jffs2_inode_cache *ic; | 90 | struct jffs2_inode_cache *ic; |
91 | struct jffs2_full_dirent *fd; | 91 | struct jffs2_full_dirent *fd; |
92 | struct jffs2_full_dirent *dead_fds = NULL; | 92 | struct jffs2_full_dirent *dead_fds = NULL; |
93 | 93 | ||
94 | dbg_fsbuild("build FS data structures\n"); | 94 | dbg_fsbuild("build FS data structures\n"); |
95 | 95 | ||
96 | /* First, scan the medium and build all the inode caches with | 96 | /* First, scan the medium and build all the inode caches with |
97 | lists of physical nodes */ | 97 | lists of physical nodes */ |
98 | 98 | ||
99 | c->flags |= JFFS2_SB_FLAG_SCANNING; | 99 | c->flags |= JFFS2_SB_FLAG_SCANNING; |
100 | ret = jffs2_scan_medium(c); | 100 | ret = jffs2_scan_medium(c); |
101 | c->flags &= ~JFFS2_SB_FLAG_SCANNING; | 101 | c->flags &= ~JFFS2_SB_FLAG_SCANNING; |
102 | if (ret) | 102 | if (ret) |
103 | goto exit; | 103 | goto exit; |
104 | 104 | ||
105 | dbg_fsbuild("scanned flash completely\n"); | 105 | dbg_fsbuild("scanned flash completely\n"); |
106 | jffs2_dbg_dump_block_lists_nolock(c); | 106 | jffs2_dbg_dump_block_lists_nolock(c); |
107 | 107 | ||
108 | dbg_fsbuild("pass 1 starting\n"); | 108 | dbg_fsbuild("pass 1 starting\n"); |
109 | c->flags |= JFFS2_SB_FLAG_BUILDING; | 109 | c->flags |= JFFS2_SB_FLAG_BUILDING; |
110 | /* Now scan the directory tree, increasing nlink according to every dirent found. */ | 110 | /* Now scan the directory tree, increasing nlink according to every dirent found. */ |
111 | for_each_inode(i, c, ic) { | 111 | for_each_inode(i, c, ic) { |
112 | if (ic->scan_dents) { | 112 | if (ic->scan_dents) { |
113 | jffs2_build_inode_pass1(c, ic); | 113 | jffs2_build_inode_pass1(c, ic); |
114 | cond_resched(); | 114 | cond_resched(); |
115 | } | 115 | } |
116 | } | 116 | } |
117 | 117 | ||
118 | dbg_fsbuild("pass 1 complete\n"); | 118 | dbg_fsbuild("pass 1 complete\n"); |
119 | 119 | ||
120 | /* Next, scan for inodes with nlink == 0 and remove them. If | 120 | /* Next, scan for inodes with nlink == 0 and remove them. If |
121 | they were directories, then decrement the nlink of their | 121 | they were directories, then decrement the nlink of their |
122 | children too, and repeat the scan. As that's going to be | 122 | children too, and repeat the scan. As that's going to be |
123 | a fairly uncommon occurrence, it's not so evil to do it this | 123 | a fairly uncommon occurrence, it's not so evil to do it this |
124 | way. Recursion bad. */ | 124 | way. Recursion bad. */ |
125 | dbg_fsbuild("pass 2 starting\n"); | 125 | dbg_fsbuild("pass 2 starting\n"); |
126 | 126 | ||
127 | for_each_inode(i, c, ic) { | 127 | for_each_inode(i, c, ic) { |
128 | if (ic->nlink) | 128 | if (ic->nlink) |
129 | continue; | 129 | continue; |
130 | 130 | ||
131 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); | 131 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); |
132 | cond_resched(); | 132 | cond_resched(); |
133 | } | 133 | } |
134 | 134 | ||
135 | dbg_fsbuild("pass 2a starting\n"); | 135 | dbg_fsbuild("pass 2a starting\n"); |
136 | 136 | ||
137 | while (dead_fds) { | 137 | while (dead_fds) { |
138 | fd = dead_fds; | 138 | fd = dead_fds; |
139 | dead_fds = fd->next; | 139 | dead_fds = fd->next; |
140 | 140 | ||
141 | ic = jffs2_get_ino_cache(c, fd->ino); | 141 | ic = jffs2_get_ino_cache(c, fd->ino); |
142 | 142 | ||
143 | if (ic) | 143 | if (ic) |
144 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); | 144 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); |
145 | jffs2_free_full_dirent(fd); | 145 | jffs2_free_full_dirent(fd); |
146 | } | 146 | } |
147 | 147 | ||
148 | dbg_fsbuild("pass 2a complete\n"); | 148 | dbg_fsbuild("pass 2a complete\n"); |
149 | dbg_fsbuild("freeing temporary data structures\n"); | 149 | dbg_fsbuild("freeing temporary data structures\n"); |
150 | 150 | ||
151 | /* Finally, we can scan again and free the dirent structs */ | 151 | /* Finally, we can scan again and free the dirent structs */ |
152 | for_each_inode(i, c, ic) { | 152 | for_each_inode(i, c, ic) { |
153 | while(ic->scan_dents) { | 153 | while(ic->scan_dents) { |
154 | fd = ic->scan_dents; | 154 | fd = ic->scan_dents; |
155 | ic->scan_dents = fd->next; | 155 | ic->scan_dents = fd->next; |
156 | jffs2_free_full_dirent(fd); | 156 | jffs2_free_full_dirent(fd); |
157 | } | 157 | } |
158 | ic->scan_dents = NULL; | 158 | ic->scan_dents = NULL; |
159 | cond_resched(); | 159 | cond_resched(); |
160 | } | 160 | } |
161 | jffs2_build_xattr_subsystem(c); | 161 | jffs2_build_xattr_subsystem(c); |
162 | c->flags &= ~JFFS2_SB_FLAG_BUILDING; | 162 | c->flags &= ~JFFS2_SB_FLAG_BUILDING; |
163 | 163 | ||
164 | dbg_fsbuild("FS build complete\n"); | 164 | dbg_fsbuild("FS build complete\n"); |
165 | 165 | ||
166 | /* Rotate the lists by some number to ensure wear levelling */ | 166 | /* Rotate the lists by some number to ensure wear levelling */ |
167 | jffs2_rotate_lists(c); | 167 | jffs2_rotate_lists(c); |
168 | 168 | ||
169 | ret = 0; | 169 | ret = 0; |
170 | 170 | ||
171 | exit: | 171 | exit: |
172 | if (ret) { | 172 | if (ret) { |
173 | for_each_inode(i, c, ic) { | 173 | for_each_inode(i, c, ic) { |
174 | while(ic->scan_dents) { | 174 | while(ic->scan_dents) { |
175 | fd = ic->scan_dents; | 175 | fd = ic->scan_dents; |
176 | ic->scan_dents = fd->next; | 176 | ic->scan_dents = fd->next; |
177 | jffs2_free_full_dirent(fd); | 177 | jffs2_free_full_dirent(fd); |
178 | } | 178 | } |
179 | } | 179 | } |
180 | jffs2_clear_xattr_subsystem(c); | 180 | jffs2_clear_xattr_subsystem(c); |
181 | } | 181 | } |
182 | 182 | ||
183 | return ret; | 183 | return ret; |
184 | } | 184 | } |
185 | 185 | ||
186 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, | 186 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, |
187 | struct jffs2_inode_cache *ic, | 187 | struct jffs2_inode_cache *ic, |
188 | struct jffs2_full_dirent **dead_fds) | 188 | struct jffs2_full_dirent **dead_fds) |
189 | { | 189 | { |
190 | struct jffs2_raw_node_ref *raw; | 190 | struct jffs2_raw_node_ref *raw; |
191 | struct jffs2_full_dirent *fd; | 191 | struct jffs2_full_dirent *fd; |
192 | 192 | ||
193 | dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); | 193 | dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); |
194 | 194 | ||
195 | raw = ic->nodes; | 195 | raw = ic->nodes; |
196 | while (raw != (void *)ic) { | 196 | while (raw != (void *)ic) { |
197 | struct jffs2_raw_node_ref *next = raw->next_in_ino; | 197 | struct jffs2_raw_node_ref *next = raw->next_in_ino; |
198 | dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw)); | 198 | dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw)); |
199 | jffs2_mark_node_obsolete(c, raw); | 199 | jffs2_mark_node_obsolete(c, raw); |
200 | raw = next; | 200 | raw = next; |
201 | } | 201 | } |
202 | 202 | ||
203 | if (ic->scan_dents) { | 203 | if (ic->scan_dents) { |
204 | int whinged = 0; | 204 | int whinged = 0; |
205 | dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); | 205 | dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); |
206 | 206 | ||
207 | while(ic->scan_dents) { | 207 | while(ic->scan_dents) { |
208 | struct jffs2_inode_cache *child_ic; | 208 | struct jffs2_inode_cache *child_ic; |
209 | 209 | ||
210 | fd = ic->scan_dents; | 210 | fd = ic->scan_dents; |
211 | ic->scan_dents = fd->next; | 211 | ic->scan_dents = fd->next; |
212 | 212 | ||
213 | if (!fd->ino) { | 213 | if (!fd->ino) { |
214 | /* It's a deletion dirent. Ignore it */ | 214 | /* It's a deletion dirent. Ignore it */ |
215 | dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name); | 215 | dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name); |
216 | jffs2_free_full_dirent(fd); | 216 | jffs2_free_full_dirent(fd); |
217 | continue; | 217 | continue; |
218 | } | 218 | } |
219 | if (!whinged) | 219 | if (!whinged) |
220 | whinged = 1; | 220 | whinged = 1; |
221 | 221 | ||
222 | dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino); | 222 | dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino); |
223 | 223 | ||
224 | child_ic = jffs2_get_ino_cache(c, fd->ino); | 224 | child_ic = jffs2_get_ino_cache(c, fd->ino); |
225 | if (!child_ic) { | 225 | if (!child_ic) { |
226 | dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n", | 226 | dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n", |
227 | fd->name, fd->ino); | 227 | fd->name, fd->ino); |
228 | jffs2_free_full_dirent(fd); | 228 | jffs2_free_full_dirent(fd); |
229 | continue; | 229 | continue; |
230 | } | 230 | } |
231 | 231 | ||
232 | /* Reduce nlink of the child. If it's now zero, stick it on the | 232 | /* Reduce nlink of the child. If it's now zero, stick it on the |
233 | dead_fds list to be cleaned up later. Else just free the fd */ | 233 | dead_fds list to be cleaned up later. Else just free the fd */ |
234 | 234 | ||
235 | child_ic->nlink--; | 235 | child_ic->nlink--; |
236 | 236 | ||
237 | if (!child_ic->nlink) { | 237 | if (!child_ic->nlink) { |
238 | dbg_fsbuild("inode #%u (\"%s\") has now got zero nlink, adding to dead_fds list.\n", | 238 | dbg_fsbuild("inode #%u (\"%s\") has now got zero nlink, adding to dead_fds list.\n", |
239 | fd->ino, fd->name); | 239 | fd->ino, fd->name); |
240 | fd->next = *dead_fds; | 240 | fd->next = *dead_fds; |
241 | *dead_fds = fd; | 241 | *dead_fds = fd; |
242 | } else { | 242 | } else { |
243 | dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", | 243 | dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", |
244 | fd->ino, fd->name, child_ic->nlink); | 244 | fd->ino, fd->name, child_ic->nlink); |
245 | jffs2_free_full_dirent(fd); | 245 | jffs2_free_full_dirent(fd); |
246 | } | 246 | } |
247 | } | 247 | } |
248 | } | 248 | } |
249 | 249 | ||
250 | /* | 250 | /* |
251 | We don't delete the inocache from the hash list and free it yet. | 251 | We don't delete the inocache from the hash list and free it yet. |
252 | The erase code will do that, when all the nodes are completely gone. | 252 | The erase code will do that, when all the nodes are completely gone. |
253 | */ | 253 | */ |
254 | } | 254 | } |
255 | 255 | ||
256 | static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) | 256 | static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) |
257 | { | 257 | { |
258 | uint32_t size; | 258 | uint32_t size; |
259 | 259 | ||
260 | /* Deletion should almost _always_ be allowed. We're fairly | 260 | /* Deletion should almost _always_ be allowed. We're fairly |
261 | buggered once we stop allowing people to delete stuff | 261 | buggered once we stop allowing people to delete stuff |
262 | because there's not enough free space... */ | 262 | because there's not enough free space... */ |
263 | c->resv_blocks_deletion = 2; | 263 | c->resv_blocks_deletion = 2; |
264 | 264 | ||
265 | /* Be conservative about how much space we need before we allow writes. | 265 | /* Be conservative about how much space we need before we allow writes. |
266 | On top of that which is required for deletia, require an extra 2% | 266 | On top of that which is required for deletia, require an extra 2% |
267 | of the medium to be available, for overhead caused by nodes being | 267 | of the medium to be available, for overhead caused by nodes being |
268 | split across blocks, etc. */ | 268 | split across blocks, etc. */ |
269 | 269 | ||
270 | size = c->flash_size / 50; /* 2% of flash size */ | 270 | size = c->flash_size / 50; /* 2% of flash size */ |
271 | size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ | 271 | size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ |
272 | size += c->sector_size - 1; /* ... and round up */ | 272 | size += c->sector_size - 1; /* ... and round up */ |
273 | 273 | ||
274 | c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size); | 274 | c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size); |
275 | 275 | ||
276 | /* When do we let the GC thread run in the background */ | 276 | /* When do we let the GC thread run in the background */ |
277 | 277 | ||
278 | c->resv_blocks_gctrigger = c->resv_blocks_write + 1; | 278 | c->resv_blocks_gctrigger = c->resv_blocks_write + 1; |
279 | 279 | ||
280 | /* When do we allow garbage collection to merge nodes to make | 280 | /* When do we allow garbage collection to merge nodes to make |
281 | long-term progress at the expense of short-term space exhaustion? */ | 281 | long-term progress at the expense of short-term space exhaustion? */ |
282 | c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; | 282 | c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; |
283 | 283 | ||
284 | /* When do we allow garbage collection to eat from bad blocks rather | 284 | /* When do we allow garbage collection to eat from bad blocks rather |
285 | than actually making progress? */ | 285 | than actually making progress? */ |
286 | c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2; | 286 | c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2; |
287 | 287 | ||
288 | /* What number of 'very dirty' eraseblocks do we allow before we | 288 | /* What number of 'very dirty' eraseblocks do we allow before we |
289 | trigger the GC thread even if we don't _need_ the space. When we | 289 | trigger the GC thread even if we don't _need_ the space. When we |
290 | can't mark nodes obsolete on the medium, the old dirty nodes cause | 290 | can't mark nodes obsolete on the medium, the old dirty nodes cause |
291 | performance problems because we have to inspect and discard them. */ | 291 | performance problems because we have to inspect and discard them. */ |
292 | c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger; | 292 | c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger; |
293 | if (jffs2_can_mark_obsolete(c)) | 293 | if (jffs2_can_mark_obsolete(c)) |
294 | c->vdirty_blocks_gctrigger *= 10; | 294 | c->vdirty_blocks_gctrigger *= 10; |
295 | 295 | ||
296 | /* If there's less than this amount of dirty space, don't bother | 296 | /* If there's less than this amount of dirty space, don't bother |
297 | trying to GC to make more space. It'll be a fruitless task */ | 297 | trying to GC to make more space. It'll be a fruitless task */ |
298 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); | 298 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); |
299 | 299 | ||
300 | dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", | 300 | dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", |
301 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); | 301 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); |
302 | dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", | 302 | dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", |
303 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); | 303 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); |
304 | dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", | 304 | dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", |
305 | c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); | 305 | c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); |
306 | dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", | 306 | dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", |
307 | c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); | 307 | c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); |
308 | dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", | 308 | dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", |
309 | c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); | 309 | c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); |
310 | dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", | 310 | dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", |
311 | c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); | 311 | c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); |
312 | dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", | 312 | dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", |
313 | c->nospc_dirty_size); | 313 | c->nospc_dirty_size); |
314 | dbg_fsbuild("Very dirty blocks before GC triggered: %d\n", | 314 | dbg_fsbuild("Very dirty blocks before GC triggered: %d\n", |
315 | c->vdirty_blocks_gctrigger); | 315 | c->vdirty_blocks_gctrigger); |
316 | } | 316 | } |
317 | 317 | ||
318 | int jffs2_do_mount_fs(struct jffs2_sb_info *c) | 318 | int jffs2_do_mount_fs(struct jffs2_sb_info *c) |
319 | { | 319 | { |
320 | int ret; | 320 | int ret; |
321 | int i; | 321 | int i; |
322 | int size; | 322 | int size; |
323 | 323 | ||
324 | c->free_size = c->flash_size; | 324 | c->free_size = c->flash_size; |
325 | c->nr_blocks = c->flash_size / c->sector_size; | 325 | c->nr_blocks = c->flash_size / c->sector_size; |
326 | size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; | 326 | size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; |
327 | #ifndef __ECOS | 327 | #ifndef __ECOS |
328 | if (jffs2_blocks_use_vmalloc(c)) | 328 | if (jffs2_blocks_use_vmalloc(c)) |
329 | c->blocks = vmalloc(size); | 329 | c->blocks = vmalloc(size); |
330 | else | 330 | else |
331 | #endif | 331 | #endif |
332 | c->blocks = kmalloc(size, GFP_KERNEL); | 332 | c->blocks = kmalloc(size, GFP_KERNEL); |
333 | if (!c->blocks) | 333 | if (!c->blocks) |
334 | return -ENOMEM; | 334 | return -ENOMEM; |
335 | 335 | ||
336 | memset(c->blocks, 0, size); | 336 | memset(c->blocks, 0, size); |
337 | for (i=0; i<c->nr_blocks; i++) { | 337 | for (i=0; i<c->nr_blocks; i++) { |
338 | INIT_LIST_HEAD(&c->blocks[i].list); | 338 | INIT_LIST_HEAD(&c->blocks[i].list); |
339 | c->blocks[i].offset = i * c->sector_size; | 339 | c->blocks[i].offset = i * c->sector_size; |
340 | c->blocks[i].free_size = c->sector_size; | 340 | c->blocks[i].free_size = c->sector_size; |
341 | } | 341 | } |
342 | 342 | ||
343 | INIT_LIST_HEAD(&c->clean_list); | 343 | INIT_LIST_HEAD(&c->clean_list); |
344 | INIT_LIST_HEAD(&c->very_dirty_list); | 344 | INIT_LIST_HEAD(&c->very_dirty_list); |
345 | INIT_LIST_HEAD(&c->dirty_list); | 345 | INIT_LIST_HEAD(&c->dirty_list); |
346 | INIT_LIST_HEAD(&c->erasable_list); | 346 | INIT_LIST_HEAD(&c->erasable_list); |
347 | INIT_LIST_HEAD(&c->erasing_list); | 347 | INIT_LIST_HEAD(&c->erasing_list); |
348 | INIT_LIST_HEAD(&c->erase_checking_list); | ||
348 | INIT_LIST_HEAD(&c->erase_pending_list); | 349 | INIT_LIST_HEAD(&c->erase_pending_list); |
349 | INIT_LIST_HEAD(&c->erasable_pending_wbuf_list); | 350 | INIT_LIST_HEAD(&c->erasable_pending_wbuf_list); |
350 | INIT_LIST_HEAD(&c->erase_complete_list); | 351 | INIT_LIST_HEAD(&c->erase_complete_list); |
351 | INIT_LIST_HEAD(&c->free_list); | 352 | INIT_LIST_HEAD(&c->free_list); |
352 | INIT_LIST_HEAD(&c->bad_list); | 353 | INIT_LIST_HEAD(&c->bad_list); |
353 | INIT_LIST_HEAD(&c->bad_used_list); | 354 | INIT_LIST_HEAD(&c->bad_used_list); |
354 | c->highest_ino = 1; | 355 | c->highest_ino = 1; |
355 | c->summary = NULL; | 356 | c->summary = NULL; |
356 | 357 | ||
357 | ret = jffs2_sum_init(c); | 358 | ret = jffs2_sum_init(c); |
358 | if (ret) | 359 | if (ret) |
359 | goto out_free; | 360 | goto out_free; |
360 | 361 | ||
361 | if (jffs2_build_filesystem(c)) { | 362 | if (jffs2_build_filesystem(c)) { |
362 | dbg_fsbuild("build_fs failed\n"); | 363 | dbg_fsbuild("build_fs failed\n"); |
363 | jffs2_free_ino_caches(c); | 364 | jffs2_free_ino_caches(c); |
364 | jffs2_free_raw_node_refs(c); | 365 | jffs2_free_raw_node_refs(c); |
365 | ret = -EIO; | 366 | ret = -EIO; |
366 | goto out_free; | 367 | goto out_free; |
367 | } | 368 | } |
368 | 369 | ||
369 | jffs2_calc_trigger_levels(c); | 370 | jffs2_calc_trigger_levels(c); |
370 | 371 | ||
371 | return 0; | 372 | return 0; |
372 | 373 | ||
373 | out_free: | 374 | out_free: |
374 | #ifndef __ECOS | 375 | #ifndef __ECOS |
375 | if (jffs2_blocks_use_vmalloc(c)) | 376 | if (jffs2_blocks_use_vmalloc(c)) |
376 | vfree(c->blocks); | 377 | vfree(c->blocks); |
377 | else | 378 | else |
378 | #endif | 379 | #endif |
379 | kfree(c->blocks); | 380 | kfree(c->blocks); |
380 | 381 | ||
381 | return ret; | 382 | return ret; |
382 | } | 383 | } |
383 | 384 |
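The new list head itself is declared alongside the other erase-state lists in struct jffs2_sb_info, in one of the two changed files not shown in this excerpt (presumably the superblock-info header). A rough sketch of that declaration, with a hypothetical struct name so it stands alone:

#include <linux/list.h>

/* Hypothetical excerpt -- the real field lives in struct jffs2_sb_info. */
struct jffs2_erase_lists_sketch {
        struct list_head erasable_list;        /* Blocks which are completely dirty and can be erased */
        struct list_head erasing_list;         /* Blocks which are currently erasing */
        struct list_head erase_checking_list;  /* Blocks being checked and marked (new in this commit) */
        struct list_head erase_pending_list;   /* Blocks which need erasing */
        struct list_head erase_complete_list;  /* Blocks which have been erased */
        struct list_head free_list;            /* Blocks which are free and ready to be used */
};

The debug.c hunk below counts blocks on this list as erasing space, which keeps the sum of the per-list totals equal to flash_size even while a block is being checked.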
fs/jffs2/debug.c
1 | /* | 1 | /* |
2 | * JFFS2 -- Journalling Flash File System, Version 2. | 2 | * JFFS2 -- Journalling Flash File System, Version 2. |
3 | * | 3 | * |
4 | * Copyright © 2001-2007 Red Hat, Inc. | 4 | * Copyright © 2001-2007 Red Hat, Inc. |
5 | * | 5 | * |
6 | * Created by David Woodhouse <dwmw2@infradead.org> | 6 | * Created by David Woodhouse <dwmw2@infradead.org> |
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
15 | #include <linux/crc32.h> | 15 | #include <linux/crc32.h> |
16 | #include <linux/jffs2.h> | 16 | #include <linux/jffs2.h> |
17 | #include <linux/mtd/mtd.h> | 17 | #include <linux/mtd/mtd.h> |
18 | #include "nodelist.h" | 18 | #include "nodelist.h" |
19 | #include "debug.h" | 19 | #include "debug.h" |
20 | 20 | ||
21 | #ifdef JFFS2_DBG_SANITY_CHECKS | 21 | #ifdef JFFS2_DBG_SANITY_CHECKS |
22 | 22 | ||
23 | void | 23 | void |
24 | __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, | 24 | __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, |
25 | struct jffs2_eraseblock *jeb) | 25 | struct jffs2_eraseblock *jeb) |
26 | { | 26 | { |
27 | if (unlikely(jeb && jeb->used_size + jeb->dirty_size + | 27 | if (unlikely(jeb && jeb->used_size + jeb->dirty_size + |
28 | jeb->free_size + jeb->wasted_size + | 28 | jeb->free_size + jeb->wasted_size + |
29 | jeb->unchecked_size != c->sector_size)) { | 29 | jeb->unchecked_size != c->sector_size)) { |
30 | JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset); | 30 | JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset); |
31 | JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", | 31 | JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", |
32 | jeb->free_size, jeb->dirty_size, jeb->used_size, | 32 | jeb->free_size, jeb->dirty_size, jeb->used_size, |
33 | jeb->wasted_size, jeb->unchecked_size, c->sector_size); | 33 | jeb->wasted_size, jeb->unchecked_size, c->sector_size); |
34 | BUG(); | 34 | BUG(); |
35 | } | 35 | } |
36 | 36 | ||
37 | if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size | 37 | if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size |
38 | + c->wasted_size + c->unchecked_size != c->flash_size)) { | 38 | + c->wasted_size + c->unchecked_size != c->flash_size)) { |
39 | JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n"); | 39 | JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n"); |
40 | JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", | 40 | JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", |
41 | c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, | 41 | c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, |
42 | c->wasted_size, c->unchecked_size, c->flash_size); | 42 | c->wasted_size, c->unchecked_size, c->flash_size); |
43 | BUG(); | 43 | BUG(); |
44 | } | 44 | } |
45 | } | 45 | } |
46 | 46 | ||
47 | void | 47 | void |
48 | __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, | 48 | __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, |
49 | struct jffs2_eraseblock *jeb) | 49 | struct jffs2_eraseblock *jeb) |
50 | { | 50 | { |
51 | spin_lock(&c->erase_completion_lock); | 51 | spin_lock(&c->erase_completion_lock); |
52 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); | 52 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); |
53 | spin_unlock(&c->erase_completion_lock); | 53 | spin_unlock(&c->erase_completion_lock); |
54 | } | 54 | } |
55 | 55 | ||
56 | #endif /* JFFS2_DBG_SANITY_CHECKS */ | 56 | #endif /* JFFS2_DBG_SANITY_CHECKS */ |
57 | 57 | ||
58 | #ifdef JFFS2_DBG_PARANOIA_CHECKS | 58 | #ifdef JFFS2_DBG_PARANOIA_CHECKS |
59 | /* | 59 | /* |
60 | * Check the fragtree. | 60 | * Check the fragtree. |
61 | */ | 61 | */ |
62 | void | 62 | void |
63 | __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f) | 63 | __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f) |
64 | { | 64 | { |
65 | mutex_lock(&f->sem); | 65 | mutex_lock(&f->sem); |
66 | __jffs2_dbg_fragtree_paranoia_check_nolock(f); | 66 | __jffs2_dbg_fragtree_paranoia_check_nolock(f); |
67 | mutex_unlock(&f->sem); | 67 | mutex_unlock(&f->sem); |
68 | } | 68 | } |
69 | 69 | ||
70 | void | 70 | void |
71 | __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) | 71 | __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) |
72 | { | 72 | { |
73 | struct jffs2_node_frag *frag; | 73 | struct jffs2_node_frag *frag; |
74 | int bitched = 0; | 74 | int bitched = 0; |
75 | 75 | ||
76 | for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { | 76 | for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { |
77 | struct jffs2_full_dnode *fn = frag->node; | 77 | struct jffs2_full_dnode *fn = frag->node; |
78 | 78 | ||
79 | if (!fn || !fn->raw) | 79 | if (!fn || !fn->raw) |
80 | continue; | 80 | continue; |
81 | 81 | ||
82 | if (ref_flags(fn->raw) == REF_PRISTINE) { | 82 | if (ref_flags(fn->raw) == REF_PRISTINE) { |
83 | if (fn->frags > 1) { | 83 | if (fn->frags > 1) { |
84 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n", | 84 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n", |
85 | ref_offset(fn->raw), fn->frags); | 85 | ref_offset(fn->raw), fn->frags); |
86 | bitched = 1; | 86 | bitched = 1; |
87 | } | 87 | } |
88 | 88 | ||
89 | /* A hole node which isn't multi-page should be garbage-collected | 89 | /* A hole node which isn't multi-page should be garbage-collected |
90 | and merged anyway, so we just check for the frag size here, | 90 | and merged anyway, so we just check for the frag size here, |
91 | rather than mucking around with actually reading the node | 91 | rather than mucking around with actually reading the node |
92 | and checking the compression type, which is the real way | 92 | and checking the compression type, which is the real way |
93 | to tell a hole node. */ | 93 | to tell a hole node. */ |
94 | if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) | 94 | if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) |
95 | && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { | 95 | && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { |
96 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", | 96 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", |
97 | ref_offset(fn->raw)); | 97 | ref_offset(fn->raw)); |
98 | bitched = 1; | 98 | bitched = 1; |
99 | } | 99 | } |
100 | 100 | ||
101 | if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) | 101 | if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) |
102 | && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { | 102 | && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { |
103 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", | 103 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", |
104 | ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); | 104 | ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); |
105 | bitched = 1; | 105 | bitched = 1; |
106 | } | 106 | } |
107 | } | 107 | } |
108 | } | 108 | } |
109 | 109 | ||
110 | if (bitched) { | 110 | if (bitched) { |
111 | JFFS2_ERROR("fragtree is corrupted.\n"); | 111 | JFFS2_ERROR("fragtree is corrupted.\n"); |
112 | __jffs2_dbg_dump_fragtree_nolock(f); | 112 | __jffs2_dbg_dump_fragtree_nolock(f); |
113 | BUG(); | 113 | BUG(); |
114 | } | 114 | } |
115 | } | 115 | } |
116 | 116 | ||
117 | /* | 117 | /* |
118 | * Check if the flash contains all 0xFF before we start writing. | 118 | * Check if the flash contains all 0xFF before we start writing. |
119 | */ | 119 | */ |
120 | void | 120 | void |
121 | __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, | 121 | __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, |
122 | uint32_t ofs, int len) | 122 | uint32_t ofs, int len) |
123 | { | 123 | { |
124 | size_t retlen; | 124 | size_t retlen; |
125 | int ret, i; | 125 | int ret, i; |
126 | unsigned char *buf; | 126 | unsigned char *buf; |
127 | 127 | ||
128 | buf = kmalloc(len, GFP_KERNEL); | 128 | buf = kmalloc(len, GFP_KERNEL); |
129 | if (!buf) | 129 | if (!buf) |
130 | return; | 130 | return; |
131 | 131 | ||
132 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); | 132 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); |
133 | if (ret || (retlen != len)) { | 133 | if (ret || (retlen != len)) { |
134 | JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n", | 134 | JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n", |
135 | len, ret, retlen); | 135 | len, ret, retlen); |
136 | kfree(buf); | 136 | kfree(buf); |
137 | return; | 137 | return; |
138 | } | 138 | } |
139 | 139 | ||
140 | ret = 0; | 140 | ret = 0; |
141 | for (i = 0; i < len; i++) | 141 | for (i = 0; i < len; i++) |
142 | if (buf[i] != 0xff) | 142 | if (buf[i] != 0xff) |
143 | ret = 1; | 143 | ret = 1; |
144 | 144 | ||
145 | if (ret) { | 145 | if (ret) { |
146 | JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n", | 146 | JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n", |
147 | ofs, ofs + i); | 147 | ofs, ofs + i); |
148 | __jffs2_dbg_dump_buffer(buf, len, ofs); | 148 | __jffs2_dbg_dump_buffer(buf, len, ofs); |
149 | kfree(buf); | 149 | kfree(buf); |
150 | BUG(); | 150 | BUG(); |
151 | } | 151 | } |
152 | 152 | ||
153 | kfree(buf); | 153 | kfree(buf); |
154 | } | 154 | } |
155 | 155 | ||
156 | void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c) | 156 | void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c) |
157 | { | 157 | { |
158 | struct jffs2_eraseblock *jeb; | 158 | struct jffs2_eraseblock *jeb; |
159 | uint32_t free = 0, dirty = 0, used = 0, wasted = 0, | 159 | uint32_t free = 0, dirty = 0, used = 0, wasted = 0, |
160 | erasing = 0, bad = 0, unchecked = 0; | 160 | erasing = 0, bad = 0, unchecked = 0; |
161 | int nr_counted = 0; | 161 | int nr_counted = 0; |
162 | int dump = 0; | 162 | int dump = 0; |
163 | 163 | ||
164 | if (c->gcblock) { | 164 | if (c->gcblock) { |
165 | nr_counted++; | 165 | nr_counted++; |
166 | free += c->gcblock->free_size; | 166 | free += c->gcblock->free_size; |
167 | dirty += c->gcblock->dirty_size; | 167 | dirty += c->gcblock->dirty_size; |
168 | used += c->gcblock->used_size; | 168 | used += c->gcblock->used_size; |
169 | wasted += c->gcblock->wasted_size; | 169 | wasted += c->gcblock->wasted_size; |
170 | unchecked += c->gcblock->unchecked_size; | 170 | unchecked += c->gcblock->unchecked_size; |
171 | } | 171 | } |
172 | if (c->nextblock) { | 172 | if (c->nextblock) { |
173 | nr_counted++; | 173 | nr_counted++; |
174 | free += c->nextblock->free_size; | 174 | free += c->nextblock->free_size; |
175 | dirty += c->nextblock->dirty_size; | 175 | dirty += c->nextblock->dirty_size; |
176 | used += c->nextblock->used_size; | 176 | used += c->nextblock->used_size; |
177 | wasted += c->nextblock->wasted_size; | 177 | wasted += c->nextblock->wasted_size; |
178 | unchecked += c->nextblock->unchecked_size; | 178 | unchecked += c->nextblock->unchecked_size; |
179 | } | 179 | } |
180 | list_for_each_entry(jeb, &c->clean_list, list) { | 180 | list_for_each_entry(jeb, &c->clean_list, list) { |
181 | nr_counted++; | 181 | nr_counted++; |
182 | free += jeb->free_size; | 182 | free += jeb->free_size; |
183 | dirty += jeb->dirty_size; | 183 | dirty += jeb->dirty_size; |
184 | used += jeb->used_size; | 184 | used += jeb->used_size; |
185 | wasted += jeb->wasted_size; | 185 | wasted += jeb->wasted_size; |
186 | unchecked += jeb->unchecked_size; | 186 | unchecked += jeb->unchecked_size; |
187 | } | 187 | } |
188 | list_for_each_entry(jeb, &c->very_dirty_list, list) { | 188 | list_for_each_entry(jeb, &c->very_dirty_list, list) { |
189 | nr_counted++; | 189 | nr_counted++; |
190 | free += jeb->free_size; | 190 | free += jeb->free_size; |
191 | dirty += jeb->dirty_size; | 191 | dirty += jeb->dirty_size; |
192 | used += jeb->used_size; | 192 | used += jeb->used_size; |
193 | wasted += jeb->wasted_size; | 193 | wasted += jeb->wasted_size; |
194 | unchecked += jeb->unchecked_size; | 194 | unchecked += jeb->unchecked_size; |
195 | } | 195 | } |
196 | list_for_each_entry(jeb, &c->dirty_list, list) { | 196 | list_for_each_entry(jeb, &c->dirty_list, list) { |
197 | nr_counted++; | 197 | nr_counted++; |
198 | free += jeb->free_size; | 198 | free += jeb->free_size; |
199 | dirty += jeb->dirty_size; | 199 | dirty += jeb->dirty_size; |
200 | used += jeb->used_size; | 200 | used += jeb->used_size; |
201 | wasted += jeb->wasted_size; | 201 | wasted += jeb->wasted_size; |
202 | unchecked += jeb->unchecked_size; | 202 | unchecked += jeb->unchecked_size; |
203 | } | 203 | } |
204 | list_for_each_entry(jeb, &c->erasable_list, list) { | 204 | list_for_each_entry(jeb, &c->erasable_list, list) { |
205 | nr_counted++; | 205 | nr_counted++; |
206 | free += jeb->free_size; | 206 | free += jeb->free_size; |
207 | dirty += jeb->dirty_size; | 207 | dirty += jeb->dirty_size; |
208 | used += jeb->used_size; | 208 | used += jeb->used_size; |
209 | wasted += jeb->wasted_size; | 209 | wasted += jeb->wasted_size; |
210 | unchecked += jeb->unchecked_size; | 210 | unchecked += jeb->unchecked_size; |
211 | } | 211 | } |
212 | list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) { | 212 | list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) { |
213 | nr_counted++; | 213 | nr_counted++; |
214 | free += jeb->free_size; | 214 | free += jeb->free_size; |
215 | dirty += jeb->dirty_size; | 215 | dirty += jeb->dirty_size; |
216 | used += jeb->used_size; | 216 | used += jeb->used_size; |
217 | wasted += jeb->wasted_size; | 217 | wasted += jeb->wasted_size; |
218 | unchecked += jeb->unchecked_size; | 218 | unchecked += jeb->unchecked_size; |
219 | } | 219 | } |
220 | list_for_each_entry(jeb, &c->erase_pending_list, list) { | 220 | list_for_each_entry(jeb, &c->erase_pending_list, list) { |
221 | nr_counted++; | 221 | nr_counted++; |
222 | free += jeb->free_size; | 222 | free += jeb->free_size; |
223 | dirty += jeb->dirty_size; | 223 | dirty += jeb->dirty_size; |
224 | used += jeb->used_size; | 224 | used += jeb->used_size; |
225 | wasted += jeb->wasted_size; | 225 | wasted += jeb->wasted_size; |
226 | unchecked += jeb->unchecked_size; | 226 | unchecked += jeb->unchecked_size; |
227 | } | 227 | } |
228 | list_for_each_entry(jeb, &c->free_list, list) { | 228 | list_for_each_entry(jeb, &c->free_list, list) { |
229 | nr_counted++; | 229 | nr_counted++; |
230 | free += jeb->free_size; | 230 | free += jeb->free_size; |
231 | dirty += jeb->dirty_size; | 231 | dirty += jeb->dirty_size; |
232 | used += jeb->used_size; | 232 | used += jeb->used_size; |
233 | wasted += jeb->wasted_size; | 233 | wasted += jeb->wasted_size; |
234 | unchecked += jeb->unchecked_size; | 234 | unchecked += jeb->unchecked_size; |
235 | } | 235 | } |
236 | list_for_each_entry(jeb, &c->bad_used_list, list) { | 236 | list_for_each_entry(jeb, &c->bad_used_list, list) { |
237 | nr_counted++; | 237 | nr_counted++; |
238 | free += jeb->free_size; | 238 | free += jeb->free_size; |
239 | dirty += jeb->dirty_size; | 239 | dirty += jeb->dirty_size; |
240 | used += jeb->used_size; | 240 | used += jeb->used_size; |
241 | wasted += jeb->wasted_size; | 241 | wasted += jeb->wasted_size; |
242 | unchecked += jeb->unchecked_size; | 242 | unchecked += jeb->unchecked_size; |
243 | } | 243 | } |
244 | 244 | ||
245 | list_for_each_entry(jeb, &c->erasing_list, list) { | 245 | list_for_each_entry(jeb, &c->erasing_list, list) { |
246 | nr_counted++; | 246 | nr_counted++; |
247 | erasing += c->sector_size; | 247 | erasing += c->sector_size; |
248 | } | 248 | } |
249 | list_for_each_entry(jeb, &c->erase_checking_list, list) { | ||
250 | nr_counted++; | ||
251 | erasing += c->sector_size; | ||
252 | } | ||
249 | list_for_each_entry(jeb, &c->erase_complete_list, list) { | 253 | list_for_each_entry(jeb, &c->erase_complete_list, list) { |
250 | nr_counted++; | 254 | nr_counted++; |
251 | erasing += c->sector_size; | 255 | erasing += c->sector_size; |
252 | } | 256 | } |
253 | list_for_each_entry(jeb, &c->bad_list, list) { | 257 | list_for_each_entry(jeb, &c->bad_list, list) { |
254 | nr_counted++; | 258 | nr_counted++; |
255 | bad += c->sector_size; | 259 | bad += c->sector_size; |
256 | } | 260 | } |
257 | 261 | ||
258 | #define check(sz) \ | 262 | #define check(sz) \ |
259 | if (sz != c->sz##_size) { \ | 263 | if (sz != c->sz##_size) { \ |
260 | printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \ | 264 | printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \ |
261 | sz, c->sz##_size); \ | 265 | sz, c->sz##_size); \ |
262 | dump = 1; \ | 266 | dump = 1; \ |
263 | } | 267 | } |
264 | check(free); | 268 | check(free); |
265 | check(dirty); | 269 | check(dirty); |
266 | check(used); | 270 | check(used); |
267 | check(wasted); | 271 | check(wasted); |
268 | check(unchecked); | 272 | check(unchecked); |
269 | check(bad); | 273 | check(bad); |
270 | check(erasing); | 274 | check(erasing); |
271 | #undef check | 275 | #undef check |
272 | 276 | ||
273 | if (nr_counted != c->nr_blocks) { | 277 | if (nr_counted != c->nr_blocks) { |
274 | printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n", | 278 | printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n", |
275 | __func__, nr_counted, c->nr_blocks); | 279 | __func__, nr_counted, c->nr_blocks); |
276 | dump = 1; | 280 | dump = 1; |
277 | } | 281 | } |
278 | 282 | ||
279 | if (dump) { | 283 | if (dump) { |
280 | __jffs2_dbg_dump_block_lists_nolock(c); | 284 | __jffs2_dbg_dump_block_lists_nolock(c); |
281 | BUG(); | 285 | BUG(); |
282 | } | 286 | } |
283 | } | 287 | } |
284 | 288 | ||
285 | /* | 289 | /* |
286 | * Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'. | 290 | * Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'. |
287 | */ | 291 | */ |
288 | void | 292 | void |
289 | __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, | 293 | __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, |
290 | struct jffs2_eraseblock *jeb) | 294 | struct jffs2_eraseblock *jeb) |
291 | { | 295 | { |
292 | spin_lock(&c->erase_completion_lock); | 296 | spin_lock(&c->erase_completion_lock); |
293 | __jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | 297 | __jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
294 | spin_unlock(&c->erase_completion_lock); | 298 | spin_unlock(&c->erase_completion_lock); |
295 | } | 299 | } |
296 | 300 | ||
297 | void | 301 | void |
298 | __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, | 302 | __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, |
299 | struct jffs2_eraseblock *jeb) | 303 | struct jffs2_eraseblock *jeb) |
300 | { | 304 | { |
301 | uint32_t my_used_size = 0; | 305 | uint32_t my_used_size = 0; |
302 | uint32_t my_unchecked_size = 0; | 306 | uint32_t my_unchecked_size = 0; |
303 | uint32_t my_dirty_size = 0; | 307 | uint32_t my_dirty_size = 0; |
304 | struct jffs2_raw_node_ref *ref2 = jeb->first_node; | 308 | struct jffs2_raw_node_ref *ref2 = jeb->first_node; |
305 | 309 | ||
306 | while (ref2) { | 310 | while (ref2) { |
307 | uint32_t totlen = ref_totlen(c, jeb, ref2); | 311 | uint32_t totlen = ref_totlen(c, jeb, ref2); |
308 | 312 | ||
309 | if (ref_offset(ref2) < jeb->offset || | 313 | if (ref_offset(ref2) < jeb->offset || |
310 | ref_offset(ref2) > jeb->offset + c->sector_size) { | 314 | ref_offset(ref2) > jeb->offset + c->sector_size) { |
311 | JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n", | 315 | JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n", |
312 | ref_offset(ref2), jeb->offset); | 316 | ref_offset(ref2), jeb->offset); |
313 | goto error; | 317 | goto error; |
314 | 318 | ||
315 | } | 319 | } |
316 | if (ref_flags(ref2) == REF_UNCHECKED) | 320 | if (ref_flags(ref2) == REF_UNCHECKED) |
317 | my_unchecked_size += totlen; | 321 | my_unchecked_size += totlen; |
318 | else if (!ref_obsolete(ref2)) | 322 | else if (!ref_obsolete(ref2)) |
319 | my_used_size += totlen; | 323 | my_used_size += totlen; |
320 | else | 324 | else |
321 | my_dirty_size += totlen; | 325 | my_dirty_size += totlen; |
322 | 326 | ||
323 | if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) { | 327 | if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) { |
324 | JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n", | 328 | JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n", |
325 | ref_offset(ref2), ref2, ref_offset(ref_next(ref2)), ref_next(ref2), | 329 | ref_offset(ref2), ref2, ref_offset(ref_next(ref2)), ref_next(ref2), |
326 | ref_offset(jeb->last_node), jeb->last_node); | 330 | ref_offset(jeb->last_node), jeb->last_node); |
327 | goto error; | 331 | goto error; |
328 | } | 332 | } |
329 | ref2 = ref_next(ref2); | 333 | ref2 = ref_next(ref2); |
330 | } | 334 | } |
331 | 335 | ||
332 | if (my_used_size != jeb->used_size) { | 336 | if (my_used_size != jeb->used_size) { |
333 | JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n", | 337 | JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n", |
334 | my_used_size, jeb->used_size); | 338 | my_used_size, jeb->used_size); |
335 | goto error; | 339 | goto error; |
336 | } | 340 | } |
337 | 341 | ||
338 | if (my_unchecked_size != jeb->unchecked_size) { | 342 | if (my_unchecked_size != jeb->unchecked_size) { |
339 | JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n", | 343 | JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n", |
340 | my_unchecked_size, jeb->unchecked_size); | 344 | my_unchecked_size, jeb->unchecked_size); |
341 | goto error; | 345 | goto error; |
342 | } | 346 | } |
343 | 347 | ||
344 | #if 0 | 348 | #if 0 |
345 | /* This should work when we implement ref->__totlen elemination */ | 349 | /* This should work when we implement ref->__totlen elemination */ |
346 | if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) { | 350 | if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) { |
347 | JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n", | 351 | JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n", |
348 | my_dirty_size, jeb->dirty_size + jeb->wasted_size); | 352 | my_dirty_size, jeb->dirty_size + jeb->wasted_size); |
349 | goto error; | 353 | goto error; |
350 | } | 354 | } |
351 | 355 | ||
352 | if (jeb->free_size == 0 | 356 | if (jeb->free_size == 0 |
353 | && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) { | 357 | && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) { |
354 | JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n", | 358 | JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n", |
355 | my_used_size + my_unchecked_size + my_dirty_size, | 359 | my_used_size + my_unchecked_size + my_dirty_size, |
356 | c->sector_size); | 360 | c->sector_size); |
357 | goto error; | 361 | goto error; |
358 | } | 362 | } |
359 | #endif | 363 | #endif |
360 | 364 | ||
361 | if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING))) | 365 | if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING))) |
362 | __jffs2_dbg_superblock_counts(c); | 366 | __jffs2_dbg_superblock_counts(c); |
363 | 367 | ||
364 | return; | 368 | return; |
365 | 369 | ||
366 | error: | 370 | error: |
367 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); | 371 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); |
368 | __jffs2_dbg_dump_jeb_nolock(jeb); | 372 | __jffs2_dbg_dump_jeb_nolock(jeb); |
369 | __jffs2_dbg_dump_block_lists_nolock(c); | 373 | __jffs2_dbg_dump_block_lists_nolock(c); |
370 | BUG(); | 374 | BUG(); |
371 | 375 | ||
372 | } | 376 | } |
373 | #endif /* JFFS2_DBG_PARANOIA_CHECKS */ | 377 | #endif /* JFFS2_DBG_PARANOIA_CHECKS */ |
374 | 378 | ||
375 | #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) | 379 | #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) |
376 | /* | 380 | /* |
377 | * Dump the node_refs of the 'jeb' JFFS2 eraseblock. | 381 | * Dump the node_refs of the 'jeb' JFFS2 eraseblock. |
378 | */ | 382 | */ |
379 | void | 383 | void |
380 | __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, | 384 | __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, |
381 | struct jffs2_eraseblock *jeb) | 385 | struct jffs2_eraseblock *jeb) |
382 | { | 386 | { |
383 | spin_lock(&c->erase_completion_lock); | 387 | spin_lock(&c->erase_completion_lock); |
384 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); | 388 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); |
385 | spin_unlock(&c->erase_completion_lock); | 389 | spin_unlock(&c->erase_completion_lock); |
386 | } | 390 | } |
387 | 391 | ||
388 | void | 392 | void |
389 | __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, | 393 | __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, |
390 | struct jffs2_eraseblock *jeb) | 394 | struct jffs2_eraseblock *jeb) |
391 | { | 395 | { |
392 | struct jffs2_raw_node_ref *ref; | 396 | struct jffs2_raw_node_ref *ref; |
393 | int i = 0; | 397 | int i = 0; |
394 | 398 | ||
395 | printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset); | 399 | printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset); |
396 | if (!jeb->first_node) { | 400 | if (!jeb->first_node) { |
397 | printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset); | 401 | printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset); |
398 | return; | 402 | return; |
399 | } | 403 | } |
400 | 404 | ||
401 | printk(JFFS2_DBG); | 405 | printk(JFFS2_DBG); |
402 | for (ref = jeb->first_node; ; ref = ref_next(ref)) { | 406 | for (ref = jeb->first_node; ; ref = ref_next(ref)) { |
403 | printk("%#08x", ref_offset(ref)); | 407 | printk("%#08x", ref_offset(ref)); |
404 | #ifdef TEST_TOTLEN | 408 | #ifdef TEST_TOTLEN |
405 | printk("(%x)", ref->__totlen); | 409 | printk("(%x)", ref->__totlen); |
406 | #endif | 410 | #endif |
407 | if (ref_next(ref)) | 411 | if (ref_next(ref)) |
408 | printk("->"); | 412 | printk("->"); |
409 | else | 413 | else |
410 | break; | 414 | break; |
411 | if (++i == 4) { | 415 | if (++i == 4) { |
412 | i = 0; | 416 | i = 0; |
413 | printk("\n" JFFS2_DBG); | 417 | printk("\n" JFFS2_DBG); |
414 | } | 418 | } |
415 | } | 419 | } |
416 | printk("\n"); | 420 | printk("\n"); |
417 | } | 421 | } |
418 | 422 | ||
419 | /* | 423 | /* |
420 | * Dump an eraseblock's space accounting. | 424 | * Dump an eraseblock's space accounting. |
421 | */ | 425 | */ |
422 | void | 426 | void |
423 | __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 427 | __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
424 | { | 428 | { |
425 | spin_lock(&c->erase_completion_lock); | 429 | spin_lock(&c->erase_completion_lock); |
426 | __jffs2_dbg_dump_jeb_nolock(jeb); | 430 | __jffs2_dbg_dump_jeb_nolock(jeb); |
427 | spin_unlock(&c->erase_completion_lock); | 431 | spin_unlock(&c->erase_completion_lock); |
428 | } | 432 | } |
429 | 433 | ||
430 | void | 434 | void |
431 | __jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb) | 435 | __jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb) |
432 | { | 436 | { |
433 | if (!jeb) | 437 | if (!jeb) |
434 | return; | 438 | return; |
435 | 439 | ||
436 | printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n", | 440 | printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n", |
437 | jeb->offset); | 441 | jeb->offset); |
438 | 442 | ||
439 | printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size); | 443 | printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size); |
440 | printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size); | 444 | printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size); |
441 | printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size); | 445 | printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size); |
442 | printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size); | 446 | printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size); |
443 | printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size); | 447 | printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size); |
444 | } | 448 | } |
445 | 449 | ||
446 | void | 450 | void |
447 | __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c) | 451 | __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c) |
448 | { | 452 | { |
449 | spin_lock(&c->erase_completion_lock); | 453 | spin_lock(&c->erase_completion_lock); |
450 | __jffs2_dbg_dump_block_lists_nolock(c); | 454 | __jffs2_dbg_dump_block_lists_nolock(c); |
451 | spin_unlock(&c->erase_completion_lock); | 455 | spin_unlock(&c->erase_completion_lock); |
452 | } | 456 | } |
453 | 457 | ||
454 | void | 458 | void |
455 | __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c) | 459 | __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c) |
456 | { | 460 | { |
457 | printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n"); | 461 | printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n"); |
458 | 462 | ||
459 | printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size); | 463 | printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size); |
460 | printk(JFFS2_DBG "used_size: %#08x\n", c->used_size); | 464 | printk(JFFS2_DBG "used_size: %#08x\n", c->used_size); |
461 | printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size); | 465 | printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size); |
462 | printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size); | 466 | printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size); |
463 | printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size); | 467 | printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size); |
464 | printk(JFFS2_DBG "free_size: %#08x\n", c->free_size); | 468 | printk(JFFS2_DBG "free_size: %#08x\n", c->free_size); |
465 | printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size); | 469 | printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size); |
466 | printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size); | 470 | printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size); |
467 | printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size); | 471 | printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size); |
468 | printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n", | 472 | printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n", |
469 | c->sector_size * c->resv_blocks_write); | 473 | c->sector_size * c->resv_blocks_write); |
470 | 474 | ||
471 | if (c->nextblock) | 475 | if (c->nextblock) |
472 | printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 476 | printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
473 | c->nextblock->offset, c->nextblock->used_size, | 477 | c->nextblock->offset, c->nextblock->used_size, |
474 | c->nextblock->dirty_size, c->nextblock->wasted_size, | 478 | c->nextblock->dirty_size, c->nextblock->wasted_size, |
475 | c->nextblock->unchecked_size, c->nextblock->free_size); | 479 | c->nextblock->unchecked_size, c->nextblock->free_size); |
476 | else | 480 | else |
477 | printk(JFFS2_DBG "nextblock: NULL\n"); | 481 | printk(JFFS2_DBG "nextblock: NULL\n"); |
478 | 482 | ||
479 | if (c->gcblock) | 483 | if (c->gcblock) |
480 | printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 484 | printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
481 | c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, | 485 | c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, |
482 | c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); | 486 | c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); |
483 | else | 487 | else |
484 | printk(JFFS2_DBG "gcblock: NULL\n"); | 488 | printk(JFFS2_DBG "gcblock: NULL\n"); |
485 | 489 | ||
486 | if (list_empty(&c->clean_list)) { | 490 | if (list_empty(&c->clean_list)) { |
487 | printk(JFFS2_DBG "clean_list: empty\n"); | 491 | printk(JFFS2_DBG "clean_list: empty\n"); |
488 | } else { | 492 | } else { |
489 | struct list_head *this; | 493 | struct list_head *this; |
490 | int numblocks = 0; | 494 | int numblocks = 0; |
491 | uint32_t dirty = 0; | 495 | uint32_t dirty = 0; |
492 | 496 | ||
493 | list_for_each(this, &c->clean_list) { | 497 | list_for_each(this, &c->clean_list) { |
494 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 498 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
495 | numblocks ++; | 499 | numblocks ++; |
496 | dirty += jeb->wasted_size; | 500 | dirty += jeb->wasted_size; |
497 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 501 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
498 | printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 502 | printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
499 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 503 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
500 | jeb->unchecked_size, jeb->free_size); | 504 | jeb->unchecked_size, jeb->free_size); |
501 | } | 505 | } |
502 | } | 506 | } |
503 | 507 | ||
504 | printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", | 508 | printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", |
505 | numblocks, dirty, dirty / numblocks); | 509 | numblocks, dirty, dirty / numblocks); |
506 | } | 510 | } |
507 | 511 | ||
508 | if (list_empty(&c->very_dirty_list)) { | 512 | if (list_empty(&c->very_dirty_list)) { |
509 | printk(JFFS2_DBG "very_dirty_list: empty\n"); | 513 | printk(JFFS2_DBG "very_dirty_list: empty\n"); |
510 | } else { | 514 | } else { |
511 | struct list_head *this; | 515 | struct list_head *this; |
512 | int numblocks = 0; | 516 | int numblocks = 0; |
513 | uint32_t dirty = 0; | 517 | uint32_t dirty = 0; |
514 | 518 | ||
515 | list_for_each(this, &c->very_dirty_list) { | 519 | list_for_each(this, &c->very_dirty_list) { |
516 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 520 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
517 | 521 | ||
518 | numblocks ++; | 522 | numblocks ++; |
519 | dirty += jeb->dirty_size; | 523 | dirty += jeb->dirty_size; |
520 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 524 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
521 | printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 525 | printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
522 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 526 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
523 | jeb->unchecked_size, jeb->free_size); | 527 | jeb->unchecked_size, jeb->free_size); |
524 | } | 528 | } |
525 | } | 529 | } |
526 | 530 | ||
527 | printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | 531 | printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", |
528 | numblocks, dirty, dirty / numblocks); | 532 | numblocks, dirty, dirty / numblocks); |
529 | } | 533 | } |
530 | 534 | ||
531 | if (list_empty(&c->dirty_list)) { | 535 | if (list_empty(&c->dirty_list)) { |
532 | printk(JFFS2_DBG "dirty_list: empty\n"); | 536 | printk(JFFS2_DBG "dirty_list: empty\n"); |
533 | } else { | 537 | } else { |
534 | struct list_head *this; | 538 | struct list_head *this; |
535 | int numblocks = 0; | 539 | int numblocks = 0; |
536 | uint32_t dirty = 0; | 540 | uint32_t dirty = 0; |
537 | 541 | ||
538 | list_for_each(this, &c->dirty_list) { | 542 | list_for_each(this, &c->dirty_list) { |
539 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 543 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
540 | 544 | ||
541 | numblocks ++; | 545 | numblocks ++; |
542 | dirty += jeb->dirty_size; | 546 | dirty += jeb->dirty_size; |
543 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 547 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
544 | printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 548 | printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
545 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 549 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
546 | jeb->unchecked_size, jeb->free_size); | 550 | jeb->unchecked_size, jeb->free_size); |
547 | } | 551 | } |
548 | } | 552 | } |
549 | 553 | ||
550 | printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n", | 554 | printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n", |
551 | numblocks, dirty, dirty / numblocks); | 555 | numblocks, dirty, dirty / numblocks); |
552 | } | 556 | } |
553 | 557 | ||
554 | if (list_empty(&c->erasable_list)) { | 558 | if (list_empty(&c->erasable_list)) { |
555 | printk(JFFS2_DBG "erasable_list: empty\n"); | 559 | printk(JFFS2_DBG "erasable_list: empty\n"); |
556 | } else { | 560 | } else { |
557 | struct list_head *this; | 561 | struct list_head *this; |
558 | 562 | ||
559 | list_for_each(this, &c->erasable_list) { | 563 | list_for_each(this, &c->erasable_list) { |
560 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 564 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
561 | 565 | ||
562 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 566 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
563 | printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 567 | printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
564 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 568 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
565 | jeb->unchecked_size, jeb->free_size); | 569 | jeb->unchecked_size, jeb->free_size); |
566 | } | 570 | } |
567 | } | 571 | } |
568 | } | 572 | } |
569 | 573 | ||
570 | if (list_empty(&c->erasing_list)) { | 574 | if (list_empty(&c->erasing_list)) { |
571 | printk(JFFS2_DBG "erasing_list: empty\n"); | 575 | printk(JFFS2_DBG "erasing_list: empty\n"); |
572 | } else { | 576 | } else { |
573 | struct list_head *this; | 577 | struct list_head *this; |
574 | 578 | ||
575 | list_for_each(this, &c->erasing_list) { | 579 | list_for_each(this, &c->erasing_list) { |
576 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 580 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
577 | 581 | ||
578 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 582 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
579 | printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 583 | printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
584 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
585 | jeb->unchecked_size, jeb->free_size); | ||
586 | } | ||
587 | } | ||
588 | } | ||
589 | if (list_empty(&c->erase_checking_list)) { | ||
590 | printk(JFFS2_DBG "erase_checking_list: empty\n"); | ||
591 | } else { | ||
592 | struct list_head *this; | ||
593 | |||
594 | list_for_each(this, &c->erase_checking_list) { | ||
595 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
596 | |||
597 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
598 | printk(JFFS2_DBG "erase_checking_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
580 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 599 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
581 | jeb->unchecked_size, jeb->free_size); | 600 | jeb->unchecked_size, jeb->free_size); |
582 | } | 601 | } |
583 | } | 602 | } |
584 | } | 603 | } |
585 | 604 | ||
586 | if (list_empty(&c->erase_pending_list)) { | 605 | if (list_empty(&c->erase_pending_list)) { |
587 | printk(JFFS2_DBG "erase_pending_list: empty\n"); | 606 | printk(JFFS2_DBG "erase_pending_list: empty\n"); |
588 | } else { | 607 | } else { |
589 | struct list_head *this; | 608 | struct list_head *this; |
590 | 609 | ||
591 | list_for_each(this, &c->erase_pending_list) { | 610 | list_for_each(this, &c->erase_pending_list) { |
592 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 611 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
593 | 612 | ||
594 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 613 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
595 | printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 614 | printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
596 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 615 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
597 | jeb->unchecked_size, jeb->free_size); | 616 | jeb->unchecked_size, jeb->free_size); |
598 | } | 617 | } |
599 | } | 618 | } |
600 | } | 619 | } |
601 | 620 | ||
602 | if (list_empty(&c->erasable_pending_wbuf_list)) { | 621 | if (list_empty(&c->erasable_pending_wbuf_list)) { |
603 | printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n"); | 622 | printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n"); |
604 | } else { | 623 | } else { |
605 | struct list_head *this; | 624 | struct list_head *this; |
606 | 625 | ||
607 | list_for_each(this, &c->erasable_pending_wbuf_list) { | 626 | list_for_each(this, &c->erasable_pending_wbuf_list) { |
608 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 627 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
609 | 628 | ||
610 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 629 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
611 | printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 630 | printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
612 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 631 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
613 | jeb->unchecked_size, jeb->free_size); | 632 | jeb->unchecked_size, jeb->free_size); |
614 | } | 633 | } |
615 | } | 634 | } |
616 | } | 635 | } |
617 | 636 | ||
618 | if (list_empty(&c->free_list)) { | 637 | if (list_empty(&c->free_list)) { |
619 | printk(JFFS2_DBG "free_list: empty\n"); | 638 | printk(JFFS2_DBG "free_list: empty\n"); |
620 | } else { | 639 | } else { |
621 | struct list_head *this; | 640 | struct list_head *this; |
622 | 641 | ||
623 | list_for_each(this, &c->free_list) { | 642 | list_for_each(this, &c->free_list) { |
624 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 643 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
625 | 644 | ||
626 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 645 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
627 | printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 646 | printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
628 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 647 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
629 | jeb->unchecked_size, jeb->free_size); | 648 | jeb->unchecked_size, jeb->free_size); |
630 | } | 649 | } |
631 | } | 650 | } |
632 | } | 651 | } |
633 | 652 | ||
634 | if (list_empty(&c->bad_list)) { | 653 | if (list_empty(&c->bad_list)) { |
635 | printk(JFFS2_DBG "bad_list: empty\n"); | 654 | printk(JFFS2_DBG "bad_list: empty\n"); |
636 | } else { | 655 | } else { |
637 | struct list_head *this; | 656 | struct list_head *this; |
638 | 657 | ||
639 | list_for_each(this, &c->bad_list) { | 658 | list_for_each(this, &c->bad_list) { |
640 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 659 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
641 | 660 | ||
642 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 661 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
643 | printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 662 | printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
644 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 663 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
645 | jeb->unchecked_size, jeb->free_size); | 664 | jeb->unchecked_size, jeb->free_size); |
646 | } | 665 | } |
647 | } | 666 | } |
648 | } | 667 | } |
649 | 668 | ||
650 | if (list_empty(&c->bad_used_list)) { | 669 | if (list_empty(&c->bad_used_list)) { |
651 | printk(JFFS2_DBG "bad_used_list: empty\n"); | 670 | printk(JFFS2_DBG "bad_used_list: empty\n"); |
652 | } else { | 671 | } else { |
653 | struct list_head *this; | 672 | struct list_head *this; |
654 | 673 | ||
655 | list_for_each(this, &c->bad_used_list) { | 674 | list_for_each(this, &c->bad_used_list) { |
656 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 675 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
657 | 676 | ||
658 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | 677 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { |
659 | printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | 678 | printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", |
660 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | 679 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, |
661 | jeb->unchecked_size, jeb->free_size); | 680 | jeb->unchecked_size, jeb->free_size); |
662 | } | 681 | } |
663 | } | 682 | } |
664 | } | 683 | } |
665 | } | 684 | } |
666 | 685 | ||
667 | void | 686 | void |
668 | __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f) | 687 | __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f) |
669 | { | 688 | { |
670 | mutex_lock(&f->sem); | 689 | mutex_lock(&f->sem); |
671 | jffs2_dbg_dump_fragtree_nolock(f); | 690 | jffs2_dbg_dump_fragtree_nolock(f); |
672 | mutex_unlock(&f->sem); | 691 | mutex_unlock(&f->sem); |
673 | } | 692 | } |
674 | 693 | ||
675 | void | 694 | void |
676 | __jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f) | 695 | __jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f) |
677 | { | 696 | { |
678 | struct jffs2_node_frag *this = frag_first(&f->fragtree); | 697 | struct jffs2_node_frag *this = frag_first(&f->fragtree); |
679 | uint32_t lastofs = 0; | 698 | uint32_t lastofs = 0; |
680 | int buggy = 0; | 699 | int buggy = 0; |
681 | 700 | ||
682 | printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino); | 701 | printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino); |
683 | while(this) { | 702 | while(this) { |
684 | if (this->node) | 703 | if (this->node) |
685 | printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n", | 704 | printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n", |
686 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw), | 705 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw), |
687 | ref_flags(this->node->raw), this, frag_left(this), frag_right(this), | 706 | ref_flags(this->node->raw), this, frag_left(this), frag_right(this), |
688 | frag_parent(this)); | 707 | frag_parent(this)); |
689 | else | 708 | else |
690 | printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). left (%p), right (%p), parent (%p)\n", | 709 | printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). left (%p), right (%p), parent (%p)\n", |
691 | this->ofs, this->ofs+this->size, this, frag_left(this), | 710 | this->ofs, this->ofs+this->size, this, frag_left(this), |
692 | frag_right(this), frag_parent(this)); | 711 | frag_right(this), frag_parent(this)); |
693 | if (this->ofs != lastofs) | 712 | if (this->ofs != lastofs) |
694 | buggy = 1; | 713 | buggy = 1; |
695 | lastofs = this->ofs + this->size; | 714 | lastofs = this->ofs + this->size; |
696 | this = frag_next(this); | 715 | this = frag_next(this); |
697 | } | 716 | } |
698 | 717 | ||
699 | if (f->metadata) | 718 | if (f->metadata) |
700 | printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); | 719 | printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); |
701 | 720 | ||
702 | if (buggy) { | 721 | if (buggy) { |
703 | JFFS2_ERROR("frag tree got a hole in it.\n"); | 722 | JFFS2_ERROR("frag tree got a hole in it.\n"); |
704 | BUG(); | 723 | BUG(); |
705 | } | 724 | } |
706 | } | 725 | } |
707 | 726 | ||
708 | #define JFFS2_BUFDUMP_BYTES_PER_LINE 32 | 727 | #define JFFS2_BUFDUMP_BYTES_PER_LINE 32 |
709 | void | 728 | void |
710 | __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs) | 729 | __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs) |
711 | { | 730 | { |
712 | int skip; | 731 | int skip; |
713 | int i; | 732 | int i; |
714 | 733 | ||
715 | printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n", | 734 | printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n", |
716 | offs, offs + len, len); | 735 | offs, offs + len, len); |
717 | i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE; | 736 | i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE; |
718 | offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1); | 737 | offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1); |
719 | 738 | ||
720 | if (skip != 0) | 739 | if (skip != 0) |
721 | printk(JFFS2_DBG "%#08x: ", offs); | 740 | printk(JFFS2_DBG "%#08x: ", offs); |
722 | 741 | ||
723 | while (skip--) | 742 | while (skip--) |
724 | printk(" "); | 743 | printk(" "); |
725 | 744 | ||
726 | while (i < len) { | 745 | while (i < len) { |
727 | if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) { | 746 | if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) { |
728 | if (i != 0) | 747 | if (i != 0) |
729 | printk("\n"); | 748 | printk("\n"); |
730 | offs += JFFS2_BUFDUMP_BYTES_PER_LINE; | 749 | offs += JFFS2_BUFDUMP_BYTES_PER_LINE; |
731 | printk(JFFS2_DBG "%0#8x: ", offs); | 750 | printk(JFFS2_DBG "%0#8x: ", offs); |
732 | } | 751 | } |
733 | 752 | ||
734 | printk("%02x ", buf[i]); | 753 | printk("%02x ", buf[i]); |
735 | 754 | ||
736 | i += 1; | 755 | i += 1; |
737 | } | 756 | } |
738 | 757 | ||
739 | printk("\n"); | 758 | printk("\n"); |
740 | } | 759 | } |
741 | 760 | ||
742 | /* | 761 | /* |
743 | * Dump a JFFS2 node. | 762 | * Dump a JFFS2 node. |
744 | */ | 763 | */ |
745 | void | 764 | void |
746 | __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs) | 765 | __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs) |
747 | { | 766 | { |
748 | union jffs2_node_union node; | 767 | union jffs2_node_union node; |
749 | int len = sizeof(union jffs2_node_union); | 768 | int len = sizeof(union jffs2_node_union); |
750 | size_t retlen; | 769 | size_t retlen; |
751 | uint32_t crc; | 770 | uint32_t crc; |
752 | int ret; | 771 | int ret; |
753 | 772 | ||
754 | printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs); | 773 | printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs); |
755 | 774 | ||
756 | ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node); | 775 | ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node); |
757 | if (ret || (retlen != len)) { | 776 | if (ret || (retlen != len)) { |
758 | JFFS2_ERROR("read %d bytes failed or short. ret %d, retlen %zd.\n", | 777 | JFFS2_ERROR("read %d bytes failed or short. ret %d, retlen %zd.\n", |
759 | len, ret, retlen); | 778 | len, ret, retlen); |
760 | return; | 779 | return; |
761 | } | 780 | } |
762 | 781 | ||
763 | printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic)); | 782 | printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic)); |
764 | printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype)); | 783 | printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype)); |
765 | printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen)); | 784 | printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen)); |
766 | printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc)); | 785 | printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc)); |
767 | 786 | ||
768 | crc = crc32(0, &node.u, sizeof(node.u) - 4); | 787 | crc = crc32(0, &node.u, sizeof(node.u) - 4); |
769 | if (crc != je32_to_cpu(node.u.hdr_crc)) { | 788 | if (crc != je32_to_cpu(node.u.hdr_crc)) { |
770 | JFFS2_ERROR("wrong common header CRC.\n"); | 789 | JFFS2_ERROR("wrong common header CRC.\n"); |
771 | return; | 790 | return; |
772 | } | 791 | } |
773 | 792 | ||
774 | if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK && | 793 | if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK && |
775 | je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK) | 794 | je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK) |
776 | { | 795 | { |
777 | JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n", | 796 | JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n", |
778 | je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK); | 797 | je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK); |
779 | return; | 798 | return; |
780 | } | 799 | } |
781 | 800 | ||
782 | switch(je16_to_cpu(node.u.nodetype)) { | 801 | switch(je16_to_cpu(node.u.nodetype)) { |
783 | 802 | ||
784 | case JFFS2_NODETYPE_INODE: | 803 | case JFFS2_NODETYPE_INODE: |
785 | 804 | ||
786 | printk(JFFS2_DBG "the node is inode node\n"); | 805 | printk(JFFS2_DBG "the node is inode node\n"); |
787 | printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino)); | 806 | printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino)); |
788 | printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version)); | 807 | printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version)); |
789 | printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m); | 808 | printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m); |
790 | printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid)); | 809 | printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid)); |
791 | printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid)); | 810 | printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid)); |
792 | printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize)); | 811 | printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize)); |
793 | printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime)); | 812 | printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime)); |
794 | printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime)); | 813 | printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime)); |
795 | printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime)); | 814 | printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime)); |
796 | printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset)); | 815 | printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset)); |
797 | printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize)); | 816 | printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize)); |
798 | printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize)); | 817 | printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize)); |
799 | printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr); | 818 | printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr); |
800 | printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr); | 819 | printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr); |
801 | printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags)); | 820 | printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags)); |
802 | printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc)); | 821 | printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc)); |
803 | printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc)); | 822 | printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc)); |
804 | 823 | ||
805 | crc = crc32(0, &node.i, sizeof(node.i) - 8); | 824 | crc = crc32(0, &node.i, sizeof(node.i) - 8); |
806 | if (crc != je32_to_cpu(node.i.node_crc)) { | 825 | if (crc != je32_to_cpu(node.i.node_crc)) { |
807 | JFFS2_ERROR("wrong node header CRC.\n"); | 826 | JFFS2_ERROR("wrong node header CRC.\n"); |
808 | return; | 827 | return; |
809 | } | 828 | } |
810 | break; | 829 | break; |
811 | 830 | ||
812 | case JFFS2_NODETYPE_DIRENT: | 831 | case JFFS2_NODETYPE_DIRENT: |
813 | 832 | ||
814 | printk(JFFS2_DBG "the node is dirent node\n"); | 833 | printk(JFFS2_DBG "the node is dirent node\n"); |
815 | printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino)); | 834 | printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino)); |
816 | printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version)); | 835 | printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version)); |
817 | printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino)); | 836 | printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino)); |
818 | printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime)); | 837 | printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime)); |
819 | printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize); | 838 | printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize); |
820 | printk(JFFS2_DBG "type:\t%#02x\n", node.d.type); | 839 | printk(JFFS2_DBG "type:\t%#02x\n", node.d.type); |
821 | printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc)); | 840 | printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc)); |
822 | printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc)); | 841 | printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc)); |
823 | 842 | ||
824 | node.d.name[node.d.nsize] = '\0'; | 843 | node.d.name[node.d.nsize] = '\0'; |
825 | printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name); | 844 | printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name); |
826 | 845 | ||
827 | crc = crc32(0, &node.d, sizeof(node.d) - 8); | 846 | crc = crc32(0, &node.d, sizeof(node.d) - 8); |
828 | if (crc != je32_to_cpu(node.d.node_crc)) { | 847 | if (crc != je32_to_cpu(node.d.node_crc)) { |
829 | JFFS2_ERROR("wrong node header CRC.\n"); | 848 | JFFS2_ERROR("wrong node header CRC.\n"); |
830 | return; | 849 | return; |
831 | } | 850 | } |
832 | break; | 851 | break; |
833 | 852 | ||
834 | default: | 853 | default: |
835 | printk(JFFS2_DBG "node type is unknown\n"); | 854 | printk(JFFS2_DBG "node type is unknown\n"); |
836 | break; | 855 | break; |
837 | } | 856 | } |
838 | } | 857 | } |
839 | #endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */ | 858 | #endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */ |
840 | 859 |
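Editor's note on the dump above: __jffs2_dbg_dump_block_lists_nolock() walks each block list in turn and, for any block whose counters are not all zero, prints its used/dirty/wasted/unchecked/free breakdown, now including the new erase_checking_list section. As a purely illustrative aside, and not the kernel code itself, the per-block accounting those fields obey can be sketched in user space with hypothetical stand-in types (eb_acct here is an invented mirror of the jffs2_eraseblock size fields):

/* Illustrative user-space sketch only: checks that each block's
 * used + dirty + wasted + unchecked + free adds up to sector_size,
 * and sums a whole list the way the dump above walks one. */
#include <stdint.h>
#include <stdio.h>

struct eb_acct {                        /* hypothetical stand-in, not struct jffs2_eraseblock */
	uint32_t used, dirty, wasted, unchecked, free;
	struct eb_acct *next;           /* hypothetical stand-in for the list linkage */
};

static uint32_t sum_list(const char *name, const struct eb_acct *e,
                         uint32_t sector_size)
{
	uint32_t total = 0;

	for (; e; e = e->next) {
		uint32_t blk = e->used + e->dirty + e->wasted +
		               e->unchecked + e->free;
		if (blk != sector_size)
			printf("%s: block accounting off by %d bytes\n",
			       name, (int)blk - (int)sector_size);
		total += blk;
	}
	return total;
}

int main(void)
{
	struct eb_acct b = { .used = 0x1000, .free = 0xf000 };  /* 64KiB example */
	printf("list total: %#x\n", sum_list("clean_list", &b, 0x10000));
	return 0;
}

A block that violated this roughly-stated invariant would stand out immediately in a dump like the one above, since only blocks with non-zero counters are printed per list.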
fs/jffs2/erase.c
1 | /* | 1 | /* |
2 | * JFFS2 -- Journalling Flash File System, Version 2. | 2 | * JFFS2 -- Journalling Flash File System, Version 2. |
3 | * | 3 | * |
4 | * Copyright © 2001-2007 Red Hat, Inc. | 4 | * Copyright © 2001-2007 Red Hat, Inc. |
5 | * | 5 | * |
6 | * Created by David Woodhouse <dwmw2@infradead.org> | 6 | * Created by David Woodhouse <dwmw2@infradead.org> |
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/mtd/mtd.h> | 14 | #include <linux/mtd/mtd.h> |
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/crc32.h> | 16 | #include <linux/crc32.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/pagemap.h> | 18 | #include <linux/pagemap.h> |
19 | #include "nodelist.h" | 19 | #include "nodelist.h" |
20 | 20 | ||
21 | struct erase_priv_struct { | 21 | struct erase_priv_struct { |
22 | struct jffs2_eraseblock *jeb; | 22 | struct jffs2_eraseblock *jeb; |
23 | struct jffs2_sb_info *c; | 23 | struct jffs2_sb_info *c; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | #ifndef __ECOS | 26 | #ifndef __ECOS |
27 | static void jffs2_erase_callback(struct erase_info *); | 27 | static void jffs2_erase_callback(struct erase_info *); |
28 | #endif | 28 | #endif |
29 | static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); | 29 | static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); |
30 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | 30 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); |
31 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | 31 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); |
32 | 32 | ||
33 | static void jffs2_erase_block(struct jffs2_sb_info *c, | 33 | static void jffs2_erase_block(struct jffs2_sb_info *c, |
34 | struct jffs2_eraseblock *jeb) | 34 | struct jffs2_eraseblock *jeb) |
35 | { | 35 | { |
36 | int ret; | 36 | int ret; |
37 | uint32_t bad_offset; | 37 | uint32_t bad_offset; |
38 | #ifdef __ECOS | 38 | #ifdef __ECOS |
39 | ret = jffs2_flash_erase(c, jeb); | 39 | ret = jffs2_flash_erase(c, jeb); |
40 | if (!ret) { | 40 | if (!ret) { |
41 | jffs2_erase_succeeded(c, jeb); | 41 | jffs2_erase_succeeded(c, jeb); |
42 | return; | 42 | return; |
43 | } | 43 | } |
44 | bad_offset = jeb->offset; | 44 | bad_offset = jeb->offset; |
45 | #else /* Linux */ | 45 | #else /* Linux */ |
46 | struct erase_info *instr; | 46 | struct erase_info *instr; |
47 | 47 | ||
48 | D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n", | 48 | D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n", |
49 | jeb->offset, jeb->offset, jeb->offset + c->sector_size)); | 49 | jeb->offset, jeb->offset, jeb->offset + c->sector_size)); |
50 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); | 50 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); |
51 | if (!instr) { | 51 | if (!instr) { |
52 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); | 52 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); |
53 | mutex_lock(&c->erase_free_sem); | 53 | mutex_lock(&c->erase_free_sem); |
54 | spin_lock(&c->erase_completion_lock); | 54 | spin_lock(&c->erase_completion_lock); |
55 | list_move(&jeb->list, &c->erase_pending_list); | 55 | list_move(&jeb->list, &c->erase_pending_list); |
56 | c->erasing_size -= c->sector_size; | 56 | c->erasing_size -= c->sector_size; |
57 | c->dirty_size += c->sector_size; | 57 | c->dirty_size += c->sector_size; |
58 | jeb->dirty_size = c->sector_size; | 58 | jeb->dirty_size = c->sector_size; |
59 | spin_unlock(&c->erase_completion_lock); | 59 | spin_unlock(&c->erase_completion_lock); |
60 | mutex_unlock(&c->erase_free_sem); | 60 | mutex_unlock(&c->erase_free_sem); |
61 | return; | 61 | return; |
62 | } | 62 | } |
63 | 63 | ||
64 | memset(instr, 0, sizeof(*instr)); | 64 | memset(instr, 0, sizeof(*instr)); |
65 | 65 | ||
66 | instr->mtd = c->mtd; | 66 | instr->mtd = c->mtd; |
67 | instr->addr = jeb->offset; | 67 | instr->addr = jeb->offset; |
68 | instr->len = c->sector_size; | 68 | instr->len = c->sector_size; |
69 | instr->callback = jffs2_erase_callback; | 69 | instr->callback = jffs2_erase_callback; |
70 | instr->priv = (unsigned long)(&instr[1]); | 70 | instr->priv = (unsigned long)(&instr[1]); |
71 | instr->fail_addr = 0xffffffff; | 71 | instr->fail_addr = 0xffffffff; |
72 | 72 | ||
73 | ((struct erase_priv_struct *)instr->priv)->jeb = jeb; | 73 | ((struct erase_priv_struct *)instr->priv)->jeb = jeb; |
74 | ((struct erase_priv_struct *)instr->priv)->c = c; | 74 | ((struct erase_priv_struct *)instr->priv)->c = c; |
75 | 75 | ||
76 | ret = c->mtd->erase(c->mtd, instr); | 76 | ret = c->mtd->erase(c->mtd, instr); |
77 | if (!ret) | 77 | if (!ret) |
78 | return; | 78 | return; |
79 | 79 | ||
80 | bad_offset = instr->fail_addr; | 80 | bad_offset = instr->fail_addr; |
81 | kfree(instr); | 81 | kfree(instr); |
82 | #endif /* __ECOS */ | 82 | #endif /* __ECOS */ |
83 | 83 | ||
84 | if (ret == -ENOMEM || ret == -EAGAIN) { | 84 | if (ret == -ENOMEM || ret == -EAGAIN) { |
85 | /* Erase failed immediately. Refile it on the list */ | 85 | /* Erase failed immediately. Refile it on the list */ |
86 | D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret)); | 86 | D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret)); |
87 | mutex_lock(&c->erase_free_sem); | 87 | mutex_lock(&c->erase_free_sem); |
88 | spin_lock(&c->erase_completion_lock); | 88 | spin_lock(&c->erase_completion_lock); |
89 | list_move(&jeb->list, &c->erase_pending_list); | 89 | list_move(&jeb->list, &c->erase_pending_list); |
90 | c->erasing_size -= c->sector_size; | 90 | c->erasing_size -= c->sector_size; |
91 | c->dirty_size += c->sector_size; | 91 | c->dirty_size += c->sector_size; |
92 | jeb->dirty_size = c->sector_size; | 92 | jeb->dirty_size = c->sector_size; |
93 | spin_unlock(&c->erase_completion_lock); | 93 | spin_unlock(&c->erase_completion_lock); |
94 | mutex_unlock(&c->erase_free_sem); | 94 | mutex_unlock(&c->erase_free_sem); |
95 | return; | 95 | return; |
96 | } | 96 | } |
97 | 97 | ||
98 | if (ret == -EROFS) | 98 | if (ret == -EROFS) |
99 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); | 99 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); |
100 | else | 100 | else |
101 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); | 101 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); |
102 | 102 | ||
103 | jffs2_erase_failed(c, jeb, bad_offset); | 103 | jffs2_erase_failed(c, jeb, bad_offset); |
104 | } | 104 | } |
105 | 105 | ||
106 | void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) | 106 | void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) |
107 | { | 107 | { |
108 | struct jffs2_eraseblock *jeb; | 108 | struct jffs2_eraseblock *jeb; |
109 | 109 | ||
110 | mutex_lock(&c->erase_free_sem); | 110 | mutex_lock(&c->erase_free_sem); |
111 | 111 | ||
112 | spin_lock(&c->erase_completion_lock); | 112 | spin_lock(&c->erase_completion_lock); |
113 | 113 | ||
114 | while (!list_empty(&c->erase_complete_list) || | 114 | while (!list_empty(&c->erase_complete_list) || |
115 | !list_empty(&c->erase_pending_list)) { | 115 | !list_empty(&c->erase_pending_list)) { |
116 | 116 | ||
117 | if (!list_empty(&c->erase_complete_list)) { | 117 | if (!list_empty(&c->erase_complete_list)) { |
118 | jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list); | 118 | jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list); |
119 | list_del(&jeb->list); | 119 | list_move(&jeb->list, &c->erase_checking_list); |
120 | spin_unlock(&c->erase_completion_lock); | 120 | spin_unlock(&c->erase_completion_lock); |
121 | mutex_unlock(&c->erase_free_sem); | 121 | mutex_unlock(&c->erase_free_sem); |
122 | jffs2_mark_erased_block(c, jeb); | 122 | jffs2_mark_erased_block(c, jeb); |
123 | 123 | ||
124 | if (!--count) { | 124 | if (!--count) { |
125 | D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n")); | 125 | D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n")); |
126 | goto done; | 126 | goto done; |
127 | } | 127 | } |
128 | 128 | ||
129 | } else if (!list_empty(&c->erase_pending_list)) { | 129 | } else if (!list_empty(&c->erase_pending_list)) { |
130 | jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); | 130 | jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); |
131 | D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset)); | 131 | D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset)); |
132 | list_del(&jeb->list); | 132 | list_del(&jeb->list); |
133 | c->erasing_size += c->sector_size; | 133 | c->erasing_size += c->sector_size; |
134 | c->wasted_size -= jeb->wasted_size; | 134 | c->wasted_size -= jeb->wasted_size; |
135 | c->free_size -= jeb->free_size; | 135 | c->free_size -= jeb->free_size; |
136 | c->used_size -= jeb->used_size; | 136 | c->used_size -= jeb->used_size; |
137 | c->dirty_size -= jeb->dirty_size; | 137 | c->dirty_size -= jeb->dirty_size; |
138 | jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0; | 138 | jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0; |
139 | jffs2_free_jeb_node_refs(c, jeb); | 139 | jffs2_free_jeb_node_refs(c, jeb); |
140 | list_add(&jeb->list, &c->erasing_list); | 140 | list_add(&jeb->list, &c->erasing_list); |
141 | spin_unlock(&c->erase_completion_lock); | 141 | spin_unlock(&c->erase_completion_lock); |
142 | mutex_unlock(&c->erase_free_sem); | 142 | mutex_unlock(&c->erase_free_sem); |
143 | 143 | ||
144 | jffs2_erase_block(c, jeb); | 144 | jffs2_erase_block(c, jeb); |
145 | 145 | ||
146 | } else { | 146 | } else { |
147 | BUG(); | 147 | BUG(); |
148 | } | 148 | } |
149 | 149 | ||
150 | /* Be nice */ | 150 | /* Be nice */ |
151 | yield(); | 151 | yield(); |
152 | mutex_lock(&c->erase_free_sem); | 152 | mutex_lock(&c->erase_free_sem); |
153 | spin_lock(&c->erase_completion_lock); | 153 | spin_lock(&c->erase_completion_lock); |
154 | } | 154 | } |
155 | 155 | ||
156 | spin_unlock(&c->erase_completion_lock); | 156 | spin_unlock(&c->erase_completion_lock); |
157 | mutex_unlock(&c->erase_free_sem); | 157 | mutex_unlock(&c->erase_free_sem); |
158 | done: | 158 | done: |
159 | D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n")); | 159 | D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n")); |
160 | } | 160 | } |
161 | 161 | ||
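Editor's note on the change above: the eraseblock taken off erase_complete_list is now parked on erase_checking_list with list_move() instead of being unlinked with list_del(), which matches the new erase_checking_list section added to the block-list dump in debug.c above. For readers unfamiliar with the kernel list helpers, list_move() is simply a delete followed by an add at the head of the destination list; the following is a minimal user-space mirror of that pattern (it is not the <linux/list.h> implementation):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del_entry(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_head(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

static void list_move_head(struct list_head *e, struct list_head *head)
{
	list_del_entry(e);              /* leave the list the entry is currently on... */
	list_add_head(e, head);         /* ...and reappear at the head of the new one */
}

int main(void)
{
	struct list_head complete, checking, jeb;

	list_init(&complete);
	list_init(&checking);
	list_add_head(&jeb, &complete);         /* block finished erasing */
	list_move_head(&jeb, &checking);        /* park it while it is checked */
	printf("on checking list: %d\n", checking.next == &jeb);
	return 0;
}

Because the entry is always on exactly one list before and after the move, any walker that holds the appropriate lock will find it on either erase_complete_list or erase_checking_list.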
162 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 162 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
163 | { | 163 | { |
164 | D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset)); | 164 | D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset)); |
165 | mutex_lock(&c->erase_free_sem); | 165 | mutex_lock(&c->erase_free_sem); |
166 | spin_lock(&c->erase_completion_lock); | 166 | spin_lock(&c->erase_completion_lock); |
167 | list_move_tail(&jeb->list, &c->erase_complete_list); | 167 | list_move_tail(&jeb->list, &c->erase_complete_list); |
168 | spin_unlock(&c->erase_completion_lock); | 168 | spin_unlock(&c->erase_completion_lock); |
169 | mutex_unlock(&c->erase_free_sem); | 169 | mutex_unlock(&c->erase_free_sem); |
170 | /* Ensure that kupdated calls us again to mark them clean */ | 170 | /* Ensure that kupdated calls us again to mark them clean */ |
171 | jffs2_erase_pending_trigger(c); | 171 | jffs2_erase_pending_trigger(c); |
172 | } | 172 | } |
173 | 173 | ||
174 | static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) | 174 | static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) |
175 | { | 175 | { |
176 | /* For NAND, if the failure did not occur at the device level for a | 176 | /* For NAND, if the failure did not occur at the device level for a |
177 | specific physical page, don't bother updating the bad block table. */ | 177 | specific physical page, don't bother updating the bad block table. */ |
178 | if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) { | 178 | if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) { |
179 | /* We had a device-level failure to erase. Let's see if we've | 179 | /* We had a device-level failure to erase. Let's see if we've |
180 | failed too many times. */ | 180 | failed too many times. */ |
181 | if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { | 181 | if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { |
182 | /* We'd like to give this block another try. */ | 182 | /* We'd like to give this block another try. */ |
183 | mutex_lock(&c->erase_free_sem); | 183 | mutex_lock(&c->erase_free_sem); |
184 | spin_lock(&c->erase_completion_lock); | 184 | spin_lock(&c->erase_completion_lock); |
185 | list_move(&jeb->list, &c->erase_pending_list); | 185 | list_move(&jeb->list, &c->erase_pending_list); |
186 | c->erasing_size -= c->sector_size; | 186 | c->erasing_size -= c->sector_size; |
187 | c->dirty_size += c->sector_size; | 187 | c->dirty_size += c->sector_size; |
188 | jeb->dirty_size = c->sector_size; | 188 | jeb->dirty_size = c->sector_size; |
189 | spin_unlock(&c->erase_completion_lock); | 189 | spin_unlock(&c->erase_completion_lock); |
190 | mutex_unlock(&c->erase_free_sem); | 190 | mutex_unlock(&c->erase_free_sem); |
191 | return; | 191 | return; |
192 | } | 192 | } |
193 | } | 193 | } |
194 | 194 | ||
195 | mutex_lock(&c->erase_free_sem); | 195 | mutex_lock(&c->erase_free_sem); |
196 | spin_lock(&c->erase_completion_lock); | 196 | spin_lock(&c->erase_completion_lock); |
197 | c->erasing_size -= c->sector_size; | 197 | c->erasing_size -= c->sector_size; |
198 | c->bad_size += c->sector_size; | 198 | c->bad_size += c->sector_size; |
199 | list_move(&jeb->list, &c->bad_list); | 199 | list_move(&jeb->list, &c->bad_list); |
200 | c->nr_erasing_blocks--; | 200 | c->nr_erasing_blocks--; |
201 | spin_unlock(&c->erase_completion_lock); | 201 | spin_unlock(&c->erase_completion_lock); |
202 | mutex_unlock(&c->erase_free_sem); | 202 | mutex_unlock(&c->erase_free_sem); |
203 | wake_up(&c->erase_wait); | 203 | wake_up(&c->erase_wait); |
204 | } | 204 | } |
205 | 205 | ||
206 | #ifndef __ECOS | 206 | #ifndef __ECOS |
207 | static void jffs2_erase_callback(struct erase_info *instr) | 207 | static void jffs2_erase_callback(struct erase_info *instr) |
208 | { | 208 | { |
209 | struct erase_priv_struct *priv = (void *)instr->priv; | 209 | struct erase_priv_struct *priv = (void *)instr->priv; |
210 | 210 | ||
211 | if(instr->state != MTD_ERASE_DONE) { | 211 | if(instr->state != MTD_ERASE_DONE) { |
212 | printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state); | 212 | printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state); |
213 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); | 213 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); |
214 | } else { | 214 | } else { |
215 | jffs2_erase_succeeded(priv->c, priv->jeb); | 215 | jffs2_erase_succeeded(priv->c, priv->jeb); |
216 | } | 216 | } |
217 | kfree(instr); | 217 | kfree(instr); |
218 | } | 218 | } |
219 | #endif /* !__ECOS */ | 219 | #endif /* !__ECOS */ |
220 | 220 | ||
221 | /* Hmmm. Maybe we should accept the extra space it takes and make | 221 | /* Hmmm. Maybe we should accept the extra space it takes and make |
222 | this a standard doubly-linked list? */ | 222 | this a standard doubly-linked list? */ |
223 | static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | 223 | static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, |
224 | struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb) | 224 | struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb) |
225 | { | 225 | { |
226 | struct jffs2_inode_cache *ic = NULL; | 226 | struct jffs2_inode_cache *ic = NULL; |
227 | struct jffs2_raw_node_ref **prev; | 227 | struct jffs2_raw_node_ref **prev; |
228 | 228 | ||
229 | prev = &ref->next_in_ino; | 229 | prev = &ref->next_in_ino; |
230 | 230 | ||
231 | /* Walk the inode's list once, removing any nodes from this eraseblock */ | 231 | /* Walk the inode's list once, removing any nodes from this eraseblock */ |
232 | while (1) { | 232 | while (1) { |
233 | if (!(*prev)->next_in_ino) { | 233 | if (!(*prev)->next_in_ino) { |
234 | /* We're looking at the jffs2_inode_cache, which is | 234 | /* We're looking at the jffs2_inode_cache, which is |
235 | at the end of the linked list. Stash it and continue | 235 | at the end of the linked list. Stash it and continue |
236 | from the beginning of the list */ | 236 | from the beginning of the list */ |
237 | ic = (struct jffs2_inode_cache *)(*prev); | 237 | ic = (struct jffs2_inode_cache *)(*prev); |
238 | prev = &ic->nodes; | 238 | prev = &ic->nodes; |
239 | continue; | 239 | continue; |
240 | } | 240 | } |
241 | 241 | ||
242 | if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) { | 242 | if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) { |
243 | /* It's in the block we're erasing */ | 243 | /* It's in the block we're erasing */ |
244 | struct jffs2_raw_node_ref *this; | 244 | struct jffs2_raw_node_ref *this; |
245 | 245 | ||
246 | this = *prev; | 246 | this = *prev; |
247 | *prev = this->next_in_ino; | 247 | *prev = this->next_in_ino; |
248 | this->next_in_ino = NULL; | 248 | this->next_in_ino = NULL; |
249 | 249 | ||
250 | if (this == ref) | 250 | if (this == ref) |
251 | break; | 251 | break; |
252 | 252 | ||
253 | continue; | 253 | continue; |
254 | } | 254 | } |
255 | /* Not to be deleted. Skip */ | 255 | /* Not to be deleted. Skip */ |
256 | prev = &((*prev)->next_in_ino); | 256 | prev = &((*prev)->next_in_ino); |
257 | } | 257 | } |
258 | 258 | ||
259 | /* PARANOIA */ | 259 | /* PARANOIA */ |
260 | if (!ic) { | 260 | if (!ic) { |
261 | JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref" | 261 | JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref" |
262 | " not found in remove_node_refs()!!\n"); | 262 | " not found in remove_node_refs()!!\n"); |
263 | return; | 263 | return; |
264 | } | 264 | } |
265 | 265 | ||
266 | D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", | 266 | D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", |
267 | jeb->offset, jeb->offset + c->sector_size, ic->ino)); | 267 | jeb->offset, jeb->offset + c->sector_size, ic->ino)); |
268 | 268 | ||
269 | D2({ | 269 | D2({ |
270 | int i=0; | 270 | int i=0; |
271 | struct jffs2_raw_node_ref *this; | 271 | struct jffs2_raw_node_ref *this; |
272 | printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); | 272 | printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); |
273 | 273 | ||
274 | this = ic->nodes; | 274 | this = ic->nodes; |
275 | 275 | ||
276 | while(this) { | 276 | while(this) { |
277 | printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); | 277 | printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); |
278 | if (++i == 5) { | 278 | if (++i == 5) { |
279 | printk("\n" KERN_DEBUG); | 279 | printk("\n" KERN_DEBUG); |
280 | i=0; | 280 | i=0; |
281 | } | 281 | } |
282 | this = this->next_in_ino; | 282 | this = this->next_in_ino; |
283 | } | 283 | } |
284 | printk("\n"); | 284 | printk("\n"); |
285 | }); | 285 | }); |
286 | 286 | ||
287 | switch (ic->class) { | 287 | switch (ic->class) { |
288 | #ifdef CONFIG_JFFS2_FS_XATTR | 288 | #ifdef CONFIG_JFFS2_FS_XATTR |
289 | case RAWNODE_CLASS_XATTR_DATUM: | 289 | case RAWNODE_CLASS_XATTR_DATUM: |
290 | jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); | 290 | jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); |
291 | break; | 291 | break; |
292 | case RAWNODE_CLASS_XATTR_REF: | 292 | case RAWNODE_CLASS_XATTR_REF: |
293 | jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); | 293 | jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); |
294 | break; | 294 | break; |
295 | #endif | 295 | #endif |
296 | default: | 296 | default: |
297 | if (ic->nodes == (void *)ic && ic->nlink == 0) | 297 | if (ic->nodes == (void *)ic && ic->nlink == 0) |
298 | jffs2_del_ino_cache(c, ic); | 298 | jffs2_del_ino_cache(c, ic); |
299 | } | 299 | } |
300 | } | 300 | } |
301 | 301 | ||
302 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 302 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
303 | { | 303 | { |
304 | struct jffs2_raw_node_ref *block, *ref; | 304 | struct jffs2_raw_node_ref *block, *ref; |
305 | D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); | 305 | D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); |
306 | 306 | ||
307 | block = ref = jeb->first_node; | 307 | block = ref = jeb->first_node; |
308 | 308 | ||
309 | while (ref) { | 309 | while (ref) { |
310 | if (ref->flash_offset == REF_LINK_NODE) { | 310 | if (ref->flash_offset == REF_LINK_NODE) { |
311 | ref = ref->next_in_ino; | 311 | ref = ref->next_in_ino; |
312 | jffs2_free_refblock(block); | 312 | jffs2_free_refblock(block); |
313 | block = ref; | 313 | block = ref; |
314 | continue; | 314 | continue; |
315 | } | 315 | } |
316 | if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino) | 316 | if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino) |
317 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); | 317 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); |
318 | /* else it was a non-inode node or already removed, so don't bother */ | 318 | /* else it was a non-inode node or already removed, so don't bother */ |
319 | 319 | ||
320 | ref++; | 320 | ref++; |
321 | } | 321 | } |
322 | jeb->first_node = jeb->last_node = NULL; | 322 | jeb->first_node = jeb->last_node = NULL; |
323 | } | 323 | } |
324 | 324 | ||
325 | static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) | 325 | static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) |
326 | { | 326 | { |
327 | void *ebuf; | 327 | void *ebuf; |
328 | uint32_t ofs; | 328 | uint32_t ofs; |
329 | size_t retlen; | 329 | size_t retlen; |
330 | int ret = -EIO; | 330 | int ret = -EIO; |
331 | 331 | ||
332 | if (c->mtd->point) { | 332 | if (c->mtd->point) { |
333 | unsigned long *wordebuf; | 333 | unsigned long *wordebuf; |
334 | 334 | ||
335 | ret = c->mtd->point(c->mtd, jeb->offset, c->sector_size, &retlen, (unsigned char **)&ebuf); | 335 | ret = c->mtd->point(c->mtd, jeb->offset, c->sector_size, &retlen, (unsigned char **)&ebuf); |
336 | if (ret) { | 336 | if (ret) { |
337 | D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); | 337 | D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); |
338 | goto do_flash_read; | 338 | goto do_flash_read; |
339 | } | 339 | } |
340 | if (retlen < c->sector_size) { | 340 | if (retlen < c->sector_size) { |
341 | /* Don't muck about if it won't let us point to the whole erase sector */ | 341 | /* Don't muck about if it won't let us point to the whole erase sector */ |
342 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); | 342 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); |
343 | c->mtd->unpoint(c->mtd, ebuf, jeb->offset, retlen); | 343 | c->mtd->unpoint(c->mtd, ebuf, jeb->offset, retlen); |
344 | goto do_flash_read; | 344 | goto do_flash_read; |
345 | } | 345 | } |
346 | wordebuf = ebuf-sizeof(*wordebuf); | 346 | wordebuf = ebuf-sizeof(*wordebuf); |
347 | retlen /= sizeof(*wordebuf); | 347 | retlen /= sizeof(*wordebuf); |
348 | do { | 348 | do { |
349 | if (*++wordebuf != ~0) | 349 | if (*++wordebuf != ~0) |
350 | break; | 350 | break; |
351 | } while(--retlen); | 351 | } while(--retlen); |
352 | c->mtd->unpoint(c->mtd, ebuf, jeb->offset, c->sector_size); | 352 | c->mtd->unpoint(c->mtd, ebuf, jeb->offset, c->sector_size); |
353 | if (retlen) { | 353 | if (retlen) { |
354 | printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n", | 354 | printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n", |
355 | *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf)); | 355 | *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf)); |
356 | return -EIO; | 356 | return -EIO; |
357 | } | 357 | } |
358 | return 0; | 358 | return 0; |
359 | } | 359 | } |
360 | do_flash_read: | 360 | do_flash_read: |
361 | ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 361 | ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
362 | if (!ebuf) { | 362 | if (!ebuf) { |
363 | printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); | 363 | printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); |
364 | return -EAGAIN; | 364 | return -EAGAIN; |
365 | } | 365 | } |
366 | 366 | ||
367 | D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset)); | 367 | D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset)); |
368 | 368 | ||
369 | for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { | 369 | for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { |
370 | uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); | 370 | uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); |
371 | int i; | 371 | int i; |
372 | 372 | ||
373 | *bad_offset = ofs; | 373 | *bad_offset = ofs; |
374 | 374 | ||
375 | ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf); | 375 | ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf); |
376 | if (ret) { | 376 | if (ret) { |
377 | printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret); | 377 | printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret); |
378 | ret = -EIO; | 378 | ret = -EIO; |
379 | goto fail; | 379 | goto fail; |
380 | } | 380 | } |
381 | if (retlen != readlen) { | 381 | if (retlen != readlen) { |
382 | printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen); | 382 | printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen); |
383 | ret = -EIO; | 383 | ret = -EIO; |
384 | goto fail; | 384 | goto fail; |
385 | } | 385 | } |
386 | for (i=0; i<readlen; i += sizeof(unsigned long)) { | 386 | for (i=0; i<readlen; i += sizeof(unsigned long)) { |
387 | /* It's OK. We know it's properly aligned */ | 387 | /* It's OK. We know it's properly aligned */ |
388 | unsigned long *datum = ebuf + i; | 388 | unsigned long *datum = ebuf + i; |
389 | if (*datum + 1) { | 389 | if (*datum + 1) { |
390 | *bad_offset += i; | 390 | *bad_offset += i; |
391 | printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset); | 391 | printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset); |
392 | ret = -EIO; | 392 | ret = -EIO; |
393 | goto fail; | 393 | goto fail; |
394 | } | 394 | } |
395 | } | 395 | } |
396 | ofs += readlen; | 396 | ofs += readlen; |
397 | cond_resched(); | 397 | cond_resched(); |
398 | } | 398 | } |
399 | ret = 0; | 399 | ret = 0; |
400 | fail: | 400 | fail: |
401 | kfree(ebuf); | 401 | kfree(ebuf); |
402 | return ret; | 402 | return ret; |
403 | } | 403 | } |
404 | 404 | ||
405 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 405 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
406 | { | 406 | { |
407 | size_t retlen; | 407 | size_t retlen; |
408 | int ret; | 408 | int ret; |
409 | uint32_t uninitialized_var(bad_offset); | 409 | uint32_t uninitialized_var(bad_offset); |
410 | 410 | ||
411 | switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { | 411 | switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { |
412 | case -EAGAIN: goto refile; | 412 | case -EAGAIN: goto refile; |
413 | case -EIO: goto filebad; | 413 | case -EIO: goto filebad; |
414 | } | 414 | } |
415 | 415 | ||
416 | /* Write the erase complete marker */ | 416 | /* Write the erase complete marker */ |
417 | D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); | 417 | D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); |
418 | bad_offset = jeb->offset; | 418 | bad_offset = jeb->offset; |
419 | 419 | ||
420 | /* Cleanmarker in oob area or no cleanmarker at all ? */ | 420 | /* Cleanmarker in oob area or no cleanmarker at all ? */ |
421 | if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) { | 421 | if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) { |
422 | 422 | ||
423 | if (jffs2_cleanmarker_oob(c)) { | 423 | if (jffs2_cleanmarker_oob(c)) { |
424 | if (jffs2_write_nand_cleanmarker(c, jeb)) | 424 | if (jffs2_write_nand_cleanmarker(c, jeb)) |
425 | goto filebad; | 425 | goto filebad; |
426 | } | 426 | } |
427 | } else { | 427 | } else { |
428 | 428 | ||
429 | struct kvec vecs[1]; | 429 | struct kvec vecs[1]; |
430 | struct jffs2_unknown_node marker = { | 430 | struct jffs2_unknown_node marker = { |
431 | .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK), | 431 | .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK), |
432 | .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), | 432 | .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), |
433 | .totlen = cpu_to_je32(c->cleanmarker_size) | 433 | .totlen = cpu_to_je32(c->cleanmarker_size) |
434 | }; | 434 | }; |
435 | 435 | ||
436 | jffs2_prealloc_raw_node_refs(c, jeb, 1); | 436 | jffs2_prealloc_raw_node_refs(c, jeb, 1); |
437 | 437 | ||
438 | marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); | 438 | marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); |
439 | 439 | ||
440 | vecs[0].iov_base = (unsigned char *) &marker; | 440 | vecs[0].iov_base = (unsigned char *) &marker; |
441 | vecs[0].iov_len = sizeof(marker); | 441 | vecs[0].iov_len = sizeof(marker); |
442 | ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); | 442 | ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); |
443 | 443 | ||
444 | if (ret || retlen != sizeof(marker)) { | 444 | if (ret || retlen != sizeof(marker)) { |
445 | if (ret) | 445 | if (ret) |
446 | printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", | 446 | printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", |
447 | jeb->offset, ret); | 447 | jeb->offset, ret); |
448 | else | 448 | else |
449 | printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", | 449 | printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", |
450 | jeb->offset, sizeof(marker), retlen); | 450 | jeb->offset, sizeof(marker), retlen); |
451 | 451 | ||
452 | goto filebad; | 452 | goto filebad; |
453 | } | 453 | } |
454 | } | 454 | } |
455 | /* Everything else got zeroed before the erase */ | 455 | /* Everything else got zeroed before the erase */ |
456 | jeb->free_size = c->sector_size; | 456 | jeb->free_size = c->sector_size; |
457 | 457 | ||
458 | mutex_lock(&c->erase_free_sem); | 458 | mutex_lock(&c->erase_free_sem); |
459 | spin_lock(&c->erase_completion_lock); | 459 | spin_lock(&c->erase_completion_lock); |
460 | 460 | ||
461 | c->erasing_size -= c->sector_size; | 461 | c->erasing_size -= c->sector_size; |
462 | c->free_size += c->sector_size; | 462 | c->free_size += c->sector_size; |
463 | 463 | ||
464 | /* Account for cleanmarker now, if it's in-band */ | 464 | /* Account for cleanmarker now, if it's in-band */ |
465 | if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c)) | 465 | if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c)) |
466 | jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL); | 466 | jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL); |
467 | 467 | ||
468 | list_add_tail(&jeb->list, &c->free_list); | 468 | list_move_tail(&jeb->list, &c->free_list); |
469 | c->nr_erasing_blocks--; | 469 | c->nr_erasing_blocks--; |
470 | c->nr_free_blocks++; | 470 | c->nr_free_blocks++; |
471 | 471 | ||
472 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); | 472 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); |
473 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | 473 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
474 | 474 | ||
475 | spin_unlock(&c->erase_completion_lock); | 475 | spin_unlock(&c->erase_completion_lock); |
476 | mutex_unlock(&c->erase_free_sem); | 476 | mutex_unlock(&c->erase_free_sem); |
477 | wake_up(&c->erase_wait); | 477 | wake_up(&c->erase_wait); |
478 | return; | 478 | return; |
479 | 479 | ||
480 | filebad: | 480 | filebad: |
481 | mutex_lock(&c->erase_free_sem); | 481 | mutex_lock(&c->erase_free_sem); |
482 | spin_lock(&c->erase_completion_lock); | 482 | spin_lock(&c->erase_completion_lock); |
483 | /* Stick it on a list (any list) so erase_failed can take it | 483 | /* Stick it on a list (any list) so erase_failed can take it |
484 | right off again. Silly, but shouldn't happen often. */ | 484 | right off again. Silly, but shouldn't happen often. */ |
485 | list_add(&jeb->list, &c->erasing_list); | 485 | list_move(&jeb->list, &c->erasing_list); |
486 | spin_unlock(&c->erase_completion_lock); | 486 | spin_unlock(&c->erase_completion_lock); |
487 | mutex_unlock(&c->erase_free_sem); | 487 | mutex_unlock(&c->erase_free_sem); |
488 | jffs2_erase_failed(c, jeb, bad_offset); | 488 | jffs2_erase_failed(c, jeb, bad_offset); |
489 | return; | 489 | return; |
490 | 490 | ||
491 | refile: | 491 | refile: |
492 | /* Stick it back on the list from whence it came and come back later */ | 492 | /* Stick it back on the list from whence it came and come back later */ |
493 | jffs2_erase_pending_trigger(c); | 493 | jffs2_erase_pending_trigger(c); |
494 | mutex_lock(&c->erase_free_sem); | 494 | mutex_lock(&c->erase_free_sem); |
495 | spin_lock(&c->erase_completion_lock); | 495 | spin_lock(&c->erase_completion_lock); |
496 | list_add(&jeb->list, &c->erase_complete_list); | 496 | list_move(&jeb->list, &c->erase_complete_list); |
497 | spin_unlock(&c->erase_completion_lock); | 497 | spin_unlock(&c->erase_completion_lock); |
498 | mutex_unlock(&c->erase_free_sem); | 498 | mutex_unlock(&c->erase_free_sem); |
499 | return; | 499 | return; |
500 | } | 500 | } |
501 | 501 |
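
Note that jffs2_mark_erased_block() does the erase check and cleanmarker write without holding erase_free_sem or erase_completion_lock (it only takes them afterwards to refile the block), so the caller has to drop the locks first, and the block would otherwise sit on no list at all during that window. That is what the new erase_checking_list (declared in jffs2_fs_sb.h below) is for. A hedged sketch of the kind of caller this implies, assuming a dispatcher along the lines of jffs2_erase_pending_blocks() — the commit's real caller changes are in the part of erase.c not shown in this excerpt:

    /* Hypothetical caller sketch: park the block on erase_checking_list
     * before the locks are dropped, so the debug accounting still sees it. */
    static void example_check_one_erased_block(struct jffs2_sb_info *c)
    {
            struct jffs2_eraseblock *jeb;

            mutex_lock(&c->erase_free_sem);
            spin_lock(&c->erase_completion_lock);

            if (list_empty(&c->erase_complete_list)) {
                    spin_unlock(&c->erase_completion_lock);
                    mutex_unlock(&c->erase_free_sem);
                    return;
            }

            jeb = list_entry(c->erase_complete_list.next,
                             struct jffs2_eraseblock, list);
            /* keeping the block on *some* list is the whole point */
            list_move(&jeb->list, &c->erase_checking_list);

            spin_unlock(&c->erase_completion_lock);
            mutex_unlock(&c->erase_free_sem);

            /* retakes the locks itself and refiles the block when done */
            jffs2_mark_erased_block(c, jeb);
    }

This reading also explains the right-hand side of the hunks above: list_add()/list_add_tail() become list_move()/list_move_tail() because the block now arrives already linked on erase_checking_list and has to be unlinked from it under the same locks.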
fs/jffs2/jffs2_fs_sb.h
1 | /* | 1 | /* |
2 | * JFFS2 -- Journalling Flash File System, Version 2. | 2 | * JFFS2 -- Journalling Flash File System, Version 2. |
3 | * | 3 | * |
4 | * Copyright © 2001-2007 Red Hat, Inc. | 4 | * Copyright © 2001-2007 Red Hat, Inc. |
5 | * | 5 | * |
6 | * Created by David Woodhouse <dwmw2@infradead.org> | 6 | * Created by David Woodhouse <dwmw2@infradead.org> |
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef _JFFS2_FS_SB | 12 | #ifndef _JFFS2_FS_SB |
13 | #define _JFFS2_FS_SB | 13 | #define _JFFS2_FS_SB |
14 | 14 | ||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/workqueue.h> | 17 | #include <linux/workqueue.h> |
18 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/timer.h> | 20 | #include <linux/timer.h> |
21 | #include <linux/wait.h> | 21 | #include <linux/wait.h> |
22 | #include <linux/list.h> | 22 | #include <linux/list.h> |
23 | #include <linux/rwsem.h> | 23 | #include <linux/rwsem.h> |
24 | 24 | ||
25 | #define JFFS2_SB_FLAG_RO 1 | 25 | #define JFFS2_SB_FLAG_RO 1 |
26 | #define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */ | 26 | #define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */ |
27 | #define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */ | 27 | #define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */ |
28 | 28 | ||
29 | struct jffs2_inodirty; | 29 | struct jffs2_inodirty; |
30 | 30 | ||
31 | /* A struct for the overall file system control. Pointers to | 31 | /* A struct for the overall file system control. Pointers to |
32 | jffs2_sb_info structs are named `c' in the source code. | 32 | jffs2_sb_info structs are named `c' in the source code. |
33 | Nee jffs_control | 33 | Nee jffs_control |
34 | */ | 34 | */ |
35 | struct jffs2_sb_info { | 35 | struct jffs2_sb_info { |
36 | struct mtd_info *mtd; | 36 | struct mtd_info *mtd; |
37 | 37 | ||
38 | uint32_t highest_ino; | 38 | uint32_t highest_ino; |
39 | uint32_t checked_ino; | 39 | uint32_t checked_ino; |
40 | 40 | ||
41 | unsigned int flags; | 41 | unsigned int flags; |
42 | 42 | ||
43 | struct task_struct *gc_task; /* GC task struct */ | 43 | struct task_struct *gc_task; /* GC task struct */ |
44 | struct completion gc_thread_start; /* GC thread start completion */ | 44 | struct completion gc_thread_start; /* GC thread start completion */ |
45 | struct completion gc_thread_exit; /* GC thread exit completion port */ | 45 | struct completion gc_thread_exit; /* GC thread exit completion port */ |
46 | 46 | ||
47 | struct mutex alloc_sem; /* Used to protect all the following | 47 | struct mutex alloc_sem; /* Used to protect all the following |
48 | fields, and also to protect against | 48 | fields, and also to protect against |
49 | out-of-order writing of nodes. And GC. */ | 49 | out-of-order writing of nodes. And GC. */ |
50 | uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER | 50 | uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER |
51 | (i.e. zero for OOB CLEANMARKER */ | 51 | (i.e. zero for OOB CLEANMARKER */ |
52 | 52 | ||
53 | uint32_t flash_size; | 53 | uint32_t flash_size; |
54 | uint32_t used_size; | 54 | uint32_t used_size; |
55 | uint32_t dirty_size; | 55 | uint32_t dirty_size; |
56 | uint32_t wasted_size; | 56 | uint32_t wasted_size; |
57 | uint32_t free_size; | 57 | uint32_t free_size; |
58 | uint32_t erasing_size; | 58 | uint32_t erasing_size; |
59 | uint32_t bad_size; | 59 | uint32_t bad_size; |
60 | uint32_t sector_size; | 60 | uint32_t sector_size; |
61 | uint32_t unchecked_size; | 61 | uint32_t unchecked_size; |
62 | 62 | ||
63 | uint32_t nr_free_blocks; | 63 | uint32_t nr_free_blocks; |
64 | uint32_t nr_erasing_blocks; | 64 | uint32_t nr_erasing_blocks; |
65 | 65 | ||
66 | /* Number of free blocks there must be before we... */ | 66 | /* Number of free blocks there must be before we... */ |
67 | uint8_t resv_blocks_write; /* ... allow a normal filesystem write */ | 67 | uint8_t resv_blocks_write; /* ... allow a normal filesystem write */ |
68 | uint8_t resv_blocks_deletion; /* ... allow a normal filesystem deletion */ | 68 | uint8_t resv_blocks_deletion; /* ... allow a normal filesystem deletion */ |
69 | uint8_t resv_blocks_gctrigger; /* ... wake up the GC thread */ | 69 | uint8_t resv_blocks_gctrigger; /* ... wake up the GC thread */ |
70 | uint8_t resv_blocks_gcbad; /* ... pick a block from the bad_list to GC */ | 70 | uint8_t resv_blocks_gcbad; /* ... pick a block from the bad_list to GC */ |
71 | uint8_t resv_blocks_gcmerge; /* ... merge pages when garbage collecting */ | 71 | uint8_t resv_blocks_gcmerge; /* ... merge pages when garbage collecting */ |
72 | /* Number of 'very dirty' blocks before we trigger immediate GC */ | 72 | /* Number of 'very dirty' blocks before we trigger immediate GC */ |
73 | uint8_t vdirty_blocks_gctrigger; | 73 | uint8_t vdirty_blocks_gctrigger; |
74 | 74 | ||
75 | uint32_t nospc_dirty_size; | 75 | uint32_t nospc_dirty_size; |
76 | 76 | ||
77 | uint32_t nr_blocks; | 77 | uint32_t nr_blocks; |
78 | struct jffs2_eraseblock *blocks; /* The whole array of blocks. Used for getting blocks | 78 | struct jffs2_eraseblock *blocks; /* The whole array of blocks. Used for getting blocks |
79 | * from the offset (blocks[ofs / sector_size]) */ | 79 | * from the offset (blocks[ofs / sector_size]) */ |
80 | struct jffs2_eraseblock *nextblock; /* The block we're currently filling */ | 80 | struct jffs2_eraseblock *nextblock; /* The block we're currently filling */ |
81 | 81 | ||
82 | struct jffs2_eraseblock *gcblock; /* The block we're currently garbage-collecting */ | 82 | struct jffs2_eraseblock *gcblock; /* The block we're currently garbage-collecting */ |
83 | 83 | ||
84 | struct list_head clean_list; /* Blocks 100% full of clean data */ | 84 | struct list_head clean_list; /* Blocks 100% full of clean data */ |
85 | struct list_head very_dirty_list; /* Blocks with lots of dirty space */ | 85 | struct list_head very_dirty_list; /* Blocks with lots of dirty space */ |
86 | struct list_head dirty_list; /* Blocks with some dirty space */ | 86 | struct list_head dirty_list; /* Blocks with some dirty space */ |
87 | struct list_head erasable_list; /* Blocks which are completely dirty, and need erasing */ | 87 | struct list_head erasable_list; /* Blocks which are completely dirty, and need erasing */ |
88 | struct list_head erasable_pending_wbuf_list; /* Blocks which need erasing but only after the current wbuf is flushed */ | 88 | struct list_head erasable_pending_wbuf_list; /* Blocks which need erasing but only after the current wbuf is flushed */ |
89 | struct list_head erasing_list; /* Blocks which are currently erasing */ | 89 | struct list_head erasing_list; /* Blocks which are currently erasing */ |
| | 90 | struct list_head erase_checking_list; /* Blocks which are being checked and marked */ |
90 | struct list_head erase_pending_list; /* Blocks which need erasing now */ | 91 | struct list_head erase_pending_list; /* Blocks which need erasing now */ |
91 | struct list_head erase_complete_list; /* Blocks which are erased and need the clean marker written to them */ | 92 | struct list_head erase_complete_list; /* Blocks which are erased and need the clean marker written to them */ |
92 | struct list_head free_list; /* Blocks which are free and ready to be used */ | 93 | struct list_head free_list; /* Blocks which are free and ready to be used */ |
93 | struct list_head bad_list; /* Bad blocks. */ | 94 | struct list_head bad_list; /* Bad blocks. */ |
94 | struct list_head bad_used_list; /* Bad blocks with valid data in. */ | 95 | struct list_head bad_used_list; /* Bad blocks with valid data in. */ |
95 | 96 | ||
96 | spinlock_t erase_completion_lock; /* Protect free_list and erasing_list | 97 | spinlock_t erase_completion_lock; /* Protect free_list and erasing_list |
97 | against erase completion handler */ | 98 | against erase completion handler */ |
98 | wait_queue_head_t erase_wait; /* For waiting for erases to complete */ | 99 | wait_queue_head_t erase_wait; /* For waiting for erases to complete */ |
99 | 100 | ||
100 | wait_queue_head_t inocache_wq; | 101 | wait_queue_head_t inocache_wq; |
101 | struct jffs2_inode_cache **inocache_list; | 102 | struct jffs2_inode_cache **inocache_list; |
102 | spinlock_t inocache_lock; | 103 | spinlock_t inocache_lock; |
103 | 104 | ||
104 | /* Sem to allow jffs2_garbage_collect_deletion_dirent to | 105 | /* Sem to allow jffs2_garbage_collect_deletion_dirent to |
105 | drop the erase_completion_lock while it's holding a pointer | 106 | drop the erase_completion_lock while it's holding a pointer |
106 | to an obsoleted node. I don't like this. Alternatives welcomed. */ | 107 | to an obsoleted node. I don't like this. Alternatives welcomed. */ |
107 | struct mutex erase_free_sem; | 108 | struct mutex erase_free_sem; |
108 | 109 | ||
109 | uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */ | 110 | uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */ |
110 | 111 | ||
111 | #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY | 112 | #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY |
112 | unsigned char *wbuf_verify; /* read-back buffer for verification */ | 113 | unsigned char *wbuf_verify; /* read-back buffer for verification */ |
113 | #endif | 114 | #endif |
114 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 115 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
115 | unsigned char *wbuf; /* Write-behind buffer for NAND flash */ | 116 | unsigned char *wbuf; /* Write-behind buffer for NAND flash */ |
116 | uint32_t wbuf_ofs; | 117 | uint32_t wbuf_ofs; |
117 | uint32_t wbuf_len; | 118 | uint32_t wbuf_len; |
118 | struct jffs2_inodirty *wbuf_inodes; | 119 | struct jffs2_inodirty *wbuf_inodes; |
119 | struct rw_semaphore wbuf_sem; /* Protects the write buffer */ | 120 | struct rw_semaphore wbuf_sem; /* Protects the write buffer */ |
120 | 121 | ||
121 | unsigned char *oobbuf; | 122 | unsigned char *oobbuf; |
122 | int oobavail; /* How many bytes are available for JFFS2 in OOB */ | 123 | int oobavail; /* How many bytes are available for JFFS2 in OOB */ |
123 | #endif | 124 | #endif |
124 | 125 | ||
125 | struct jffs2_summary *summary; /* Summary information */ | 126 | struct jffs2_summary *summary; /* Summary information */ |
126 | 127 | ||
127 | #ifdef CONFIG_JFFS2_FS_XATTR | 128 | #ifdef CONFIG_JFFS2_FS_XATTR |
128 | #define XATTRINDEX_HASHSIZE (57) | 129 | #define XATTRINDEX_HASHSIZE (57) |
129 | uint32_t highest_xid; | 130 | uint32_t highest_xid; |
130 | uint32_t highest_xseqno; | 131 | uint32_t highest_xseqno; |
131 | struct list_head xattrindex[XATTRINDEX_HASHSIZE]; | 132 | struct list_head xattrindex[XATTRINDEX_HASHSIZE]; |
132 | struct list_head xattr_unchecked; | 133 | struct list_head xattr_unchecked; |
133 | struct list_head xattr_dead_list; | 134 | struct list_head xattr_dead_list; |
134 | struct jffs2_xattr_ref *xref_dead_list; | 135 | struct jffs2_xattr_ref *xref_dead_list; |
135 | struct jffs2_xattr_ref *xref_temp; | 136 | struct jffs2_xattr_ref *xref_temp; |
136 | struct rw_semaphore xattr_sem; | 137 | struct rw_semaphore xattr_sem; |
137 | uint32_t xdatum_mem_usage; | 138 | uint32_t xdatum_mem_usage; |
138 | uint32_t xdatum_mem_threshold; | 139 | uint32_t xdatum_mem_threshold; |
139 | #endif | 140 | #endif |
140 | /* OS-private pointer for getting back to master superblock info */ | 141 | /* OS-private pointer for getting back to master superblock info */ |
141 | void *os_priv; | 142 | void *os_priv; |
142 | }; | 143 | }; |
143 | 144 | ||
144 | #endif /* _JFFS2_FB_SB */ | 145 | #endif /* _JFFS2_FB_SB */ |
145 | 146 |
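
Two smaller pieces follow from the new field, presumably in build.c and the debug code (their hunks are not reproduced in this excerpt): the list head has to be initialised alongside the other per-superblock lists, and the debug accounting has to walk it when it adds all the blocks up. A hedged sketch of what those hunks might look like — the accumulator names (jeb, nr_counted, erasing) are assumptions matching the style of the surrounding debug code, not quotes:

    /* Hypothetical fragments only; where exactly they land is an assumption. */

    /* Initialise the new list head with the other per-sb lists,
     * e.g. wherever INIT_LIST_HEAD(&c->erasing_list) is already done: */
            INIT_LIST_HEAD(&c->erase_checking_list);

    /* Count blocks parked on it in the debug accounting pass, so the
     * per-list totals still sum to the flash size while a block is being
     * checked and cleanmarked.  The block's sector is still carried in
     * erasing_size at that point (it is only subtracted once the block
     * moves to the free_list above), so it belongs in the erasing bucket: */
            list_for_each_entry(jeb, &c->erase_checking_list, list) {
                    nr_counted++;
                    erasing += c->sector_size;
            }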