Commit a37b06d589f2c687a38d07569f4ef97c650fde39
Committed by: Phillip Lougher
Parent: 1d80cac0fe
Exists in: master and 7 other branches
Squashfs: fix breakage when page size > metadata block size
Squashfs is broken on any system where the page size is larger than the metadata block size (8192). This is easily fixed by ensuring cache->pages is always > 0.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Doug Chapman <doug.chapman@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Phillip Lougher <phillip@lougher.demon.co.uk>
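To make the failure mode concrete, here is a minimal standalone sketch of the arithmetic (not part of the commit): the constants below assume a 64 KiB-page system, i.e. a PAGE_CACHE_SHIFT of 16, against the 8192-byte Squashfs metadata block size.

    #include <stdio.h>

    #define PAGE_CACHE_SHIFT    16      /* assumed: 64 KiB pages */
    #define METADATA_BLOCK_SIZE 8192    /* Squashfs metadata block size */

    int main(void)
    {
            int pages = METADATA_BLOCK_SIZE >> PAGE_CACHE_SHIFT;

            /* 8192 >> 16 == 0: the cache would allocate zero page buffers,
             * leaving squashfs_read_data() with no room to decompress into. */
            printf("before fix: pages = %d\n", pages);

            /* The one-line fix clamps the count to at least one page. */
            pages = pages ? pages : 1;
            printf("after fix:  pages = %d\n", pages);

            return 0;
    }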
Showing 1 changed file with 1 addition and 0 deletions. Inline diff follows; the single added line is marked with a leading '+'.
fs/squashfs/cache.c
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@lougher.demon.co.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * cache.c
 */

/*
 * Blocks in Squashfs are compressed. To avoid repeatedly decompressing
 * recently accessed data, Squashfs uses two small metadata and fragment caches.
 *
 * This file implements a generic cache implementation used for both caches,
 * plus functions layered on top of the generic cache implementation to
 * access the metadata and fragment caches.
 *
 * To avoid out of memory and fragmentation issues with vmalloc, the cache
 * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
 *
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way. The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access. Because metadata and fragments are packed
 * together into blocks (to gain greater compression), the read of a particular
 * piece of metadata or fragment will retrieve other metadata/fragments which
 * have been packed with it; because of locality-of-reference these may be read
 * in the near future. Temporarily caching them ensures they are available for
 * near future access without requiring an additional read and decompress.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/zlib.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"

/*
 * Look up block in cache, and increment usage count. If not in cache, read
 * and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
	struct squashfs_cache *cache, u64 block, int length)
{
	int i, n;
	struct squashfs_cache_entry *entry;

	spin_lock(&cache->lock);

	while (1) {
		for (i = 0; i < cache->entries; i++)
			if (cache->entry[i].block == block)
				break;

		if (i == cache->entries) {
			/*
			 * Block not in cache; if all cache entries are used,
			 * go to sleep waiting for one to become available.
			 */
			if (cache->unused == 0) {
				cache->num_waiters++;
				spin_unlock(&cache->lock);
				wait_event(cache->wait_queue, cache->unused);
				spin_lock(&cache->lock);
				cache->num_waiters--;
				continue;
			}

			/*
			 * At least one unused cache entry. A simple
			 * round-robin strategy is used to choose the entry to
			 * be evicted from the cache.
			 */
			i = cache->next_blk;
			for (n = 0; n < cache->entries; n++) {
				if (cache->entry[i].refcount == 0)
					break;
				i = (i + 1) % cache->entries;
			}

			cache->next_blk = (i + 1) % cache->entries;
			entry = &cache->entry[i];

			/*
			 * Initialise chosen cache entry, and fill it in from
			 * disk.
			 */
			cache->unused--;
			entry->block = block;
			entry->refcount = 1;
			entry->pending = 1;
			entry->num_waiters = 0;
			entry->error = 0;
			spin_unlock(&cache->lock);

			entry->length = squashfs_read_data(sb, entry->data,
				block, length, &entry->next_index,
				cache->block_size, cache->pages);

			spin_lock(&cache->lock);

			if (entry->length < 0)
				entry->error = entry->length;

			entry->pending = 0;

			/*
			 * While filling this entry, one or more other processes
			 * may have looked it up in the cache and slept
			 * waiting for it to become available.
			 */
			if (entry->num_waiters) {
				spin_unlock(&cache->lock);
				wake_up_all(&entry->wait_queue);
			} else
				spin_unlock(&cache->lock);

			goto out;
		}

		/*
		 * Block already in cache. Increment refcount so it doesn't
		 * get reused until we're finished with it; if it was
		 * previously unused there's one less cache entry available
		 * for reuse.
		 */
		entry = &cache->entry[i];
		if (entry->refcount == 0)
			cache->unused--;
		entry->refcount++;

		/*
		 * If the entry is currently being filled in by another process,
		 * go to sleep waiting for it to become available.
		 */
		if (entry->pending) {
			entry->num_waiters++;
			spin_unlock(&cache->lock);
			wait_event(entry->wait_queue, !entry->pending);
		} else
			spin_unlock(&cache->lock);

		goto out;
	}

out:
	TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
		cache->name, i, entry->block, entry->refcount, entry->error);

	if (entry->error)
		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
							block);
	return entry;
}


/*
 * Release cache entry; once usage count is zero it can be reused.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
	struct squashfs_cache *cache = entry->cache;

	spin_lock(&cache->lock);
	entry->refcount--;
	if (entry->refcount == 0) {
		cache->unused++;
		/*
		 * If there are any processes waiting for a block to become
		 * available, wake one up.
		 */
		if (cache->num_waiters) {
			spin_unlock(&cache->lock);
			wake_up(&cache->wait_queue);
			return;
		}
	}
	spin_unlock(&cache->lock);
}

/*
 * Delete cache, reclaiming all kmalloced buffers.
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
	int i, j;

	if (cache == NULL)
		return;

	for (i = 0; i < cache->entries; i++) {
		if (cache->entry[i].data) {
			for (j = 0; j < cache->pages; j++)
				kfree(cache->entry[i].data[j]);
			kfree(cache->entry[i].data);
		}
	}

	kfree(cache->entry);
	kfree(cache);
}


/*
 * Initialise cache, allocating the specified number of entries, each of
 * size block_size. To avoid vmalloc fragmentation issues each entry
 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
 */
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
	int block_size)
{
	int i, j;
	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

	if (cache == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		return NULL;
	}

	cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
	if (cache->entry == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		goto cleanup;
	}

	cache->next_blk = 0;
	cache->unused = entries;
	cache->entries = entries;
	cache->block_size = block_size;
	cache->pages = block_size >> PAGE_CACHE_SHIFT;
+	cache->pages = cache->pages ? cache->pages : 1;
	cache->name = name;
	cache->num_waiters = 0;
	spin_lock_init(&cache->lock);
	init_waitqueue_head(&cache->wait_queue);

	for (i = 0; i < entries; i++) {
		struct squashfs_cache_entry *entry = &cache->entry[i];

		init_waitqueue_head(&cache->entry[i].wait_queue);
		entry->cache = cache;
		entry->block = SQUASHFS_INVALID_BLK;
		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
		if (entry->data == NULL) {
			ERROR("Failed to allocate %s cache entry\n", name);
			goto cleanup;
		}

		for (j = 0; j < cache->pages; j++) {
			entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
			if (entry->data[j] == NULL) {
				ERROR("Failed to allocate %s buffer\n", name);
				goto cleanup;
			}
		}
	}

	return cache;

cleanup:
	squashfs_cache_delete(cache);
	return NULL;
}


/*
 * Copy up to length bytes from cache entry to buffer, starting at offset bytes
 * into the cache entry. If there are not length bytes available, copy the
 * number of bytes available. In all cases return the number of bytes copied.
 */
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
		int offset, int length)
{
	int remaining = length;

	if (length == 0)
		return 0;
	else if (buffer == NULL)
		return min(length, entry->length - offset);

	while (offset < entry->length) {
		void *buff = entry->data[offset / PAGE_CACHE_SIZE]
				+ (offset % PAGE_CACHE_SIZE);
		int bytes = min_t(int, entry->length - offset,
				PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));

		if (bytes >= remaining) {
			memcpy(buffer, buff, remaining);
			remaining = 0;
			break;
		}

		memcpy(buffer, buff, bytes);
		buffer += bytes;
		remaining -= bytes;
		offset += bytes;
	}

	return length - remaining;
}


/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed). Data is packed into consecutive blocks,
 * and length bytes may require reading more than one block.
 */
int squashfs_read_metadata(struct super_block *sb, void *buffer,
		u64 *block, int *offset, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int bytes, copied = length;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

	while (length) {
		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
		if (entry->error)
			return entry->error;
		else if (*offset >= entry->length)
			return -EIO;

		bytes = squashfs_copy_data(buffer, entry, *offset, length);
		if (buffer)
			buffer += bytes;
		length -= bytes;
		*offset += bytes;

		if (*offset == entry->length) {
			*block = entry->next_index;
			*offset = 0;
		}

		squashfs_cache_put(entry);
	}

	return copied;
}


/*
 * Look up in the fragment cache the fragment located at <start_block> in the
 * filesystem. If necessary read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
				u64 start_block, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
		length);
}


/*
 * Read and decompress the datablock located at <start_block> in the
 * filesystem. The cache is used here to avoid duplicating locking and
 * read/decompress code.
 */
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
				u64 start_block, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}


/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk.
 */
int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
	int length)
{
	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	int i, res;
	void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
		data[i] = buffer;
	res = squashfs_read_data(sb, data, block, length |
		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
	kfree(data);
	return res;
}
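For reference, a standalone sketch (not part of the commit; the page-size list is an assumption covering common configurations) tabulates what cache->pages evaluates to before and after the clamp:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed page-size shifts: 4 KiB, 8 KiB, 16 KiB and 64 KiB pages. */
            int shifts[] = { 12, 13, 14, 16 };
            int block_size = 8192;  /* Squashfs metadata block size */
            int i;

            for (i = 0; i < 4; i++) {
                    int pages = block_size >> shifts[i];
                    printf("page size %2d KiB: pages = %d, clamped to %d\n",
                           (1 << shifts[i]) >> 10, pages, pages ? pages : 1);
            }
            return 0;
    }

On 4 KiB and 8 KiB pages the shift already yields at least one buffer, so the clamp only changes behaviour on systems whose pages are larger than the 8192-byte metadata block.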