Commit ba1f9db908a9ac4038f6b694de3e55959886258d

Authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/hch/hfsplus

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/hch/hfsplus:
  hfsplus: ensure bio requests are not smaller than the hardware sectors
  hfsplus: Add additional range check to handle on-disk corruptions
  hfsplus: Add error propagation for hfsplus_ext_write_extent_locked
  hfsplus: add error checking for hfs_find_init()
  hfsplus: lift the 2TB size limit
  hfsplus: fix overflow in hfsplus_read_wrapper
  hfsplus: fix overflow in hfsplus_get_block
  hfsplus: assignments inside `if' condition clean-up

Showing 10 changed files

fs/hfsplus/brec.c
... ... @@ -43,6 +43,10 @@
43 43 node->tree->node_size - (rec + 1) * 2);
44 44 if (!recoff)
45 45 return 0;
  46 + if (recoff > node->tree->node_size - 2) {
  47 + printk(KERN_ERR "hfs: recoff %d too large\n", recoff);
  48 + return 0;
  49 + }
46 50  
47 51 retval = hfs_bnode_read_u16(node, recoff) + 2;
48 52 if (retval > node->tree->max_key_len + 2) {
fs/hfsplus/catalog.c
... ... @@ -212,7 +212,9 @@
212 212  
213 213 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
214 214 str->name, cnid, inode->i_nlink);
215   - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  215 + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  216 + if (err)
  217 + return err;
216 218  
217 219 hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
218 220 entry_size = hfsplus_fill_cat_thread(sb, &entry,
... ... @@ -269,7 +271,9 @@
269 271  
270 272 dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n",
271 273 str ? str->name : NULL, cnid);
272   - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  274 + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  275 + if (err)
  276 + return err;
273 277  
274 278 if (!str) {
275 279 int len;
276 280  
... ... @@ -347,12 +351,14 @@
347 351 struct hfs_find_data src_fd, dst_fd;
348 352 hfsplus_cat_entry entry;
349 353 int entry_size, type;
350   - int err = 0;
  354 + int err;
351 355  
352 356 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
353 357 cnid, src_dir->i_ino, src_name->name,
354 358 dst_dir->i_ino, dst_name->name);
355   - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
  359 + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
  360 + if (err)
  361 + return err;
356 362 dst_fd = src_fd;
357 363  
358 364 /* find the old dir entry and read the data */
fs/hfsplus/dir.c
... ... @@ -38,7 +38,9 @@
38 38 sb = dir->i_sb;
39 39  
40 40 dentry->d_fsdata = NULL;
41   - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  41 + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  42 + if (err)
  43 + return ERR_PTR(err);
42 44 hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
43 45 again:
44 46 err = hfs_brec_read(&fd, &entry, sizeof(entry));
... ... @@ -132,7 +134,9 @@
132 134 if (filp->f_pos >= inode->i_size)
133 135 return 0;
134 136  
135   - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  137 + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  138 + if (err)
  139 + return err;
136 140 hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
137 141 err = hfs_brec_find(&fd);
138 142 if (err)
fs/hfsplus/extents.c
... ... @@ -119,22 +119,31 @@
119 119 set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
120 120 }
121 121  
122   -static void hfsplus_ext_write_extent_locked(struct inode *inode)
  122 +static int hfsplus_ext_write_extent_locked(struct inode *inode)
123 123 {
  124 + int res;
  125 +
124 126 if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
125 127 struct hfs_find_data fd;
126 128  
127   - hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
  129 + res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
  130 + if (res)
  131 + return res;
128 132 __hfsplus_ext_write_extent(inode, &fd);
129 133 hfs_find_exit(&fd);
130 134 }
  135 + return 0;
131 136 }
132 137  
133   -void hfsplus_ext_write_extent(struct inode *inode)
  138 +int hfsplus_ext_write_extent(struct inode *inode)
134 139 {
  140 + int res;
  141 +
135 142 mutex_lock(&HFSPLUS_I(inode)->extents_lock);
136   - hfsplus_ext_write_extent_locked(inode);
  143 + res = hfsplus_ext_write_extent_locked(inode);
137 144 mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
  145 +
  146 + return res;
138 147 }
139 148  
140 149 static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
... ... @@ -194,9 +203,11 @@
194 203 block < hip->cached_start + hip->cached_blocks)
195 204 return 0;
196 205  
197   - hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
198   - res = __hfsplus_ext_cache_extent(&fd, inode, block);
199   - hfs_find_exit(&fd);
  206 + res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
  207 + if (!res) {
  208 + res = __hfsplus_ext_cache_extent(&fd, inode, block);
  209 + hfs_find_exit(&fd);
  210 + }
200 211 return res;
201 212 }
202 213  
... ... @@ -209,6 +220,7 @@
209 220 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
210 221 int res = -EIO;
211 222 u32 ablock, dblock, mask;
  223 + sector_t sector;
212 224 int was_dirty = 0;
213 225 int shift;
214 226  
215 227  
... ... @@ -255,10 +267,12 @@
255 267 done:
256 268 dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n",
257 269 inode->i_ino, (long long)iblock, dblock);
  270 +
258 271 mask = (1 << sbi->fs_shift) - 1;
259   - map_bh(bh_result, sb,
260   - (dblock << sbi->fs_shift) + sbi->blockoffset +
261   - (iblock & mask));
  272 + sector = ((sector_t)dblock << sbi->fs_shift) +
  273 + sbi->blockoffset + (iblock & mask);
  274 + map_bh(bh_result, sb, sector);
  275 +
262 276 if (create) {
263 277 set_buffer_new(bh_result);
264 278 hip->phys_size += sb->s_blocksize;
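
The (sector_t) cast above is the actual overflow fix: dblock is a u32, so without widening it before the shift the sector calculation wraps at 2^32 sectors and maps blocks at or beyond 2 TiB back to the start of the device. A minimal user-space sketch of the failure mode, with hypothetical values (4 KiB allocation blocks, so fs_shift == 3) and assuming a 64-bit sector_t (CONFIG_LBDAF):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical volume: 4 KiB allocation blocks, i.e. eight
	 * 512-byte sectors per block (fs_shift == 3). */
	uint32_t dblock = 0x20000000;	/* allocation block at the 2 TiB mark */
	unsigned int fs_shift = 3;

	uint32_t wrapped = dblock << fs_shift;		  /* 32-bit math: wraps to 0 */
	uint64_t widened = (uint64_t)dblock << fs_shift;  /* 4294967296 sectors     */

	printf("without cast: sector %u\n", wrapped);
	printf("with cast:    sector %llu\n", (unsigned long long)widened);
	return 0;
}
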
... ... @@ -371,7 +385,9 @@
371 385 if (total_blocks == blocks)
372 386 return 0;
373 387  
374   - hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
  388 + res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
  389 + if (res)
  390 + return res;
375 391 do {
376 392 res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
377 393 total_blocks, type);
... ... @@ -469,7 +485,9 @@
469 485  
470 486 insert_extent:
471 487 dprint(DBG_EXTENT, "insert new extent\n");
472   - hfsplus_ext_write_extent_locked(inode);
  488 + res = hfsplus_ext_write_extent_locked(inode);
  489 + if (res)
  490 + goto out;
473 491  
474 492 memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
475 493 hip->cached_extents[0].start_block = cpu_to_be32(start);
... ... @@ -500,7 +518,6 @@
500 518 struct page *page;
501 519 void *fsdata;
502 520 u32 size = inode->i_size;
503   - int res;
504 521  
505 522 res = pagecache_write_begin(NULL, mapping, size, 0,
506 523 AOP_FLAG_UNINTERRUPTIBLE,
... ... @@ -523,7 +540,12 @@
523 540 goto out;
524 541  
525 542 mutex_lock(&hip->extents_lock);
526   - hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
  543 + res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
  544 + if (res) {
  545 + mutex_unlock(&hip->extents_lock);
  546 + /* XXX: We lack error handling of hfsplus_file_truncate() */
  547 + return;
  548 + }
527 549 while (1) {
528 550 if (alloc_cnt == hip->first_blocks) {
529 551 hfsplus_free_extents(sb, hip->first_extents,
fs/hfsplus/hfsplus_fs.h
... ... @@ -13,6 +13,7 @@
13 13 #include <linux/fs.h>
14 14 #include <linux/mutex.h>
15 15 #include <linux/buffer_head.h>
  16 +#include <linux/blkdev.h>
16 17 #include "hfsplus_raw.h"
17 18  
18 19 #define DBG_BNODE_REFS 0x00000001
19 20  
... ... @@ -110,7 +111,9 @@
110 111 struct hfs_btree;
111 112  
112 113 struct hfsplus_sb_info {
  114 + void *s_vhdr_buf;
113 115 struct hfsplus_vh *s_vhdr;
  116 + void *s_backup_vhdr_buf;
114 117 struct hfsplus_vh *s_backup_vhdr;
115 118 struct hfs_btree *ext_tree;
116 119 struct hfs_btree *cat_tree;
... ... @@ -258,6 +261,15 @@
258 261 struct hfsplus_cat_key key;
259 262 };
260 263  
  264 +/*
  265 + * Find minimum acceptable I/O size for an hfsplus sb.
  266 + */
  267 +static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
  268 +{
  269 + return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev),
  270 + HFSPLUS_SECTOR_SIZE);
  271 +}
  272 +
261 273 #define hfs_btree_open hfsplus_btree_open
262 274 #define hfs_btree_close hfsplus_btree_close
263 275 #define hfs_btree_write hfsplus_btree_write
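
The hfsplus_min_io_size() helper added above is what the rest of the series builds on: every raw read or write of the volume headers and the partition map is now sized to the larger of the device's logical block size and the fixed 512-byte HFSPLUS_SECTOR_SIZE. A rough user-space sketch of the same computation; the 4096 stands in for bdev_logical_block_size() on a 4Kn drive and is purely hypothetical:

#include <stdio.h>

#define HFSPLUS_SECTOR_SIZE 512

int main(void)
{
	/* Stand-in for bdev_logical_block_size(sb->s_bdev). */
	unsigned short logical_block_size = 4096;

	unsigned short min_io = logical_block_size > HFSPLUS_SECTOR_SIZE ?
				logical_block_size : HFSPLUS_SECTOR_SIZE;

	/* On such a drive a 512-byte bio cannot be honoured by the
	 * hardware, so the buffers below are allocated min_io bytes big. */
	printf("minimum I/O size: %u bytes\n", (unsigned)min_io);
	return 0;
}
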
... ... @@ -374,7 +386,7 @@
374 386  
375 387 /* extents.c */
376 388 int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
377   -void hfsplus_ext_write_extent(struct inode *);
  389 +int hfsplus_ext_write_extent(struct inode *);
378 390 int hfsplus_get_block(struct inode *, sector_t, struct buffer_head *, int);
379 391 int hfsplus_free_fork(struct super_block *, u32,
380 392 struct hfsplus_fork_raw *, int);
... ... @@ -436,8 +448,8 @@
436 448 /* wrapper.c */
437 449 int hfsplus_read_wrapper(struct super_block *);
438 450 int hfs_part_find(struct super_block *, sector_t *, sector_t *);
439   -int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
440   - void *data, int rw);
  451 +int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
  452 + void *buf, void **data, int rw);
441 453  
442 454 /* time macros */
443 455 #define __hfsp_mt2ut(t) (be32_to_cpu(t) - 2082844800U)
fs/hfsplus/inode.c
... ... @@ -195,11 +195,13 @@
195 195 hip->flags = 0;
196 196 set_bit(HFSPLUS_I_RSRC, &hip->flags);
197 197  
198   - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
199   - err = hfsplus_find_cat(sb, dir->i_ino, &fd);
200   - if (!err)
201   - err = hfsplus_cat_read_inode(inode, &fd);
202   - hfs_find_exit(&fd);
  198 + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
  199 + if (!err) {
  200 + err = hfsplus_find_cat(sb, dir->i_ino, &fd);
  201 + if (!err)
  202 + err = hfsplus_cat_read_inode(inode, &fd);
  203 + hfs_find_exit(&fd);
  204 + }
203 205 if (err) {
204 206 iput(inode);
205 207 return ERR_PTR(err);
fs/hfsplus/part_tbl.c
... ... @@ -88,11 +88,12 @@
88 88 return -ENOENT;
89 89 }
90 90  
91   -static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
92   - sector_t *part_start, sector_t *part_size)
  91 +static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
  92 + struct new_pmap *pm, sector_t *part_start, sector_t *part_size)
93 93 {
94 94 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
95 95 int size = be32_to_cpu(pm->pmMapBlkCnt);
  96 + int buf_size = hfsplus_min_io_size(sb);
96 97 int res;
97 98 int i = 0;
98 99  
... ... @@ -107,11 +108,14 @@
107 108 if (++i >= size)
108 109 return -ENOENT;
109 110  
110   - res = hfsplus_submit_bio(sb->s_bdev,
111   - *part_start + HFS_PMAP_BLK + i,
112   - pm, READ);
113   - if (res)
114   - return res;
  111 + pm = (struct new_pmap *)((u8 *)pm + HFSPLUS_SECTOR_SIZE);
  112 + if ((u8 *)pm - (u8 *)buf >= buf_size) {
  113 + res = hfsplus_submit_bio(sb,
  114 + *part_start + HFS_PMAP_BLK + i,
  115 + buf, (void **)&pm, READ);
  116 + if (res)
  117 + return res;
  118 + }
115 119 } while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC));
116 120  
117 121 return -ENOENT;
118 122  
119 123  
... ... @@ -124,15 +128,15 @@
124 128 int hfs_part_find(struct super_block *sb,
125 129 sector_t *part_start, sector_t *part_size)
126 130 {
127   - void *data;
  131 + void *buf, *data;
128 132 int res;
129 133  
130   - data = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
131   - if (!data)
  134 + buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
  135 + if (!buf)
132 136 return -ENOMEM;
133 137  
134   - res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
135   - data, READ);
  138 + res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
  139 + buf, &data, READ);
136 140 if (res)
137 141 goto out;
138 142  
139 143  
... ... @@ -141,14 +145,14 @@
141 145 res = hfs_parse_old_pmap(sb, data, part_start, part_size);
142 146 break;
143 147 case HFS_NEW_PMAP_MAGIC:
144   - res = hfs_parse_new_pmap(sb, data, part_start, part_size);
  148 + res = hfs_parse_new_pmap(sb, buf, data, part_start, part_size);
145 149 break;
146 150 default:
147 151 res = -ENOENT;
148 152 break;
149 153 }
150 154 out:
151   - kfree(data);
  155 + kfree(buf);
152 156 return res;
153 157 }
fs/hfsplus/super.c
... ... @@ -73,11 +73,13 @@
73 73  
74 74 if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
75 75 inode->i_ino == HFSPLUS_ROOT_CNID) {
76   - hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
77   - err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
78   - if (!err)
79   - err = hfsplus_cat_read_inode(inode, &fd);
80   - hfs_find_exit(&fd);
  76 + err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
  77 + if (!err) {
  78 + err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
  79 + if (!err)
  80 + err = hfsplus_cat_read_inode(inode, &fd);
  81 + hfs_find_exit(&fd);
  82 + }
81 83 } else {
82 84 err = hfsplus_system_read_inode(inode);
83 85 }
84 86  
... ... @@ -133,9 +135,13 @@
133 135 static int hfsplus_write_inode(struct inode *inode,
134 136 struct writeback_control *wbc)
135 137 {
  138 + int err;
  139 +
136 140 dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
137 141  
138   - hfsplus_ext_write_extent(inode);
  142 + err = hfsplus_ext_write_extent(inode);
  143 + if (err)
  144 + return err;
139 145  
140 146 if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
141 147 inode->i_ino == HFSPLUS_ROOT_CNID)
142 148  
143 149  
144 150  
... ... @@ -197,17 +203,17 @@
197 203 write_backup = 1;
198 204 }
199 205  
200   - error2 = hfsplus_submit_bio(sb->s_bdev,
  206 + error2 = hfsplus_submit_bio(sb,
201 207 sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
202   - sbi->s_vhdr, WRITE_SYNC);
  208 + sbi->s_vhdr_buf, NULL, WRITE_SYNC);
203 209 if (!error)
204 210 error = error2;
205 211 if (!write_backup)
206 212 goto out;
207 213  
208   - error2 = hfsplus_submit_bio(sb->s_bdev,
  214 + error2 = hfsplus_submit_bio(sb,
209 215 sbi->part_start + sbi->sect_count - 2,
210   - sbi->s_backup_vhdr, WRITE_SYNC);
  216 + sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
211 217 if (!error)
212 218 error2 = error;
213 219 out:
... ... @@ -251,8 +257,8 @@
251 257 hfs_btree_close(sbi->ext_tree);
252 258 iput(sbi->alloc_file);
253 259 iput(sbi->hidden_dir);
254   - kfree(sbi->s_vhdr);
255   - kfree(sbi->s_backup_vhdr);
  260 + kfree(sbi->s_vhdr_buf);
  261 + kfree(sbi->s_backup_vhdr_buf);
256 262 unload_nls(sbi->nls);
257 263 kfree(sb->s_fs_info);
258 264 sb->s_fs_info = NULL;
... ... @@ -393,6 +399,13 @@
393 399 if (!sbi->rsrc_clump_blocks)
394 400 sbi->rsrc_clump_blocks = 1;
395 401  
  402 + err = generic_check_addressable(sbi->alloc_blksz_shift,
  403 + sbi->total_blocks);
  404 + if (err) {
  405 + printk(KERN_ERR "hfs: filesystem size too large.\n");
  406 + goto out_free_vhdr;
  407 + }
  408 +
396 409 /* Set up operations so we can load metadata */
397 410 sb->s_op = &hfsplus_sops;
398 411 sb->s_maxbytes = MAX_LFS_FILESIZE;
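
The generic_check_addressable() call replaces the old hard 2 TB cut-off removed from hfsplus_read_wrapper() further down: instead of rejecting every large volume, only volumes whose last block cannot be addressed by the kernel's sector_t or page cache index are refused (a 32-bit kernel without CONFIG_LBDAF, for instance, still cannot reach past 2 TiB). A rough sketch of that arithmetic, using a hypothetical 3 TiB volume:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical volume: 4 KiB allocation blocks (shift 12),
	 * 0x30000000 of them, i.e. 3 TiB in total. */
	unsigned int alloc_blksz_shift = 12;
	uint64_t total_blocks = 0x30000000ULL;

	uint64_t sectors = (total_blocks << alloc_blksz_shift) >> 9;

	/* A 32-bit sector_t (no CONFIG_LBDAF) tops out at 2^32 - 1
	 * 512-byte sectors, i.e. just under 2 TiB. */
	if (sectors > 0xFFFFFFFFULL)
		printf("hfs: filesystem size too large.\n");
	else
		printf("volume is addressable\n");
	return 0;
}
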
... ... @@ -417,6 +430,8 @@
417 430 sb->s_flags |= MS_RDONLY;
418 431 }
419 432  
  433 + err = -EINVAL;
  434 +
420 435 /* Load metadata objects (B*Trees) */
421 436 sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
422 437 if (!sbi->ext_tree) {
... ... @@ -447,7 +462,9 @@
447 462  
448 463 str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
449 464 str.name = HFSP_HIDDENDIR_NAME;
450   - hfs_find_init(sbi->cat_tree, &fd);
  465 + err = hfs_find_init(sbi->cat_tree, &fd);
  466 + if (err)
  467 + goto out_put_root;
451 468 hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
452 469 if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
453 470 hfs_find_exit(&fd);
fs/hfsplus/unicode.c
... ... @@ -142,7 +142,11 @@
142 142 /* search for single decomposed char */
143 143 if (likely(compose))
144 144 ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c0);
145   - if (ce1 && (cc = ce1[0])) {
  145 + if (ce1)
  146 + cc = ce1[0];
  147 + else
  148 + cc = 0;
  149 + if (cc) {
146 150 /* start of a possibly decomposed Hangul char */
147 151 if (cc != 0xffff)
148 152 goto done;
... ... @@ -209,7 +213,8 @@
209 213 i++;
210 214 ce2 = ce1;
211 215 }
212   - if ((cc = ce2[0])) {
  216 + cc = ce2[0];
  217 + if (cc) {
213 218 ip += i;
214 219 ustrlen -= i;
215 220 goto done;
... ... @@ -301,7 +306,11 @@
301 306 while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
302 307 size = asc2unichar(sb, astr, len, &c);
303 308  
304   - if (decompose && (dstr = decompose_unichar(c, &dsize))) {
  309 + if (decompose)
  310 + dstr = decompose_unichar(c, &dsize);
  311 + else
  312 + dstr = NULL;
  313 + if (dstr) {
305 314 if (outlen + dsize > HFSPLUS_MAX_STRLEN)
306 315 break;
307 316 do {
308 317  
309 318  
... ... @@ -346,15 +355,23 @@
346 355 astr += size;
347 356 len -= size;
348 357  
349   - if (decompose && (dstr = decompose_unichar(c, &dsize))) {
  358 + if (decompose)
  359 + dstr = decompose_unichar(c, &dsize);
  360 + else
  361 + dstr = NULL;
  362 + if (dstr) {
350 363 do {
351 364 c2 = *dstr++;
352   - if (!casefold || (c2 = case_fold(c2)))
  365 + if (casefold)
  366 + c2 = case_fold(c2);
  367 + if (!casefold || c2)
353 368 hash = partial_name_hash(c2, hash);
354 369 } while (--dsize > 0);
355 370 } else {
356 371 c2 = c;
357   - if (!casefold || (c2 = case_fold(c2)))
  372 + if (casefold)
  373 + c2 = case_fold(c2);
  374 + if (!casefold || c2)
358 375 hash = partial_name_hash(c2, hash);
359 376 }
360 377 }
361 378  
... ... @@ -422,12 +439,14 @@
422 439 c1 = *dstr1;
423 440 c2 = *dstr2;
424 441 if (casefold) {
425   - if (!(c1 = case_fold(c1))) {
  442 + c1 = case_fold(c1);
  443 + if (!c1) {
426 444 dstr1++;
427 445 dsize1--;
428 446 continue;
429 447 }
430   - if (!(c2 = case_fold(c2))) {
  448 + c2 = case_fold(c2);
  449 + if (!c2) {
431 450 dstr2++;
432 451 dsize2--;
433 452 continue;
fs/hfsplus/wrapper.c
... ... @@ -31,34 +31,77 @@
31 31 complete(bio->bi_private);
32 32 }
33 33  
34   -int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
35   - void *data, int rw)
  34 +/*
  35 + * hfsplus_submit_bio - Perform block I/O
  36 + * @sb: super block of volume for I/O
  37 + * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
  38 + * @buf: buffer for I/O
  39 + * @data: output pointer for location of requested data
  40 + * @rw: direction of I/O
  41 + *
  42 + * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
  43 + * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
  44 + * @data will return a pointer to the start of the requested sector,
  45 + * which may not be the same location as @buf.
  46 + *
  47 + * If @sector is not aligned to the bdev logical block size it will
  48 + * be rounded down. For writes this means that @buf should contain data
  49 + * that starts at the rounded-down address. As long as the data was
  50 + * read using hfsplus_submit_bio() and the same buffer is used things
  51 + * will work correctly.
  52 + */
  53 +int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
  54 + void *buf, void **data, int rw)
36 55 {
37 56 DECLARE_COMPLETION_ONSTACK(wait);
38 57 struct bio *bio;
39 58 int ret = 0;
  59 + unsigned int io_size;
  60 + loff_t start;
  61 + int offset;
40 62  
  63 + /*
  64 + * Align sector to hardware sector size and find offset. We
  65 + * assume that io_size is a power of two, which _should_
  66 + * be true.
  67 + */
  68 + io_size = hfsplus_min_io_size(sb);
  69 + start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
  70 + offset = start & (io_size - 1);
  71 + sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
  72 +
41 73 bio = bio_alloc(GFP_NOIO, 1);
42 74 bio->bi_sector = sector;
43   - bio->bi_bdev = bdev;
  75 + bio->bi_bdev = sb->s_bdev;
44 76 bio->bi_end_io = hfsplus_end_io_sync;
45 77 bio->bi_private = &wait;
46 78  
47   - /*
48   - * We always submit one sector at a time, so bio_add_page must not fail.
49   - */
50   - if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
51   - offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
52   - BUG();
  79 + if (!(rw & WRITE) && data)
  80 + *data = (u8 *)buf + offset;
53 81  
  82 + while (io_size > 0) {
  83 + unsigned int page_offset = offset_in_page(buf);
  84 + unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
  85 + io_size);
  86 +
  87 + ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
  88 + if (ret != len) {
  89 + ret = -EIO;
  90 + goto out;
  91 + }
  92 + io_size -= len;
  93 + buf = (u8 *)buf + len;
  94 + }
  95 +
54 96 submit_bio(rw, bio);
55 97 wait_for_completion(&wait);
56 98  
57 99 if (!bio_flagged(bio, BIO_UPTODATE))
58 100 ret = -EIO;
59 101  
  102 +out:
60 103 bio_put(bio);
61   - return ret;
  104 + return ret < 0 ? ret : 0;
62 105 }
63 106  
64 107 static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
65 108  
66 109  
67 110  
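
The alignment logic in the rewritten hfsplus_submit_bio() deserves a worked example. With io_size = 4096 (a hypothetical 4Kn device) and a request for 512-byte sector 3: start = 3 * 512 = 1536, offset = 1536 & 4095 = 1536, and the starting sector is rounded down to 0, so a single 4 KiB bio covers the request and *data ends up pointing 1536 bytes into the caller's buffer. The same arithmetic as a small user-space sketch:

#include <stdint.h>
#include <stdio.h>

#define HFSPLUS_SECTOR_SHIFT	9	/* 512-byte HFS+ sectors */

int main(void)
{
	unsigned int io_size = 4096;	/* hypothetical hfsplus_min_io_size() */
	uint64_t sector = 3;		/* requested 512-byte sector */

	uint64_t start = sector << HFSPLUS_SECTOR_SHIFT;
	unsigned int offset = start & (io_size - 1);

	/* Round the starting sector down to an io_size boundary, exactly
	 * as the kernel code does before building the bio. */
	sector &= ~(uint64_t)((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);

	printf("submit at sector %llu, requested data at buf + %u\n",
	       (unsigned long long)sector, offset);
	return 0;
}
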
... ... @@ -141,23 +184,19 @@
141 184  
142 185 if (hfsplus_get_last_session(sb, &part_start, &part_size))
143 186 goto out;
144   - if ((u64)part_start + part_size > 0x100000000ULL) {
145   - pr_err("hfs: volumes larger than 2TB are not supported yet\n");
146   - goto out;
147   - }
148 187  
149 188 error = -ENOMEM;
150   - sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
151   - if (!sbi->s_vhdr)
  189 + sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
  190 + if (!sbi->s_vhdr_buf)
152 191 goto out;
153   - sbi->s_backup_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
154   - if (!sbi->s_backup_vhdr)
  192 + sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
  193 + if (!sbi->s_backup_vhdr_buf)
155 194 goto out_free_vhdr;
156 195  
157 196 reread:
158   - error = hfsplus_submit_bio(sb->s_bdev,
159   - part_start + HFSPLUS_VOLHEAD_SECTOR,
160   - sbi->s_vhdr, READ);
  197 + error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
  198 + sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
  199 + READ);
161 200 if (error)
162 201 goto out_free_backup_vhdr;
163 202  
... ... @@ -172,8 +211,9 @@
172 211 if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
173 212 goto out_free_backup_vhdr;
174 213 wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
175   - part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
176   - part_size = wd.embed_count * wd.ablk_size;
  214 + part_start += (sector_t)wd.ablk_start +
  215 + (sector_t)wd.embed_start * wd.ablk_size;
  216 + part_size = (sector_t)wd.embed_count * wd.ablk_size;
177 217 goto reread;
178 218 default:
179 219 /*
... ... @@ -186,9 +226,9 @@
186 226 goto reread;
187 227 }
188 228  
189   - error = hfsplus_submit_bio(sb->s_bdev,
190   - part_start + part_size - 2,
191   - sbi->s_backup_vhdr, READ);
  229 + error = hfsplus_submit_bio(sb, part_start + part_size - 2,
  230 + sbi->s_backup_vhdr_buf,
  231 + (void **)&sbi->s_backup_vhdr, READ);
192 232 if (error)
193 233 goto out_free_backup_vhdr;
194 234