Commit 2e0c2c73923fed27337039ddfd69985e6c4b91fe

Authored by Ryusuke Konishi
1 parent bd8169efae

nilfs2: allow btree code to directly call dat operations

The current btree code is written so that btree functions call dat
operations via wrapper functions in bmap.c when they allocate, free,
or modify virtual block addresses.

This abstraction requires additional function calls and causes
nilfs_bmap_get_dat() to be called frequently, since it is used in
every wrapper function.

This removes the wrapper functions so that the dat operations can be
called directly from btree.c and direct.c, which increases the
opportunities for compiler optimization.
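
For illustration, here is a condensed before/after of the allocation
helper, assembled from the hunks below (surrounding declarations are
elided, so this is a sketch of the pattern rather than the complete
code):

	/* Before: the inline helper dispatches to a wrapper in bmap.c,
	 * and every wrapper re-derives the DAT inode per call. */
	static inline int nilfs_bmap_prepare_alloc_ptr(struct nilfs_bmap *bmap,
						       union nilfs_bmap_ptr_req *req)
	{
		if (NILFS_BMAP_USE_VBN(bmap))
			return nilfs_bmap_prepare_alloc_v(bmap, req);
		/* ignore target ptr */
		req->bpr_ptr = bmap->b_last_allocated_ptr++;
		return 0;
	}

	int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *bmap,
				       union nilfs_bmap_ptr_req *req)
	{
		return nilfs_dat_prepare_alloc(nilfs_bmap_get_dat(bmap),
					       &req->bpr_req);
	}

	/* After: the caller resolves the DAT inode once with
	 * nilfs_bmap_get_dat() and threads it through; a NULL dat
	 * means the bmap does not use virtual block numbers. */
	static inline int nilfs_bmap_prepare_alloc_ptr(struct nilfs_bmap *bmap,
						       union nilfs_bmap_ptr_req *req,
						       struct inode *dat)
	{
		if (dat)
			return nilfs_dat_prepare_alloc(dat, &req->bpr_req);
		/* ignore target ptr */
		req->bpr_ptr = bmap->b_last_allocated_ptr++;
		return 0;
	}

Callers such as nilfs_btree_prepare_insert() now evaluate
NILFS_BMAP_USE_VBN() once, fetch the DAT inode if needed, and pass it
through the whole prepare/commit/abort sequence.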

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>

Showing 4 changed files with 167 additions and 299 deletions

... ... @@ -469,91 +469,6 @@
469 469 (entries_per_group / NILFS_BMAP_GROUP_DIV);
470 470 }
471 471  
472   -int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *bmap,
473   - union nilfs_bmap_ptr_req *req)
474   -{
475   - return nilfs_dat_prepare_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
476   -}
477   -
478   -void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *bmap,
479   - union nilfs_bmap_ptr_req *req)
480   -{
481   - nilfs_dat_commit_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
482   -}
483   -
484   -void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *bmap,
485   - union nilfs_bmap_ptr_req *req)
486   -{
487   - nilfs_dat_abort_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
488   -}
489   -
490   -int nilfs_bmap_start_v(struct nilfs_bmap *bmap, union nilfs_bmap_ptr_req *req,
491   - sector_t blocknr)
492   -{
493   - struct inode *dat = nilfs_bmap_get_dat(bmap);
494   - int ret;
495   -
496   - ret = nilfs_dat_prepare_start(dat, &req->bpr_req);
497   - if (likely(!ret))
498   - nilfs_dat_commit_start(dat, &req->bpr_req, blocknr);
499   - return ret;
500   -}
501   -
502   -int nilfs_bmap_prepare_end_v(struct nilfs_bmap *bmap,
503   - union nilfs_bmap_ptr_req *req)
504   -{
505   - return nilfs_dat_prepare_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
506   -}
507   -
508   -void nilfs_bmap_commit_end_v(struct nilfs_bmap *bmap,
509   - union nilfs_bmap_ptr_req *req)
510   -{
511   - nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req,
512   - bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
513   -}
514   -
515   -void nilfs_bmap_abort_end_v(struct nilfs_bmap *bmap,
516   - union nilfs_bmap_ptr_req *req)
517   -{
518   - nilfs_dat_abort_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
519   -}
520   -
521   -int nilfs_bmap_move_v(const struct nilfs_bmap *bmap, __u64 vblocknr,
522   - sector_t blocknr)
523   -{
524   - return nilfs_dat_move(nilfs_bmap_get_dat(bmap), vblocknr, blocknr);
525   -}
526   -
527   -int nilfs_bmap_mark_dirty(const struct nilfs_bmap *bmap, __u64 vblocknr)
528   -{
529   - return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), vblocknr);
530   -}
531   -
532   -int nilfs_bmap_prepare_update_v(struct nilfs_bmap *bmap,
533   - union nilfs_bmap_ptr_req *oldreq,
534   - union nilfs_bmap_ptr_req *newreq)
535   -{
536   - return nilfs_dat_prepare_update(nilfs_bmap_get_dat(bmap),
537   - &oldreq->bpr_req, &newreq->bpr_req);
538   -}
539   -
540   -void nilfs_bmap_commit_update_v(struct nilfs_bmap *bmap,
541   - union nilfs_bmap_ptr_req *oldreq,
542   - union nilfs_bmap_ptr_req *newreq)
543   -{
544   - nilfs_dat_commit_update(nilfs_bmap_get_dat(bmap),
545   - &oldreq->bpr_req, &newreq->bpr_req,
546   - bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
547   -}
548   -
549   -void nilfs_bmap_abort_update_v(struct nilfs_bmap *bmap,
550   - union nilfs_bmap_ptr_req *oldreq,
551   - union nilfs_bmap_ptr_req *newreq)
552   -{
553   - nilfs_dat_abort_update(nilfs_bmap_get_dat(bmap),
554   - &oldreq->bpr_req, &newreq->bpr_req);
555   -}
556   -
557 472 static struct lock_class_key nilfs_bmap_dat_lock_key;
558 473 static struct lock_class_key nilfs_bmap_mdt_lock_key;
559 474  
... ... @@ -28,6 +28,7 @@
28 28 #include <linux/buffer_head.h>
29 29 #include <linux/nilfs2_fs.h>
30 30 #include "alloc.h"
  31 +#include "dat.h"
31 32  
32 33 #define NILFS_BMAP_INVALID_PTR 0
33 34  
... ... @@ -164,85 +165,65 @@
164 165 * Internal use only
165 166 */
166 167 struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *);
167   -int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *,
168   - union nilfs_bmap_ptr_req *);
169   -void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *,
170   - union nilfs_bmap_ptr_req *);
171   -void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *,
172   - union nilfs_bmap_ptr_req *);
173 168  
174 169 static inline int nilfs_bmap_prepare_alloc_ptr(struct nilfs_bmap *bmap,
175   - union nilfs_bmap_ptr_req *req)
  170 + union nilfs_bmap_ptr_req *req,
  171 + struct inode *dat)
176 172 {
177   - if (NILFS_BMAP_USE_VBN(bmap))
178   - return nilfs_bmap_prepare_alloc_v(bmap, req);
  173 + if (dat)
  174 + return nilfs_dat_prepare_alloc(dat, &req->bpr_req);
179 175 /* ignore target ptr */
180 176 req->bpr_ptr = bmap->b_last_allocated_ptr++;
181 177 return 0;
182 178 }
183 179  
184 180 static inline void nilfs_bmap_commit_alloc_ptr(struct nilfs_bmap *bmap,
185   - union nilfs_bmap_ptr_req *req)
  181 + union nilfs_bmap_ptr_req *req,
  182 + struct inode *dat)
186 183 {
187   - if (NILFS_BMAP_USE_VBN(bmap))
188   - nilfs_bmap_commit_alloc_v(bmap, req);
  184 + if (dat)
  185 + nilfs_dat_commit_alloc(dat, &req->bpr_req);
189 186 }
190 187  
191 188 static inline void nilfs_bmap_abort_alloc_ptr(struct nilfs_bmap *bmap,
192   - union nilfs_bmap_ptr_req *req)
  189 + union nilfs_bmap_ptr_req *req,
  190 + struct inode *dat)
193 191 {
194   - if (NILFS_BMAP_USE_VBN(bmap))
195   - nilfs_bmap_abort_alloc_v(bmap, req);
  192 + if (dat)
  193 + nilfs_dat_abort_alloc(dat, &req->bpr_req);
196 194 else
197 195 bmap->b_last_allocated_ptr--;
198 196 }
199 197  
200   -int nilfs_bmap_prepare_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
201   -void nilfs_bmap_commit_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
202   -void nilfs_bmap_abort_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
203   -
204 198 static inline int nilfs_bmap_prepare_end_ptr(struct nilfs_bmap *bmap,
205   - union nilfs_bmap_ptr_req *req)
  199 + union nilfs_bmap_ptr_req *req,
  200 + struct inode *dat)
206 201 {
207   - return NILFS_BMAP_USE_VBN(bmap) ?
208   - nilfs_bmap_prepare_end_v(bmap, req) : 0;
  202 + return dat ? nilfs_dat_prepare_end(dat, &req->bpr_req) : 0;
209 203 }
210 204  
211 205 static inline void nilfs_bmap_commit_end_ptr(struct nilfs_bmap *bmap,
212   - union nilfs_bmap_ptr_req *req)
  206 + union nilfs_bmap_ptr_req *req,
  207 + struct inode *dat)
213 208 {
214   - if (NILFS_BMAP_USE_VBN(bmap))
215   - nilfs_bmap_commit_end_v(bmap, req);
  209 + if (dat)
  210 + nilfs_dat_commit_end(dat, &req->bpr_req,
  211 + bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
216 212 }
217 213  
218 214 static inline void nilfs_bmap_abort_end_ptr(struct nilfs_bmap *bmap,
219   - union nilfs_bmap_ptr_req *req)
  215 + union nilfs_bmap_ptr_req *req,
  216 + struct inode *dat)
220 217 {
221   - if (NILFS_BMAP_USE_VBN(bmap))
222   - nilfs_bmap_abort_end_v(bmap, req);
  218 + if (dat)
  219 + nilfs_dat_abort_end(dat, &req->bpr_req);
223 220 }
224 221  
225   -int nilfs_bmap_start_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *,
226   - sector_t);
227   -int nilfs_bmap_move_v(const struct nilfs_bmap *, __u64, sector_t);
228   -int nilfs_bmap_mark_dirty(const struct nilfs_bmap *, __u64);
229   -
230   -
231 222 __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
232 223 const struct buffer_head *);
233 224  
234 225 __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *, __u64);
235 226 __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *);
236   -
237   -int nilfs_bmap_prepare_update_v(struct nilfs_bmap *,
238   - union nilfs_bmap_ptr_req *,
239   - union nilfs_bmap_ptr_req *);
240   -void nilfs_bmap_commit_update_v(struct nilfs_bmap *,
241   - union nilfs_bmap_ptr_req *,
242   - union nilfs_bmap_ptr_req *);
243   -void nilfs_bmap_abort_update_v(struct nilfs_bmap *,
244   - union nilfs_bmap_ptr_req *,
245   - union nilfs_bmap_ptr_req *);
246 227  
247 228 void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
248 229 void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
... ... @@ -940,17 +940,20 @@
940 940 struct nilfs_btree_node *node, *parent, *sib;
941 941 __u64 sibptr;
942 942 int pindex, level, ret;
  943 + struct inode *dat = NULL;
943 944  
944 945 stats->bs_nblocks = 0;
945 946 level = NILFS_BTREE_LEVEL_DATA;
946 947  
947 948 /* allocate a new ptr for data block */
948   - if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
  949 + if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
949 950 path[level].bp_newreq.bpr_ptr =
950 951 nilfs_btree_find_target_v(btree, path, key);
  952 + dat = nilfs_bmap_get_dat(&btree->bt_bmap);
  953 + }
951 954  
952 955 ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
953   - &path[level].bp_newreq);
  956 + &path[level].bp_newreq, dat);
954 957 if (ret < 0)
955 958 goto err_out_data;
956 959  
... ... @@ -1009,7 +1012,7 @@
1009 1012 path[level].bp_newreq.bpr_ptr =
1010 1013 path[level - 1].bp_newreq.bpr_ptr + 1;
1011 1014 ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
1012   - &path[level].bp_newreq);
  1015 + &path[level].bp_newreq, dat);
1013 1016 if (ret < 0)
1014 1017 goto err_out_child_node;
1015 1018 ret = nilfs_btree_get_new_block(btree,
... ... @@ -1041,7 +1044,7 @@
1041 1044 /* grow */
1042 1045 path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
1043 1046 ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
1044   - &path[level].bp_newreq);
  1047 + &path[level].bp_newreq, dat);
1045 1048 if (ret < 0)
1046 1049 goto err_out_child_node;
1047 1050 ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
... ... @@ -1069,16 +1072,18 @@
1069 1072  
1070 1073 /* error */
1071 1074 err_out_curr_node:
1072   - nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
  1075 + nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
  1076 + dat);
1073 1077 err_out_child_node:
1074 1078 for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
1075 1079 nilfs_btnode_delete(path[level].bp_sib_bh);
1076 1080 nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
1077   - &path[level].bp_newreq);
  1081 + &path[level].bp_newreq, dat);
1078 1082  
1079 1083 }
1080 1084  
1081   - nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
  1085 + nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
  1086 + dat);
1082 1087 err_out_data:
1083 1088 *levelp = level;
1084 1089 stats->bs_nblocks = 0;
... ... @@ -1089,16 +1094,19 @@
1089 1094 struct nilfs_btree_path *path,
1090 1095 int maxlevel, __u64 key, __u64 ptr)
1091 1096 {
  1097 + struct inode *dat = NULL;
1092 1098 int level;
1093 1099  
1094 1100 set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
1095 1101 ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
1096   - if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
  1102 + if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
1097 1103 nilfs_btree_set_target_v(btree, key, ptr);
  1104 + dat = nilfs_bmap_get_dat(&btree->bt_bmap);
  1105 + }
1098 1106  
1099 1107 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
1100 1108 nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
1101   - &path[level - 1].bp_newreq);
  1109 + &path[level - 1].bp_newreq, dat);
1102 1110 path[level].bp_op(btree, path, level, &key, &ptr);
1103 1111 }
1104 1112  
... ... @@ -1326,7 +1334,8 @@
1326 1334 static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
1327 1335 struct nilfs_btree_path *path,
1328 1336 int *levelp,
1329   - struct nilfs_bmap_stats *stats)
  1337 + struct nilfs_bmap_stats *stats,
  1338 + struct inode *dat)
1330 1339 {
1331 1340 struct buffer_head *bh;
1332 1341 struct nilfs_btree_node *node, *parent, *sib;
... ... @@ -1343,7 +1352,7 @@
1343 1352 nilfs_btree_node_get_ptr(btree, node,
1344 1353 path[level].bp_index);
1345 1354 ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
1346   - &path[level].bp_oldreq);
  1355 + &path[level].bp_oldreq, dat);
1347 1356 if (ret < 0)
1348 1357 goto err_out_child_node;
1349 1358  
... ... @@ -1421,7 +1430,7 @@
1421 1430 nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
1422 1431  
1423 1432 ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
1424   - &path[level].bp_oldreq);
  1433 + &path[level].bp_oldreq, dat);
1425 1434 if (ret < 0)
1426 1435 goto err_out_child_node;
... ... @@ -1436,12 +1445,12 @@
1436 1445  
1437 1446 /* error */
1438 1447 err_out_curr_node:
1439   - nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq);
  1448 + nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq, dat);
1440 1449 err_out_child_node:
1441 1450 for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
1442 1451 brelse(path[level].bp_sib_bh);
1443 1452 nilfs_bmap_abort_end_ptr(&btree->bt_bmap,
1444   - &path[level].bp_oldreq);
  1453 + &path[level].bp_oldreq, dat);
1445 1454 }
1446 1455 *levelp = level;
1447 1456 stats->bs_nblocks = 0;
1448 1457  
... ... @@ -1450,13 +1459,13 @@
1450 1459  
1451 1460 static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
1452 1461 struct nilfs_btree_path *path,
1453   - int maxlevel)
  1462 + int maxlevel, struct inode *dat)
1454 1463 {
1455 1464 int level;
1456 1465  
1457 1466 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
1458 1467 nilfs_bmap_commit_end_ptr(&btree->bt_bmap,
1459   - &path[level].bp_oldreq);
  1468 + &path[level].bp_oldreq, dat);
1460 1469 path[level].bp_op(btree, path, level, NULL, NULL);
1461 1470 }
1462 1471  
... ... @@ -1470,6 +1479,7 @@
1470 1479 struct nilfs_btree *btree;
1471 1480 struct nilfs_btree_path *path;
1472 1481 struct nilfs_bmap_stats stats;
  1482 + struct inode *dat;
1473 1483 int level, ret;
1474 1484  
1475 1485 btree = (struct nilfs_btree *)bmap;
1476 1486  
... ... @@ -1482,10 +1492,14 @@
1482 1492 if (ret < 0)
1483 1493 goto out;
1484 1494  
1485   - ret = nilfs_btree_prepare_delete(btree, path, &level, &stats);
  1495 +
  1496 + dat = NILFS_BMAP_USE_VBN(&btree->bt_bmap) ?
  1497 + nilfs_bmap_get_dat(&btree->bt_bmap) : NULL;
  1498 +
  1499 + ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat);
1486 1500 if (ret < 0)
1487 1501 goto out;
1488   - nilfs_btree_commit_delete(btree, path, level);
  1502 + nilfs_btree_commit_delete(btree, path, level, dat);
1489 1503 nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
1490 1504  
1491 1505 out:
... ... @@ -1610,18 +1624,20 @@
1610 1624 struct nilfs_bmap_stats *stats)
1611 1625 {
1612 1626 struct buffer_head *bh;
1613   - struct nilfs_btree *btree;
  1627 + struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
  1628 + struct inode *dat = NULL;
1614 1629 int ret;
1615 1630  
1616   - btree = (struct nilfs_btree *)bmap;
1617 1631 stats->bs_nblocks = 0;
1618 1632  
1619 1633 /* for data */
1620 1634 /* cannot find near ptr */
1621   - if (NILFS_BMAP_USE_VBN(bmap))
  1635 + if (NILFS_BMAP_USE_VBN(bmap)) {
1622 1636 dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
  1637 + dat = nilfs_bmap_get_dat(bmap);
  1638 + }
1623 1639  
1624   - ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq);
  1640 + ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq, dat);
1625 1641 if (ret < 0)
1626 1642 return ret;
1627 1643  
... ... @@ -1629,7 +1645,7 @@
1629 1645 stats->bs_nblocks++;
1630 1646 if (nreq != NULL) {
1631 1647 nreq->bpr_ptr = dreq->bpr_ptr + 1;
1632   - ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq);
  1648 + ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq, dat);
1633 1649 if (ret < 0)
1634 1650 goto err_out_dreq;
... ... @@ -1646,9 +1662,9 @@
1646 1662  
1647 1663 /* error */
1648 1664 err_out_nreq:
1649   - nilfs_bmap_abort_alloc_ptr(bmap, nreq);
  1665 + nilfs_bmap_abort_alloc_ptr(bmap, nreq, dat);
1650 1666 err_out_dreq:
1651   - nilfs_bmap_abort_alloc_ptr(bmap, dreq);
  1667 + nilfs_bmap_abort_alloc_ptr(bmap, dreq, dat);
1652 1668 stats->bs_nblocks = 0;
1653 1669 return ret;
... ... @@ -1663,8 +1679,9 @@
1663 1679 union nilfs_bmap_ptr_req *nreq,
1664 1680 struct buffer_head *bh)
1665 1681 {
1666   - struct nilfs_btree *btree;
  1682 + struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
1667 1683 struct nilfs_btree_node *node;
  1684 + struct inode *dat;
1668 1685 __u64 tmpptr;
1669 1686  
1670 1687 /* free resources */
1671 1688  
... ... @@ -1675,11 +1692,11 @@
1675 1692 set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
1676 1693  
1677 1694 /* convert and insert */
1678   - btree = (struct nilfs_btree *)bmap;
  1695 + dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
1679 1696 nilfs_btree_init(bmap);
1680 1697 if (nreq != NULL) {
1681   - nilfs_bmap_commit_alloc_ptr(bmap, dreq);
1682   - nilfs_bmap_commit_alloc_ptr(bmap, nreq);
  1698 + nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
  1699 + nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat);
1683 1700  
1684 1701 /* create child node at level 1 */
1685 1702 lock_buffer(bh);
... ... @@ -1701,7 +1718,7 @@
1701 1718 nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
1702 1719 2, 1, &keys[0], &tmpptr);
1703 1720 } else {
1704   - nilfs_bmap_commit_alloc_ptr(bmap, dreq);
  1721 + nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
1705 1722  
1706 1723 /* create root node at level 1 */
1707 1724 node = nilfs_btree_get_root(btree);
... ... @@ -1772,7 +1789,7 @@
1772 1789  
1773 1790 static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
1774 1791 struct nilfs_btree_path *path,
1775   - int level)
  1792 + int level, struct inode *dat)
1776 1793 {
1777 1794 struct nilfs_btree_node *parent;
1778 1795 int ret;
... ... @@ -1782,9 +1799,8 @@
1782 1799 nilfs_btree_node_get_ptr(btree, parent,
1783 1800 path[level + 1].bp_index);
1784 1801 path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
1785   - ret = nilfs_bmap_prepare_update_v(&btree->bt_bmap,
1786   - &path[level].bp_oldreq,
1787   - &path[level].bp_newreq);
  1802 + ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
  1803 + &path[level].bp_newreq.bpr_req);
1788 1804 if (ret < 0)
1789 1805 return ret;
1790 1806  
... ... @@ -1796,9 +1812,9 @@
1796 1812 &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
1797 1813 &path[level].bp_ctxt);
1798 1814 if (ret < 0) {
1799   - nilfs_bmap_abort_update_v(&btree->bt_bmap,
1800   - &path[level].bp_oldreq,
1801   - &path[level].bp_newreq);
  1815 + nilfs_dat_abort_update(dat,
  1816 + &path[level].bp_oldreq.bpr_req,
  1817 + &path[level].bp_newreq.bpr_req);
1802 1818 return ret;
1803 1819 }
... ... @@ -1808,13 +1824,13 @@
1808 1824  
1809 1825 static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
1810 1826 struct nilfs_btree_path *path,
1811   - int level)
  1827 + int level, struct inode *dat)
1812 1828 {
1813 1829 struct nilfs_btree_node *parent;
1814 1830  
1815   - nilfs_bmap_commit_update_v(&btree->bt_bmap,
1816   - &path[level].bp_oldreq,
1817   - &path[level].bp_newreq);
  1831 + nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
  1832 + &path[level].bp_newreq.bpr_req,
  1833 + btree->bt_bmap.b_ptr_type == NILFS_BMAP_PTR_VS);
1818 1834  
1819 1835 if (buffer_nilfs_node(path[level].bp_bh)) {
1820 1836 nilfs_btnode_commit_change_key(
1821 1837  
... ... @@ -1831,11 +1847,10 @@
1831 1847  
1832 1848 static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
1833 1849 struct nilfs_btree_path *path,
1834   - int level)
  1850 + int level, struct inode *dat)
1835 1851 {
1836   - nilfs_bmap_abort_update_v(&btree->bt_bmap,
1837   - &path[level].bp_oldreq,
1838   - &path[level].bp_newreq);
  1852 + nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req,
  1853 + &path[level].bp_newreq.bpr_req);
1839 1854 if (buffer_nilfs_node(path[level].bp_bh))
1840 1855 nilfs_btnode_abort_change_key(
1841 1856 &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
1842 1857  
... ... @@ -1844,14 +1859,14 @@
1844 1859  
1845 1860 static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
1846 1861 struct nilfs_btree_path *path,
1847   - int minlevel,
1848   - int *maxlevelp)
  1862 + int minlevel, int *maxlevelp,
  1863 + struct inode *dat)
1849 1864 {
1850 1865 int level, ret;
1851 1866  
1852 1867 level = minlevel;
1853 1868 if (!buffer_nilfs_volatile(path[level].bp_bh)) {
1854   - ret = nilfs_btree_prepare_update_v(btree, path, level);
  1869 + ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
1855 1870 if (ret < 0)
1856 1871 return ret;
1857 1872 }
... ... @@ -1859,7 +1874,7 @@
1859 1874 !buffer_dirty(path[level].bp_bh)) {
1860 1875  
1861 1876 WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
1862   - ret = nilfs_btree_prepare_update_v(btree, path, level);
  1877 + ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
1863 1878 if (ret < 0)
1864 1879 goto out;
1865 1880 }
... ... @@ -1871,39 +1886,40 @@
1871 1886 /* error */
1872 1887 out:
1873 1888 while (--level > minlevel)
1874   - nilfs_btree_abort_update_v(btree, path, level);
  1889 + nilfs_btree_abort_update_v(btree, path, level, dat);
1875 1890 if (!buffer_nilfs_volatile(path[level].bp_bh))
1876   - nilfs_btree_abort_update_v(btree, path, level);
  1891 + nilfs_btree_abort_update_v(btree, path, level, dat);
1877 1892 return ret;
1878 1893 }
1879 1894  
1880 1895 static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree,
1881 1896 struct nilfs_btree_path *path,
1882   - int minlevel,
1883   - int maxlevel,
1884   - struct buffer_head *bh)
  1897 + int minlevel, int maxlevel,
  1898 + struct buffer_head *bh,
  1899 + struct inode *dat)
1885 1900 {
1886 1901 int level;
1887 1902  
1888 1903 if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
1889   - nilfs_btree_commit_update_v(btree, path, minlevel);
  1904 + nilfs_btree_commit_update_v(btree, path, minlevel, dat);
1890 1905  
1891 1906 for (level = minlevel + 1; level <= maxlevel; level++)
1892   - nilfs_btree_commit_update_v(btree, path, level);
  1907 + nilfs_btree_commit_update_v(btree, path, level, dat);
1893 1908 }
1894 1909  
1895 1910 static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
1896 1911 struct nilfs_btree_path *path,
1897   - int level,
1898   - struct buffer_head *bh)
  1912 + int level, struct buffer_head *bh)
1899 1913 {
1900 1914 int maxlevel, ret;
1901 1915 struct nilfs_btree_node *parent;
  1916 + struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
1902 1917 __u64 ptr;
1903 1918  
1904 1919 get_bh(bh);
1905 1920 path[level].bp_bh = bh;
1906   - ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel);
  1921 + ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel,
  1922 + dat);
1907 1923 if (ret < 0)
1908 1924 goto out;
... ... @@ -1911,12 +1927,12 @@
1911 1927 parent = nilfs_btree_get_node(btree, path, level + 1);
1912 1928 ptr = nilfs_btree_node_get_ptr(btree, parent,
1913 1929 path[level + 1].bp_index);
1914   - ret = nilfs_bmap_mark_dirty(&btree->bt_bmap, ptr);
  1930 + ret = nilfs_dat_mark_dirty(dat, ptr);
1915 1931 if (ret < 0)
1916 1932 goto out;
1917 1933 }
1918 1934  
1919   - nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh);
  1935 + nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat);
1920 1936  
1921 1937 out:
1922 1938 brelse(path[level].bp_bh);
... ... @@ -1972,7 +1988,7 @@
1972 1988 static int nilfs_btree_propagate_gc(const struct nilfs_bmap *bmap,
1973 1989 struct buffer_head *bh)
1974 1990 {
1975   - return nilfs_bmap_mark_dirty(bmap, bh->b_blocknr);
  1991 + return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), bh->b_blocknr);
1976 1992 }
1977 1993  
1978 1994 static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
... ... @@ -2086,6 +2102,7 @@
2086 2102 union nilfs_binfo *binfo)
2087 2103 {
2088 2104 struct nilfs_btree_node *parent;
  2105 + struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
2089 2106 __u64 key;
2090 2107 __u64 ptr;
2091 2108 union nilfs_bmap_ptr_req req;
2092 2109  
... ... @@ -2095,9 +2112,10 @@
2095 2112 ptr = nilfs_btree_node_get_ptr(btree, parent,
2096 2113 path[level + 1].bp_index);
2097 2114 req.bpr_ptr = ptr;
2098   - ret = nilfs_bmap_start_v(&btree->bt_bmap, &req, blocknr);
2099   - if (unlikely(ret < 0))
  2115 + ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
  2116 + if (ret < 0)
2100 2117 return ret;
  2118 + nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
2101 2119  
2102 2120 key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
2103 2121 /* on-disk format */
2104 2122  
... ... @@ -2155,13 +2173,12 @@
2155 2173 sector_t blocknr,
2156 2174 union nilfs_binfo *binfo)
2157 2175 {
2158   - struct nilfs_btree *btree;
2159 2176 struct nilfs_btree_node *node;
2160 2177 __u64 key;
2161 2178 int ret;
2162 2179  
2163   - btree = (struct nilfs_btree *)bmap;
2164   - ret = nilfs_bmap_move_v(bmap, (*bh)->b_blocknr, blocknr);
  2180 + ret = nilfs_dat_move(nilfs_bmap_get_dat(bmap), (*bh)->b_blocknr,
  2181 + blocknr);
2165 2182 if (ret < 0)
2166 2183 return ret;
2167 2184  
... ... @@ -125,106 +125,64 @@
125 125 direct->d_bmap.b_last_allocated_ptr = ptr;
126 126 }
127 127  
128   -static int nilfs_direct_prepare_insert(struct nilfs_direct *direct,
129   - __u64 key,
130   - union nilfs_bmap_ptr_req *req,
131   - struct nilfs_bmap_stats *stats)
132   -{
133   - int ret;
134   -
135   - if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
136   - req->bpr_ptr = nilfs_direct_find_target_v(direct, key);
137   - ret = nilfs_bmap_prepare_alloc_ptr(&direct->d_bmap, req);
138   - if (ret < 0)
139   - return ret;
140   -
141   - stats->bs_nblocks = 1;
142   - return 0;
143   -}
144   -
145   -static void nilfs_direct_commit_insert(struct nilfs_direct *direct,
146   - union nilfs_bmap_ptr_req *req,
147   - __u64 key, __u64 ptr)
148   -{
149   - struct buffer_head *bh;
150   -
151   - /* ptr must be a pointer to a buffer head. */
152   - bh = (struct buffer_head *)((unsigned long)ptr);
153   - set_buffer_nilfs_volatile(bh);
154   -
155   - nilfs_bmap_commit_alloc_ptr(&direct->d_bmap, req);
156   - nilfs_direct_set_ptr(direct, key, req->bpr_ptr);
157   -
158   - if (!nilfs_bmap_dirty(&direct->d_bmap))
159   - nilfs_bmap_set_dirty(&direct->d_bmap);
160   -
161   - if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
162   - nilfs_direct_set_target_v(direct, key, req->bpr_ptr);
163   -}
164   -
165 128 static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
166 129 {
167   - struct nilfs_direct *direct;
  130 + struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
168 131 union nilfs_bmap_ptr_req req;
169   - struct nilfs_bmap_stats stats;
  132 + struct inode *dat = NULL;
  133 + struct buffer_head *bh;
170 134 int ret;
171 135  
172   - direct = (struct nilfs_direct *)bmap;
173 136 if (key > NILFS_DIRECT_KEY_MAX)
174 137 return -ENOENT;
175 138 if (nilfs_direct_get_ptr(direct, key) != NILFS_BMAP_INVALID_PTR)
176 139 return -EEXIST;
177 140  
178   - ret = nilfs_direct_prepare_insert(direct, key, &req, &stats);
179   - if (ret < 0)
180   - return ret;
181   - nilfs_direct_commit_insert(direct, &req, key, ptr);
182   - nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
  141 + if (NILFS_BMAP_USE_VBN(bmap)) {
  142 + req.bpr_ptr = nilfs_direct_find_target_v(direct, key);
  143 + dat = nilfs_bmap_get_dat(bmap);
  144 + }
  145 + ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
  146 + if (!ret) {
  147 + /* ptr must be a pointer to a buffer head. */
  148 + bh = (struct buffer_head *)((unsigned long)ptr);
  149 + set_buffer_nilfs_volatile(bh);
183 150  
184   - return 0;
185   -}
  151 + nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
  152 + nilfs_direct_set_ptr(direct, key, req.bpr_ptr);
186 153  
187   -static int nilfs_direct_prepare_delete(struct nilfs_direct *direct,
188   - union nilfs_bmap_ptr_req *req,
189   - __u64 key,
190   - struct nilfs_bmap_stats *stats)
191   -{
192   - int ret;
  154 + if (!nilfs_bmap_dirty(bmap))
  155 + nilfs_bmap_set_dirty(bmap);
193 156  
194   - req->bpr_ptr = nilfs_direct_get_ptr(direct, key);
195   - ret = nilfs_bmap_prepare_end_ptr(&direct->d_bmap, req);
196   - if (!ret)
197   - stats->bs_nblocks = 1;
  157 + if (NILFS_BMAP_USE_VBN(bmap))
  158 + nilfs_direct_set_target_v(direct, key, req.bpr_ptr);
  159 +
  160 + nilfs_bmap_add_blocks(bmap, 1);
  161 + }
198 162 return ret;
199 163 }
200 164  
201   -static void nilfs_direct_commit_delete(struct nilfs_direct *direct,
202   - union nilfs_bmap_ptr_req *req,
203   - __u64 key)
204   -{
205   - nilfs_bmap_commit_end_ptr(&direct->d_bmap, req);
206   - nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
207   -}
208   -
209 165 static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
210 166 {
211   - struct nilfs_direct *direct;
  167 + struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
212 168 union nilfs_bmap_ptr_req req;
213   - struct nilfs_bmap_stats stats;
  169 + struct inode *dat;
214 170 int ret;
215 171  
216   - direct = (struct nilfs_direct *)bmap;
217   - if ((key > NILFS_DIRECT_KEY_MAX) ||
  172 + if (key > NILFS_DIRECT_KEY_MAX ||
218 173 nilfs_direct_get_ptr(direct, key) == NILFS_BMAP_INVALID_PTR)
219 174 return -ENOENT;
220 175  
221   - ret = nilfs_direct_prepare_delete(direct, &req, key, &stats);
222   - if (ret < 0)
223   - return ret;
224   - nilfs_direct_commit_delete(direct, &req, key);
225   - nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
  176 + dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
  177 + req.bpr_ptr = nilfs_direct_get_ptr(direct, key);
226 178  
227   - return 0;
  179 + ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
  180 + if (!ret) {
  181 + nilfs_bmap_commit_end_ptr(bmap, &req, dat);
  182 + nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
  183 + nilfs_bmap_sub_blocks(bmap, 1);
  184 + }
  185 + return ret;
228 186 }
229 187  
230 188 static int nilfs_direct_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
... ... @@ -310,59 +268,56 @@
310 268 return 0;
311 269 }
312 270  
313   -static int nilfs_direct_propagate_v(struct nilfs_direct *direct,
314   - struct buffer_head *bh)
  271 +static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
  272 + struct buffer_head *bh)
315 273 {
316   - union nilfs_bmap_ptr_req oldreq, newreq;
  274 + struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
  275 + struct nilfs_palloc_req oldreq, newreq;
  276 + struct inode *dat;
317 277 __u64 key;
318 278 __u64 ptr;
319 279 int ret;
320 280  
321   - key = nilfs_bmap_data_get_key(&direct->d_bmap, bh);
  281 + if (!NILFS_BMAP_USE_VBN(bmap))
  282 + return 0;
  283 +
  284 + dat = nilfs_bmap_get_dat(bmap);
  285 + key = nilfs_bmap_data_get_key(bmap, bh);
322 286 ptr = nilfs_direct_get_ptr(direct, key);
323 287 if (!buffer_nilfs_volatile(bh)) {
324   - oldreq.bpr_ptr = ptr;
325   - newreq.bpr_ptr = ptr;
326   - ret = nilfs_bmap_prepare_update_v(&direct->d_bmap, &oldreq,
327   - &newreq);
  288 + oldreq.pr_entry_nr = ptr;
  289 + newreq.pr_entry_nr = ptr;
  290 + ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
328 291 if (ret < 0)
329 292 return ret;
330   - nilfs_bmap_commit_update_v(&direct->d_bmap, &oldreq, &newreq);
  293 + nilfs_dat_commit_update(dat, &oldreq, &newreq,
  294 + bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
331 295 set_buffer_nilfs_volatile(bh);
332   - nilfs_direct_set_ptr(direct, key, newreq.bpr_ptr);
  296 + nilfs_direct_set_ptr(direct, key, newreq.pr_entry_nr);
333 297 } else
334   - ret = nilfs_bmap_mark_dirty(&direct->d_bmap, ptr);
  298 + ret = nilfs_dat_mark_dirty(dat, ptr);
335 299  
336 300 return ret;
337 301 }
338 302  
339   -static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
340   - struct buffer_head *bh)
341   -{
342   - struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
343   -
344   - return NILFS_BMAP_USE_VBN(bmap) ?
345   - nilfs_direct_propagate_v(direct, bh) : 0;
346   -}
347   -
348 303 static int nilfs_direct_assign_v(struct nilfs_direct *direct,
349 304 __u64 key, __u64 ptr,
350 305 struct buffer_head **bh,
351 306 sector_t blocknr,
352 307 union nilfs_binfo *binfo)
353 308 {
  309 + struct inode *dat = nilfs_bmap_get_dat(&direct->d_bmap);
354 310 union nilfs_bmap_ptr_req req;
355 311 int ret;
356 312  
357 313 req.bpr_ptr = ptr;
358   - ret = nilfs_bmap_start_v(&direct->d_bmap, &req, blocknr);
359   - if (unlikely(ret < 0))
360   - return ret;
361   -
362   - binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
363   - binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
364   -
365   - return 0;
  314 + ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
  315 + if (!ret) {
  316 + nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
  317 + binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
  318 + binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
  319 + }
  320 + return ret;
366 321 }
367 322  
368 323 static int nilfs_direct_assign_p(struct nilfs_direct *direct,