Commit a206c817c864583c44e2f418db8e6c7a000fbc38

Authored by Christoph Hellwig
Committed by Alex Elder
1 parent 405f804294

xfs: kill xfs_iomap

Open-code the xfs_iomap code in its two callers.  The overlap of the
passed flags was already minimal and will be reduced further in the
next patch.

As a side effect, the BMAPI_* flags for xfs_bmapi and the IO_* flags
for I/O end processing are merged into a single set of flags, which
should be a bit more descriptive of the operation being performed.

Also improve the tracing by giving each caller its own set of
tracepoints.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
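
For orientation before the diff: a condensed, non-buildable C sketch of the
merged flag namespace and of how the buffered writeback path now selects a
type, abbreviated from the xfs_aops.h and xfs_aops.c hunks below (all names
are taken from the patch itself; IO_DIRECT only labels direct I/O ioends).

	/*
	 * One namespace replaces both the BMAPI_* request flags and the
	 * old IO_* completion types (see fs/xfs/linux-2.6/xfs_aops.h below).
	 */
	enum {
		IO_DIRECT = 0,	/* special case for direct I/O ioends */
		IO_DELALLOC,	/* mapping covers delalloc region */
		IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
		IO_OVERWRITE,	/* mapping covers already allocated extent */
	};

	/*
	 * xfs_vm_writepage() picks the type per buffer and hands it, plus a
	 * nonblocking hint, straight to the open-coded mapping helper.
	 */
	if (buffer_unwritten(bh))
		type = IO_UNWRITTEN;
	else if (buffer_delay(bh))
		type = IO_DELALLOC;
	else
		type = IO_OVERWRITE;

	err = xfs_map_blocks(inode, offset, len, &imap, type, nonblocking);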

Showing 5 changed files with 191 additions and 213 deletions

fs/xfs/linux-2.6/xfs_aops.c
... ... @@ -38,15 +38,6 @@
38 38 #include <linux/pagevec.h>
39 39 #include <linux/writeback.h>
40 40  
41   -/*
42   - * Types of I/O for bmap clustering and I/O completion tracking.
43   - */
44   -enum {
45   - IO_READ, /* mapping for a read */
46   - IO_DELAY, /* mapping covers delalloc region */
47   - IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
48   - IO_NEW /* just allocated */
49   -};
50 41  
51 42 /*
52 43 * Prime number of hash buckets since address is used as the key.
... ... @@ -182,9 +173,6 @@
182 173 xfs_inode_t *ip = XFS_I(ioend->io_inode);
183 174 xfs_fsize_t isize;
184 175  
185   - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
186   - ASSERT(ioend->io_type != IO_READ);
187   -
188 176 if (unlikely(ioend->io_error))
189 177 return 0;
190 178  
... ... @@ -244,10 +232,8 @@
244 232 * We might have to update the on-disk file size after extending
245 233 * writes.
246 234 */
247   - if (ioend->io_type != IO_READ) {
248   - error = xfs_setfilesize(ioend);
249   - ASSERT(!error || error == EAGAIN);
250   - }
  235 + error = xfs_setfilesize(ioend);
  236 + ASSERT(!error || error == EAGAIN);
251 237  
252 238 /*
253 239 * If we didn't complete processing of the ioend, requeue it to the
254 240  
255 241  
... ... @@ -320,12 +306,88 @@
320 306 loff_t offset,
321 307 ssize_t count,
322 308 struct xfs_bmbt_irec *imap,
323   - int flags)
  309 + int type,
  310 + int nonblocking)
324 311 {
325   - int nmaps = 1;
326   - int new = 0;
  312 + struct xfs_inode *ip = XFS_I(inode);
  313 + struct xfs_mount *mp = ip->i_mount;
  314 + xfs_fileoff_t offset_fsb, end_fsb;
  315 + int error = 0;
  316 + int lockmode = 0;
  317 + int bmapi_flags = XFS_BMAPI_ENTIRE;
  318 + int nimaps = 1;
327 319  
328   - return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
  320 + if (XFS_FORCED_SHUTDOWN(mp))
  321 + return -XFS_ERROR(EIO);
  322 +
  323 + switch (type) {
  324 + case IO_OVERWRITE:
  325 + lockmode = xfs_ilock_map_shared(ip);
  326 + break;
  327 + case IO_UNWRITTEN:
  328 + lockmode = XFS_ILOCK_EXCL;
  329 + bmapi_flags |= XFS_BMAPI_IGSTATE;
  330 + xfs_ilock(ip, lockmode);
  331 + break;
  332 + case IO_DELALLOC:
  333 + lockmode = XFS_ILOCK_SHARED;
  334 +
  335 + if (!xfs_ilock_nowait(ip, lockmode)) {
  336 + if (nonblocking)
  337 + return -XFS_ERROR(EAGAIN);
  338 + xfs_ilock(ip, lockmode);
  339 + }
  340 + break;
  341 + }
  342 +
  343 + ASSERT(offset <= mp->m_maxioffset);
  344 + if (offset + count > mp->m_maxioffset)
  345 + count = mp->m_maxioffset - offset;
  346 + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
  347 + offset_fsb = XFS_B_TO_FSBT(mp, offset);
  348 +
  349 + error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
  350 + bmapi_flags, NULL, 0, imap, &nimaps, NULL);
  351 + if (error)
  352 + goto out;
  353 +
  354 + switch (type) {
  355 + case IO_UNWRITTEN:
  356 + /* If we found an extent, return it */
  357 + if (nimaps &&
  358 + (imap->br_startblock != HOLESTARTBLOCK) &&
  359 + (imap->br_startblock != DELAYSTARTBLOCK)) {
  360 + trace_xfs_map_blocks_found(ip, offset, count, type, imap);
  361 + break;
  362 + }
  363 +
  364 + error = xfs_iomap_write_delay(ip, offset, count, imap);
  365 + if (!error)
  366 + trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
  367 + break;
  368 + case IO_DELALLOC:
  369 + /* If we found an extent, return it */
  370 + xfs_iunlock(ip, lockmode);
  371 + lockmode = 0;
  372 +
  373 + if (nimaps && !isnullstartblock(imap->br_startblock)) {
  374 + trace_xfs_map_blocks_found(ip, offset, count, type, imap);
  375 + break;
  376 + }
  377 +
  378 + error = xfs_iomap_write_allocate(ip, offset, count, imap);
  379 + if (!error)
  380 + trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
  381 + break;
  382 + default:
  383 + if (nimaps)
  384 + trace_xfs_map_blocks_found(ip, offset, count, type, imap);
  385 + }
  386 +
  387 +out:
  388 + if (lockmode)
  389 + xfs_iunlock(ip, lockmode);
  390 + return -XFS_ERROR(error);
329 391 }
330 392  
331 393 STATIC int
332 394  
... ... @@ -722,9 +784,9 @@
722 784 if (buffer_unwritten(bh))
723 785 acceptable = (type == IO_UNWRITTEN);
724 786 else if (buffer_delay(bh))
725   - acceptable = (type == IO_DELAY);
  787 + acceptable = (type == IO_DELALLOC);
726 788 else if (buffer_dirty(bh) && buffer_mapped(bh))
727   - acceptable = (type == IO_NEW);
  789 + acceptable = (type == IO_OVERWRITE);
728 790 else
729 791 break;
730 792 } while ((bh = bh->b_this_page) != head);
... ... @@ -809,7 +871,7 @@
809 871 if (buffer_unwritten(bh))
810 872 type = IO_UNWRITTEN;
811 873 else
812   - type = IO_DELAY;
  874 + type = IO_DELALLOC;
813 875  
814 876 if (!xfs_imap_valid(inode, imap, offset)) {
815 877 done = 1;
... ... @@ -826,7 +888,7 @@
826 888 page_dirty--;
827 889 count++;
828 890 } else {
829   - type = IO_NEW;
  891 + type = IO_OVERWRITE;
830 892 if (buffer_mapped(bh) && all_bh) {
831 893 lock_buffer(bh);
832 894 xfs_add_to_ioend(inode, bh, offset,
... ... @@ -926,7 +988,7 @@
926 988 struct buffer_head *bh, *head;
927 989 loff_t offset = page_offset(page);
928 990  
929   - if (!xfs_is_delayed_page(page, IO_DELAY))
  991 + if (!xfs_is_delayed_page(page, IO_DELALLOC))
930 992 goto out_invalidate;
931 993  
932 994 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
933 995  
... ... @@ -994,9 +1056,10 @@
994 1056 __uint64_t end_offset;
995 1057 pgoff_t end_index, last_index;
996 1058 ssize_t size, len;
997   - int flags, err, imap_valid = 0, uptodate = 1;
  1059 + int err, imap_valid = 0, uptodate = 1;
998 1060 int count = 0;
999 1061 int all_bh = 0;
  1062 + int nonblocking = 0;
1000 1063  
1001 1064 trace_xfs_writepage(inode, page, 0);
1002 1065  
1003 1066  
... ... @@ -1047,9 +1110,11 @@
1047 1110  
1048 1111 bh = head = page_buffers(page);
1049 1112 offset = page_offset(page);
1050   - flags = BMAPI_READ;
1051   - type = IO_NEW;
  1113 + type = IO_OVERWRITE;
1052 1114  
  1115 + if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
  1116 + nonblocking = 1;
  1117 +
1053 1118 do {
1054 1119 int new_ioend = 0;
1055 1120  
1056 1121  
1057 1122  
... ... @@ -1078,16 +1143,11 @@
1078 1143 type = IO_UNWRITTEN;
1079 1144 imap_valid = 0;
1080 1145 }
1081   - flags = BMAPI_WRITE | BMAPI_IGNSTATE;
1082 1146 } else if (buffer_delay(bh)) {
1083   - if (type != IO_DELAY) {
1084   - type = IO_DELAY;
  1147 + if (type != IO_DELALLOC) {
  1148 + type = IO_DELALLOC;
1085 1149 imap_valid = 0;
1086 1150 }
1087   - flags = BMAPI_ALLOCATE;
1088   -
1089   - if (wbc->sync_mode == WB_SYNC_NONE)
1090   - flags |= BMAPI_TRYLOCK;
1091 1151 }
1092 1152  
1093 1153 if (!imap_valid) {
... ... @@ -1100,8 +1160,8 @@
1100 1160 * for unwritten extent conversion.
1101 1161 */
1102 1162 new_ioend = 1;
1103   - err = xfs_map_blocks(inode, offset, len,
1104   - &imap, flags);
  1163 + err = xfs_map_blocks(inode, offset, len, &imap,
  1164 + type, nonblocking);
1105 1165 if (err)
1106 1166 goto error;
1107 1167 imap_valid = xfs_imap_valid(inode, &imap,
1108 1168  
1109 1169  
... ... @@ -1119,30 +1179,21 @@
1119 1179 * That means it must already have extents allocated
1120 1180 * underneath it. Map the extent by reading it.
1121 1181 */
1122   - if (flags != BMAPI_READ) {
1123   - flags = BMAPI_READ;
  1182 + if (type != IO_OVERWRITE) {
  1183 + type = IO_OVERWRITE;
1124 1184 imap_valid = 0;
1125 1185 }
1126 1186 if (!imap_valid) {
1127 1187 new_ioend = 1;
1128 1188 size = xfs_probe_cluster(inode, page, bh, head);
1129 1189 err = xfs_map_blocks(inode, offset, size,
1130   - &imap, flags);
  1190 + &imap, type, nonblocking);
1131 1191 if (err)
1132 1192 goto error;
1133 1193 imap_valid = xfs_imap_valid(inode, &imap,
1134 1194 offset);
1135 1195 }
1136 1196  
1137   - /*
1138   - * We set the type to IO_NEW in case we are doing a
1139   - * small write at EOF that is extending the file but
1140   - * without needing an allocation. We need to update the
1141   - * file size on I/O completion in this case so it is
1142   - * the same case as having just allocated a new extent
1143   - * that we are writing into for the first time.
1144   - */
1145   - type = IO_NEW;
1146 1197 if (imap_valid) {
1147 1198 all_bh = 1;
1148 1199 lock_buffer(bh);
1149 1200  
1150 1201  
1151 1202  
1152 1203  
... ... @@ -1250,14 +1301,20 @@
1250 1301 int create,
1251 1302 int direct)
1252 1303 {
1253   - int flags = create ? BMAPI_WRITE : BMAPI_READ;
  1304 + struct xfs_inode *ip = XFS_I(inode);
  1305 + struct xfs_mount *mp = ip->i_mount;
  1306 + xfs_fileoff_t offset_fsb, end_fsb;
  1307 + int error = 0;
  1308 + int lockmode = 0;
1254 1309 struct xfs_bmbt_irec imap;
  1310 + int nimaps = 1;
1255 1311 xfs_off_t offset;
1256 1312 ssize_t size;
1257   - int nimap = 1;
1258 1313 int new = 0;
1259   - int error;
1260 1314  
  1315 + if (XFS_FORCED_SHUTDOWN(mp))
  1316 + return -XFS_ERROR(EIO);
  1317 +
1261 1318 offset = (xfs_off_t)iblock << inode->i_blkbits;
1262 1319 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1263 1320 size = bh_result->b_size;
1264 1321  
1265 1322  
1266 1323  
... ... @@ -1265,16 +1322,46 @@
1265 1322 if (!create && direct && offset >= i_size_read(inode))
1266 1323 return 0;
1267 1324  
1268   - if (direct && create)
1269   - flags |= BMAPI_DIRECT;
  1325 + if (create) {
  1326 + lockmode = XFS_ILOCK_EXCL;
  1327 + xfs_ilock(ip, lockmode);
  1328 + } else {
  1329 + lockmode = xfs_ilock_map_shared(ip);
  1330 + }
1270 1331  
1271   - error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
1272   - &new);
  1332 + ASSERT(offset <= mp->m_maxioffset);
  1333 + if (offset + size > mp->m_maxioffset)
  1334 + size = mp->m_maxioffset - offset;
  1335 + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
  1336 + offset_fsb = XFS_B_TO_FSBT(mp, offset);
  1337 +
  1338 + error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
  1339 + XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL);
1273 1340 if (error)
1274   - return -error;
1275   - if (nimap == 0)
1276   - return 0;
  1341 + goto out_unlock;
1277 1342  
  1343 + if (create &&
  1344 + (!nimaps ||
  1345 + (imap.br_startblock == HOLESTARTBLOCK ||
  1346 + imap.br_startblock == DELAYSTARTBLOCK))) {
  1347 + if (direct) {
  1348 + error = xfs_iomap_write_direct(ip, offset, size,
  1349 + &imap, nimaps);
  1350 + } else {
  1351 + error = xfs_iomap_write_delay(ip, offset, size, &imap);
  1352 + }
  1353 + if (error)
  1354 + goto out_unlock;
  1355 +
  1356 + trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
  1357 + } else if (nimaps) {
  1358 + trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
  1359 + } else {
  1360 + trace_xfs_get_blocks_notfound(ip, offset, size);
  1361 + goto out_unlock;
  1362 + }
  1363 + xfs_iunlock(ip, lockmode);
  1364 +
1278 1365 if (imap.br_startblock != HOLESTARTBLOCK &&
1279 1366 imap.br_startblock != DELAYSTARTBLOCK) {
1280 1367 /*
... ... @@ -1340,6 +1427,10 @@
1340 1427 }
1341 1428  
1342 1429 return 0;
  1430 +
  1431 +out_unlock:
  1432 + xfs_iunlock(ip, lockmode);
  1433 + return -error;
1343 1434 }
1344 1435  
1345 1436 int
... ... @@ -1427,7 +1518,7 @@
1427 1518 ssize_t ret;
1428 1519  
1429 1520 if (rw & WRITE) {
1430   - iocb->private = xfs_alloc_ioend(inode, IO_NEW);
  1521 + iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
1431 1522  
1432 1523 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1433 1524 offset, nr_segs,
fs/xfs/linux-2.6/xfs_aops.h
... ... @@ -23,6 +23,22 @@
23 23 extern mempool_t *xfs_ioend_pool;
24 24  
25 25 /*
  26 + * Types of I/O for bmap clustering and I/O completion tracking.
  27 + */
  28 +enum {
  29 + IO_DIRECT = 0, /* special case for direct I/O ioends */
  30 + IO_DELALLOC, /* mapping covers delalloc region */
  31 + IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
  32 + IO_OVERWRITE, /* mapping covers already allocated extent */
  33 +};
  34 +
  35 +#define XFS_IO_TYPES \
  36 + { 0, "" }, \
  37 + { IO_DELALLOC, "delalloc" }, \
  38 + { IO_UNWRITTEN, "unwritten" }, \
  39 + { IO_OVERWRITE, "overwrite" }
  40 +
  41 +/*
26 42 * xfs_ioend struct manages large extent writes for XFS.
27 43 * It can manage several multi-page bio's at once.
28 44 */
fs/xfs/linux-2.6/xfs_trace.h
... ... @@ -935,10 +935,10 @@
935 935 DEFINE_PAGE_EVENT(xfs_releasepage);
936 936 DEFINE_PAGE_EVENT(xfs_invalidatepage);
937 937  
938   -DECLARE_EVENT_CLASS(xfs_iomap_class,
  938 +DECLARE_EVENT_CLASS(xfs_imap_class,
939 939 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
940   - int flags, struct xfs_bmbt_irec *irec),
941   - TP_ARGS(ip, offset, count, flags, irec),
  940 + int type, struct xfs_bmbt_irec *irec),
  941 + TP_ARGS(ip, offset, count, type, irec),
942 942 TP_STRUCT__entry(
943 943 __field(dev_t, dev)
944 944 __field(xfs_ino_t, ino)
... ... @@ -946,7 +946,7 @@
946 946 __field(loff_t, new_size)
947 947 __field(loff_t, offset)
948 948 __field(size_t, count)
949   - __field(int, flags)
  949 + __field(int, type)
950 950 __field(xfs_fileoff_t, startoff)
951 951 __field(xfs_fsblock_t, startblock)
952 952 __field(xfs_filblks_t, blockcount)
953 953  
... ... @@ -958,13 +958,13 @@
958 958 __entry->new_size = ip->i_new_size;
959 959 __entry->offset = offset;
960 960 __entry->count = count;
961   - __entry->flags = flags;
  961 + __entry->type = type;
962 962 __entry->startoff = irec ? irec->br_startoff : 0;
963 963 __entry->startblock = irec ? irec->br_startblock : 0;
964 964 __entry->blockcount = irec ? irec->br_blockcount : 0;
965 965 ),
966 966 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
967   - "offset 0x%llx count %zd flags %s "
  967 + "offset 0x%llx count %zd type %s "
968 968 "startoff 0x%llx startblock %lld blockcount 0x%llx",
969 969 MAJOR(__entry->dev), MINOR(__entry->dev),
970 970 __entry->ino,
971 971  
972 972  
... ... @@ -972,20 +972,21 @@
972 972 __entry->new_size,
973 973 __entry->offset,
974 974 __entry->count,
975   - __print_flags(__entry->flags, "|", BMAPI_FLAGS),
  975 + __print_symbolic(__entry->type, XFS_IO_TYPES),
976 976 __entry->startoff,
977 977 (__int64_t)__entry->startblock,
978 978 __entry->blockcount)
979 979 )
980 980  
981 981 #define DEFINE_IOMAP_EVENT(name) \
982   -DEFINE_EVENT(xfs_iomap_class, name, \
  982 +DEFINE_EVENT(xfs_imap_class, name, \
983 983 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
984   - int flags, struct xfs_bmbt_irec *irec), \
985   - TP_ARGS(ip, offset, count, flags, irec))
986   -DEFINE_IOMAP_EVENT(xfs_iomap_enter);
987   -DEFINE_IOMAP_EVENT(xfs_iomap_found);
988   -DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
  984 + int type, struct xfs_bmbt_irec *irec), \
  985 + TP_ARGS(ip, offset, count, type, irec))
  986 +DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
  987 +DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
  988 +DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
  989 +DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
989 990  
990 991 DECLARE_EVENT_CLASS(xfs_simple_io_class,
991 992 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
... ... @@ -1022,6 +1023,7 @@
1022 1023 TP_ARGS(ip, offset, count))
1023 1024 DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
1024 1025 DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
  1026 +DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
1025 1027  
1026 1028  
1027 1029 TRACE_EVENT(xfs_itruncate_start,
fs/xfs/xfs_iomap.c
... ... @@ -47,124 +47,8 @@
47 47  
48 48 #define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
49 49 << mp->m_writeio_log)
50   -#define XFS_STRAT_WRITE_IMAPS 2
51 50 #define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
52 51  
53   -STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
54   - struct xfs_bmbt_irec *, int);
55   -STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
56   - struct xfs_bmbt_irec *);
57   -STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
58   - struct xfs_bmbt_irec *);
59   -
60   -int
61   -xfs_iomap(
62   - struct xfs_inode *ip,
63   - xfs_off_t offset,
64   - ssize_t count,
65   - int flags,
66   - struct xfs_bmbt_irec *imap,
67   - int *nimaps,
68   - int *new)
69   -{
70   - struct xfs_mount *mp = ip->i_mount;
71   - xfs_fileoff_t offset_fsb, end_fsb;
72   - int error = 0;
73   - int lockmode = 0;
74   - int bmapi_flags = 0;
75   -
76   - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
77   -
78   - *new = 0;
79   -
80   - if (XFS_FORCED_SHUTDOWN(mp))
81   - return XFS_ERROR(EIO);
82   -
83   - trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
84   -
85   - switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
86   - case BMAPI_READ:
87   - lockmode = xfs_ilock_map_shared(ip);
88   - bmapi_flags = XFS_BMAPI_ENTIRE;
89   - break;
90   - case BMAPI_WRITE:
91   - lockmode = XFS_ILOCK_EXCL;
92   - if (flags & BMAPI_IGNSTATE)
93   - bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
94   - xfs_ilock(ip, lockmode);
95   - break;
96   - case BMAPI_ALLOCATE:
97   - lockmode = XFS_ILOCK_SHARED;
98   - bmapi_flags = XFS_BMAPI_ENTIRE;
99   -
100   - /* Attempt non-blocking lock */
101   - if (flags & BMAPI_TRYLOCK) {
102   - if (!xfs_ilock_nowait(ip, lockmode))
103   - return XFS_ERROR(EAGAIN);
104   - } else {
105   - xfs_ilock(ip, lockmode);
106   - }
107   - break;
108   - default:
109   - BUG();
110   - }
111   -
112   - ASSERT(offset <= mp->m_maxioffset);
113   - if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
114   - count = mp->m_maxioffset - offset;
115   - end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
116   - offset_fsb = XFS_B_TO_FSBT(mp, offset);
117   -
118   - error = xfs_bmapi(NULL, ip, offset_fsb,
119   - (xfs_filblks_t)(end_fsb - offset_fsb),
120   - bmapi_flags, NULL, 0, imap,
121   - nimaps, NULL);
122   -
123   - if (error)
124   - goto out;
125   -
126   - switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
127   - case BMAPI_WRITE:
128   - /* If we found an extent, return it */
129   - if (*nimaps &&
130   - (imap->br_startblock != HOLESTARTBLOCK) &&
131   - (imap->br_startblock != DELAYSTARTBLOCK)) {
132   - trace_xfs_iomap_found(ip, offset, count, flags, imap);
133   - break;
134   - }
135   -
136   - if (flags & BMAPI_DIRECT) {
137   - error = xfs_iomap_write_direct(ip, offset, count, imap,
138   - *nimaps);
139   - } else {
140   - error = xfs_iomap_write_delay(ip, offset, count, imap);
141   - }
142   -
143   - if (!error) {
144   - trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
145   - }
146   - *new = 1;
147   - break;
148   - case BMAPI_ALLOCATE:
149   - /* If we found an extent, return it */
150   - xfs_iunlock(ip, lockmode);
151   - lockmode = 0;
152   -
153   - if (*nimaps && !isnullstartblock(imap->br_startblock)) {
154   - trace_xfs_iomap_found(ip, offset, count, flags, imap);
155   - break;
156   - }
157   -
158   - error = xfs_iomap_write_allocate(ip, offset, count, imap);
159   - break;
160   - }
161   -
162   -out:
163   - if (lockmode)
164   - xfs_iunlock(ip, lockmode);
165   - return XFS_ERROR(error);
166   -}
167   -
168 52 STATIC int
169 53 xfs_iomap_eof_align_last_fsb(
170 54 xfs_mount_t *mp,
... ... @@ -233,7 +117,7 @@
233 117 return EFSCORRUPTED;
234 118 }
235 119  
236   -STATIC int
  120 +int
237 121 xfs_iomap_write_direct(
238 122 xfs_inode_t *ip,
239 123 xfs_off_t offset,
... ... @@ -428,7 +312,7 @@
428 312 return 0;
429 313 }
430 314  
431   -STATIC int
  315 +int
432 316 xfs_iomap_write_delay(
433 317 xfs_inode_t *ip,
434 318 xfs_off_t offset,
... ... @@ -527,7 +411,7 @@
527 411 * We no longer bother to look at the incoming map - all we have to
528 412 * guarantee is that whatever we allocate fills the required range.
529 413 */
530   -STATIC int
  414 +int
531 415 xfs_iomap_write_allocate(
532 416 xfs_inode_t *ip,
533 417 xfs_off_t offset,
fs/xfs/xfs_iomap.h
... ... @@ -18,30 +18,15 @@
18 18 #ifndef __XFS_IOMAP_H__
19 19 #define __XFS_IOMAP_H__
20 20  
21   -/* base extent manipulation calls */
22   -#define BMAPI_READ (1 << 0) /* read extents */
23   -#define BMAPI_WRITE (1 << 1) /* create extents */
24   -#define BMAPI_ALLOCATE (1 << 2) /* delayed allocate to real extents */
25   -
26   -/* modifiers */
27   -#define BMAPI_IGNSTATE (1 << 4) /* ignore unwritten state on read */
28   -#define BMAPI_DIRECT (1 << 5) /* direct instead of buffered write */
29   -#define BMAPI_MMA (1 << 6) /* allocate for mmap write */
30   -#define BMAPI_TRYLOCK (1 << 7) /* non-blocking request */
31   -
32   -#define BMAPI_FLAGS \
33   - { BMAPI_READ, "READ" }, \
34   - { BMAPI_WRITE, "WRITE" }, \
35   - { BMAPI_ALLOCATE, "ALLOCATE" }, \
36   - { BMAPI_IGNSTATE, "IGNSTATE" }, \
37   - { BMAPI_DIRECT, "DIRECT" }, \
38   - { BMAPI_TRYLOCK, "TRYLOCK" }
39   -
40 21 struct xfs_inode;
41 22 struct xfs_bmbt_irec;
42 23  
43   -extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
44   - struct xfs_bmbt_irec *, int *, int *);
  24 +extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
  25 + struct xfs_bmbt_irec *, int);
  26 +extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
  27 + struct xfs_bmbt_irec *);
  28 +extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
  29 + struct xfs_bmbt_irec *);
45 30 extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
46 31  
47 32 #endif /* __XFS_IOMAP_H__*/