Commit 6d4a8ecb344bddbbb8c71deb4dcea0be6955cfc3
Committed by: Dave Chinner
1 parent: 5348778699
Exists in: master and in 7 other branches
xfs: rename xfs_cmn_err_fsblock_zero()
The "cmn_err" part of the function name is no longer relevant. Rename the function to xfs_alert_fsblock_zero() to match the new logging API. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Alex Elder <aelder@sgi.com> Reviewed-by: Christoph Hellwig <hch@lst.de>
Showing 1 changed file with 5 additions and 5 deletions
fs/xfs/xfs_iomap.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. | 2 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | 6 | * modify it under the terms of the GNU General Public License as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it would be useful, | 9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | 15 | * along with this program; if not, write the Free Software Foundation, |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ | 17 | */ |
18 | #include "xfs.h" | 18 | #include "xfs.h" |
19 | #include "xfs_fs.h" | 19 | #include "xfs_fs.h" |
20 | #include "xfs_bit.h" | 20 | #include "xfs_bit.h" |
21 | #include "xfs_log.h" | 21 | #include "xfs_log.h" |
22 | #include "xfs_inum.h" | 22 | #include "xfs_inum.h" |
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
27 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
28 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
29 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
30 | #include "xfs_alloc_btree.h" | 30 | #include "xfs_alloc_btree.h" |
31 | #include "xfs_ialloc_btree.h" | 31 | #include "xfs_ialloc_btree.h" |
32 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
33 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
34 | #include "xfs_btree.h" | 34 | #include "xfs_btree.h" |
35 | #include "xfs_bmap.h" | 35 | #include "xfs_bmap.h" |
36 | #include "xfs_rtalloc.h" | 36 | #include "xfs_rtalloc.h" |
37 | #include "xfs_error.h" | 37 | #include "xfs_error.h" |
38 | #include "xfs_itable.h" | 38 | #include "xfs_itable.h" |
39 | #include "xfs_rw.h" | 39 | #include "xfs_rw.h" |
40 | #include "xfs_attr.h" | 40 | #include "xfs_attr.h" |
41 | #include "xfs_buf_item.h" | 41 | #include "xfs_buf_item.h" |
42 | #include "xfs_trans_space.h" | 42 | #include "xfs_trans_space.h" |
43 | #include "xfs_utils.h" | 43 | #include "xfs_utils.h" |
44 | #include "xfs_iomap.h" | 44 | #include "xfs_iomap.h" |
45 | #include "xfs_trace.h" | 45 | #include "xfs_trace.h" |
46 | 46 | ||
47 | 47 | ||
48 | #define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ | 48 | #define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ |
49 | << mp->m_writeio_log) | 49 | << mp->m_writeio_log) |
50 | #define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP | 50 | #define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP |
51 | 51 | ||
52 | STATIC int | 52 | STATIC int |
53 | xfs_iomap_eof_align_last_fsb( | 53 | xfs_iomap_eof_align_last_fsb( |
54 | xfs_mount_t *mp, | 54 | xfs_mount_t *mp, |
55 | xfs_inode_t *ip, | 55 | xfs_inode_t *ip, |
56 | xfs_extlen_t extsize, | 56 | xfs_extlen_t extsize, |
57 | xfs_fileoff_t *last_fsb) | 57 | xfs_fileoff_t *last_fsb) |
58 | { | 58 | { |
59 | xfs_fileoff_t new_last_fsb = 0; | 59 | xfs_fileoff_t new_last_fsb = 0; |
60 | xfs_extlen_t align; | 60 | xfs_extlen_t align; |
61 | int eof, error; | 61 | int eof, error; |
62 | 62 | ||
63 | if (XFS_IS_REALTIME_INODE(ip)) | 63 | if (XFS_IS_REALTIME_INODE(ip)) |
64 | ; | 64 | ; |
65 | /* | 65 | /* |
66 | * If mounted with the "-o swalloc" option, roundup the allocation | 66 | * If mounted with the "-o swalloc" option, roundup the allocation |
67 | * request to a stripe width boundary if the file size is >= | 67 | * request to a stripe width boundary if the file size is >= |
68 | * stripe width and we are allocating past the allocation eof. | 68 | * stripe width and we are allocating past the allocation eof. |
69 | */ | 69 | */ |
70 | else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) && | 70 | else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) && |
71 | (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_swidth))) | 71 | (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_swidth))) |
72 | new_last_fsb = roundup_64(*last_fsb, mp->m_swidth); | 72 | new_last_fsb = roundup_64(*last_fsb, mp->m_swidth); |
73 | /* | 73 | /* |
74 | * Roundup the allocation request to a stripe unit (m_dalign) boundary | 74 | * Roundup the allocation request to a stripe unit (m_dalign) boundary |
75 | * if the file size is >= stripe unit size, and we are allocating past | 75 | * if the file size is >= stripe unit size, and we are allocating past |
76 | * the allocation eof. | 76 | * the allocation eof. |
77 | */ | 77 | */ |
78 | else if (mp->m_dalign && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_dalign))) | 78 | else if (mp->m_dalign && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_dalign))) |
79 | new_last_fsb = roundup_64(*last_fsb, mp->m_dalign); | 79 | new_last_fsb = roundup_64(*last_fsb, mp->m_dalign); |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * Always round up the allocation request to an extent boundary | 82 | * Always round up the allocation request to an extent boundary |
83 | * (when file on a real-time subvolume or has di_extsize hint). | 83 | * (when file on a real-time subvolume or has di_extsize hint). |
84 | */ | 84 | */ |
85 | if (extsize) { | 85 | if (extsize) { |
86 | if (new_last_fsb) | 86 | if (new_last_fsb) |
87 | align = roundup_64(new_last_fsb, extsize); | 87 | align = roundup_64(new_last_fsb, extsize); |
88 | else | 88 | else |
89 | align = extsize; | 89 | align = extsize; |
90 | new_last_fsb = roundup_64(*last_fsb, align); | 90 | new_last_fsb = roundup_64(*last_fsb, align); |
91 | } | 91 | } |
92 | 92 | ||
93 | if (new_last_fsb) { | 93 | if (new_last_fsb) { |
94 | error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); | 94 | error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); |
95 | if (error) | 95 | if (error) |
96 | return error; | 96 | return error; |
97 | if (eof) | 97 | if (eof) |
98 | *last_fsb = new_last_fsb; | 98 | *last_fsb = new_last_fsb; |
99 | } | 99 | } |
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | STATIC int | 103 | STATIC int |
104 | xfs_cmn_err_fsblock_zero( | 104 | xfs_alert_fsblock_zero( |
105 | xfs_inode_t *ip, | 105 | xfs_inode_t *ip, |
106 | xfs_bmbt_irec_t *imap) | 106 | xfs_bmbt_irec_t *imap) |
107 | { | 107 | { |
108 | xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, | 108 | xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, |
109 | "Access to block zero in inode %llu " | 109 | "Access to block zero in inode %llu " |
110 | "start_block: %llx start_off: %llx " | 110 | "start_block: %llx start_off: %llx " |
111 | "blkcnt: %llx extent-state: %x\n", | 111 | "blkcnt: %llx extent-state: %x\n", |
112 | (unsigned long long)ip->i_ino, | 112 | (unsigned long long)ip->i_ino, |
113 | (unsigned long long)imap->br_startblock, | 113 | (unsigned long long)imap->br_startblock, |
114 | (unsigned long long)imap->br_startoff, | 114 | (unsigned long long)imap->br_startoff, |
115 | (unsigned long long)imap->br_blockcount, | 115 | (unsigned long long)imap->br_blockcount, |
116 | imap->br_state); | 116 | imap->br_state); |
117 | return EFSCORRUPTED; | 117 | return EFSCORRUPTED; |
118 | } | 118 | } |
119 | 119 | ||
120 | int | 120 | int |
121 | xfs_iomap_write_direct( | 121 | xfs_iomap_write_direct( |
122 | xfs_inode_t *ip, | 122 | xfs_inode_t *ip, |
123 | xfs_off_t offset, | 123 | xfs_off_t offset, |
124 | size_t count, | 124 | size_t count, |
125 | xfs_bmbt_irec_t *imap, | 125 | xfs_bmbt_irec_t *imap, |
126 | int nmaps) | 126 | int nmaps) |
127 | { | 127 | { |
128 | xfs_mount_t *mp = ip->i_mount; | 128 | xfs_mount_t *mp = ip->i_mount; |
129 | xfs_fileoff_t offset_fsb; | 129 | xfs_fileoff_t offset_fsb; |
130 | xfs_fileoff_t last_fsb; | 130 | xfs_fileoff_t last_fsb; |
131 | xfs_filblks_t count_fsb, resaligned; | 131 | xfs_filblks_t count_fsb, resaligned; |
132 | xfs_fsblock_t firstfsb; | 132 | xfs_fsblock_t firstfsb; |
133 | xfs_extlen_t extsz, temp; | 133 | xfs_extlen_t extsz, temp; |
134 | int nimaps; | 134 | int nimaps; |
135 | int bmapi_flag; | 135 | int bmapi_flag; |
136 | int quota_flag; | 136 | int quota_flag; |
137 | int rt; | 137 | int rt; |
138 | xfs_trans_t *tp; | 138 | xfs_trans_t *tp; |
139 | xfs_bmap_free_t free_list; | 139 | xfs_bmap_free_t free_list; |
140 | uint qblocks, resblks, resrtextents; | 140 | uint qblocks, resblks, resrtextents; |
141 | int committed; | 141 | int committed; |
142 | int error; | 142 | int error; |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * Make sure that the dquots are there. This doesn't hold | 145 | * Make sure that the dquots are there. This doesn't hold |
146 | * the ilock across a disk read. | 146 | * the ilock across a disk read. |
147 | */ | 147 | */ |
148 | error = xfs_qm_dqattach_locked(ip, 0); | 148 | error = xfs_qm_dqattach_locked(ip, 0); |
149 | if (error) | 149 | if (error) |
150 | return XFS_ERROR(error); | 150 | return XFS_ERROR(error); |
151 | 151 | ||
152 | rt = XFS_IS_REALTIME_INODE(ip); | 152 | rt = XFS_IS_REALTIME_INODE(ip); |
153 | extsz = xfs_get_extsz_hint(ip); | 153 | extsz = xfs_get_extsz_hint(ip); |
154 | 154 | ||
155 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 155 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
156 | last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); | 156 | last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); |
157 | if ((offset + count) > ip->i_size) { | 157 | if ((offset + count) > ip->i_size) { |
158 | error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); | 158 | error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); |
159 | if (error) | 159 | if (error) |
160 | goto error_out; | 160 | goto error_out; |
161 | } else { | 161 | } else { |
162 | if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) | 162 | if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) |
163 | last_fsb = MIN(last_fsb, (xfs_fileoff_t) | 163 | last_fsb = MIN(last_fsb, (xfs_fileoff_t) |
164 | imap->br_blockcount + | 164 | imap->br_blockcount + |
165 | imap->br_startoff); | 165 | imap->br_startoff); |
166 | } | 166 | } |
167 | count_fsb = last_fsb - offset_fsb; | 167 | count_fsb = last_fsb - offset_fsb; |
168 | ASSERT(count_fsb > 0); | 168 | ASSERT(count_fsb > 0); |
169 | 169 | ||
170 | resaligned = count_fsb; | 170 | resaligned = count_fsb; |
171 | if (unlikely(extsz)) { | 171 | if (unlikely(extsz)) { |
172 | if ((temp = do_mod(offset_fsb, extsz))) | 172 | if ((temp = do_mod(offset_fsb, extsz))) |
173 | resaligned += temp; | 173 | resaligned += temp; |
174 | if ((temp = do_mod(resaligned, extsz))) | 174 | if ((temp = do_mod(resaligned, extsz))) |
175 | resaligned += extsz - temp; | 175 | resaligned += extsz - temp; |
176 | } | 176 | } |
177 | 177 | ||
178 | if (unlikely(rt)) { | 178 | if (unlikely(rt)) { |
179 | resrtextents = qblocks = resaligned; | 179 | resrtextents = qblocks = resaligned; |
180 | resrtextents /= mp->m_sb.sb_rextsize; | 180 | resrtextents /= mp->m_sb.sb_rextsize; |
181 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); | 181 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); |
182 | quota_flag = XFS_QMOPT_RES_RTBLKS; | 182 | quota_flag = XFS_QMOPT_RES_RTBLKS; |
183 | } else { | 183 | } else { |
184 | resrtextents = 0; | 184 | resrtextents = 0; |
185 | resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); | 185 | resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); |
186 | quota_flag = XFS_QMOPT_RES_REGBLKS; | 186 | quota_flag = XFS_QMOPT_RES_REGBLKS; |
187 | } | 187 | } |
188 | 188 | ||
189 | /* | 189 | /* |
190 | * Allocate and setup the transaction | 190 | * Allocate and setup the transaction |
191 | */ | 191 | */ |
192 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 192 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
193 | tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); | 193 | tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); |
194 | error = xfs_trans_reserve(tp, resblks, | 194 | error = xfs_trans_reserve(tp, resblks, |
195 | XFS_WRITE_LOG_RES(mp), resrtextents, | 195 | XFS_WRITE_LOG_RES(mp), resrtextents, |
196 | XFS_TRANS_PERM_LOG_RES, | 196 | XFS_TRANS_PERM_LOG_RES, |
197 | XFS_WRITE_LOG_COUNT); | 197 | XFS_WRITE_LOG_COUNT); |
198 | /* | 198 | /* |
199 | * Check for running out of space, note: need lock to return | 199 | * Check for running out of space, note: need lock to return |
200 | */ | 200 | */ |
201 | if (error) | 201 | if (error) |
202 | xfs_trans_cancel(tp, 0); | 202 | xfs_trans_cancel(tp, 0); |
203 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 203 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
204 | if (error) | 204 | if (error) |
205 | goto error_out; | 205 | goto error_out; |
206 | 206 | ||
207 | error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); | 207 | error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); |
208 | if (error) | 208 | if (error) |
209 | goto error1; | 209 | goto error1; |
210 | 210 | ||
211 | xfs_trans_ijoin(tp, ip); | 211 | xfs_trans_ijoin(tp, ip); |
212 | 212 | ||
213 | bmapi_flag = XFS_BMAPI_WRITE; | 213 | bmapi_flag = XFS_BMAPI_WRITE; |
214 | if (offset < ip->i_size || extsz) | 214 | if (offset < ip->i_size || extsz) |
215 | bmapi_flag |= XFS_BMAPI_PREALLOC; | 215 | bmapi_flag |= XFS_BMAPI_PREALLOC; |
216 | 216 | ||
217 | /* | 217 | /* |
218 | * Issue the xfs_bmapi() call to allocate the blocks. | 218 | * Issue the xfs_bmapi() call to allocate the blocks. |
219 | * | 219 | * |
220 | * From this point onwards we overwrite the imap pointer that the | 220 | * From this point onwards we overwrite the imap pointer that the |
221 | * caller gave to us. | 221 | * caller gave to us. |
222 | */ | 222 | */ |
223 | xfs_bmap_init(&free_list, &firstfsb); | 223 | xfs_bmap_init(&free_list, &firstfsb); |
224 | nimaps = 1; | 224 | nimaps = 1; |
225 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, | 225 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, |
226 | &firstfsb, 0, imap, &nimaps, &free_list); | 226 | &firstfsb, 0, imap, &nimaps, &free_list); |
227 | if (error) | 227 | if (error) |
228 | goto error0; | 228 | goto error0; |
229 | 229 | ||
230 | /* | 230 | /* |
231 | * Complete the transaction | 231 | * Complete the transaction |
232 | */ | 232 | */ |
233 | error = xfs_bmap_finish(&tp, &free_list, &committed); | 233 | error = xfs_bmap_finish(&tp, &free_list, &committed); |
234 | if (error) | 234 | if (error) |
235 | goto error0; | 235 | goto error0; |
236 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 236 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
237 | if (error) | 237 | if (error) |
238 | goto error_out; | 238 | goto error_out; |
239 | 239 | ||
240 | /* | 240 | /* |
241 | * Copy any maps to caller's array and return any error. | 241 | * Copy any maps to caller's array and return any error. |
242 | */ | 242 | */ |
243 | if (nimaps == 0) { | 243 | if (nimaps == 0) { |
244 | error = ENOSPC; | 244 | error = ENOSPC; |
245 | goto error_out; | 245 | goto error_out; |
246 | } | 246 | } |
247 | 247 | ||
248 | if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) { | 248 | if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) { |
249 | error = xfs_cmn_err_fsblock_zero(ip, imap); | 249 | error = xfs_alert_fsblock_zero(ip, imap); |
250 | goto error_out; | 250 | goto error_out; |
251 | } | 251 | } |
252 | 252 | ||
253 | return 0; | 253 | return 0; |
254 | 254 | ||
255 | error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ | 255 | error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ |
256 | xfs_bmap_cancel(&free_list); | 256 | xfs_bmap_cancel(&free_list); |
257 | xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); | 257 | xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); |
258 | 258 | ||
259 | error1: /* Just cancel transaction */ | 259 | error1: /* Just cancel transaction */ |
260 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 260 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); |
261 | 261 | ||
262 | error_out: | 262 | error_out: |
263 | return XFS_ERROR(error); | 263 | return XFS_ERROR(error); |
264 | } | 264 | } |
265 | 265 | ||
266 | /* | 266 | /* |
267 | * If the caller is doing a write at the end of the file, then extend the | 267 | * If the caller is doing a write at the end of the file, then extend the |
268 | * allocation out to the file system's write iosize. We clean up any extra | 268 | * allocation out to the file system's write iosize. We clean up any extra |
269 | * space left over when the file is closed in xfs_inactive(). | 269 | * space left over when the file is closed in xfs_inactive(). |
270 | * | 270 | * |
271 | * If we find we already have delalloc preallocation beyond EOF, don't do more | 271 | * If we find we already have delalloc preallocation beyond EOF, don't do more |
272 | * preallocation as it it not needed. | 272 | * preallocation as it it not needed. |
273 | */ | 273 | */ |
274 | STATIC int | 274 | STATIC int |
275 | xfs_iomap_eof_want_preallocate( | 275 | xfs_iomap_eof_want_preallocate( |
276 | xfs_mount_t *mp, | 276 | xfs_mount_t *mp, |
277 | xfs_inode_t *ip, | 277 | xfs_inode_t *ip, |
278 | xfs_off_t offset, | 278 | xfs_off_t offset, |
279 | size_t count, | 279 | size_t count, |
280 | xfs_bmbt_irec_t *imap, | 280 | xfs_bmbt_irec_t *imap, |
281 | int nimaps, | 281 | int nimaps, |
282 | int *prealloc) | 282 | int *prealloc) |
283 | { | 283 | { |
284 | xfs_fileoff_t start_fsb; | 284 | xfs_fileoff_t start_fsb; |
285 | xfs_filblks_t count_fsb; | 285 | xfs_filblks_t count_fsb; |
286 | xfs_fsblock_t firstblock; | 286 | xfs_fsblock_t firstblock; |
287 | int n, error, imaps; | 287 | int n, error, imaps; |
288 | int found_delalloc = 0; | 288 | int found_delalloc = 0; |
289 | 289 | ||
290 | *prealloc = 0; | 290 | *prealloc = 0; |
291 | if ((offset + count) <= ip->i_size) | 291 | if ((offset + count) <= ip->i_size) |
292 | return 0; | 292 | return 0; |
293 | 293 | ||
294 | /* | 294 | /* |
295 | * If there are any real blocks past eof, then don't | 295 | * If there are any real blocks past eof, then don't |
296 | * do any speculative allocation. | 296 | * do any speculative allocation. |
297 | */ | 297 | */ |
298 | start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1))); | 298 | start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1))); |
299 | count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); | 299 | count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); |
300 | while (count_fsb > 0) { | 300 | while (count_fsb > 0) { |
301 | imaps = nimaps; | 301 | imaps = nimaps; |
302 | firstblock = NULLFSBLOCK; | 302 | firstblock = NULLFSBLOCK; |
303 | error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, | 303 | error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, |
304 | &firstblock, 0, imap, &imaps, NULL); | 304 | &firstblock, 0, imap, &imaps, NULL); |
305 | if (error) | 305 | if (error) |
306 | return error; | 306 | return error; |
307 | for (n = 0; n < imaps; n++) { | 307 | for (n = 0; n < imaps; n++) { |
308 | if ((imap[n].br_startblock != HOLESTARTBLOCK) && | 308 | if ((imap[n].br_startblock != HOLESTARTBLOCK) && |
309 | (imap[n].br_startblock != DELAYSTARTBLOCK)) | 309 | (imap[n].br_startblock != DELAYSTARTBLOCK)) |
310 | return 0; | 310 | return 0; |
311 | start_fsb += imap[n].br_blockcount; | 311 | start_fsb += imap[n].br_blockcount; |
312 | count_fsb -= imap[n].br_blockcount; | 312 | count_fsb -= imap[n].br_blockcount; |
313 | 313 | ||
314 | if (imap[n].br_startblock == DELAYSTARTBLOCK) | 314 | if (imap[n].br_startblock == DELAYSTARTBLOCK) |
315 | found_delalloc = 1; | 315 | found_delalloc = 1; |
316 | } | 316 | } |
317 | } | 317 | } |
318 | if (!found_delalloc) | 318 | if (!found_delalloc) |
319 | *prealloc = 1; | 319 | *prealloc = 1; |
320 | return 0; | 320 | return 0; |
321 | } | 321 | } |
322 | 322 | ||
323 | /* | 323 | /* |
324 | * If we don't have a user specified preallocation size, dynamically increase | 324 | * If we don't have a user specified preallocation size, dynamically increase |
325 | * the preallocation size as the size of the file grows. Cap the maximum size | 325 | * the preallocation size as the size of the file grows. Cap the maximum size |
326 | * at a single extent or less if the filesystem is near full. The closer the | 326 | * at a single extent or less if the filesystem is near full. The closer the |
327 | * filesystem is to full, the smaller the maximum prealocation. | 327 | * filesystem is to full, the smaller the maximum prealocation. |
328 | */ | 328 | */ |
329 | STATIC xfs_fsblock_t | 329 | STATIC xfs_fsblock_t |
330 | xfs_iomap_prealloc_size( | 330 | xfs_iomap_prealloc_size( |
331 | struct xfs_mount *mp, | 331 | struct xfs_mount *mp, |
332 | struct xfs_inode *ip) | 332 | struct xfs_inode *ip) |
333 | { | 333 | { |
334 | xfs_fsblock_t alloc_blocks = 0; | 334 | xfs_fsblock_t alloc_blocks = 0; |
335 | 335 | ||
336 | if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { | 336 | if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { |
337 | int shift = 0; | 337 | int shift = 0; |
338 | int64_t freesp; | 338 | int64_t freesp; |
339 | 339 | ||
340 | /* | 340 | /* |
341 | * rounddown_pow_of_two() returns an undefined result | 341 | * rounddown_pow_of_two() returns an undefined result |
342 | * if we pass in alloc_blocks = 0. Hence the "+ 1" to | 342 | * if we pass in alloc_blocks = 0. Hence the "+ 1" to |
343 | * ensure we always pass in a non-zero value. | 343 | * ensure we always pass in a non-zero value. |
344 | */ | 344 | */ |
345 | alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1; | 345 | alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1; |
346 | alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN, | 346 | alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN, |
347 | rounddown_pow_of_two(alloc_blocks)); | 347 | rounddown_pow_of_two(alloc_blocks)); |
348 | 348 | ||
349 | xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); | 349 | xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); |
350 | freesp = mp->m_sb.sb_fdblocks; | 350 | freesp = mp->m_sb.sb_fdblocks; |
351 | if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { | 351 | if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { |
352 | shift = 2; | 352 | shift = 2; |
353 | if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) | 353 | if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) |
354 | shift++; | 354 | shift++; |
355 | if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT]) | 355 | if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT]) |
356 | shift++; | 356 | shift++; |
357 | if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT]) | 357 | if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT]) |
358 | shift++; | 358 | shift++; |
359 | if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT]) | 359 | if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT]) |
360 | shift++; | 360 | shift++; |
361 | } | 361 | } |
362 | if (shift) | 362 | if (shift) |
363 | alloc_blocks >>= shift; | 363 | alloc_blocks >>= shift; |
364 | } | 364 | } |
365 | 365 | ||
366 | if (alloc_blocks < mp->m_writeio_blocks) | 366 | if (alloc_blocks < mp->m_writeio_blocks) |
367 | alloc_blocks = mp->m_writeio_blocks; | 367 | alloc_blocks = mp->m_writeio_blocks; |
368 | 368 | ||
369 | return alloc_blocks; | 369 | return alloc_blocks; |
370 | } | 370 | } |
371 | 371 | ||
372 | int | 372 | int |
373 | xfs_iomap_write_delay( | 373 | xfs_iomap_write_delay( |
374 | xfs_inode_t *ip, | 374 | xfs_inode_t *ip, |
375 | xfs_off_t offset, | 375 | xfs_off_t offset, |
376 | size_t count, | 376 | size_t count, |
377 | xfs_bmbt_irec_t *ret_imap) | 377 | xfs_bmbt_irec_t *ret_imap) |
378 | { | 378 | { |
379 | xfs_mount_t *mp = ip->i_mount; | 379 | xfs_mount_t *mp = ip->i_mount; |
380 | xfs_fileoff_t offset_fsb; | 380 | xfs_fileoff_t offset_fsb; |
381 | xfs_fileoff_t last_fsb; | 381 | xfs_fileoff_t last_fsb; |
382 | xfs_off_t aligned_offset; | 382 | xfs_off_t aligned_offset; |
383 | xfs_fileoff_t ioalign; | 383 | xfs_fileoff_t ioalign; |
384 | xfs_fsblock_t firstblock; | 384 | xfs_fsblock_t firstblock; |
385 | xfs_extlen_t extsz; | 385 | xfs_extlen_t extsz; |
386 | int nimaps; | 386 | int nimaps; |
387 | xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; | 387 | xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; |
388 | int prealloc, flushed = 0; | 388 | int prealloc, flushed = 0; |
389 | int error; | 389 | int error; |
390 | 390 | ||
391 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 391 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
392 | 392 | ||
393 | /* | 393 | /* |
394 | * Make sure that the dquots are there. This doesn't hold | 394 | * Make sure that the dquots are there. This doesn't hold |
395 | * the ilock across a disk read. | 395 | * the ilock across a disk read. |
396 | */ | 396 | */ |
397 | error = xfs_qm_dqattach_locked(ip, 0); | 397 | error = xfs_qm_dqattach_locked(ip, 0); |
398 | if (error) | 398 | if (error) |
399 | return XFS_ERROR(error); | 399 | return XFS_ERROR(error); |
400 | 400 | ||
401 | extsz = xfs_get_extsz_hint(ip); | 401 | extsz = xfs_get_extsz_hint(ip); |
402 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 402 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
403 | 403 | ||
404 | 404 | ||
405 | error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, | 405 | error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, |
406 | imap, XFS_WRITE_IMAPS, &prealloc); | 406 | imap, XFS_WRITE_IMAPS, &prealloc); |
407 | if (error) | 407 | if (error) |
408 | return error; | 408 | return error; |
409 | 409 | ||
410 | retry: | 410 | retry: |
411 | if (prealloc) { | 411 | if (prealloc) { |
412 | xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip); | 412 | xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip); |
413 | 413 | ||
414 | aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); | 414 | aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); |
415 | ioalign = XFS_B_TO_FSBT(mp, aligned_offset); | 415 | ioalign = XFS_B_TO_FSBT(mp, aligned_offset); |
416 | last_fsb = ioalign + alloc_blocks; | 416 | last_fsb = ioalign + alloc_blocks; |
417 | } else { | 417 | } else { |
418 | last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); | 418 | last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); |
419 | } | 419 | } |
420 | 420 | ||
421 | if (prealloc || extsz) { | 421 | if (prealloc || extsz) { |
422 | error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); | 422 | error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); |
423 | if (error) | 423 | if (error) |
424 | return error; | 424 | return error; |
425 | } | 425 | } |
426 | 426 | ||
427 | nimaps = XFS_WRITE_IMAPS; | 427 | nimaps = XFS_WRITE_IMAPS; |
428 | firstblock = NULLFSBLOCK; | 428 | firstblock = NULLFSBLOCK; |
429 | error = xfs_bmapi(NULL, ip, offset_fsb, | 429 | error = xfs_bmapi(NULL, ip, offset_fsb, |
430 | (xfs_filblks_t)(last_fsb - offset_fsb), | 430 | (xfs_filblks_t)(last_fsb - offset_fsb), |
431 | XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | | 431 | XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | |
432 | XFS_BMAPI_ENTIRE, &firstblock, 1, imap, | 432 | XFS_BMAPI_ENTIRE, &firstblock, 1, imap, |
433 | &nimaps, NULL); | 433 | &nimaps, NULL); |
434 | switch (error) { | 434 | switch (error) { |
435 | case 0: | 435 | case 0: |
436 | case ENOSPC: | 436 | case ENOSPC: |
437 | case EDQUOT: | 437 | case EDQUOT: |
438 | break; | 438 | break; |
439 | default: | 439 | default: |
440 | return XFS_ERROR(error); | 440 | return XFS_ERROR(error); |
441 | } | 441 | } |
442 | 442 | ||
443 | /* | 443 | /* |
444 | * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For | 444 | * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For |
445 | * ENOSPC, * flush all other inodes with delalloc blocks to free up | 445 | * ENOSPC, * flush all other inodes with delalloc blocks to free up |
446 | * some of the excess reserved metadata space. For both cases, retry | 446 | * some of the excess reserved metadata space. For both cases, retry |
447 | * without EOF preallocation. | 447 | * without EOF preallocation. |
448 | */ | 448 | */ |
449 | if (nimaps == 0) { | 449 | if (nimaps == 0) { |
450 | trace_xfs_delalloc_enospc(ip, offset, count); | 450 | trace_xfs_delalloc_enospc(ip, offset, count); |
451 | if (flushed) | 451 | if (flushed) |
452 | return XFS_ERROR(error ? error : ENOSPC); | 452 | return XFS_ERROR(error ? error : ENOSPC); |
453 | 453 | ||
454 | if (error == ENOSPC) { | 454 | if (error == ENOSPC) { |
455 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 455 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
456 | xfs_flush_inodes(ip); | 456 | xfs_flush_inodes(ip); |
457 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 457 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
458 | } | 458 | } |
459 | 459 | ||
460 | flushed = 1; | 460 | flushed = 1; |
461 | error = 0; | 461 | error = 0; |
462 | prealloc = 0; | 462 | prealloc = 0; |
463 | goto retry; | 463 | goto retry; |
464 | } | 464 | } |
465 | 465 | ||
466 | if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) | 466 | if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) |
467 | return xfs_cmn_err_fsblock_zero(ip, &imap[0]); | 467 | return xfs_alert_fsblock_zero(ip, &imap[0]); |
468 | 468 | ||
469 | *ret_imap = imap[0]; | 469 | *ret_imap = imap[0]; |
470 | return 0; | 470 | return 0; |
471 | } | 471 | } |
472 | 472 | ||
473 | /* | 473 | /* |
474 | * Pass in a delayed allocate extent, convert it to real extents; | 474 | * Pass in a delayed allocate extent, convert it to real extents; |
475 | * return to the caller the extent we create which maps on top of | 475 | * return to the caller the extent we create which maps on top of |
476 | * the originating callers request. | 476 | * the originating callers request. |
477 | * | 477 | * |
478 | * Called without a lock on the inode. | 478 | * Called without a lock on the inode. |
479 | * | 479 | * |
480 | * We no longer bother to look at the incoming map - all we have to | 480 | * We no longer bother to look at the incoming map - all we have to |
481 | * guarantee is that whatever we allocate fills the required range. | 481 | * guarantee is that whatever we allocate fills the required range. |
482 | */ | 482 | */ |
483 | int | 483 | int |
484 | xfs_iomap_write_allocate( | 484 | xfs_iomap_write_allocate( |
485 | xfs_inode_t *ip, | 485 | xfs_inode_t *ip, |
486 | xfs_off_t offset, | 486 | xfs_off_t offset, |
487 | size_t count, | 487 | size_t count, |
488 | xfs_bmbt_irec_t *imap) | 488 | xfs_bmbt_irec_t *imap) |
489 | { | 489 | { |
490 | xfs_mount_t *mp = ip->i_mount; | 490 | xfs_mount_t *mp = ip->i_mount; |
491 | xfs_fileoff_t offset_fsb, last_block; | 491 | xfs_fileoff_t offset_fsb, last_block; |
492 | xfs_fileoff_t end_fsb, map_start_fsb; | 492 | xfs_fileoff_t end_fsb, map_start_fsb; |
493 | xfs_fsblock_t first_block; | 493 | xfs_fsblock_t first_block; |
494 | xfs_bmap_free_t free_list; | 494 | xfs_bmap_free_t free_list; |
495 | xfs_filblks_t count_fsb; | 495 | xfs_filblks_t count_fsb; |
496 | xfs_trans_t *tp; | 496 | xfs_trans_t *tp; |
497 | int nimaps, committed; | 497 | int nimaps, committed; |
498 | int error = 0; | 498 | int error = 0; |
499 | int nres; | 499 | int nres; |
500 | 500 | ||
501 | /* | 501 | /* |
502 | * Make sure that the dquots are there. | 502 | * Make sure that the dquots are there. |
503 | */ | 503 | */ |
504 | error = xfs_qm_dqattach(ip, 0); | 504 | error = xfs_qm_dqattach(ip, 0); |
505 | if (error) | 505 | if (error) |
506 | return XFS_ERROR(error); | 506 | return XFS_ERROR(error); |
507 | 507 | ||
508 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 508 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
509 | count_fsb = imap->br_blockcount; | 509 | count_fsb = imap->br_blockcount; |
510 | map_start_fsb = imap->br_startoff; | 510 | map_start_fsb = imap->br_startoff; |
511 | 511 | ||
512 | XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); | 512 | XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); |
513 | 513 | ||
514 | while (count_fsb != 0) { | 514 | while (count_fsb != 0) { |
515 | /* | 515 | /* |
516 | * Set up a transaction with which to allocate the | 516 | * Set up a transaction with which to allocate the |
517 | * backing store for the file. Do allocations in a | 517 | * backing store for the file. Do allocations in a |
518 | * loop until we get some space in the range we are | 518 | * loop until we get some space in the range we are |
519 | * interested in. The other space that might be allocated | 519 | * interested in. The other space that might be allocated |
520 | * is in the delayed allocation extent on which we sit | 520 | * is in the delayed allocation extent on which we sit |
521 | * but before our buffer starts. | 521 | * but before our buffer starts. |
522 | */ | 522 | */ |
523 | 523 | ||
524 | nimaps = 0; | 524 | nimaps = 0; |
525 | while (nimaps == 0) { | 525 | while (nimaps == 0) { |
526 | tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); | 526 | tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); |
527 | tp->t_flags |= XFS_TRANS_RESERVE; | 527 | tp->t_flags |= XFS_TRANS_RESERVE; |
528 | nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); | 528 | nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); |
529 | error = xfs_trans_reserve(tp, nres, | 529 | error = xfs_trans_reserve(tp, nres, |
530 | XFS_WRITE_LOG_RES(mp), | 530 | XFS_WRITE_LOG_RES(mp), |
531 | 0, XFS_TRANS_PERM_LOG_RES, | 531 | 0, XFS_TRANS_PERM_LOG_RES, |
532 | XFS_WRITE_LOG_COUNT); | 532 | XFS_WRITE_LOG_COUNT); |
533 | if (error) { | 533 | if (error) { |
534 | xfs_trans_cancel(tp, 0); | 534 | xfs_trans_cancel(tp, 0); |
535 | return XFS_ERROR(error); | 535 | return XFS_ERROR(error); |
536 | } | 536 | } |
537 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 537 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
538 | xfs_trans_ijoin(tp, ip); | 538 | xfs_trans_ijoin(tp, ip); |
539 | 539 | ||
540 | xfs_bmap_init(&free_list, &first_block); | 540 | xfs_bmap_init(&free_list, &first_block); |
541 | 541 | ||
542 | /* | 542 | /* |
543 | * it is possible that the extents have changed since | 543 | * it is possible that the extents have changed since |
544 | * we did the read call as we dropped the ilock for a | 544 | * we did the read call as we dropped the ilock for a |
545 | * while. We have to be careful about truncates or hole | 545 | * while. We have to be careful about truncates or hole |
546 | * punchs here - we are not allowed to allocate | 546 | * punchs here - we are not allowed to allocate |
547 | * non-delalloc blocks here. | 547 | * non-delalloc blocks here. |
548 | * | 548 | * |
549 | * The only protection against truncation is the pages | 549 | * The only protection against truncation is the pages |
550 | * for the range we are being asked to convert are | 550 | * for the range we are being asked to convert are |
551 | * locked and hence a truncate will block on them | 551 | * locked and hence a truncate will block on them |
552 | * first. | 552 | * first. |
553 | * | 553 | * |
554 | * As a result, if we go beyond the range we really | 554 | * As a result, if we go beyond the range we really |
555 | * need and hit an delalloc extent boundary followed by | 555 | * need and hit an delalloc extent boundary followed by |
556 | * a hole while we have excess blocks in the map, we | 556 | * a hole while we have excess blocks in the map, we |
557 | * will fill the hole incorrectly and overrun the | 557 | * will fill the hole incorrectly and overrun the |
558 | * transaction reservation. | 558 | * transaction reservation. |
559 | * | 559 | * |
560 | * Using a single map prevents this as we are forced to | 560 | * Using a single map prevents this as we are forced to |
561 | * check each map we look for overlap with the desired | 561 | * check each map we look for overlap with the desired |
562 | * range and abort as soon as we find it. Also, given | 562 | * range and abort as soon as we find it. Also, given |
563 | * that we only return a single map, having one beyond | 563 | * that we only return a single map, having one beyond |
564 | * what we can return is probably a bit silly. | 564 | * what we can return is probably a bit silly. |
565 | * | 565 | * |
566 | * We also need to check that we don't go beyond EOF; | 566 | * We also need to check that we don't go beyond EOF; |
567 | * this is a truncate optimisation as a truncate sets | 567 | * this is a truncate optimisation as a truncate sets |
568 | * the new file size before block on the pages we | 568 | * the new file size before block on the pages we |
569 | * currently have locked under writeback. Because they | 569 | * currently have locked under writeback. Because they |
570 | * are about to be tossed, we don't need to write them | 570 | * are about to be tossed, we don't need to write them |
571 | * back.... | 571 | * back.... |
572 | */ | 572 | */ |
573 | nimaps = 1; | 573 | nimaps = 1; |
574 | end_fsb = XFS_B_TO_FSB(mp, ip->i_size); | 574 | end_fsb = XFS_B_TO_FSB(mp, ip->i_size); |
575 | error = xfs_bmap_last_offset(NULL, ip, &last_block, | 575 | error = xfs_bmap_last_offset(NULL, ip, &last_block, |
576 | XFS_DATA_FORK); | 576 | XFS_DATA_FORK); |
577 | if (error) | 577 | if (error) |
578 | goto trans_cancel; | 578 | goto trans_cancel; |
579 | 579 | ||
580 | last_block = XFS_FILEOFF_MAX(last_block, end_fsb); | 580 | last_block = XFS_FILEOFF_MAX(last_block, end_fsb); |
581 | if ((map_start_fsb + count_fsb) > last_block) { | 581 | if ((map_start_fsb + count_fsb) > last_block) { |
582 | count_fsb = last_block - map_start_fsb; | 582 | count_fsb = last_block - map_start_fsb; |
583 | if (count_fsb == 0) { | 583 | if (count_fsb == 0) { |
584 | error = EAGAIN; | 584 | error = EAGAIN; |
585 | goto trans_cancel; | 585 | goto trans_cancel; |
586 | } | 586 | } |
587 | } | 587 | } |
588 | 588 | ||
589 | /* | 589 | /* |
590 | * Go get the actual blocks. | 590 | * Go get the actual blocks. |
591 | * | 591 | * |
592 | * From this point onwards we overwrite the imap | 592 | * From this point onwards we overwrite the imap |
593 | * pointer that the caller gave to us. | 593 | * pointer that the caller gave to us. |
594 | */ | 594 | */ |
595 | error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, | 595 | error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, |
596 | XFS_BMAPI_WRITE, &first_block, 1, | 596 | XFS_BMAPI_WRITE, &first_block, 1, |
597 | imap, &nimaps, &free_list); | 597 | imap, &nimaps, &free_list); |
598 | if (error) | 598 | if (error) |
599 | goto trans_cancel; | 599 | goto trans_cancel; |
600 | 600 | ||
601 | error = xfs_bmap_finish(&tp, &free_list, &committed); | 601 | error = xfs_bmap_finish(&tp, &free_list, &committed); |
602 | if (error) | 602 | if (error) |
603 | goto trans_cancel; | 603 | goto trans_cancel; |
604 | 604 | ||
605 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 605 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
606 | if (error) | 606 | if (error) |
607 | goto error0; | 607 | goto error0; |
608 | 608 | ||
609 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 609 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
610 | } | 610 | } |
611 | 611 | ||
612 | /* | 612 | /* |
613 | * See if we were able to allocate an extent that | 613 | * See if we were able to allocate an extent that |
614 | * covers at least part of the callers request | 614 | * covers at least part of the callers request |
615 | */ | 615 | */ |
616 | if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) | 616 | if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) |
617 | return xfs_cmn_err_fsblock_zero(ip, imap); | 617 | return xfs_alert_fsblock_zero(ip, imap); |
618 | 618 | ||
619 | if ((offset_fsb >= imap->br_startoff) && | 619 | if ((offset_fsb >= imap->br_startoff) && |
620 | (offset_fsb < (imap->br_startoff + | 620 | (offset_fsb < (imap->br_startoff + |
621 | imap->br_blockcount))) { | 621 | imap->br_blockcount))) { |
622 | XFS_STATS_INC(xs_xstrat_quick); | 622 | XFS_STATS_INC(xs_xstrat_quick); |
623 | return 0; | 623 | return 0; |
624 | } | 624 | } |
625 | 625 | ||
626 | /* | 626 | /* |
627 | * So far we have not mapped the requested part of the | 627 | * So far we have not mapped the requested part of the |
628 | * file, just surrounding data, try again. | 628 | * file, just surrounding data, try again. |
629 | */ | 629 | */ |
630 | count_fsb -= imap->br_blockcount; | 630 | count_fsb -= imap->br_blockcount; |
631 | map_start_fsb = imap->br_startoff + imap->br_blockcount; | 631 | map_start_fsb = imap->br_startoff + imap->br_blockcount; |
632 | } | 632 | } |
633 | 633 | ||
634 | trans_cancel: | 634 | trans_cancel: |
635 | xfs_bmap_cancel(&free_list); | 635 | xfs_bmap_cancel(&free_list); |
636 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 636 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); |
637 | error0: | 637 | error0: |
638 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 638 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
639 | return XFS_ERROR(error); | 639 | return XFS_ERROR(error); |
640 | } | 640 | } |
641 | 641 | ||
642 | int | 642 | int |
643 | xfs_iomap_write_unwritten( | 643 | xfs_iomap_write_unwritten( |
644 | xfs_inode_t *ip, | 644 | xfs_inode_t *ip, |
645 | xfs_off_t offset, | 645 | xfs_off_t offset, |
646 | size_t count) | 646 | size_t count) |
647 | { | 647 | { |
648 | xfs_mount_t *mp = ip->i_mount; | 648 | xfs_mount_t *mp = ip->i_mount; |
649 | xfs_fileoff_t offset_fsb; | 649 | xfs_fileoff_t offset_fsb; |
650 | xfs_filblks_t count_fsb; | 650 | xfs_filblks_t count_fsb; |
651 | xfs_filblks_t numblks_fsb; | 651 | xfs_filblks_t numblks_fsb; |
652 | xfs_fsblock_t firstfsb; | 652 | xfs_fsblock_t firstfsb; |
653 | int nimaps; | 653 | int nimaps; |
654 | xfs_trans_t *tp; | 654 | xfs_trans_t *tp; |
655 | xfs_bmbt_irec_t imap; | 655 | xfs_bmbt_irec_t imap; |
656 | xfs_bmap_free_t free_list; | 656 | xfs_bmap_free_t free_list; |
657 | uint resblks; | 657 | uint resblks; |
658 | int committed; | 658 | int committed; |
659 | int error; | 659 | int error; |
660 | 660 | ||
661 | trace_xfs_unwritten_convert(ip, offset, count); | 661 | trace_xfs_unwritten_convert(ip, offset, count); |
662 | 662 | ||
663 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 663 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
664 | count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); | 664 | count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); |
665 | count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); | 665 | count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); |
666 | 666 | ||
667 | /* | 667 | /* |
668 | * Reserve enough blocks in this transaction for two complete extent | 668 | * Reserve enough blocks in this transaction for two complete extent |
669 | * btree splits. We may be converting the middle part of an unwritten | 669 | * btree splits. We may be converting the middle part of an unwritten |
670 | * extent and in this case we will insert two new extents in the btree | 670 | * extent and in this case we will insert two new extents in the btree |
671 | * each of which could cause a full split. | 671 | * each of which could cause a full split. |
672 | * | 672 | * |
673 | * This reservation amount will be used in the first call to | 673 | * This reservation amount will be used in the first call to |
674 | * xfs_bmbt_split() to select an AG with enough space to satisfy the | 674 | * xfs_bmbt_split() to select an AG with enough space to satisfy the |
675 | * rest of the operation. | 675 | * rest of the operation. |
676 | */ | 676 | */ |
677 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; | 677 | resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; |
678 | 678 | ||
679 | do { | 679 | do { |
680 | /* | 680 | /* |
681 | * set up a transaction to convert the range of extents | 681 | * set up a transaction to convert the range of extents |
682 | * from unwritten to real. Do allocations in a loop until | 682 | * from unwritten to real. Do allocations in a loop until |
683 | * we have covered the range passed in. | 683 | * we have covered the range passed in. |
684 | * | 684 | * |
685 | * Note that we open code the transaction allocation here | 685 | * Note that we open code the transaction allocation here |
686 | * to pass KM_NOFS--we can't risk to recursing back into | 686 | * to pass KM_NOFS--we can't risk to recursing back into |
687 | * the filesystem here as we might be asked to write out | 687 | * the filesystem here as we might be asked to write out |
688 | * the same inode that we complete here and might deadlock | 688 | * the same inode that we complete here and might deadlock |
689 | * on the iolock. | 689 | * on the iolock. |
690 | */ | 690 | */ |
691 | xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); | 691 | xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); |
692 | tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); | 692 | tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); |
693 | tp->t_flags |= XFS_TRANS_RESERVE; | 693 | tp->t_flags |= XFS_TRANS_RESERVE; |
694 | error = xfs_trans_reserve(tp, resblks, | 694 | error = xfs_trans_reserve(tp, resblks, |
695 | XFS_WRITE_LOG_RES(mp), 0, | 695 | XFS_WRITE_LOG_RES(mp), 0, |
696 | XFS_TRANS_PERM_LOG_RES, | 696 | XFS_TRANS_PERM_LOG_RES, |
697 | XFS_WRITE_LOG_COUNT); | 697 | XFS_WRITE_LOG_COUNT); |
698 | if (error) { | 698 | if (error) { |
699 | xfs_trans_cancel(tp, 0); | 699 | xfs_trans_cancel(tp, 0); |
700 | return XFS_ERROR(error); | 700 | return XFS_ERROR(error); |
701 | } | 701 | } |
702 | 702 | ||
703 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 703 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
704 | xfs_trans_ijoin(tp, ip); | 704 | xfs_trans_ijoin(tp, ip); |
705 | 705 | ||
706 | /* | 706 | /* |
707 | * Modify the unwritten extent state of the buffer. | 707 | * Modify the unwritten extent state of the buffer. |
708 | */ | 708 | */ |
709 | xfs_bmap_init(&free_list, &firstfsb); | 709 | xfs_bmap_init(&free_list, &firstfsb); |
710 | nimaps = 1; | 710 | nimaps = 1; |
711 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, | 711 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, |
712 | XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, | 712 | XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, |
713 | 1, &imap, &nimaps, &free_list); | 713 | 1, &imap, &nimaps, &free_list); |
714 | if (error) | 714 | if (error) |
715 | goto error_on_bmapi_transaction; | 715 | goto error_on_bmapi_transaction; |
716 | 716 | ||
717 | error = xfs_bmap_finish(&(tp), &(free_list), &committed); | 717 | error = xfs_bmap_finish(&(tp), &(free_list), &committed); |
718 | if (error) | 718 | if (error) |
719 | goto error_on_bmapi_transaction; | 719 | goto error_on_bmapi_transaction; |
720 | 720 | ||
721 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 721 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
722 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 722 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
723 | if (error) | 723 | if (error) |
724 | return XFS_ERROR(error); | 724 | return XFS_ERROR(error); |
725 | 725 | ||
726 | if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) | 726 | if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) |
727 | return xfs_cmn_err_fsblock_zero(ip, &imap); | 727 | return xfs_alert_fsblock_zero(ip, &imap); |
728 | 728 | ||
729 | if ((numblks_fsb = imap.br_blockcount) == 0) { | 729 | if ((numblks_fsb = imap.br_blockcount) == 0) { |
730 | /* | 730 | /* |
731 | * The numblks_fsb value should always get | 731 | * The numblks_fsb value should always get |
732 | * smaller, otherwise the loop is stuck. | 732 | * smaller, otherwise the loop is stuck. |
733 | */ | 733 | */ |
734 | ASSERT(imap.br_blockcount); | 734 | ASSERT(imap.br_blockcount); |
735 | break; | 735 | break; |
736 | } | 736 | } |
737 | offset_fsb += numblks_fsb; | 737 | offset_fsb += numblks_fsb; |
738 | count_fsb -= numblks_fsb; | 738 | count_fsb -= numblks_fsb; |
739 | } while (count_fsb > 0); | 739 | } while (count_fsb > 0); |
740 | 740 | ||
741 | return 0; | 741 | return 0; |
742 | 742 | ||
743 | error_on_bmapi_transaction: | 743 | error_on_bmapi_transaction: |
744 | xfs_bmap_cancel(&free_list); | 744 | xfs_bmap_cancel(&free_list); |
745 | xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); | 745 | xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); |
746 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 746 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
747 | return XFS_ERROR(error); | 747 | return XFS_ERROR(error); |
748 | } | 748 | } |
749 | 749 |