Commit 2455881c0b52f87be539c4c7deab1afff4d8a560

Authored by Dave Chinner
Committed by Ben Myers
1 parent a00416844b

xfs: introduce XFS_BMAPI_STACK_SWITCH

Certain allocation paths through xfs_bmapi_write() are in situations
where we have limited stack available. These are almost always in
the buffered IO writeback path when converting delayed allocation
extents to real extents.

The current stack switch occurs for userdata allocations, which
means we also do stack switches for preallocation, direct IO and
unwritten extent conversion, even though these call chains have never
been implicated in a stack overrun.

Hence, let's target just the single stack overrun offender for stack
switches. To do that, introduce a XFS_BMAPI_STACK_SWITCH flag that
the caller can pass xfs_bmapi_write() to indicate it should switch
stacks if it needs to do allocation.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>

Showing 5 changed files with 13 additions and 3 deletions Inline Diff

1 /* 1 /*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it would be useful, 9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18 #include "xfs.h" 18 #include "xfs.h"
19 #include "xfs_fs.h" 19 #include "xfs_fs.h"
20 #include "xfs_types.h" 20 #include "xfs_types.h"
21 #include "xfs_bit.h" 21 #include "xfs_bit.h"
22 #include "xfs_log.h" 22 #include "xfs_log.h"
23 #include "xfs_trans.h" 23 #include "xfs_trans.h"
24 #include "xfs_sb.h" 24 #include "xfs_sb.h"
25 #include "xfs_ag.h" 25 #include "xfs_ag.h"
26 #include "xfs_mount.h" 26 #include "xfs_mount.h"
27 #include "xfs_bmap_btree.h" 27 #include "xfs_bmap_btree.h"
28 #include "xfs_alloc_btree.h" 28 #include "xfs_alloc_btree.h"
29 #include "xfs_ialloc_btree.h" 29 #include "xfs_ialloc_btree.h"
30 #include "xfs_dinode.h" 30 #include "xfs_dinode.h"
31 #include "xfs_inode.h" 31 #include "xfs_inode.h"
32 #include "xfs_btree.h" 32 #include "xfs_btree.h"
33 #include "xfs_alloc.h" 33 #include "xfs_alloc.h"
34 #include "xfs_extent_busy.h" 34 #include "xfs_extent_busy.h"
35 #include "xfs_error.h" 35 #include "xfs_error.h"
36 #include "xfs_trace.h" 36 #include "xfs_trace.h"
37 37
38 struct workqueue_struct *xfs_alloc_wq; 38 struct workqueue_struct *xfs_alloc_wq;
39 39
40 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b))) 40 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
41 41
42 #define XFSA_FIXUP_BNO_OK 1 42 #define XFSA_FIXUP_BNO_OK 1
43 #define XFSA_FIXUP_CNT_OK 2 43 #define XFSA_FIXUP_CNT_OK 2
44 44
45 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *); 45 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
46 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *); 46 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
47 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *); 47 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
48 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *, 48 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
49 xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *); 49 xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
50 50
51 /* 51 /*
52 * Lookup the record equal to [bno, len] in the btree given by cur. 52 * Lookup the record equal to [bno, len] in the btree given by cur.
53 */ 53 */
54 STATIC int /* error */ 54 STATIC int /* error */
55 xfs_alloc_lookup_eq( 55 xfs_alloc_lookup_eq(
56 struct xfs_btree_cur *cur, /* btree cursor */ 56 struct xfs_btree_cur *cur, /* btree cursor */
57 xfs_agblock_t bno, /* starting block of extent */ 57 xfs_agblock_t bno, /* starting block of extent */
58 xfs_extlen_t len, /* length of extent */ 58 xfs_extlen_t len, /* length of extent */
59 int *stat) /* success/failure */ 59 int *stat) /* success/failure */
60 { 60 {
61 cur->bc_rec.a.ar_startblock = bno; 61 cur->bc_rec.a.ar_startblock = bno;
62 cur->bc_rec.a.ar_blockcount = len; 62 cur->bc_rec.a.ar_blockcount = len;
63 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); 63 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
64 } 64 }
65 65
66 /* 66 /*
67 * Lookup the first record greater than or equal to [bno, len] 67 * Lookup the first record greater than or equal to [bno, len]
68 * in the btree given by cur. 68 * in the btree given by cur.
69 */ 69 */
70 int /* error */ 70 int /* error */
71 xfs_alloc_lookup_ge( 71 xfs_alloc_lookup_ge(
72 struct xfs_btree_cur *cur, /* btree cursor */ 72 struct xfs_btree_cur *cur, /* btree cursor */
73 xfs_agblock_t bno, /* starting block of extent */ 73 xfs_agblock_t bno, /* starting block of extent */
74 xfs_extlen_t len, /* length of extent */ 74 xfs_extlen_t len, /* length of extent */
75 int *stat) /* success/failure */ 75 int *stat) /* success/failure */
76 { 76 {
77 cur->bc_rec.a.ar_startblock = bno; 77 cur->bc_rec.a.ar_startblock = bno;
78 cur->bc_rec.a.ar_blockcount = len; 78 cur->bc_rec.a.ar_blockcount = len;
79 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); 79 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
80 } 80 }
81 81
82 /* 82 /*
83 * Lookup the first record less than or equal to [bno, len] 83 * Lookup the first record less than or equal to [bno, len]
84 * in the btree given by cur. 84 * in the btree given by cur.
85 */ 85 */
86 int /* error */ 86 int /* error */
87 xfs_alloc_lookup_le( 87 xfs_alloc_lookup_le(
88 struct xfs_btree_cur *cur, /* btree cursor */ 88 struct xfs_btree_cur *cur, /* btree cursor */
89 xfs_agblock_t bno, /* starting block of extent */ 89 xfs_agblock_t bno, /* starting block of extent */
90 xfs_extlen_t len, /* length of extent */ 90 xfs_extlen_t len, /* length of extent */
91 int *stat) /* success/failure */ 91 int *stat) /* success/failure */
92 { 92 {
93 cur->bc_rec.a.ar_startblock = bno; 93 cur->bc_rec.a.ar_startblock = bno;
94 cur->bc_rec.a.ar_blockcount = len; 94 cur->bc_rec.a.ar_blockcount = len;
95 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); 95 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
96 } 96 }
97 97
98 /* 98 /*
99 * Update the record referred to by cur to the value given 99 * Update the record referred to by cur to the value given
100 * by [bno, len]. 100 * by [bno, len].
101 * This either works (return 0) or gets an EFSCORRUPTED error. 101 * This either works (return 0) or gets an EFSCORRUPTED error.
102 */ 102 */
103 STATIC int /* error */ 103 STATIC int /* error */
104 xfs_alloc_update( 104 xfs_alloc_update(
105 struct xfs_btree_cur *cur, /* btree cursor */ 105 struct xfs_btree_cur *cur, /* btree cursor */
106 xfs_agblock_t bno, /* starting block of extent */ 106 xfs_agblock_t bno, /* starting block of extent */
107 xfs_extlen_t len) /* length of extent */ 107 xfs_extlen_t len) /* length of extent */
108 { 108 {
109 union xfs_btree_rec rec; 109 union xfs_btree_rec rec;
110 110
111 rec.alloc.ar_startblock = cpu_to_be32(bno); 111 rec.alloc.ar_startblock = cpu_to_be32(bno);
112 rec.alloc.ar_blockcount = cpu_to_be32(len); 112 rec.alloc.ar_blockcount = cpu_to_be32(len);
113 return xfs_btree_update(cur, &rec); 113 return xfs_btree_update(cur, &rec);
114 } 114 }
115 115
116 /* 116 /*
117 * Get the data from the pointed-to record. 117 * Get the data from the pointed-to record.
118 */ 118 */
119 int /* error */ 119 int /* error */
120 xfs_alloc_get_rec( 120 xfs_alloc_get_rec(
121 struct xfs_btree_cur *cur, /* btree cursor */ 121 struct xfs_btree_cur *cur, /* btree cursor */
122 xfs_agblock_t *bno, /* output: starting block of extent */ 122 xfs_agblock_t *bno, /* output: starting block of extent */
123 xfs_extlen_t *len, /* output: length of extent */ 123 xfs_extlen_t *len, /* output: length of extent */
124 int *stat) /* output: success/failure */ 124 int *stat) /* output: success/failure */
125 { 125 {
126 union xfs_btree_rec *rec; 126 union xfs_btree_rec *rec;
127 int error; 127 int error;
128 128
129 error = xfs_btree_get_rec(cur, &rec, stat); 129 error = xfs_btree_get_rec(cur, &rec, stat);
130 if (!error && *stat == 1) { 130 if (!error && *stat == 1) {
131 *bno = be32_to_cpu(rec->alloc.ar_startblock); 131 *bno = be32_to_cpu(rec->alloc.ar_startblock);
132 *len = be32_to_cpu(rec->alloc.ar_blockcount); 132 *len = be32_to_cpu(rec->alloc.ar_blockcount);
133 } 133 }
134 return error; 134 return error;
135 } 135 }
136 136
137 /* 137 /*
138 * Compute aligned version of the found extent. 138 * Compute aligned version of the found extent.
139 * Takes alignment and min length into account. 139 * Takes alignment and min length into account.
140 */ 140 */
141 STATIC void 141 STATIC void
142 xfs_alloc_compute_aligned( 142 xfs_alloc_compute_aligned(
143 xfs_alloc_arg_t *args, /* allocation argument structure */ 143 xfs_alloc_arg_t *args, /* allocation argument structure */
144 xfs_agblock_t foundbno, /* starting block in found extent */ 144 xfs_agblock_t foundbno, /* starting block in found extent */
145 xfs_extlen_t foundlen, /* length in found extent */ 145 xfs_extlen_t foundlen, /* length in found extent */
146 xfs_agblock_t *resbno, /* result block number */ 146 xfs_agblock_t *resbno, /* result block number */
147 xfs_extlen_t *reslen) /* result length */ 147 xfs_extlen_t *reslen) /* result length */
148 { 148 {
149 xfs_agblock_t bno; 149 xfs_agblock_t bno;
150 xfs_extlen_t len; 150 xfs_extlen_t len;
151 151
152 /* Trim busy sections out of found extent */ 152 /* Trim busy sections out of found extent */
153 xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len); 153 xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);
154 154
155 if (args->alignment > 1 && len >= args->minlen) { 155 if (args->alignment > 1 && len >= args->minlen) {
156 xfs_agblock_t aligned_bno = roundup(bno, args->alignment); 156 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
157 xfs_extlen_t diff = aligned_bno - bno; 157 xfs_extlen_t diff = aligned_bno - bno;
158 158
159 *resbno = aligned_bno; 159 *resbno = aligned_bno;
160 *reslen = diff >= len ? 0 : len - diff; 160 *reslen = diff >= len ? 0 : len - diff;
161 } else { 161 } else {
162 *resbno = bno; 162 *resbno = bno;
163 *reslen = len; 163 *reslen = len;
164 } 164 }
165 } 165 }
166 166
167 /* 167 /*
168 * Compute best start block and diff for "near" allocations. 168 * Compute best start block and diff for "near" allocations.
169 * freelen >= wantlen already checked by caller. 169 * freelen >= wantlen already checked by caller.
170 */ 170 */
171 STATIC xfs_extlen_t /* difference value (absolute) */ 171 STATIC xfs_extlen_t /* difference value (absolute) */
172 xfs_alloc_compute_diff( 172 xfs_alloc_compute_diff(
173 xfs_agblock_t wantbno, /* target starting block */ 173 xfs_agblock_t wantbno, /* target starting block */
174 xfs_extlen_t wantlen, /* target length */ 174 xfs_extlen_t wantlen, /* target length */
175 xfs_extlen_t alignment, /* target alignment */ 175 xfs_extlen_t alignment, /* target alignment */
176 xfs_agblock_t freebno, /* freespace's starting block */ 176 xfs_agblock_t freebno, /* freespace's starting block */
177 xfs_extlen_t freelen, /* freespace's length */ 177 xfs_extlen_t freelen, /* freespace's length */
178 xfs_agblock_t *newbnop) /* result: best start block from free */ 178 xfs_agblock_t *newbnop) /* result: best start block from free */
179 { 179 {
180 xfs_agblock_t freeend; /* end of freespace extent */ 180 xfs_agblock_t freeend; /* end of freespace extent */
181 xfs_agblock_t newbno1; /* return block number */ 181 xfs_agblock_t newbno1; /* return block number */
182 xfs_agblock_t newbno2; /* other new block number */ 182 xfs_agblock_t newbno2; /* other new block number */
183 xfs_extlen_t newlen1=0; /* length with newbno1 */ 183 xfs_extlen_t newlen1=0; /* length with newbno1 */
184 xfs_extlen_t newlen2=0; /* length with newbno2 */ 184 xfs_extlen_t newlen2=0; /* length with newbno2 */
185 xfs_agblock_t wantend; /* end of target extent */ 185 xfs_agblock_t wantend; /* end of target extent */
186 186
187 ASSERT(freelen >= wantlen); 187 ASSERT(freelen >= wantlen);
188 freeend = freebno + freelen; 188 freeend = freebno + freelen;
189 wantend = wantbno + wantlen; 189 wantend = wantbno + wantlen;
190 if (freebno >= wantbno) { 190 if (freebno >= wantbno) {
191 if ((newbno1 = roundup(freebno, alignment)) >= freeend) 191 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
192 newbno1 = NULLAGBLOCK; 192 newbno1 = NULLAGBLOCK;
193 } else if (freeend >= wantend && alignment > 1) { 193 } else if (freeend >= wantend && alignment > 1) {
194 newbno1 = roundup(wantbno, alignment); 194 newbno1 = roundup(wantbno, alignment);
195 newbno2 = newbno1 - alignment; 195 newbno2 = newbno1 - alignment;
196 if (newbno1 >= freeend) 196 if (newbno1 >= freeend)
197 newbno1 = NULLAGBLOCK; 197 newbno1 = NULLAGBLOCK;
198 else 198 else
199 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1); 199 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
200 if (newbno2 < freebno) 200 if (newbno2 < freebno)
201 newbno2 = NULLAGBLOCK; 201 newbno2 = NULLAGBLOCK;
202 else 202 else
203 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2); 203 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
204 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) { 204 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
205 if (newlen1 < newlen2 || 205 if (newlen1 < newlen2 ||
206 (newlen1 == newlen2 && 206 (newlen1 == newlen2 &&
207 XFS_ABSDIFF(newbno1, wantbno) > 207 XFS_ABSDIFF(newbno1, wantbno) >
208 XFS_ABSDIFF(newbno2, wantbno))) 208 XFS_ABSDIFF(newbno2, wantbno)))
209 newbno1 = newbno2; 209 newbno1 = newbno2;
210 } else if (newbno2 != NULLAGBLOCK) 210 } else if (newbno2 != NULLAGBLOCK)
211 newbno1 = newbno2; 211 newbno1 = newbno2;
212 } else if (freeend >= wantend) { 212 } else if (freeend >= wantend) {
213 newbno1 = wantbno; 213 newbno1 = wantbno;
214 } else if (alignment > 1) { 214 } else if (alignment > 1) {
215 newbno1 = roundup(freeend - wantlen, alignment); 215 newbno1 = roundup(freeend - wantlen, alignment);
216 if (newbno1 > freeend - wantlen && 216 if (newbno1 > freeend - wantlen &&
217 newbno1 - alignment >= freebno) 217 newbno1 - alignment >= freebno)
218 newbno1 -= alignment; 218 newbno1 -= alignment;
219 else if (newbno1 >= freeend) 219 else if (newbno1 >= freeend)
220 newbno1 = NULLAGBLOCK; 220 newbno1 = NULLAGBLOCK;
221 } else 221 } else
222 newbno1 = freeend - wantlen; 222 newbno1 = freeend - wantlen;
223 *newbnop = newbno1; 223 *newbnop = newbno1;
224 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno); 224 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
225 } 225 }
226 226
227 /* 227 /*
228 * Fix up the length, based on mod and prod. 228 * Fix up the length, based on mod and prod.
229 * len should be k * prod + mod for some k. 229 * len should be k * prod + mod for some k.
230 * If len is too small it is returned unchanged. 230 * If len is too small it is returned unchanged.
231 * If len hits maxlen it is left alone. 231 * If len hits maxlen it is left alone.
232 */ 232 */
233 STATIC void 233 STATIC void
234 xfs_alloc_fix_len( 234 xfs_alloc_fix_len(
235 xfs_alloc_arg_t *args) /* allocation argument structure */ 235 xfs_alloc_arg_t *args) /* allocation argument structure */
236 { 236 {
237 xfs_extlen_t k; 237 xfs_extlen_t k;
238 xfs_extlen_t rlen; 238 xfs_extlen_t rlen;
239 239
240 ASSERT(args->mod < args->prod); 240 ASSERT(args->mod < args->prod);
241 rlen = args->len; 241 rlen = args->len;
242 ASSERT(rlen >= args->minlen); 242 ASSERT(rlen >= args->minlen);
243 ASSERT(rlen <= args->maxlen); 243 ASSERT(rlen <= args->maxlen);
244 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen || 244 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
245 (args->mod == 0 && rlen < args->prod)) 245 (args->mod == 0 && rlen < args->prod))
246 return; 246 return;
247 k = rlen % args->prod; 247 k = rlen % args->prod;
248 if (k == args->mod) 248 if (k == args->mod)
249 return; 249 return;
250 if (k > args->mod) { 250 if (k > args->mod) {
251 if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen) 251 if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
252 return; 252 return;
253 } else { 253 } else {
254 if ((int)(rlen = rlen - args->prod - (args->mod - k)) < 254 if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
255 (int)args->minlen) 255 (int)args->minlen)
256 return; 256 return;
257 } 257 }
258 ASSERT(rlen >= args->minlen); 258 ASSERT(rlen >= args->minlen);
259 ASSERT(rlen <= args->maxlen); 259 ASSERT(rlen <= args->maxlen);
260 args->len = rlen; 260 args->len = rlen;
261 } 261 }
262 262
263 /* 263 /*
264 * Fix up length if there is too little space left in the a.g. 264 * Fix up length if there is too little space left in the a.g.
265 * Return 1 if ok, 0 if too little, should give up. 265 * Return 1 if ok, 0 if too little, should give up.
266 */ 266 */
267 STATIC int 267 STATIC int
268 xfs_alloc_fix_minleft( 268 xfs_alloc_fix_minleft(
269 xfs_alloc_arg_t *args) /* allocation argument structure */ 269 xfs_alloc_arg_t *args) /* allocation argument structure */
270 { 270 {
271 xfs_agf_t *agf; /* a.g. freelist header */ 271 xfs_agf_t *agf; /* a.g. freelist header */
272 int diff; /* free space difference */ 272 int diff; /* free space difference */
273 273
274 if (args->minleft == 0) 274 if (args->minleft == 0)
275 return 1; 275 return 1;
276 agf = XFS_BUF_TO_AGF(args->agbp); 276 agf = XFS_BUF_TO_AGF(args->agbp);
277 diff = be32_to_cpu(agf->agf_freeblks) 277 diff = be32_to_cpu(agf->agf_freeblks)
278 - args->len - args->minleft; 278 - args->len - args->minleft;
279 if (diff >= 0) 279 if (diff >= 0)
280 return 1; 280 return 1;
281 args->len += diff; /* shrink the allocated space */ 281 args->len += diff; /* shrink the allocated space */
282 if (args->len >= args->minlen) 282 if (args->len >= args->minlen)
283 return 1; 283 return 1;
284 args->agbno = NULLAGBLOCK; 284 args->agbno = NULLAGBLOCK;
285 return 0; 285 return 0;
286 } 286 }
287 287
288 /* 288 /*
289 * Update the two btrees, logically removing from freespace the extent 289 * Update the two btrees, logically removing from freespace the extent
290 * starting at rbno, rlen blocks. The extent is contained within the 290 * starting at rbno, rlen blocks. The extent is contained within the
291 * actual (current) free extent fbno for flen blocks. 291 * actual (current) free extent fbno for flen blocks.
292 * Flags are passed in indicating whether the cursors are set to the 292 * Flags are passed in indicating whether the cursors are set to the
293 * relevant records. 293 * relevant records.
294 */ 294 */
295 STATIC int /* error code */ 295 STATIC int /* error code */
296 xfs_alloc_fixup_trees( 296 xfs_alloc_fixup_trees(
297 xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */ 297 xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */
298 xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */ 298 xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */
299 xfs_agblock_t fbno, /* starting block of free extent */ 299 xfs_agblock_t fbno, /* starting block of free extent */
300 xfs_extlen_t flen, /* length of free extent */ 300 xfs_extlen_t flen, /* length of free extent */
301 xfs_agblock_t rbno, /* starting block of returned extent */ 301 xfs_agblock_t rbno, /* starting block of returned extent */
302 xfs_extlen_t rlen, /* length of returned extent */ 302 xfs_extlen_t rlen, /* length of returned extent */
303 int flags) /* flags, XFSA_FIXUP_... */ 303 int flags) /* flags, XFSA_FIXUP_... */
304 { 304 {
305 int error; /* error code */ 305 int error; /* error code */
306 int i; /* operation results */ 306 int i; /* operation results */
307 xfs_agblock_t nfbno1; /* first new free startblock */ 307 xfs_agblock_t nfbno1; /* first new free startblock */
308 xfs_agblock_t nfbno2; /* second new free startblock */ 308 xfs_agblock_t nfbno2; /* second new free startblock */
309 xfs_extlen_t nflen1=0; /* first new free length */ 309 xfs_extlen_t nflen1=0; /* first new free length */
310 xfs_extlen_t nflen2=0; /* second new free length */ 310 xfs_extlen_t nflen2=0; /* second new free length */
311 311
312 /* 312 /*
313 * Look up the record in the by-size tree if necessary. 313 * Look up the record in the by-size tree if necessary.
314 */ 314 */
315 if (flags & XFSA_FIXUP_CNT_OK) { 315 if (flags & XFSA_FIXUP_CNT_OK) {
316 #ifdef DEBUG 316 #ifdef DEBUG
317 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i))) 317 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
318 return error; 318 return error;
319 XFS_WANT_CORRUPTED_RETURN( 319 XFS_WANT_CORRUPTED_RETURN(
320 i == 1 && nfbno1 == fbno && nflen1 == flen); 320 i == 1 && nfbno1 == fbno && nflen1 == flen);
321 #endif 321 #endif
322 } else { 322 } else {
323 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i))) 323 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
324 return error; 324 return error;
325 XFS_WANT_CORRUPTED_RETURN(i == 1); 325 XFS_WANT_CORRUPTED_RETURN(i == 1);
326 } 326 }
327 /* 327 /*
328 * Look up the record in the by-block tree if necessary. 328 * Look up the record in the by-block tree if necessary.
329 */ 329 */
330 if (flags & XFSA_FIXUP_BNO_OK) { 330 if (flags & XFSA_FIXUP_BNO_OK) {
331 #ifdef DEBUG 331 #ifdef DEBUG
332 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i))) 332 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
333 return error; 333 return error;
334 XFS_WANT_CORRUPTED_RETURN( 334 XFS_WANT_CORRUPTED_RETURN(
335 i == 1 && nfbno1 == fbno && nflen1 == flen); 335 i == 1 && nfbno1 == fbno && nflen1 == flen);
336 #endif 336 #endif
337 } else { 337 } else {
338 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i))) 338 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
339 return error; 339 return error;
340 XFS_WANT_CORRUPTED_RETURN(i == 1); 340 XFS_WANT_CORRUPTED_RETURN(i == 1);
341 } 341 }
342 342
343 #ifdef DEBUG 343 #ifdef DEBUG
344 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) { 344 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
345 struct xfs_btree_block *bnoblock; 345 struct xfs_btree_block *bnoblock;
346 struct xfs_btree_block *cntblock; 346 struct xfs_btree_block *cntblock;
347 347
348 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]); 348 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
349 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]); 349 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
350 350
351 XFS_WANT_CORRUPTED_RETURN( 351 XFS_WANT_CORRUPTED_RETURN(
352 bnoblock->bb_numrecs == cntblock->bb_numrecs); 352 bnoblock->bb_numrecs == cntblock->bb_numrecs);
353 } 353 }
354 #endif 354 #endif
355 355
356 /* 356 /*
357 * Deal with all four cases: the allocated record is contained 357 * Deal with all four cases: the allocated record is contained
358 * within the freespace record, so we can have new freespace 358 * within the freespace record, so we can have new freespace
359 * at either (or both) end, or no freespace remaining. 359 * at either (or both) end, or no freespace remaining.
360 */ 360 */
361 if (rbno == fbno && rlen == flen) 361 if (rbno == fbno && rlen == flen)
362 nfbno1 = nfbno2 = NULLAGBLOCK; 362 nfbno1 = nfbno2 = NULLAGBLOCK;
363 else if (rbno == fbno) { 363 else if (rbno == fbno) {
364 nfbno1 = rbno + rlen; 364 nfbno1 = rbno + rlen;
365 nflen1 = flen - rlen; 365 nflen1 = flen - rlen;
366 nfbno2 = NULLAGBLOCK; 366 nfbno2 = NULLAGBLOCK;
367 } else if (rbno + rlen == fbno + flen) { 367 } else if (rbno + rlen == fbno + flen) {
368 nfbno1 = fbno; 368 nfbno1 = fbno;
369 nflen1 = flen - rlen; 369 nflen1 = flen - rlen;
370 nfbno2 = NULLAGBLOCK; 370 nfbno2 = NULLAGBLOCK;
371 } else { 371 } else {
372 nfbno1 = fbno; 372 nfbno1 = fbno;
373 nflen1 = rbno - fbno; 373 nflen1 = rbno - fbno;
374 nfbno2 = rbno + rlen; 374 nfbno2 = rbno + rlen;
375 nflen2 = (fbno + flen) - nfbno2; 375 nflen2 = (fbno + flen) - nfbno2;
376 } 376 }
377 /* 377 /*
378 * Delete the entry from the by-size btree. 378 * Delete the entry from the by-size btree.
379 */ 379 */
380 if ((error = xfs_btree_delete(cnt_cur, &i))) 380 if ((error = xfs_btree_delete(cnt_cur, &i)))
381 return error; 381 return error;
382 XFS_WANT_CORRUPTED_RETURN(i == 1); 382 XFS_WANT_CORRUPTED_RETURN(i == 1);
383 /* 383 /*
384 * Add new by-size btree entry(s). 384 * Add new by-size btree entry(s).
385 */ 385 */
386 if (nfbno1 != NULLAGBLOCK) { 386 if (nfbno1 != NULLAGBLOCK) {
387 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i))) 387 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
388 return error; 388 return error;
389 XFS_WANT_CORRUPTED_RETURN(i == 0); 389 XFS_WANT_CORRUPTED_RETURN(i == 0);
390 if ((error = xfs_btree_insert(cnt_cur, &i))) 390 if ((error = xfs_btree_insert(cnt_cur, &i)))
391 return error; 391 return error;
392 XFS_WANT_CORRUPTED_RETURN(i == 1); 392 XFS_WANT_CORRUPTED_RETURN(i == 1);
393 } 393 }
394 if (nfbno2 != NULLAGBLOCK) { 394 if (nfbno2 != NULLAGBLOCK) {
395 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i))) 395 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
396 return error; 396 return error;
397 XFS_WANT_CORRUPTED_RETURN(i == 0); 397 XFS_WANT_CORRUPTED_RETURN(i == 0);
398 if ((error = xfs_btree_insert(cnt_cur, &i))) 398 if ((error = xfs_btree_insert(cnt_cur, &i)))
399 return error; 399 return error;
400 XFS_WANT_CORRUPTED_RETURN(i == 1); 400 XFS_WANT_CORRUPTED_RETURN(i == 1);
401 } 401 }
402 /* 402 /*
403 * Fix up the by-block btree entry(s). 403 * Fix up the by-block btree entry(s).
404 */ 404 */
405 if (nfbno1 == NULLAGBLOCK) { 405 if (nfbno1 == NULLAGBLOCK) {
406 /* 406 /*
407 * No remaining freespace, just delete the by-block tree entry. 407 * No remaining freespace, just delete the by-block tree entry.
408 */ 408 */
409 if ((error = xfs_btree_delete(bno_cur, &i))) 409 if ((error = xfs_btree_delete(bno_cur, &i)))
410 return error; 410 return error;
411 XFS_WANT_CORRUPTED_RETURN(i == 1); 411 XFS_WANT_CORRUPTED_RETURN(i == 1);
412 } else { 412 } else {
413 /* 413 /*
414 * Update the by-block entry to start later|be shorter. 414 * Update the by-block entry to start later|be shorter.
415 */ 415 */
416 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1))) 416 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
417 return error; 417 return error;
418 } 418 }
419 if (nfbno2 != NULLAGBLOCK) { 419 if (nfbno2 != NULLAGBLOCK) {
420 /* 420 /*
421 * 2 resulting free entries, need to add one. 421 * 2 resulting free entries, need to add one.
422 */ 422 */
423 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i))) 423 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
424 return error; 424 return error;
425 XFS_WANT_CORRUPTED_RETURN(i == 0); 425 XFS_WANT_CORRUPTED_RETURN(i == 0);
426 if ((error = xfs_btree_insert(bno_cur, &i))) 426 if ((error = xfs_btree_insert(bno_cur, &i)))
427 return error; 427 return error;
428 XFS_WANT_CORRUPTED_RETURN(i == 1); 428 XFS_WANT_CORRUPTED_RETURN(i == 1);
429 } 429 }
430 return 0; 430 return 0;
431 } 431 }
432 432
433 /* 433 /*
434 * Read in the allocation group free block array. 434 * Read in the allocation group free block array.
435 */ 435 */
436 STATIC int /* error */ 436 STATIC int /* error */
437 xfs_alloc_read_agfl( 437 xfs_alloc_read_agfl(
438 xfs_mount_t *mp, /* mount point structure */ 438 xfs_mount_t *mp, /* mount point structure */
439 xfs_trans_t *tp, /* transaction pointer */ 439 xfs_trans_t *tp, /* transaction pointer */
440 xfs_agnumber_t agno, /* allocation group number */ 440 xfs_agnumber_t agno, /* allocation group number */
441 xfs_buf_t **bpp) /* buffer for the ag free block array */ 441 xfs_buf_t **bpp) /* buffer for the ag free block array */
442 { 442 {
443 xfs_buf_t *bp; /* return value */ 443 xfs_buf_t *bp; /* return value */
444 int error; 444 int error;
445 445
446 ASSERT(agno != NULLAGNUMBER); 446 ASSERT(agno != NULLAGNUMBER);
447 error = xfs_trans_read_buf( 447 error = xfs_trans_read_buf(
448 mp, tp, mp->m_ddev_targp, 448 mp, tp, mp->m_ddev_targp,
449 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)), 449 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
450 XFS_FSS_TO_BB(mp, 1), 0, &bp); 450 XFS_FSS_TO_BB(mp, 1), 0, &bp);
451 if (error) 451 if (error)
452 return error; 452 return error;
453 ASSERT(!xfs_buf_geterror(bp)); 453 ASSERT(!xfs_buf_geterror(bp));
454 xfs_buf_set_ref(bp, XFS_AGFL_REF); 454 xfs_buf_set_ref(bp, XFS_AGFL_REF);
455 *bpp = bp; 455 *bpp = bp;
456 return 0; 456 return 0;
457 } 457 }
458 458
459 STATIC int 459 STATIC int
460 xfs_alloc_update_counters( 460 xfs_alloc_update_counters(
461 struct xfs_trans *tp, 461 struct xfs_trans *tp,
462 struct xfs_perag *pag, 462 struct xfs_perag *pag,
463 struct xfs_buf *agbp, 463 struct xfs_buf *agbp,
464 long len) 464 long len)
465 { 465 {
466 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); 466 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
467 467
468 pag->pagf_freeblks += len; 468 pag->pagf_freeblks += len;
469 be32_add_cpu(&agf->agf_freeblks, len); 469 be32_add_cpu(&agf->agf_freeblks, len);
470 470
471 xfs_trans_agblocks_delta(tp, len); 471 xfs_trans_agblocks_delta(tp, len);
472 if (unlikely(be32_to_cpu(agf->agf_freeblks) > 472 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
473 be32_to_cpu(agf->agf_length))) 473 be32_to_cpu(agf->agf_length)))
474 return EFSCORRUPTED; 474 return EFSCORRUPTED;
475 475
476 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); 476 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
477 return 0; 477 return 0;
478 } 478 }
479 479
480 /* 480 /*
481 * Allocation group level functions. 481 * Allocation group level functions.
482 */ 482 */
483 483
484 /* 484 /*
485 * Allocate a variable extent in the allocation group agno. 485 * Allocate a variable extent in the allocation group agno.
486 * Type and bno are used to determine where in the allocation group the 486 * Type and bno are used to determine where in the allocation group the
487 * extent will start. 487 * extent will start.
488 * Extent's length (returned in *len) will be between minlen and maxlen, 488 * Extent's length (returned in *len) will be between minlen and maxlen,
489 * and of the form k * prod + mod unless there's nothing that large. 489 * and of the form k * prod + mod unless there's nothing that large.
490 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. 490 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
491 */ 491 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	/* Sanity-check the allocation request before dispatching. */
	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	/* NULLAGBLOCK means the sub-allocator found no usable extent. */
	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || !args->isfl);
	ASSERT(args->agbno % args->alignment == 0);

	if (!args->wasfromfl) {
		/*
		 * Blocks came from the free space btrees, not the AGFL:
		 * subtract them from the per-AG free block counters
		 * (negative delta).
		 */
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		/* Freshly allocated space must not overlap a busy extent. */
		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					      args->agbno, args->len));
	}

	if (!args->isfl) {
		/*
		 * Charge the superblock free-block count through the
		 * transaction; wasdel selects the reserved-blocks pool for
		 * delayed allocations (NOTE(review): inferred from the
		 * RES_FDBLOCKS naming — confirm against xfs_trans_mod_sb).
		 */
		xfs_trans_mod_sb(args->tp, args->wasdel ?
				 XFS_TRANS_SB_RES_FDBLOCKS :
				 XFS_TRANS_SB_FDBLOCKS,
				 -((long)(args->len)));
	}

	XFS_STATS_INC(xs_allocx);
	XFS_STATS_ADD(xs_allocb, args->len);
	return error;
}
552 552
553 /* 553 /*
554 * Allocate a variable extent at exactly agno/bno. 554 * Allocate a variable extent at exactly agno/bno.
555 * Extent's length (returned in *len) will be between minlen and maxlen, 555 * Extent's length (returned in *len) will be between minlen and maxlen,
556 * and of the form k * prod + mod unless there's nothing that large. 556 * and of the form k * prod + mod unless there's nothing that large.
557 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it. 557 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
558 */ 558 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of trimmed extent */
	xfs_extlen_t	tlen;	/* length of trimmed extent */
	xfs_agblock_t	tend;	/* end block of trimmed extent */
	int		i;	/* success/failure of operation */

	/* Exact-bno allocation cannot honor any alignment but 1. */
	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.  Trims [fbno,flen) down to
	 * the busy-free portion [tbno,tlen).
	 */
	xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;

	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	/* Carve args->len blocks out of the record in both btrees. */
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
666 666
667 /* 667 /*
668 * Search the btree in a given direction via the search cursor and compare 668 * Search the btree in a given direction via the search cursor and compare
669 * the records found against the good extent we've already found. 669 * the records found against the good extent we've already found.
670 */ 670 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 * Exactly one of the two cursors is freed (and NULLed) on success;
	 * the caller keeps whichever survives.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);

		/*
		 * The good extent is closer than this one.  Once the search
		 * has moved gdiff or more blocks away from agbno in its
		 * direction, nothing further can win.
		 */
		if (!dir) {
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment, *sbnoa,
						       *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		/* Too short: step the search cursor in its direction. */
		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}
754 754
755 /* 755 /*
756 * Allocate a variable extent near bno in the allocation group agno. 756 * Allocate a variable extent near bno in the allocation group agno.
757 * Extent's length (returned in len) will be between minlen and maxlen, 757 * Extent's length (returned in len) will be between minlen and maxlen,
758 * and of the form k * prod + mod unless there's nothing that large. 758 * and of the form k * prod + mod unless there's nothing that large.
759 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. 759 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
760 */ 760 */
761 STATIC int /* error */ 761 STATIC int /* error */
762 xfs_alloc_ag_vextent_near( 762 xfs_alloc_ag_vextent_near(
763 xfs_alloc_arg_t *args) /* allocation argument structure */ 763 xfs_alloc_arg_t *args) /* allocation argument structure */
764 { 764 {
765 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */ 765 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
766 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */ 766 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
767 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */ 767 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
768 xfs_agblock_t gtbno; /* start bno of right side entry */ 768 xfs_agblock_t gtbno; /* start bno of right side entry */
769 xfs_agblock_t gtbnoa; /* aligned ... */ 769 xfs_agblock_t gtbnoa; /* aligned ... */
770 xfs_extlen_t gtdiff; /* difference to right side entry */ 770 xfs_extlen_t gtdiff; /* difference to right side entry */
771 xfs_extlen_t gtlen; /* length of right side entry */ 771 xfs_extlen_t gtlen; /* length of right side entry */
772 xfs_extlen_t gtlena; /* aligned ... */ 772 xfs_extlen_t gtlena; /* aligned ... */
773 xfs_agblock_t gtnew; /* useful start bno of right side */ 773 xfs_agblock_t gtnew; /* useful start bno of right side */
774 int error; /* error code */ 774 int error; /* error code */
775 int i; /* result code, temporary */ 775 int i; /* result code, temporary */
776 int j; /* result code, temporary */ 776 int j; /* result code, temporary */
777 xfs_agblock_t ltbno; /* start bno of left side entry */ 777 xfs_agblock_t ltbno; /* start bno of left side entry */
778 xfs_agblock_t ltbnoa; /* aligned ... */ 778 xfs_agblock_t ltbnoa; /* aligned ... */
779 xfs_extlen_t ltdiff; /* difference to left side entry */ 779 xfs_extlen_t ltdiff; /* difference to left side entry */
780 xfs_extlen_t ltlen; /* length of left side entry */ 780 xfs_extlen_t ltlen; /* length of left side entry */
781 xfs_extlen_t ltlena; /* aligned ... */ 781 xfs_extlen_t ltlena; /* aligned ... */
782 xfs_agblock_t ltnew; /* useful start bno of left side */ 782 xfs_agblock_t ltnew; /* useful start bno of left side */
783 xfs_extlen_t rlen; /* length of returned extent */ 783 xfs_extlen_t rlen; /* length of returned extent */
784 int forced = 0; 784 int forced = 0;
785 #if defined(DEBUG) && defined(__KERNEL__) 785 #if defined(DEBUG) && defined(__KERNEL__)
786 /* 786 /*
787 * Randomly don't execute the first algorithm. 787 * Randomly don't execute the first algorithm.
788 */ 788 */
789 int dofirst; /* set to do first algorithm */ 789 int dofirst; /* set to do first algorithm */
790 790
791 dofirst = random32() & 1; 791 dofirst = random32() & 1;
792 #endif 792 #endif
793 793
794 restart: 794 restart:
795 bno_cur_lt = NULL; 795 bno_cur_lt = NULL;
796 bno_cur_gt = NULL; 796 bno_cur_gt = NULL;
797 ltlen = 0; 797 ltlen = 0;
798 gtlena = 0; 798 gtlena = 0;
799 ltlena = 0; 799 ltlena = 0;
800 800
801 /* 801 /*
802 * Get a cursor for the by-size btree. 802 * Get a cursor for the by-size btree.
803 */ 803 */
804 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 804 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
805 args->agno, XFS_BTNUM_CNT); 805 args->agno, XFS_BTNUM_CNT);
806 806
807 /* 807 /*
808 * See if there are any free extents as big as maxlen. 808 * See if there are any free extents as big as maxlen.
809 */ 809 */
810 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i))) 810 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
811 goto error0; 811 goto error0;
812 /* 812 /*
813 * If none, then pick up the last entry in the tree unless the 813 * If none, then pick up the last entry in the tree unless the
814 * tree is empty. 814 * tree is empty.
815 */ 815 */
816 if (!i) { 816 if (!i) {
817 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno, 817 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
818 &ltlen, &i))) 818 &ltlen, &i)))
819 goto error0; 819 goto error0;
820 if (i == 0 || ltlen == 0) { 820 if (i == 0 || ltlen == 0) {
821 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 821 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
822 trace_xfs_alloc_near_noentry(args); 822 trace_xfs_alloc_near_noentry(args);
823 return 0; 823 return 0;
824 } 824 }
825 ASSERT(i == 1); 825 ASSERT(i == 1);
826 } 826 }
827 args->wasfromfl = 0; 827 args->wasfromfl = 0;
828 828
829 /* 829 /*
830 * First algorithm. 830 * First algorithm.
831 * If the requested extent is large wrt the freespaces available 831 * If the requested extent is large wrt the freespaces available
832 * in this a.g., then the cursor will be pointing to a btree entry 832 * in this a.g., then the cursor will be pointing to a btree entry
833 * near the right edge of the tree. If it's in the last btree leaf 833 * near the right edge of the tree. If it's in the last btree leaf
834 * block, then we just examine all the entries in that block 834 * block, then we just examine all the entries in that block
835 * that are big enough, and pick the best one. 835 * that are big enough, and pick the best one.
836 * This is written as a while loop so we can break out of it, 836 * This is written as a while loop so we can break out of it,
837 * but we never loop back to the top. 837 * but we never loop back to the top.
838 */ 838 */
839 while (xfs_btree_islastblock(cnt_cur, 0)) { 839 while (xfs_btree_islastblock(cnt_cur, 0)) {
840 xfs_extlen_t bdiff; 840 xfs_extlen_t bdiff;
841 int besti=0; 841 int besti=0;
842 xfs_extlen_t blen=0; 842 xfs_extlen_t blen=0;
843 xfs_agblock_t bnew=0; 843 xfs_agblock_t bnew=0;
844 844
845 #if defined(DEBUG) && defined(__KERNEL__) 845 #if defined(DEBUG) && defined(__KERNEL__)
846 if (!dofirst) 846 if (!dofirst)
847 break; 847 break;
848 #endif 848 #endif
849 /* 849 /*
850 * Start from the entry that lookup found, sequence through 850 * Start from the entry that lookup found, sequence through
851 * all larger free blocks. If we're actually pointing at a 851 * all larger free blocks. If we're actually pointing at a
852 * record smaller than maxlen, go to the start of this block, 852 * record smaller than maxlen, go to the start of this block,
853 * and skip all those smaller than minlen. 853 * and skip all those smaller than minlen.
854 */ 854 */
855 if (ltlen || args->alignment > 1) { 855 if (ltlen || args->alignment > 1) {
856 cnt_cur->bc_ptrs[0] = 1; 856 cnt_cur->bc_ptrs[0] = 1;
857 do { 857 do {
858 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, 858 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
859 &ltlen, &i))) 859 &ltlen, &i)))
860 goto error0; 860 goto error0;
861 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 861 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
862 if (ltlen >= args->minlen) 862 if (ltlen >= args->minlen)
863 break; 863 break;
864 if ((error = xfs_btree_increment(cnt_cur, 0, &i))) 864 if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
865 goto error0; 865 goto error0;
866 } while (i); 866 } while (i);
867 ASSERT(ltlen >= args->minlen); 867 ASSERT(ltlen >= args->minlen);
868 if (!i) 868 if (!i)
869 break; 869 break;
870 } 870 }
871 i = cnt_cur->bc_ptrs[0]; 871 i = cnt_cur->bc_ptrs[0];
872 for (j = 1, blen = 0, bdiff = 0; 872 for (j = 1, blen = 0, bdiff = 0;
873 !error && j && (blen < args->maxlen || bdiff > 0); 873 !error && j && (blen < args->maxlen || bdiff > 0);
874 error = xfs_btree_increment(cnt_cur, 0, &j)) { 874 error = xfs_btree_increment(cnt_cur, 0, &j)) {
875 /* 875 /*
876 * For each entry, decide if it's better than 876 * For each entry, decide if it's better than
877 * the previous best entry. 877 * the previous best entry.
878 */ 878 */
879 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) 879 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
880 goto error0; 880 goto error0;
881 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 881 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
882 xfs_alloc_compute_aligned(args, ltbno, ltlen, 882 xfs_alloc_compute_aligned(args, ltbno, ltlen,
883 &ltbnoa, &ltlena); 883 &ltbnoa, &ltlena);
884 if (ltlena < args->minlen) 884 if (ltlena < args->minlen)
885 continue; 885 continue;
886 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 886 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
887 xfs_alloc_fix_len(args); 887 xfs_alloc_fix_len(args);
888 ASSERT(args->len >= args->minlen); 888 ASSERT(args->len >= args->minlen);
889 if (args->len < blen) 889 if (args->len < blen)
890 continue; 890 continue;
891 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, 891 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
892 args->alignment, ltbnoa, ltlena, &ltnew); 892 args->alignment, ltbnoa, ltlena, &ltnew);
893 if (ltnew != NULLAGBLOCK && 893 if (ltnew != NULLAGBLOCK &&
894 (args->len > blen || ltdiff < bdiff)) { 894 (args->len > blen || ltdiff < bdiff)) {
895 bdiff = ltdiff; 895 bdiff = ltdiff;
896 bnew = ltnew; 896 bnew = ltnew;
897 blen = args->len; 897 blen = args->len;
898 besti = cnt_cur->bc_ptrs[0]; 898 besti = cnt_cur->bc_ptrs[0];
899 } 899 }
900 } 900 }
901 /* 901 /*
902 * It didn't work. We COULD be in a case where 902 * It didn't work. We COULD be in a case where
903 * there's a good record somewhere, so try again. 903 * there's a good record somewhere, so try again.
904 */ 904 */
905 if (blen == 0) 905 if (blen == 0)
906 break; 906 break;
907 /* 907 /*
908 * Point at the best entry, and retrieve it again. 908 * Point at the best entry, and retrieve it again.
909 */ 909 */
910 cnt_cur->bc_ptrs[0] = besti; 910 cnt_cur->bc_ptrs[0] = besti;
911 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) 911 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
912 goto error0; 912 goto error0;
913 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 913 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
914 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); 914 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
915 args->len = blen; 915 args->len = blen;
916 if (!xfs_alloc_fix_minleft(args)) { 916 if (!xfs_alloc_fix_minleft(args)) {
917 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 917 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
918 trace_xfs_alloc_near_nominleft(args); 918 trace_xfs_alloc_near_nominleft(args);
919 return 0; 919 return 0;
920 } 920 }
921 blen = args->len; 921 blen = args->len;
922 /* 922 /*
923 * We are allocating starting at bnew for blen blocks. 923 * We are allocating starting at bnew for blen blocks.
924 */ 924 */
925 args->agbno = bnew; 925 args->agbno = bnew;
926 ASSERT(bnew >= ltbno); 926 ASSERT(bnew >= ltbno);
927 ASSERT(bnew + blen <= ltbno + ltlen); 927 ASSERT(bnew + blen <= ltbno + ltlen);
928 /* 928 /*
929 * Set up a cursor for the by-bno tree. 929 * Set up a cursor for the by-bno tree.
930 */ 930 */
931 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, 931 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
932 args->agbp, args->agno, XFS_BTNUM_BNO); 932 args->agbp, args->agno, XFS_BTNUM_BNO);
933 /* 933 /*
934 * Fix up the btree entries. 934 * Fix up the btree entries.
935 */ 935 */
936 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, 936 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
937 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK))) 937 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
938 goto error0; 938 goto error0;
939 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 939 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
940 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 940 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
941 941
942 trace_xfs_alloc_near_first(args); 942 trace_xfs_alloc_near_first(args);
943 return 0; 943 return 0;
944 } 944 }
945 /* 945 /*
946 * Second algorithm. 946 * Second algorithm.
947 * Search in the by-bno tree to the left and to the right 947 * Search in the by-bno tree to the left and to the right
948 * simultaneously, until in each case we find a space big enough, 948 * simultaneously, until in each case we find a space big enough,
949 * or run into the edge of the tree. When we run into the edge, 949 * or run into the edge of the tree. When we run into the edge,
950 * we deallocate that cursor. 950 * we deallocate that cursor.
951 * If both searches succeed, we compare the two spaces and pick 951 * If both searches succeed, we compare the two spaces and pick
952 * the better one. 952 * the better one.
953 * With alignment, it's possible for both to fail; the upper 953 * With alignment, it's possible for both to fail; the upper
954 * level algorithm that picks allocation groups for allocations 954 * level algorithm that picks allocation groups for allocations
955 * is not supposed to do this. 955 * is not supposed to do this.
956 */ 956 */
957 /* 957 /*
958 * Allocate and initialize the cursor for the leftward search. 958 * Allocate and initialize the cursor for the leftward search.
959 */ 959 */
960 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 960 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
961 args->agno, XFS_BTNUM_BNO); 961 args->agno, XFS_BTNUM_BNO);
962 /* 962 /*
963 * Lookup <= bno to find the leftward search's starting point. 963 * Lookup <= bno to find the leftward search's starting point.
964 */ 964 */
965 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i))) 965 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
966 goto error0; 966 goto error0;
967 if (!i) { 967 if (!i) {
968 /* 968 /*
969 * Didn't find anything; use this cursor for the rightward 969 * Didn't find anything; use this cursor for the rightward
970 * search. 970 * search.
971 */ 971 */
972 bno_cur_gt = bno_cur_lt; 972 bno_cur_gt = bno_cur_lt;
973 bno_cur_lt = NULL; 973 bno_cur_lt = NULL;
974 } 974 }
975 /* 975 /*
976 * Found something. Duplicate the cursor for the rightward search. 976 * Found something. Duplicate the cursor for the rightward search.
977 */ 977 */
978 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt))) 978 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
979 goto error0; 979 goto error0;
980 /* 980 /*
981 * Increment the cursor, so we will point at the entry just right 981 * Increment the cursor, so we will point at the entry just right
982 * of the leftward entry if any, or to the leftmost entry. 982 * of the leftward entry if any, or to the leftmost entry.
983 */ 983 */
984 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i))) 984 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
985 goto error0; 985 goto error0;
986 if (!i) { 986 if (!i) {
987 /* 987 /*
988 * It failed, there are no rightward entries. 988 * It failed, there are no rightward entries.
989 */ 989 */
990 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR); 990 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
991 bno_cur_gt = NULL; 991 bno_cur_gt = NULL;
992 } 992 }
993 /* 993 /*
994 * Loop going left with the leftward cursor, right with the 994 * Loop going left with the leftward cursor, right with the
995 * rightward cursor, until either both directions give up or 995 * rightward cursor, until either both directions give up or
996 * we find an entry at least as big as minlen. 996 * we find an entry at least as big as minlen.
997 */ 997 */
998 do { 998 do {
999 if (bno_cur_lt) { 999 if (bno_cur_lt) {
1000 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i))) 1000 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1001 goto error0; 1001 goto error0;
1002 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1002 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1003 xfs_alloc_compute_aligned(args, ltbno, ltlen, 1003 xfs_alloc_compute_aligned(args, ltbno, ltlen,
1004 &ltbnoa, &ltlena); 1004 &ltbnoa, &ltlena);
1005 if (ltlena >= args->minlen) 1005 if (ltlena >= args->minlen)
1006 break; 1006 break;
1007 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i))) 1007 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1008 goto error0; 1008 goto error0;
1009 if (!i) { 1009 if (!i) {
1010 xfs_btree_del_cursor(bno_cur_lt, 1010 xfs_btree_del_cursor(bno_cur_lt,
1011 XFS_BTREE_NOERROR); 1011 XFS_BTREE_NOERROR);
1012 bno_cur_lt = NULL; 1012 bno_cur_lt = NULL;
1013 } 1013 }
1014 } 1014 }
1015 if (bno_cur_gt) { 1015 if (bno_cur_gt) {
1016 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i))) 1016 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1017 goto error0; 1017 goto error0;
1018 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1018 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1019 xfs_alloc_compute_aligned(args, gtbno, gtlen, 1019 xfs_alloc_compute_aligned(args, gtbno, gtlen,
1020 &gtbnoa, &gtlena); 1020 &gtbnoa, &gtlena);
1021 if (gtlena >= args->minlen) 1021 if (gtlena >= args->minlen)
1022 break; 1022 break;
1023 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i))) 1023 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1024 goto error0; 1024 goto error0;
1025 if (!i) { 1025 if (!i) {
1026 xfs_btree_del_cursor(bno_cur_gt, 1026 xfs_btree_del_cursor(bno_cur_gt,
1027 XFS_BTREE_NOERROR); 1027 XFS_BTREE_NOERROR);
1028 bno_cur_gt = NULL; 1028 bno_cur_gt = NULL;
1029 } 1029 }
1030 } 1030 }
1031 } while (bno_cur_lt || bno_cur_gt); 1031 } while (bno_cur_lt || bno_cur_gt);
1032 1032
1033 /* 1033 /*
1034 * Got both cursors still active, need to find better entry. 1034 * Got both cursors still active, need to find better entry.
1035 */ 1035 */
1036 if (bno_cur_lt && bno_cur_gt) { 1036 if (bno_cur_lt && bno_cur_gt) {
1037 if (ltlena >= args->minlen) { 1037 if (ltlena >= args->minlen) {
1038 /* 1038 /*
1039 * Left side is good, look for a right side entry. 1039 * Left side is good, look for a right side entry.
1040 */ 1040 */
1041 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1041 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1042 xfs_alloc_fix_len(args); 1042 xfs_alloc_fix_len(args);
1043 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, 1043 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1044 args->alignment, ltbnoa, ltlena, &ltnew); 1044 args->alignment, ltbnoa, ltlena, &ltnew);
1045 1045
1046 error = xfs_alloc_find_best_extent(args, 1046 error = xfs_alloc_find_best_extent(args,
1047 &bno_cur_lt, &bno_cur_gt, 1047 &bno_cur_lt, &bno_cur_gt,
1048 ltdiff, &gtbno, &gtlen, 1048 ltdiff, &gtbno, &gtlen,
1049 &gtbnoa, &gtlena, 1049 &gtbnoa, &gtlena,
1050 0 /* search right */); 1050 0 /* search right */);
1051 } else { 1051 } else {
1052 ASSERT(gtlena >= args->minlen); 1052 ASSERT(gtlena >= args->minlen);
1053 1053
1054 /* 1054 /*
1055 * Right side is good, look for a left side entry. 1055 * Right side is good, look for a left side entry.
1056 */ 1056 */
1057 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen); 1057 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1058 xfs_alloc_fix_len(args); 1058 xfs_alloc_fix_len(args);
1059 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len, 1059 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1060 args->alignment, gtbnoa, gtlena, &gtnew); 1060 args->alignment, gtbnoa, gtlena, &gtnew);
1061 1061
1062 error = xfs_alloc_find_best_extent(args, 1062 error = xfs_alloc_find_best_extent(args,
1063 &bno_cur_gt, &bno_cur_lt, 1063 &bno_cur_gt, &bno_cur_lt,
1064 gtdiff, &ltbno, &ltlen, 1064 gtdiff, &ltbno, &ltlen,
1065 &ltbnoa, &ltlena, 1065 &ltbnoa, &ltlena,
1066 1 /* search left */); 1066 1 /* search left */);
1067 } 1067 }
1068 1068
1069 if (error) 1069 if (error)
1070 goto error0; 1070 goto error0;
1071 } 1071 }
1072 1072
1073 /* 1073 /*
1074 * If we couldn't get anything, give up. 1074 * If we couldn't get anything, give up.
1075 */ 1075 */
1076 if (bno_cur_lt == NULL && bno_cur_gt == NULL) { 1076 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1077 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1077 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1078 1078
1079 if (!forced++) { 1079 if (!forced++) {
1080 trace_xfs_alloc_near_busy(args); 1080 trace_xfs_alloc_near_busy(args);
1081 xfs_log_force(args->mp, XFS_LOG_SYNC); 1081 xfs_log_force(args->mp, XFS_LOG_SYNC);
1082 goto restart; 1082 goto restart;
1083 } 1083 }
1084 trace_xfs_alloc_size_neither(args); 1084 trace_xfs_alloc_size_neither(args);
1085 args->agbno = NULLAGBLOCK; 1085 args->agbno = NULLAGBLOCK;
1086 return 0; 1086 return 0;
1087 } 1087 }
1088 1088
1089 /* 1089 /*
1090 * At this point we have selected a freespace entry, either to the 1090 * At this point we have selected a freespace entry, either to the
1091 * left or to the right. If it's on the right, copy all the 1091 * left or to the right. If it's on the right, copy all the
1092 * useful variables to the "left" set so we only have one 1092 * useful variables to the "left" set so we only have one
1093 * copy of this code. 1093 * copy of this code.
1094 */ 1094 */
1095 if (bno_cur_gt) { 1095 if (bno_cur_gt) {
1096 bno_cur_lt = bno_cur_gt; 1096 bno_cur_lt = bno_cur_gt;
1097 bno_cur_gt = NULL; 1097 bno_cur_gt = NULL;
1098 ltbno = gtbno; 1098 ltbno = gtbno;
1099 ltbnoa = gtbnoa; 1099 ltbnoa = gtbnoa;
1100 ltlen = gtlen; 1100 ltlen = gtlen;
1101 ltlena = gtlena; 1101 ltlena = gtlena;
1102 j = 1; 1102 j = 1;
1103 } else 1103 } else
1104 j = 0; 1104 j = 0;
1105 1105
1106 /* 1106 /*
1107 * Fix up the length and compute the useful address. 1107 * Fix up the length and compute the useful address.
1108 */ 1108 */
1109 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1109 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1110 xfs_alloc_fix_len(args); 1110 xfs_alloc_fix_len(args);
1111 if (!xfs_alloc_fix_minleft(args)) { 1111 if (!xfs_alloc_fix_minleft(args)) {
1112 trace_xfs_alloc_near_nominleft(args); 1112 trace_xfs_alloc_near_nominleft(args);
1113 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 1113 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1114 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1114 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1115 return 0; 1115 return 0;
1116 } 1116 }
1117 rlen = args->len; 1117 rlen = args->len;
1118 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, 1118 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1119 ltbnoa, ltlena, &ltnew); 1119 ltbnoa, ltlena, &ltnew);
1120 ASSERT(ltnew >= ltbno); 1120 ASSERT(ltnew >= ltbno);
1121 ASSERT(ltnew + rlen <= ltbnoa + ltlena); 1121 ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1122 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); 1122 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1123 args->agbno = ltnew; 1123 args->agbno = ltnew;
1124 1124
1125 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, 1125 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1126 ltnew, rlen, XFSA_FIXUP_BNO_OK))) 1126 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1127 goto error0; 1127 goto error0;
1128 1128
1129 if (j) 1129 if (j)
1130 trace_xfs_alloc_near_greater(args); 1130 trace_xfs_alloc_near_greater(args);
1131 else 1131 else
1132 trace_xfs_alloc_near_lesser(args); 1132 trace_xfs_alloc_near_lesser(args);
1133 1133
1134 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1134 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1135 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 1135 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1136 return 0; 1136 return 0;
1137 1137
1138 error0: 1138 error0:
1139 trace_xfs_alloc_near_error(args); 1139 trace_xfs_alloc_near_error(args);
1140 if (cnt_cur != NULL) 1140 if (cnt_cur != NULL)
1141 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1141 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1142 if (bno_cur_lt != NULL) 1142 if (bno_cur_lt != NULL)
1143 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR); 1143 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1144 if (bno_cur_gt != NULL) 1144 if (bno_cur_gt != NULL)
1145 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR); 1145 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1146 return error; 1146 return error;
1147 } 1147 }
1148 1148
/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 *
 * Strategy: walk the by-size (CNT) btree for the largest usable extent,
 * skipping extents that are still busy (pinned in the log).  If only busy
 * extents qualify, force the log and restart; "forced" counts how many
 * times we have done so, and after the second pass we settle for a
 * minlen-sized allocation via xfs_alloc_ag_vextent_small().
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */
	int		forced = 0;	/* # of log forces done for busy extents */

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 * (The slack covers the worst-case alignment adjustment done by
	 * xfs_alloc_compute_aligned().)
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none or we have busy extents that we cannot allocate from, then
	 * we have to settle for a smaller extent. In the case that there are
	 * no large extents, this will return the last entry in the tree unless
	 * the tree is empty. In the case that there are only busy large
	 * extents, this will return the largest small extent unless there
	 * are no smaller extents available.
	 */
	if (!i || forced > 1) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			/* Nothing usable at all: report "no entry". */
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 * If we are at low space, don't check, or if we fall of
		 * the end of the btree, turn off the busy check and
		 * restart.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

			/* Trim the record to its aligned, non-busy portion. */
			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying. If we've been here before, forcing
				 * the log isn't making the extents available,
				 * which means they have probably been freed in
				 * this transaction. In that case, we have to
				 * give up on them and we'll attempt a minlen
				 * allocation the next time around.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				if (!forced++)
					xfs_log_force(args->mp, XFS_LOG_SYNC);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;	/* best candidate: raw start */
		xfs_extlen_t	bestflen;	/* best candidate: raw length */
		xfs_agblock_t	bestrbno;	/* best candidate: aligned start */
		xfs_extlen_t	bestrlen;	/* best candidate: aligned length */

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		/*
		 * Walk left (toward smaller raw lengths); an extent with a
		 * smaller raw length can still have a larger usable aligned
		 * length, so keep looking until raw lengths drop below the
		 * best aligned length found so far.
		 */
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		/* Reposition the cursor on the winning record. */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		/*
		 * Too small, most likely because the usable portion was
		 * trimmed by busy extents: force the log once and retry
		 * before giving up.
		 */
		if (!forced++) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	if (!xfs_alloc_fix_minleft(args))
		goto out_nominleft;
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/* Remove the allocated range from both freespace btrees. */
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	/* Sanity: the allocation must lie inside the AG. */
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	/* Can't satisfy minlen/minleft: succeed with NULLAGBLOCK. */
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}
1355 1355
/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 *
 * On success *stat distinguishes the two outcomes:
 *   *stat == 0: a single block was taken from the AG freelist and the
 *               allocation is already complete (args->agbno/len set).
 *   *stat == 1: *fbnop/*flenp describe the last by-size btree record for
 *               the caller to allocate from, or flen == 0 /
 *               fbno == NULLAGBLOCK if nothing suitable was found.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	/*
	 * The caller's cursor is positioned past the usable records;
	 * step back to the largest remaining entry, if any.
	 */
	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			/*
			 * Wait for this block's busy state to clear before
			 * reusing it.
			 */
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
					     args->userdata);

			if (args->userdata) {
				xfs_buf_t	*bp;

				/*
				 * Freelist blocks were metadata; invalidate
				 * the stale buffer before handing the block
				 * over as user data.
				 */
				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			/* Sanity: the block must lie inside the AG. */
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);
			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}
1445 1445
1446 /* 1446 /*
1447 * Free the extent starting at agno/bno for length. 1447 * Free the extent starting at agno/bno for length.
1448 */ 1448 */
1449 STATIC int /* error */ 1449 STATIC int /* error */
1450 xfs_free_ag_extent( 1450 xfs_free_ag_extent(
1451 xfs_trans_t *tp, /* transaction pointer */ 1451 xfs_trans_t *tp, /* transaction pointer */
1452 xfs_buf_t *agbp, /* buffer for a.g. freelist header */ 1452 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
1453 xfs_agnumber_t agno, /* allocation group number */ 1453 xfs_agnumber_t agno, /* allocation group number */
1454 xfs_agblock_t bno, /* starting block number */ 1454 xfs_agblock_t bno, /* starting block number */
1455 xfs_extlen_t len, /* length of extent */ 1455 xfs_extlen_t len, /* length of extent */
1456 int isfl) /* set if is freelist blocks - no sb acctg */ 1456 int isfl) /* set if is freelist blocks - no sb acctg */
1457 { 1457 {
1458 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */ 1458 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1459 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */ 1459 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1460 int error; /* error return value */ 1460 int error; /* error return value */
1461 xfs_agblock_t gtbno; /* start of right neighbor block */ 1461 xfs_agblock_t gtbno; /* start of right neighbor block */
1462 xfs_extlen_t gtlen; /* length of right neighbor block */ 1462 xfs_extlen_t gtlen; /* length of right neighbor block */
1463 int haveleft; /* have a left neighbor block */ 1463 int haveleft; /* have a left neighbor block */
1464 int haveright; /* have a right neighbor block */ 1464 int haveright; /* have a right neighbor block */
1465 int i; /* temp, result code */ 1465 int i; /* temp, result code */
1466 xfs_agblock_t ltbno; /* start of left neighbor block */ 1466 xfs_agblock_t ltbno; /* start of left neighbor block */
1467 xfs_extlen_t ltlen; /* length of left neighbor block */ 1467 xfs_extlen_t ltlen; /* length of left neighbor block */
1468 xfs_mount_t *mp; /* mount point struct for filesystem */ 1468 xfs_mount_t *mp; /* mount point struct for filesystem */
1469 xfs_agblock_t nbno; /* new starting block of freespace */ 1469 xfs_agblock_t nbno; /* new starting block of freespace */
1470 xfs_extlen_t nlen; /* new length of freespace */ 1470 xfs_extlen_t nlen; /* new length of freespace */
1471 xfs_perag_t *pag; /* per allocation group data */ 1471 xfs_perag_t *pag; /* per allocation group data */
1472 1472
1473 mp = tp->t_mountp; 1473 mp = tp->t_mountp;
1474 /* 1474 /*
1475 * Allocate and initialize a cursor for the by-block btree. 1475 * Allocate and initialize a cursor for the by-block btree.
1476 */ 1476 */
1477 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO); 1477 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1478 cnt_cur = NULL; 1478 cnt_cur = NULL;
1479 /* 1479 /*
1480 * Look for a neighboring block on the left (lower block numbers) 1480 * Look for a neighboring block on the left (lower block numbers)
1481 * that is contiguous with this space. 1481 * that is contiguous with this space.
1482 */ 1482 */
1483 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft))) 1483 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1484 goto error0; 1484 goto error0;
1485 if (haveleft) { 1485 if (haveleft) {
1486 /* 1486 /*
1487 * There is a block to our left. 1487 * There is a block to our left.
1488 */ 1488 */
1489 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i))) 1489 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1490 goto error0; 1490 goto error0;
1491 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1491 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1492 /* 1492 /*
1493 * It's not contiguous, though. 1493 * It's not contiguous, though.
1494 */ 1494 */
1495 if (ltbno + ltlen < bno) 1495 if (ltbno + ltlen < bno)
1496 haveleft = 0; 1496 haveleft = 0;
1497 else { 1497 else {
1498 /* 1498 /*
1499 * If this failure happens the request to free this 1499 * If this failure happens the request to free this
1500 * space was invalid, it's (partly) already free. 1500 * space was invalid, it's (partly) already free.
1501 * Very bad. 1501 * Very bad.
1502 */ 1502 */
1503 XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0); 1503 XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
1504 } 1504 }
1505 } 1505 }
1506 /* 1506 /*
1507 * Look for a neighboring block on the right (higher block numbers) 1507 * Look for a neighboring block on the right (higher block numbers)
1508 * that is contiguous with this space. 1508 * that is contiguous with this space.
1509 */ 1509 */
1510 if ((error = xfs_btree_increment(bno_cur, 0, &haveright))) 1510 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1511 goto error0; 1511 goto error0;
1512 if (haveright) { 1512 if (haveright) {
1513 /* 1513 /*
1514 * There is a block to our right. 1514 * There is a block to our right.
1515 */ 1515 */
1516 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i))) 1516 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1517 goto error0; 1517 goto error0;
1518 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1518 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1519 /* 1519 /*
1520 * It's not contiguous, though. 1520 * It's not contiguous, though.
1521 */ 1521 */
1522 if (bno + len < gtbno) 1522 if (bno + len < gtbno)
1523 haveright = 0; 1523 haveright = 0;
1524 else { 1524 else {
1525 /* 1525 /*
1526 * If this failure happens the request to free this 1526 * If this failure happens the request to free this
1527 * space was invalid, it's (partly) already free. 1527 * space was invalid, it's (partly) already free.
1528 * Very bad. 1528 * Very bad.
1529 */ 1529 */
1530 XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0); 1530 XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
1531 } 1531 }
1532 } 1532 }
1533 /* 1533 /*
1534 * Now allocate and initialize a cursor for the by-size tree. 1534 * Now allocate and initialize a cursor for the by-size tree.
1535 */ 1535 */
1536 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT); 1536 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1537 /* 1537 /*
1538 * Have both left and right contiguous neighbors. 1538 * Have both left and right contiguous neighbors.
1539 * Merge all three into a single free block. 1539 * Merge all three into a single free block.
1540 */ 1540 */
1541 if (haveleft && haveright) { 1541 if (haveleft && haveright) {
1542 /* 1542 /*
1543 * Delete the old by-size entry on the left. 1543 * Delete the old by-size entry on the left.
1544 */ 1544 */
1545 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) 1545 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1546 goto error0; 1546 goto error0;
1547 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1547 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1548 if ((error = xfs_btree_delete(cnt_cur, &i))) 1548 if ((error = xfs_btree_delete(cnt_cur, &i)))
1549 goto error0; 1549 goto error0;
1550 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1550 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1551 /* 1551 /*
1552 * Delete the old by-size entry on the right. 1552 * Delete the old by-size entry on the right.
1553 */ 1553 */
1554 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) 1554 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1555 goto error0; 1555 goto error0;
1556 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1556 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1557 if ((error = xfs_btree_delete(cnt_cur, &i))) 1557 if ((error = xfs_btree_delete(cnt_cur, &i)))
1558 goto error0; 1558 goto error0;
1559 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1559 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1560 /* 1560 /*
1561 * Delete the old by-block entry for the right block. 1561 * Delete the old by-block entry for the right block.
1562 */ 1562 */
1563 if ((error = xfs_btree_delete(bno_cur, &i))) 1563 if ((error = xfs_btree_delete(bno_cur, &i)))
1564 goto error0; 1564 goto error0;
1565 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1565 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1566 /* 1566 /*
1567 * Move the by-block cursor back to the left neighbor. 1567 * Move the by-block cursor back to the left neighbor.
1568 */ 1568 */
1569 if ((error = xfs_btree_decrement(bno_cur, 0, &i))) 1569 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1570 goto error0; 1570 goto error0;
1571 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1571 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1572 #ifdef DEBUG 1572 #ifdef DEBUG
1573 /* 1573 /*
1574 * Check that this is the right record: delete didn't 1574 * Check that this is the right record: delete didn't
1575 * mangle the cursor. 1575 * mangle the cursor.
1576 */ 1576 */
1577 { 1577 {
1578 xfs_agblock_t xxbno; 1578 xfs_agblock_t xxbno;
1579 xfs_extlen_t xxlen; 1579 xfs_extlen_t xxlen;
1580 1580
1581 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen, 1581 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1582 &i))) 1582 &i)))
1583 goto error0; 1583 goto error0;
1584 XFS_WANT_CORRUPTED_GOTO( 1584 XFS_WANT_CORRUPTED_GOTO(
1585 i == 1 && xxbno == ltbno && xxlen == ltlen, 1585 i == 1 && xxbno == ltbno && xxlen == ltlen,
1586 error0); 1586 error0);
1587 } 1587 }
1588 #endif 1588 #endif
1589 /* 1589 /*
1590 * Update remaining by-block entry to the new, joined block. 1590 * Update remaining by-block entry to the new, joined block.
1591 */ 1591 */
1592 nbno = ltbno; 1592 nbno = ltbno;
1593 nlen = len + ltlen + gtlen; 1593 nlen = len + ltlen + gtlen;
1594 if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) 1594 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1595 goto error0; 1595 goto error0;
1596 } 1596 }
1597 /* 1597 /*
1598 * Have only a left contiguous neighbor. 1598 * Have only a left contiguous neighbor.
1599 * Merge it together with the new freespace. 1599 * Merge it together with the new freespace.
1600 */ 1600 */
1601 else if (haveleft) { 1601 else if (haveleft) {
1602 /* 1602 /*
1603 * Delete the old by-size entry on the left. 1603 * Delete the old by-size entry on the left.
1604 */ 1604 */
1605 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) 1605 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1606 goto error0; 1606 goto error0;
1607 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1607 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1608 if ((error = xfs_btree_delete(cnt_cur, &i))) 1608 if ((error = xfs_btree_delete(cnt_cur, &i)))
1609 goto error0; 1609 goto error0;
1610 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1610 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1611 /* 1611 /*
1612 * Back up the by-block cursor to the left neighbor, and 1612 * Back up the by-block cursor to the left neighbor, and
1613 * update its length. 1613 * update its length.
1614 */ 1614 */
1615 if ((error = xfs_btree_decrement(bno_cur, 0, &i))) 1615 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1616 goto error0; 1616 goto error0;
1617 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1617 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1618 nbno = ltbno; 1618 nbno = ltbno;
1619 nlen = len + ltlen; 1619 nlen = len + ltlen;
1620 if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) 1620 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1621 goto error0; 1621 goto error0;
1622 } 1622 }
1623 /* 1623 /*
1624 * Have only a right contiguous neighbor. 1624 * Have only a right contiguous neighbor.
1625 * Merge it together with the new freespace. 1625 * Merge it together with the new freespace.
1626 */ 1626 */
1627 else if (haveright) { 1627 else if (haveright) {
1628 /* 1628 /*
1629 * Delete the old by-size entry on the right. 1629 * Delete the old by-size entry on the right.
1630 */ 1630 */
1631 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) 1631 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1632 goto error0; 1632 goto error0;
1633 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1633 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1634 if ((error = xfs_btree_delete(cnt_cur, &i))) 1634 if ((error = xfs_btree_delete(cnt_cur, &i)))
1635 goto error0; 1635 goto error0;
1636 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1636 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1637 /* 1637 /*
1638 * Update the starting block and length of the right 1638 * Update the starting block and length of the right
1639 * neighbor in the by-block tree. 1639 * neighbor in the by-block tree.
1640 */ 1640 */
1641 nbno = bno; 1641 nbno = bno;
1642 nlen = len + gtlen; 1642 nlen = len + gtlen;
1643 if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) 1643 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1644 goto error0; 1644 goto error0;
1645 } 1645 }
1646 /* 1646 /*
1647 * No contiguous neighbors. 1647 * No contiguous neighbors.
1648 * Insert the new freespace into the by-block tree. 1648 * Insert the new freespace into the by-block tree.
1649 */ 1649 */
1650 else { 1650 else {
1651 nbno = bno; 1651 nbno = bno;
1652 nlen = len; 1652 nlen = len;
1653 if ((error = xfs_btree_insert(bno_cur, &i))) 1653 if ((error = xfs_btree_insert(bno_cur, &i)))
1654 goto error0; 1654 goto error0;
1655 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1655 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1656 } 1656 }
1657 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 1657 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1658 bno_cur = NULL; 1658 bno_cur = NULL;
1659 /* 1659 /*
1660 * In all cases we need to insert the new freespace in the by-size tree. 1660 * In all cases we need to insert the new freespace in the by-size tree.
1661 */ 1661 */
1662 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i))) 1662 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1663 goto error0; 1663 goto error0;
1664 XFS_WANT_CORRUPTED_GOTO(i == 0, error0); 1664 XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
1665 if ((error = xfs_btree_insert(cnt_cur, &i))) 1665 if ((error = xfs_btree_insert(cnt_cur, &i)))
1666 goto error0; 1666 goto error0;
1667 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1667 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1668 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1668 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1669 cnt_cur = NULL; 1669 cnt_cur = NULL;
1670 1670
1671 /* 1671 /*
1672 * Update the freespace totals in the ag and superblock. 1672 * Update the freespace totals in the ag and superblock.
1673 */ 1673 */
1674 pag = xfs_perag_get(mp, agno); 1674 pag = xfs_perag_get(mp, agno);
1675 error = xfs_alloc_update_counters(tp, pag, agbp, len); 1675 error = xfs_alloc_update_counters(tp, pag, agbp, len);
1676 xfs_perag_put(pag); 1676 xfs_perag_put(pag);
1677 if (error) 1677 if (error)
1678 goto error0; 1678 goto error0;
1679 1679
1680 if (!isfl) 1680 if (!isfl)
1681 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len); 1681 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1682 XFS_STATS_INC(xs_freex); 1682 XFS_STATS_INC(xs_freex);
1683 XFS_STATS_ADD(xs_freeb, len); 1683 XFS_STATS_ADD(xs_freeb, len);
1684 1684
1685 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright); 1685 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1686 1686
1687 return 0; 1687 return 0;
1688 1688
1689 error0: 1689 error0:
1690 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1); 1690 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1691 if (bno_cur) 1691 if (bno_cur)
1692 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); 1692 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1693 if (cnt_cur) 1693 if (cnt_cur)
1694 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1694 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1695 return error; 1695 return error;
1696 } 1696 }
1697 1697
1698 /* 1698 /*
1699 * Visible (exported) allocation/free functions. 1699 * Visible (exported) allocation/free functions.
1700 * Some of these are used just by xfs_alloc_btree.c and this file. 1700 * Some of these are used just by xfs_alloc_btree.c and this file.
1701 */ 1701 */
1702 1702
1703 /* 1703 /*
1704 * Compute and fill in value of m_ag_maxlevels. 1704 * Compute and fill in value of m_ag_maxlevels.
1705 */ 1705 */
1706 void 1706 void
1707 xfs_alloc_compute_maxlevels( 1707 xfs_alloc_compute_maxlevels(
1708 xfs_mount_t *mp) /* file system mount structure */ 1708 xfs_mount_t *mp) /* file system mount structure */
1709 { 1709 {
1710 int level; 1710 int level;
1711 uint maxblocks; 1711 uint maxblocks;
1712 uint maxleafents; 1712 uint maxleafents;
1713 int minleafrecs; 1713 int minleafrecs;
1714 int minnoderecs; 1714 int minnoderecs;
1715 1715
1716 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2; 1716 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1717 minleafrecs = mp->m_alloc_mnr[0]; 1717 minleafrecs = mp->m_alloc_mnr[0];
1718 minnoderecs = mp->m_alloc_mnr[1]; 1718 minnoderecs = mp->m_alloc_mnr[1];
1719 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; 1719 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1720 for (level = 1; maxblocks > 1; level++) 1720 for (level = 1; maxblocks > 1; level++)
1721 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; 1721 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1722 mp->m_ag_maxlevels = level; 1722 mp->m_ag_maxlevels = level;
1723 } 1723 }
1724 1724
1725 /* 1725 /*
1726 * Find the length of the longest extent in an AG. 1726 * Find the length of the longest extent in an AG.
1727 */ 1727 */
1728 xfs_extlen_t 1728 xfs_extlen_t
1729 xfs_alloc_longest_free_extent( 1729 xfs_alloc_longest_free_extent(
1730 struct xfs_mount *mp, 1730 struct xfs_mount *mp,
1731 struct xfs_perag *pag) 1731 struct xfs_perag *pag)
1732 { 1732 {
1733 xfs_extlen_t need, delta = 0; 1733 xfs_extlen_t need, delta = 0;
1734 1734
1735 need = XFS_MIN_FREELIST_PAG(pag, mp); 1735 need = XFS_MIN_FREELIST_PAG(pag, mp);
1736 if (need > pag->pagf_flcount) 1736 if (need > pag->pagf_flcount)
1737 delta = need - pag->pagf_flcount; 1737 delta = need - pag->pagf_flcount;
1738 1738
1739 if (pag->pagf_longest > delta) 1739 if (pag->pagf_longest > delta)
1740 return pag->pagf_longest - delta; 1740 return pag->pagf_longest - delta;
1741 return pag->pagf_flcount > 0 || pag->pagf_longest > 0; 1741 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1742 } 1742 }
1743 1743
1744 /* 1744 /*
1745 * Decide whether to use this allocation group for this allocation. 1745 * Decide whether to use this allocation group for this allocation.
1746 * If so, fix up the btree freelist's size. 1746 * If so, fix up the btree freelist's size.
1747 */ 1747 */
1748 STATIC int /* error */ 1748 STATIC int /* error */
1749 xfs_alloc_fix_freelist( 1749 xfs_alloc_fix_freelist(
1750 xfs_alloc_arg_t *args, /* allocation argument structure */ 1750 xfs_alloc_arg_t *args, /* allocation argument structure */
1751 int flags) /* XFS_ALLOC_FLAG_... */ 1751 int flags) /* XFS_ALLOC_FLAG_... */
1752 { 1752 {
1753 xfs_buf_t *agbp; /* agf buffer pointer */ 1753 xfs_buf_t *agbp; /* agf buffer pointer */
1754 xfs_agf_t *agf; /* a.g. freespace structure pointer */ 1754 xfs_agf_t *agf; /* a.g. freespace structure pointer */
1755 xfs_buf_t *agflbp;/* agfl buffer pointer */ 1755 xfs_buf_t *agflbp;/* agfl buffer pointer */
1756 xfs_agblock_t bno; /* freelist block */ 1756 xfs_agblock_t bno; /* freelist block */
1757 xfs_extlen_t delta; /* new blocks needed in freelist */ 1757 xfs_extlen_t delta; /* new blocks needed in freelist */
1758 int error; /* error result code */ 1758 int error; /* error result code */
1759 xfs_extlen_t longest;/* longest extent in allocation group */ 1759 xfs_extlen_t longest;/* longest extent in allocation group */
1760 xfs_mount_t *mp; /* file system mount point structure */ 1760 xfs_mount_t *mp; /* file system mount point structure */
1761 xfs_extlen_t need; /* total blocks needed in freelist */ 1761 xfs_extlen_t need; /* total blocks needed in freelist */
1762 xfs_perag_t *pag; /* per-ag information structure */ 1762 xfs_perag_t *pag; /* per-ag information structure */
1763 xfs_alloc_arg_t targs; /* local allocation arguments */ 1763 xfs_alloc_arg_t targs; /* local allocation arguments */
1764 xfs_trans_t *tp; /* transaction pointer */ 1764 xfs_trans_t *tp; /* transaction pointer */
1765 1765
1766 mp = args->mp; 1766 mp = args->mp;
1767 1767
1768 pag = args->pag; 1768 pag = args->pag;
1769 tp = args->tp; 1769 tp = args->tp;
1770 if (!pag->pagf_init) { 1770 if (!pag->pagf_init) {
1771 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags, 1771 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1772 &agbp))) 1772 &agbp)))
1773 return error; 1773 return error;
1774 if (!pag->pagf_init) { 1774 if (!pag->pagf_init) {
1775 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK); 1775 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1776 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); 1776 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1777 args->agbp = NULL; 1777 args->agbp = NULL;
1778 return 0; 1778 return 0;
1779 } 1779 }
1780 } else 1780 } else
1781 agbp = NULL; 1781 agbp = NULL;
1782 1782
1783 /* 1783 /*
1784 * If this is a metadata preferred pag and we are user data 1784 * If this is a metadata preferred pag and we are user data
1785 * then try somewhere else if we are not being asked to 1785 * then try somewhere else if we are not being asked to
1786 * try harder at this point 1786 * try harder at this point
1787 */ 1787 */
1788 if (pag->pagf_metadata && args->userdata && 1788 if (pag->pagf_metadata && args->userdata &&
1789 (flags & XFS_ALLOC_FLAG_TRYLOCK)) { 1789 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1790 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); 1790 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1791 args->agbp = NULL; 1791 args->agbp = NULL;
1792 return 0; 1792 return 0;
1793 } 1793 }
1794 1794
1795 if (!(flags & XFS_ALLOC_FLAG_FREEING)) { 1795 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1796 /* 1796 /*
1797 * If it looks like there isn't a long enough extent, or enough 1797 * If it looks like there isn't a long enough extent, or enough
1798 * total blocks, reject it. 1798 * total blocks, reject it.
1799 */ 1799 */
1800 need = XFS_MIN_FREELIST_PAG(pag, mp); 1800 need = XFS_MIN_FREELIST_PAG(pag, mp);
1801 longest = xfs_alloc_longest_free_extent(mp, pag); 1801 longest = xfs_alloc_longest_free_extent(mp, pag);
1802 if ((args->minlen + args->alignment + args->minalignslop - 1) > 1802 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1803 longest || 1803 longest ||
1804 ((int)(pag->pagf_freeblks + pag->pagf_flcount - 1804 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1805 need - args->total) < (int)args->minleft)) { 1805 need - args->total) < (int)args->minleft)) {
1806 if (agbp) 1806 if (agbp)
1807 xfs_trans_brelse(tp, agbp); 1807 xfs_trans_brelse(tp, agbp);
1808 args->agbp = NULL; 1808 args->agbp = NULL;
1809 return 0; 1809 return 0;
1810 } 1810 }
1811 } 1811 }
1812 1812
1813 /* 1813 /*
1814 * Get the a.g. freespace buffer. 1814 * Get the a.g. freespace buffer.
1815 * Can fail if we're not blocking on locks, and it's held. 1815 * Can fail if we're not blocking on locks, and it's held.
1816 */ 1816 */
1817 if (agbp == NULL) { 1817 if (agbp == NULL) {
1818 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags, 1818 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1819 &agbp))) 1819 &agbp)))
1820 return error; 1820 return error;
1821 if (agbp == NULL) { 1821 if (agbp == NULL) {
1822 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK); 1822 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1823 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); 1823 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1824 args->agbp = NULL; 1824 args->agbp = NULL;
1825 return 0; 1825 return 0;
1826 } 1826 }
1827 } 1827 }
1828 /* 1828 /*
1829 * Figure out how many blocks we should have in the freelist. 1829 * Figure out how many blocks we should have in the freelist.
1830 */ 1830 */
1831 agf = XFS_BUF_TO_AGF(agbp); 1831 agf = XFS_BUF_TO_AGF(agbp);
1832 need = XFS_MIN_FREELIST(agf, mp); 1832 need = XFS_MIN_FREELIST(agf, mp);
1833 /* 1833 /*
1834 * If there isn't enough total or single-extent, reject it. 1834 * If there isn't enough total or single-extent, reject it.
1835 */ 1835 */
1836 if (!(flags & XFS_ALLOC_FLAG_FREEING)) { 1836 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1837 delta = need > be32_to_cpu(agf->agf_flcount) ? 1837 delta = need > be32_to_cpu(agf->agf_flcount) ?
1838 (need - be32_to_cpu(agf->agf_flcount)) : 0; 1838 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1839 longest = be32_to_cpu(agf->agf_longest); 1839 longest = be32_to_cpu(agf->agf_longest);
1840 longest = (longest > delta) ? (longest - delta) : 1840 longest = (longest > delta) ? (longest - delta) :
1841 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); 1841 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1842 if ((args->minlen + args->alignment + args->minalignslop - 1) > 1842 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1843 longest || 1843 longest ||
1844 ((int)(be32_to_cpu(agf->agf_freeblks) + 1844 ((int)(be32_to_cpu(agf->agf_freeblks) +
1845 be32_to_cpu(agf->agf_flcount) - need - args->total) < 1845 be32_to_cpu(agf->agf_flcount) - need - args->total) <
1846 (int)args->minleft)) { 1846 (int)args->minleft)) {
1847 xfs_trans_brelse(tp, agbp); 1847 xfs_trans_brelse(tp, agbp);
1848 args->agbp = NULL; 1848 args->agbp = NULL;
1849 return 0; 1849 return 0;
1850 } 1850 }
1851 } 1851 }
1852 /* 1852 /*
1853 * Make the freelist shorter if it's too long. 1853 * Make the freelist shorter if it's too long.
1854 */ 1854 */
1855 while (be32_to_cpu(agf->agf_flcount) > need) { 1855 while (be32_to_cpu(agf->agf_flcount) > need) {
1856 xfs_buf_t *bp; 1856 xfs_buf_t *bp;
1857 1857
1858 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0); 1858 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
1859 if (error) 1859 if (error)
1860 return error; 1860 return error;
1861 if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1))) 1861 if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
1862 return error; 1862 return error;
1863 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); 1863 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
1864 xfs_trans_binval(tp, bp); 1864 xfs_trans_binval(tp, bp);
1865 } 1865 }
1866 /* 1866 /*
1867 * Initialize the args structure. 1867 * Initialize the args structure.
1868 */ 1868 */
1869 memset(&targs, 0, sizeof(targs)); 1869 memset(&targs, 0, sizeof(targs));
1870 targs.tp = tp; 1870 targs.tp = tp;
1871 targs.mp = mp; 1871 targs.mp = mp;
1872 targs.agbp = agbp; 1872 targs.agbp = agbp;
1873 targs.agno = args->agno; 1873 targs.agno = args->agno;
1874 targs.mod = targs.minleft = targs.wasdel = targs.userdata = 1874 targs.mod = targs.minleft = targs.wasdel = targs.userdata =
1875 targs.minalignslop = 0; 1875 targs.minalignslop = 0;
1876 targs.alignment = targs.minlen = targs.prod = targs.isfl = 1; 1876 targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
1877 targs.type = XFS_ALLOCTYPE_THIS_AG; 1877 targs.type = XFS_ALLOCTYPE_THIS_AG;
1878 targs.pag = pag; 1878 targs.pag = pag;
1879 if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp))) 1879 if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
1880 return error; 1880 return error;
1881 /* 1881 /*
1882 * Make the freelist longer if it's too short. 1882 * Make the freelist longer if it's too short.
1883 */ 1883 */
1884 while (be32_to_cpu(agf->agf_flcount) < need) { 1884 while (be32_to_cpu(agf->agf_flcount) < need) {
1885 targs.agbno = 0; 1885 targs.agbno = 0;
1886 targs.maxlen = need - be32_to_cpu(agf->agf_flcount); 1886 targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
1887 /* 1887 /*
1888 * Allocate as many blocks as possible at once. 1888 * Allocate as many blocks as possible at once.
1889 */ 1889 */
1890 if ((error = xfs_alloc_ag_vextent(&targs))) { 1890 if ((error = xfs_alloc_ag_vextent(&targs))) {
1891 xfs_trans_brelse(tp, agflbp); 1891 xfs_trans_brelse(tp, agflbp);
1892 return error; 1892 return error;
1893 } 1893 }
1894 /* 1894 /*
1895 * Stop if we run out. Won't happen if callers are obeying 1895 * Stop if we run out. Won't happen if callers are obeying
1896 * the restrictions correctly. Can happen for free calls 1896 * the restrictions correctly. Can happen for free calls
1897 * on a completely full ag. 1897 * on a completely full ag.
1898 */ 1898 */
1899 if (targs.agbno == NULLAGBLOCK) { 1899 if (targs.agbno == NULLAGBLOCK) {
1900 if (flags & XFS_ALLOC_FLAG_FREEING) 1900 if (flags & XFS_ALLOC_FLAG_FREEING)
1901 break; 1901 break;
1902 xfs_trans_brelse(tp, agflbp); 1902 xfs_trans_brelse(tp, agflbp);
1903 args->agbp = NULL; 1903 args->agbp = NULL;
1904 return 0; 1904 return 0;
1905 } 1905 }
1906 /* 1906 /*
1907 * Put each allocated block on the list. 1907 * Put each allocated block on the list.
1908 */ 1908 */
1909 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) { 1909 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
1910 error = xfs_alloc_put_freelist(tp, agbp, 1910 error = xfs_alloc_put_freelist(tp, agbp,
1911 agflbp, bno, 0); 1911 agflbp, bno, 0);
1912 if (error) 1912 if (error)
1913 return error; 1913 return error;
1914 } 1914 }
1915 } 1915 }
1916 xfs_trans_brelse(tp, agflbp); 1916 xfs_trans_brelse(tp, agflbp);
1917 args->agbp = agbp; 1917 args->agbp = agbp;
1918 return 0; 1918 return 0;
1919 } 1919 }
1920 1920
1921 /* 1921 /*
1922 * Get a block from the freelist. 1922 * Get a block from the freelist.
1923 * Returns with the buffer for the block gotten. 1923 * Returns with the buffer for the block gotten.
1924 */ 1924 */
1925 int /* error */ 1925 int /* error */
1926 xfs_alloc_get_freelist( 1926 xfs_alloc_get_freelist(
1927 xfs_trans_t *tp, /* transaction pointer */ 1927 xfs_trans_t *tp, /* transaction pointer */
1928 xfs_buf_t *agbp, /* buffer containing the agf structure */ 1928 xfs_buf_t *agbp, /* buffer containing the agf structure */
1929 xfs_agblock_t *bnop, /* block address retrieved from freelist */ 1929 xfs_agblock_t *bnop, /* block address retrieved from freelist */
1930 int btreeblk) /* destination is a AGF btree */ 1930 int btreeblk) /* destination is a AGF btree */
1931 { 1931 {
1932 xfs_agf_t *agf; /* a.g. freespace structure */ 1932 xfs_agf_t *agf; /* a.g. freespace structure */
1933 xfs_agfl_t *agfl; /* a.g. freelist structure */ 1933 xfs_agfl_t *agfl; /* a.g. freelist structure */
1934 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */ 1934 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
1935 xfs_agblock_t bno; /* block number returned */ 1935 xfs_agblock_t bno; /* block number returned */
1936 int error; 1936 int error;
1937 int logflags; 1937 int logflags;
1938 xfs_mount_t *mp; /* mount structure */ 1938 xfs_mount_t *mp; /* mount structure */
1939 xfs_perag_t *pag; /* per allocation group data */ 1939 xfs_perag_t *pag; /* per allocation group data */
1940 1940
1941 agf = XFS_BUF_TO_AGF(agbp); 1941 agf = XFS_BUF_TO_AGF(agbp);
1942 /* 1942 /*
1943 * Freelist is empty, give up. 1943 * Freelist is empty, give up.
1944 */ 1944 */
1945 if (!agf->agf_flcount) { 1945 if (!agf->agf_flcount) {
1946 *bnop = NULLAGBLOCK; 1946 *bnop = NULLAGBLOCK;
1947 return 0; 1947 return 0;
1948 } 1948 }
1949 /* 1949 /*
1950 * Read the array of free blocks. 1950 * Read the array of free blocks.
1951 */ 1951 */
1952 mp = tp->t_mountp; 1952 mp = tp->t_mountp;
1953 if ((error = xfs_alloc_read_agfl(mp, tp, 1953 if ((error = xfs_alloc_read_agfl(mp, tp,
1954 be32_to_cpu(agf->agf_seqno), &agflbp))) 1954 be32_to_cpu(agf->agf_seqno), &agflbp)))
1955 return error; 1955 return error;
1956 agfl = XFS_BUF_TO_AGFL(agflbp); 1956 agfl = XFS_BUF_TO_AGFL(agflbp);
1957 /* 1957 /*
1958 * Get the block number and update the data structures. 1958 * Get the block number and update the data structures.
1959 */ 1959 */
1960 bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]); 1960 bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
1961 be32_add_cpu(&agf->agf_flfirst, 1); 1961 be32_add_cpu(&agf->agf_flfirst, 1);
1962 xfs_trans_brelse(tp, agflbp); 1962 xfs_trans_brelse(tp, agflbp);
1963 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) 1963 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1964 agf->agf_flfirst = 0; 1964 agf->agf_flfirst = 0;
1965 1965
1966 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); 1966 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
1967 be32_add_cpu(&agf->agf_flcount, -1); 1967 be32_add_cpu(&agf->agf_flcount, -1);
1968 xfs_trans_agflist_delta(tp, -1); 1968 xfs_trans_agflist_delta(tp, -1);
1969 pag->pagf_flcount--; 1969 pag->pagf_flcount--;
1970 xfs_perag_put(pag); 1970 xfs_perag_put(pag);
1971 1971
1972 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT; 1972 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
1973 if (btreeblk) { 1973 if (btreeblk) {
1974 be32_add_cpu(&agf->agf_btreeblks, 1); 1974 be32_add_cpu(&agf->agf_btreeblks, 1);
1975 pag->pagf_btreeblks++; 1975 pag->pagf_btreeblks++;
1976 logflags |= XFS_AGF_BTREEBLKS; 1976 logflags |= XFS_AGF_BTREEBLKS;
1977 } 1977 }
1978 1978
1979 xfs_alloc_log_agf(tp, agbp, logflags); 1979 xfs_alloc_log_agf(tp, agbp, logflags);
1980 *bnop = bno; 1980 *bnop = bno;
1981 1981
1982 return 0; 1982 return 0;
1983 } 1983 }
1984 1984
1985 /* 1985 /*
1986 * Log the given fields from the agf structure. 1986 * Log the given fields from the agf structure.
1987 */ 1987 */
1988 void 1988 void
1989 xfs_alloc_log_agf( 1989 xfs_alloc_log_agf(
1990 xfs_trans_t *tp, /* transaction pointer */ 1990 xfs_trans_t *tp, /* transaction pointer */
1991 xfs_buf_t *bp, /* buffer for a.g. freelist header */ 1991 xfs_buf_t *bp, /* buffer for a.g. freelist header */
1992 int fields) /* mask of fields to be logged (XFS_AGF_...) */ 1992 int fields) /* mask of fields to be logged (XFS_AGF_...) */
1993 { 1993 {
1994 int first; /* first byte offset */ 1994 int first; /* first byte offset */
1995 int last; /* last byte offset */ 1995 int last; /* last byte offset */
1996 static const short offsets[] = { 1996 static const short offsets[] = {
1997 offsetof(xfs_agf_t, agf_magicnum), 1997 offsetof(xfs_agf_t, agf_magicnum),
1998 offsetof(xfs_agf_t, agf_versionnum), 1998 offsetof(xfs_agf_t, agf_versionnum),
1999 offsetof(xfs_agf_t, agf_seqno), 1999 offsetof(xfs_agf_t, agf_seqno),
2000 offsetof(xfs_agf_t, agf_length), 2000 offsetof(xfs_agf_t, agf_length),
2001 offsetof(xfs_agf_t, agf_roots[0]), 2001 offsetof(xfs_agf_t, agf_roots[0]),
2002 offsetof(xfs_agf_t, agf_levels[0]), 2002 offsetof(xfs_agf_t, agf_levels[0]),
2003 offsetof(xfs_agf_t, agf_flfirst), 2003 offsetof(xfs_agf_t, agf_flfirst),
2004 offsetof(xfs_agf_t, agf_fllast), 2004 offsetof(xfs_agf_t, agf_fllast),
2005 offsetof(xfs_agf_t, agf_flcount), 2005 offsetof(xfs_agf_t, agf_flcount),
2006 offsetof(xfs_agf_t, agf_freeblks), 2006 offsetof(xfs_agf_t, agf_freeblks),
2007 offsetof(xfs_agf_t, agf_longest), 2007 offsetof(xfs_agf_t, agf_longest),
2008 offsetof(xfs_agf_t, agf_btreeblks), 2008 offsetof(xfs_agf_t, agf_btreeblks),
2009 sizeof(xfs_agf_t) 2009 sizeof(xfs_agf_t)
2010 }; 2010 };
2011 2011
2012 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_); 2012 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2013 2013
2014 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); 2014 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2015 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); 2015 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2016 } 2016 }
2017 2017
2018 /* 2018 /*
2019 * Interface for inode allocation to force the pag data to be initialized. 2019 * Interface for inode allocation to force the pag data to be initialized.
2020 */ 2020 */
2021 int /* error */ 2021 int /* error */
2022 xfs_alloc_pagf_init( 2022 xfs_alloc_pagf_init(
2023 xfs_mount_t *mp, /* file system mount structure */ 2023 xfs_mount_t *mp, /* file system mount structure */
2024 xfs_trans_t *tp, /* transaction pointer */ 2024 xfs_trans_t *tp, /* transaction pointer */
2025 xfs_agnumber_t agno, /* allocation group number */ 2025 xfs_agnumber_t agno, /* allocation group number */
2026 int flags) /* XFS_ALLOC_FLAGS_... */ 2026 int flags) /* XFS_ALLOC_FLAGS_... */
2027 { 2027 {
2028 xfs_buf_t *bp; 2028 xfs_buf_t *bp;
2029 int error; 2029 int error;
2030 2030
2031 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp))) 2031 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2032 return error; 2032 return error;
2033 if (bp) 2033 if (bp)
2034 xfs_trans_brelse(tp, bp); 2034 xfs_trans_brelse(tp, bp);
2035 return 0; 2035 return 0;
2036 } 2036 }
2037 2037
2038 /* 2038 /*
2039 * Put the block on the freelist for the allocation group. 2039 * Put the block on the freelist for the allocation group.
2040 */ 2040 */
2041 int /* error */ 2041 int /* error */
2042 xfs_alloc_put_freelist( 2042 xfs_alloc_put_freelist(
2043 xfs_trans_t *tp, /* transaction pointer */ 2043 xfs_trans_t *tp, /* transaction pointer */
2044 xfs_buf_t *agbp, /* buffer for a.g. freelist header */ 2044 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2045 xfs_buf_t *agflbp,/* buffer for a.g. free block array */ 2045 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
2046 xfs_agblock_t bno, /* block being freed */ 2046 xfs_agblock_t bno, /* block being freed */
2047 int btreeblk) /* block came from a AGF btree */ 2047 int btreeblk) /* block came from a AGF btree */
2048 { 2048 {
2049 xfs_agf_t *agf; /* a.g. freespace structure */ 2049 xfs_agf_t *agf; /* a.g. freespace structure */
2050 xfs_agfl_t *agfl; /* a.g. free block array */ 2050 xfs_agfl_t *agfl; /* a.g. free block array */
2051 __be32 *blockp;/* pointer to array entry */ 2051 __be32 *blockp;/* pointer to array entry */
2052 int error; 2052 int error;
2053 int logflags; 2053 int logflags;
2054 xfs_mount_t *mp; /* mount structure */ 2054 xfs_mount_t *mp; /* mount structure */
2055 xfs_perag_t *pag; /* per allocation group data */ 2055 xfs_perag_t *pag; /* per allocation group data */
2056 2056
2057 agf = XFS_BUF_TO_AGF(agbp); 2057 agf = XFS_BUF_TO_AGF(agbp);
2058 mp = tp->t_mountp; 2058 mp = tp->t_mountp;
2059 2059
2060 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp, 2060 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2061 be32_to_cpu(agf->agf_seqno), &agflbp))) 2061 be32_to_cpu(agf->agf_seqno), &agflbp)))
2062 return error; 2062 return error;
2063 agfl = XFS_BUF_TO_AGFL(agflbp); 2063 agfl = XFS_BUF_TO_AGFL(agflbp);
2064 be32_add_cpu(&agf->agf_fllast, 1); 2064 be32_add_cpu(&agf->agf_fllast, 1);
2065 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) 2065 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
2066 agf->agf_fllast = 0; 2066 agf->agf_fllast = 0;
2067 2067
2068 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); 2068 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2069 be32_add_cpu(&agf->agf_flcount, 1); 2069 be32_add_cpu(&agf->agf_flcount, 1);
2070 xfs_trans_agflist_delta(tp, 1); 2070 xfs_trans_agflist_delta(tp, 1);
2071 pag->pagf_flcount++; 2071 pag->pagf_flcount++;
2072 2072
2073 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT; 2073 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2074 if (btreeblk) { 2074 if (btreeblk) {
2075 be32_add_cpu(&agf->agf_btreeblks, -1); 2075 be32_add_cpu(&agf->agf_btreeblks, -1);
2076 pag->pagf_btreeblks--; 2076 pag->pagf_btreeblks--;
2077 logflags |= XFS_AGF_BTREEBLKS; 2077 logflags |= XFS_AGF_BTREEBLKS;
2078 } 2078 }
2079 xfs_perag_put(pag); 2079 xfs_perag_put(pag);
2080 2080
2081 xfs_alloc_log_agf(tp, agbp, logflags); 2081 xfs_alloc_log_agf(tp, agbp, logflags);
2082 2082
2083 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); 2083 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2084 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)]; 2084 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
2085 *blockp = cpu_to_be32(bno); 2085 *blockp = cpu_to_be32(bno);
2086 xfs_alloc_log_agf(tp, agbp, logflags); 2086 xfs_alloc_log_agf(tp, agbp, logflags);
2087 xfs_trans_log_buf(tp, agflbp, 2087 xfs_trans_log_buf(tp, agflbp,
2088 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl), 2088 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
2089 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl + 2089 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
2090 sizeof(xfs_agblock_t) - 1)); 2090 sizeof(xfs_agblock_t) - 1));
2091 return 0; 2091 return 0;
2092 } 2092 }
2093 2093
/*
 * Read in the allocation group header (free/alloc section).
 *
 * On success, *bpp holds a referenced, locked buffer containing the AGF.
 * If the read uses a trylock flag and the buffer cannot be locked, returns
 * 0 with *bpp set to NULL.  Returns EFSCORRUPTED (via XFS_ERROR) if the
 * on-disk AGF fails basic sanity checks.
 */
int					/* error */
xfs_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_BUF_ */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf	*agf;		/* ag freelist header */
	int		agf_ok;		/* set if agf is consistent */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), flags, bpp);
	if (error)
		return error;
	/* A NULL buffer with no error means a trylock read failed to lock. */
	if (!*bpp)
		return 0;

	ASSERT(!(*bpp)->b_error);
	agf = XFS_BUF_TO_AGF(*bpp);

	/*
	 * Validate the magic number of the agf block, plus the basic
	 * bounds of the freelist indices and free block counts against
	 * the AG geometry.
	 */
	agf_ok =
		agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_seqno) == agno;
	/* agf_btreeblks is only maintained on lazy-sb-count filesystems. */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
		agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
				be32_to_cpu(agf->agf_length);
	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
			XFS_RANDOM_ALLOC_READ_AGF))) {
		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
				     XFS_ERRLEVEL_LOW, mp, agf);
		/* Release the buffer before reporting corruption. */
		xfs_trans_brelse(tp, *bpp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
	return 0;
}
2146 2146
/*
 * Read in the allocation group header (free/alloc section).
 *
 * Wrapper around xfs_read_agf() that additionally initialises the
 * in-memory per-AG freespace information from the on-disk AGF the
 * first time the AG is seen, and cross-checks it on DEBUG kernels
 * thereafter.
 */
int					/* error */
xfs_alloc_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_ALLOC_FLAG_... */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf		*agf;	/* ag freelist header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	ASSERT(agno != NULLAGNUMBER);

	/* Translate the allocation-layer TRYLOCK flag to the buffer layer. */
	error = xfs_read_agf(mp, tp, agno,
			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
			bpp);
	if (error)
		return error;
	/* Trylock failure: no buffer, no error - caller decides what next. */
	if (!*bpp)
		return 0;
	ASSERT(!(*bpp)->b_error);

	agf = XFS_BUF_TO_AGF(*bpp);
	pag = xfs_perag_get(mp, agno);
	if (!pag->pagf_init) {
		/* First read of this AGF: seed the in-memory per-AG state. */
		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
		pag->pagf_init = 1;
	}
#ifdef DEBUG
	/*
	 * On subsequent reads, verify the cached per-AG state still matches
	 * the on-disk AGF (skipped after a forced shutdown, when the two
	 * can legitimately diverge).
	 */
	else if (!XFS_FORCED_SHUTDOWN(mp)) {
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
	}
#endif
	xfs_perag_put(pag);
	return 0;
}
2204 2204
/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 *
 * Returns 0 on success with args->fsbno/args->len describing the extent,
 * or args->fsbno == NULLFSBLOCK if nothing could be allocated.  A non-zero
 * return indicates a hard error from the lower layers.
 */
int				/* error */
__xfs_alloc_vextent(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_agblock_t	agsize;	/* allocation group size */
	int		error;
	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
	xfs_extlen_t	minleft;/* minimum left value, temp copy */
	xfs_mount_t	*mp;	/* mount structure pointer */
	xfs_agnumber_t	sagno;	/* starting allocation group number */
	xfs_alloctype_t	type;	/* input allocation type */
	int		bump_rotor = 0;
	int		no_min = 0;
	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	/*
	 * The same conditions as the ASSERTs above, re-checked at runtime so
	 * production kernels fail the allocation gracefully rather than
	 * operating on bad arguments.
	 */
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		trace_xfs_alloc_vextent_badargs(args);
		return 0;
	}
	/* Save minleft: it is temporarily zeroed for single-AG attempts. */
	minleft = args->minleft;

	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		args->pag = xfs_perag_get(mp, args->agno);
		args->minleft = 0;
		error = xfs_alloc_fix_freelist(args, 0);
		args->minleft = minleft;
		if (error) {
			trace_xfs_alloc_vextent_nofix(args);
			goto error0;
		}
		/* No AGF buffer means the freelist fixup declined the AG. */
		if (!args->agbp) {
			trace_xfs_alloc_vextent_noagbp(args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anywhere-in-ag after
		 * the first a.g. fails.
		 */
		if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
			/*
			 * inode32: steer initial user data at the AG rotor
			 * instead of near the caller's hint.
			 */
			args->fsbno = XFS_AGB_TO_FSB(mp,
					((mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount), 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		/* FALLTHROUGH */
	case XFS_ALLOCTYPE_ANY_AG:
	case XFS_ALLOCTYPE_START_AG:
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_ANY_AG) {
			/*
			 * Start with the last place we left off.
			 */
			args->agno = sagno = (mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount;
			args->type = XFS_ALLOCTYPE_THIS_AG;
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			if (type == XFS_ALLOCTYPE_START_AG)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		for (;;) {
			args->pag = xfs_perag_get(mp, args->agno);
			if (no_min) args->minleft = 0;
			error = xfs_alloc_fix_freelist(args, flags);
			args->minleft = minleft;
			if (error) {
				trace_xfs_alloc_vextent_nofix(args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}

			trace_xfs_alloc_vextent_loopfailed(args);

			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * For the first allocation, we can try any AG to get
			 * space.  However, if we already have allocated a
			 * block, we don't want to try AGs whose number is below
			 * sagno. Otherwise, we may end up with out-of-order
			 * locking of AGF, which might cause deadlock.
			 */
			if (++(args->agno) == mp->m_sb.sb_agcount) {
				if (args->firstblock != NULLFSBLOCK)
					args->agno = sagno;
				else
					args->agno = 0;
			}
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (no_min == 1) {
					/* Third pass exhausted: give up. */
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}
				if (flags == 0) {
					/* Final pass: ignore minleft too. */
					no_min = 1;
				} else {
					flags = 0;
					if (type == XFS_ALLOCTYPE_START_BNO) {
						/* Restore the near-bno hint. */
						args->agbno = XFS_FSB_TO_AGBNO(mp,
							args->fsbno);
						args->type = XFS_ALLOCTYPE_NEAR_BNO;
					}
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
			/* Advance the rotor so the next allocation spreads out. */
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif
	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}
2420 2420
2421 static void 2421 static void
2422 xfs_alloc_vextent_worker( 2422 xfs_alloc_vextent_worker(
2423 struct work_struct *work) 2423 struct work_struct *work)
2424 { 2424 {
2425 struct xfs_alloc_arg *args = container_of(work, 2425 struct xfs_alloc_arg *args = container_of(work,
2426 struct xfs_alloc_arg, work); 2426 struct xfs_alloc_arg, work);
2427 unsigned long pflags; 2427 unsigned long pflags;
2428 2428
2429 /* we are in a transaction context here */ 2429 /* we are in a transaction context here */
2430 current_set_flags_nested(&pflags, PF_FSTRANS); 2430 current_set_flags_nested(&pflags, PF_FSTRANS);
2431 2431
2432 args->result = __xfs_alloc_vextent(args); 2432 args->result = __xfs_alloc_vextent(args);
2433 complete(args->done); 2433 complete(args->done);
2434 2434
2435 current_restore_flags_nested(&pflags, PF_FSTRANS); 2435 current_restore_flags_nested(&pflags, PF_FSTRANS);
2436 } 2436 }
2437 2437
/*
 * Data allocation requests often come in with little stack to work on. Push
 * them off to a worker thread so there is lots of stack to use. Metadata
 * requests, OTOH, are generally from low stack usage paths, so avoid the
 * context switch overhead here.
 *
 * Only callers that set args->stack_switch (the known stack-constrained
 * writeback paths) pay the worker handoff; everyone else allocates inline.
 */
int
xfs_alloc_vextent(
	struct xfs_alloc_arg	*args)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (!args->stack_switch)
		return __xfs_alloc_vextent(args);


	/* Hand the allocation to a worker and sleep until it completes. */
	args->done = &done;
	INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
	queue_work(xfs_alloc_wq, &args->work);
	wait_for_completion(&done);
	return args->result;
}
2460 2460
/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 *
 * Returns 0 on success, EFSCORRUPTED if the extent lies outside the
 * filesystem or its AG, or an error from the lower layers.
 */
int				/* error */
xfs_free_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_fsblock_t	bno,	/* starting block number of extent */
	xfs_extlen_t	len)	/* length of extent */
{
	xfs_alloc_arg_t	args;
	int		error;

	ASSERT(len != 0);
	memset(&args, 0, sizeof(xfs_alloc_arg_t));
	args.tp = tp;
	args.mp = tp->t_mountp;

	/*
	 * validate that the block number is legal - this enables us to detect
	 * and handle a silent filesystem corruption rather than crashing.
	 */
	args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
	if (args.agno >= args.mp->m_sb.sb_agcount)
		return EFSCORRUPTED;

	args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
	if (args.agbno >= args.mp->m_sb.sb_agblocks)
		return EFSCORRUPTED;

	args.pag = xfs_perag_get(args.mp, args.agno);
	ASSERT(args.pag);

	/* Lock the AGF and make sure the freelist is in shape for a free. */
	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
	if (error)
		goto error0;

	/* validate the extent size is legal now we have the agf locked */
	if (args.agbno + len >
			be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
		error = EFSCORRUPTED;
		goto error0;
	}

	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
	if (!error)
		/* Keep the freed range busy until the transaction commits. */
		xfs_extent_busy_insert(tp, args.agno, args.agbno, len, 0);
error0:
	xfs_perag_put(args.pag);
	return error;
}
2513 2513
1 /* 1 /*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it would be useful, 9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18 #ifndef __XFS_ALLOC_H__ 18 #ifndef __XFS_ALLOC_H__
19 #define __XFS_ALLOC_H__ 19 #define __XFS_ALLOC_H__
20 20
21 struct xfs_buf; 21 struct xfs_buf;
22 struct xfs_btree_cur; 22 struct xfs_btree_cur;
23 struct xfs_mount; 23 struct xfs_mount;
24 struct xfs_perag; 24 struct xfs_perag;
25 struct xfs_trans; 25 struct xfs_trans;
26 26
27 extern struct workqueue_struct *xfs_alloc_wq; 27 extern struct workqueue_struct *xfs_alloc_wq;
28 28
29 /* 29 /*
30 * Freespace allocation types. Argument to xfs_alloc_[v]extent. 30 * Freespace allocation types. Argument to xfs_alloc_[v]extent.
31 */ 31 */
32 #define XFS_ALLOCTYPE_ANY_AG 0x01 /* allocate anywhere, use rotor */ 32 #define XFS_ALLOCTYPE_ANY_AG 0x01 /* allocate anywhere, use rotor */
33 #define XFS_ALLOCTYPE_FIRST_AG 0x02 /* ... start at ag 0 */ 33 #define XFS_ALLOCTYPE_FIRST_AG 0x02 /* ... start at ag 0 */
34 #define XFS_ALLOCTYPE_START_AG 0x04 /* anywhere, start in this a.g. */ 34 #define XFS_ALLOCTYPE_START_AG 0x04 /* anywhere, start in this a.g. */
35 #define XFS_ALLOCTYPE_THIS_AG 0x08 /* anywhere in this a.g. */ 35 #define XFS_ALLOCTYPE_THIS_AG 0x08 /* anywhere in this a.g. */
36 #define XFS_ALLOCTYPE_START_BNO 0x10 /* near this block else anywhere */ 36 #define XFS_ALLOCTYPE_START_BNO 0x10 /* near this block else anywhere */
37 #define XFS_ALLOCTYPE_NEAR_BNO 0x20 /* in this a.g. and near this block */ 37 #define XFS_ALLOCTYPE_NEAR_BNO 0x20 /* in this a.g. and near this block */
38 #define XFS_ALLOCTYPE_THIS_BNO 0x40 /* at exactly this block */ 38 #define XFS_ALLOCTYPE_THIS_BNO 0x40 /* at exactly this block */
39 39
40 /* this should become an enum again when the tracing code is fixed */ 40 /* this should become an enum again when the tracing code is fixed */
41 typedef unsigned int xfs_alloctype_t; 41 typedef unsigned int xfs_alloctype_t;
42 42
43 #define XFS_ALLOC_TYPES \ 43 #define XFS_ALLOC_TYPES \
44 { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \ 44 { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \
45 { XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \ 45 { XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \
46 { XFS_ALLOCTYPE_START_AG, "START_AG" }, \ 46 { XFS_ALLOCTYPE_START_AG, "START_AG" }, \
47 { XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \ 47 { XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \
48 { XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \ 48 { XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \
49 { XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \ 49 { XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \
50 { XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" } 50 { XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" }
51 51
52 /* 52 /*
53 * Flags for xfs_alloc_fix_freelist. 53 * Flags for xfs_alloc_fix_freelist.
54 */ 54 */
55 #define XFS_ALLOC_FLAG_TRYLOCK 0x00000001 /* use trylock for buffer locking */ 55 #define XFS_ALLOC_FLAG_TRYLOCK 0x00000001 /* use trylock for buffer locking */
56 #define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/ 56 #define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
57 57
58 /* 58 /*
59 * In order to avoid ENOSPC-related deadlock caused by 59 * In order to avoid ENOSPC-related deadlock caused by
60 * out-of-order locking of AGF buffer (PV 947395), we place 60 * out-of-order locking of AGF buffer (PV 947395), we place
61 * constraints on the relationship among actual allocations for 61 * constraints on the relationship among actual allocations for
62 * data blocks, freelist blocks, and potential file data bmap 62 * data blocks, freelist blocks, and potential file data bmap
63 * btree blocks. However, these restrictions may result in no 63 * btree blocks. However, these restrictions may result in no
64 * actual space allocated for a delayed extent, for example, a data 64 * actual space allocated for a delayed extent, for example, a data
65 * block in a certain AG is allocated but there is no additional 65 * block in a certain AG is allocated but there is no additional
66 * block for the additional bmap btree block due to a split of the 66 * block for the additional bmap btree block due to a split of the
67 * bmap btree of the file. The result of this may lead to an 67 * bmap btree of the file. The result of this may lead to an
68 * infinite loop in xfssyncd when the file gets flushed to disk and 68 * infinite loop in xfssyncd when the file gets flushed to disk and
69 * all delayed extents need to be actually allocated. To get around 69 * all delayed extents need to be actually allocated. To get around
70 * this, we explicitly set aside a few blocks which will not be 70 * this, we explicitly set aside a few blocks which will not be
71 * reserved in delayed allocation. Considering the minimum number of 71 * reserved in delayed allocation. Considering the minimum number of
72 * needed freelist blocks is 4 fsbs _per AG_, a potential split of file's bmap 72 * needed freelist blocks is 4 fsbs _per AG_, a potential split of file's bmap
73 * btree requires 1 fsb, so we set the number of set-aside blocks 73 * btree requires 1 fsb, so we set the number of set-aside blocks
74 * to 4 + 4*agcount. 74 * to 4 + 4*agcount.
75 */ 75 */
76 #define XFS_ALLOC_SET_ASIDE(mp) (4 + ((mp)->m_sb.sb_agcount * 4)) 76 #define XFS_ALLOC_SET_ASIDE(mp) (4 + ((mp)->m_sb.sb_agcount * 4))
77 77
78 /* 78 /*
79 * When deciding how much space to allocate out of an AG, we limit the 79 * When deciding how much space to allocate out of an AG, we limit the
80 * allocation maximum size to the size the AG. However, we cannot use all the 80 * allocation maximum size to the size the AG. However, we cannot use all the
81 * blocks in the AG - some are permanently used by metadata. These 81 * blocks in the AG - some are permanently used by metadata. These
82 * blocks are generally: 82 * blocks are generally:
83 * - the AG superblock, AGF, AGI and AGFL 83 * - the AG superblock, AGF, AGI and AGFL
84 * - the AGF (bno and cnt) and AGI btree root blocks 84 * - the AGF (bno and cnt) and AGI btree root blocks
85 * - 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits 85 * - 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits
86 * 86 *
87 * The AG headers are sector sized, so the amount of space they take up is 87 * The AG headers are sector sized, so the amount of space they take up is
88 * dependent on filesystem geometry. The others are all single blocks. 88 * dependent on filesystem geometry. The others are all single blocks.
89 */ 89 */
90 #define XFS_ALLOC_AG_MAX_USABLE(mp) \ 90 #define XFS_ALLOC_AG_MAX_USABLE(mp) \
91 ((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7) 91 ((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7)
92 92
93 93
94 /* 94 /*
95 * Argument structure for xfs_alloc routines. 95 * Argument structure for xfs_alloc routines.
96 * This is turned into a structure to avoid having 20 arguments passed 96 * This is turned into a structure to avoid having 20 arguments passed
97 * down several levels of the stack. 97 * down several levels of the stack.
98 */ 98 */
99 typedef struct xfs_alloc_arg { 99 typedef struct xfs_alloc_arg {
100 struct xfs_trans *tp; /* transaction pointer */ 100 struct xfs_trans *tp; /* transaction pointer */
101 struct xfs_mount *mp; /* file system mount point */ 101 struct xfs_mount *mp; /* file system mount point */
102 struct xfs_buf *agbp; /* buffer for a.g. freelist header */ 102 struct xfs_buf *agbp; /* buffer for a.g. freelist header */
103 struct xfs_perag *pag; /* per-ag struct for this agno */ 103 struct xfs_perag *pag; /* per-ag struct for this agno */
104 xfs_fsblock_t fsbno; /* file system block number */ 104 xfs_fsblock_t fsbno; /* file system block number */
105 xfs_agnumber_t agno; /* allocation group number */ 105 xfs_agnumber_t agno; /* allocation group number */
106 xfs_agblock_t agbno; /* allocation group-relative block # */ 106 xfs_agblock_t agbno; /* allocation group-relative block # */
107 xfs_extlen_t minlen; /* minimum size of extent */ 107 xfs_extlen_t minlen; /* minimum size of extent */
108 xfs_extlen_t maxlen; /* maximum size of extent */ 108 xfs_extlen_t maxlen; /* maximum size of extent */
109 xfs_extlen_t mod; /* mod value for extent size */ 109 xfs_extlen_t mod; /* mod value for extent size */
110 xfs_extlen_t prod; /* prod value for extent size */ 110 xfs_extlen_t prod; /* prod value for extent size */
111 xfs_extlen_t minleft; /* min blocks must be left after us */ 111 xfs_extlen_t minleft; /* min blocks must be left after us */
112 xfs_extlen_t total; /* total blocks needed in xaction */ 112 xfs_extlen_t total; /* total blocks needed in xaction */
113 xfs_extlen_t alignment; /* align answer to multiple of this */ 113 xfs_extlen_t alignment; /* align answer to multiple of this */
114 xfs_extlen_t minalignslop; /* slop for minlen+alignment calcs */ 114 xfs_extlen_t minalignslop; /* slop for minlen+alignment calcs */
115 xfs_extlen_t len; /* output: actual size of extent */ 115 xfs_extlen_t len; /* output: actual size of extent */
116 xfs_alloctype_t type; /* allocation type XFS_ALLOCTYPE_... */ 116 xfs_alloctype_t type; /* allocation type XFS_ALLOCTYPE_... */
117 xfs_alloctype_t otype; /* original allocation type */ 117 xfs_alloctype_t otype; /* original allocation type */
118 char wasdel; /* set if allocation was prev delayed */ 118 char wasdel; /* set if allocation was prev delayed */
119 char wasfromfl; /* set if allocation is from freelist */ 119 char wasfromfl; /* set if allocation is from freelist */
120 char isfl; /* set if is freelist blocks - !acctg */ 120 char isfl; /* set if is freelist blocks - !acctg */
121 char userdata; /* set if this is user data */ 121 char userdata; /* set if this is user data */
122 xfs_fsblock_t firstblock; /* io first block allocated */ 122 xfs_fsblock_t firstblock; /* io first block allocated */
123 struct completion *done; 123 struct completion *done;
124 struct work_struct work; 124 struct work_struct work;
125 int result; 125 int result;
126 char stack_switch;
126 } xfs_alloc_arg_t; 127 } xfs_alloc_arg_t;
127 128
128 /* 129 /*
129 * Defines for userdata 130 * Defines for userdata
130 */ 131 */
131 #define XFS_ALLOC_USERDATA 1 /* allocation is for user data*/ 132 #define XFS_ALLOC_USERDATA 1 /* allocation is for user data*/
132 #define XFS_ALLOC_INITIAL_USER_DATA 2 /* special case start of file */ 133 #define XFS_ALLOC_INITIAL_USER_DATA 2 /* special case start of file */
133 134
134 /* 135 /*
135 * Find the length of the longest extent in an AG. 136 * Find the length of the longest extent in an AG.
136 */ 137 */
137 xfs_extlen_t 138 xfs_extlen_t
138 xfs_alloc_longest_free_extent(struct xfs_mount *mp, 139 xfs_alloc_longest_free_extent(struct xfs_mount *mp,
139 struct xfs_perag *pag); 140 struct xfs_perag *pag);
140 141
141 /* 142 /*
142 * Compute and fill in value of m_ag_maxlevels. 143 * Compute and fill in value of m_ag_maxlevels.
143 */ 144 */
144 void 145 void
145 xfs_alloc_compute_maxlevels( 146 xfs_alloc_compute_maxlevels(
146 struct xfs_mount *mp); /* file system mount structure */ 147 struct xfs_mount *mp); /* file system mount structure */
147 148
148 /* 149 /*
149 * Get a block from the freelist. 150 * Get a block from the freelist.
150 * Returns with the buffer for the block gotten. 151 * Returns with the buffer for the block gotten.
151 */ 152 */
152 int /* error */ 153 int /* error */
153 xfs_alloc_get_freelist( 154 xfs_alloc_get_freelist(
154 struct xfs_trans *tp, /* transaction pointer */ 155 struct xfs_trans *tp, /* transaction pointer */
155 struct xfs_buf *agbp, /* buffer containing the agf structure */ 156 struct xfs_buf *agbp, /* buffer containing the agf structure */
156 xfs_agblock_t *bnop, /* block address retrieved from freelist */ 157 xfs_agblock_t *bnop, /* block address retrieved from freelist */
157 int btreeblk); /* destination is a AGF btree */ 158 int btreeblk); /* destination is a AGF btree */
158 159
159 /* 160 /*
160 * Log the given fields from the agf structure. 161 * Log the given fields from the agf structure.
161 */ 162 */
162 void 163 void
163 xfs_alloc_log_agf( 164 xfs_alloc_log_agf(
164 struct xfs_trans *tp, /* transaction pointer */ 165 struct xfs_trans *tp, /* transaction pointer */
165 struct xfs_buf *bp, /* buffer for a.g. freelist header */ 166 struct xfs_buf *bp, /* buffer for a.g. freelist header */
166 int fields);/* mask of fields to be logged (XFS_AGF_...) */ 167 int fields);/* mask of fields to be logged (XFS_AGF_...) */
167 168
168 /* 169 /*
169 * Interface for inode allocation to force the pag data to be initialized. 170 * Interface for inode allocation to force the pag data to be initialized.
170 */ 171 */
171 int /* error */ 172 int /* error */
172 xfs_alloc_pagf_init( 173 xfs_alloc_pagf_init(
173 struct xfs_mount *mp, /* file system mount structure */ 174 struct xfs_mount *mp, /* file system mount structure */
174 struct xfs_trans *tp, /* transaction pointer */ 175 struct xfs_trans *tp, /* transaction pointer */
175 xfs_agnumber_t agno, /* allocation group number */ 176 xfs_agnumber_t agno, /* allocation group number */
176 int flags); /* XFS_ALLOC_FLAGS_... */ 177 int flags); /* XFS_ALLOC_FLAGS_... */
177 178
178 /* 179 /*
179 * Put the block on the freelist for the allocation group. 180 * Put the block on the freelist for the allocation group.
180 */ 181 */
181 int /* error */ 182 int /* error */
182 xfs_alloc_put_freelist( 183 xfs_alloc_put_freelist(
183 struct xfs_trans *tp, /* transaction pointer */ 184 struct xfs_trans *tp, /* transaction pointer */
184 struct xfs_buf *agbp, /* buffer for a.g. freelist header */ 185 struct xfs_buf *agbp, /* buffer for a.g. freelist header */
185 struct xfs_buf *agflbp,/* buffer for a.g. free block array */ 186 struct xfs_buf *agflbp,/* buffer for a.g. free block array */
186 xfs_agblock_t bno, /* block being freed */ 187 xfs_agblock_t bno, /* block being freed */
187 int btreeblk); /* owner was a AGF btree */ 188 int btreeblk); /* owner was a AGF btree */
188 189
189 /* 190 /*
190 * Read in the allocation group header (free/alloc section). 191 * Read in the allocation group header (free/alloc section).
191 */ 192 */
192 int /* error */ 193 int /* error */
193 xfs_alloc_read_agf( 194 xfs_alloc_read_agf(
194 struct xfs_mount *mp, /* mount point structure */ 195 struct xfs_mount *mp, /* mount point structure */
195 struct xfs_trans *tp, /* transaction pointer */ 196 struct xfs_trans *tp, /* transaction pointer */
196 xfs_agnumber_t agno, /* allocation group number */ 197 xfs_agnumber_t agno, /* allocation group number */
197 int flags, /* XFS_ALLOC_FLAG_... */ 198 int flags, /* XFS_ALLOC_FLAG_... */
198 struct xfs_buf **bpp); /* buffer for the ag freelist header */ 199 struct xfs_buf **bpp); /* buffer for the ag freelist header */
199 200
200 /* 201 /*
201 * Allocate an extent (variable-size). 202 * Allocate an extent (variable-size).
202 */ 203 */
203 int /* error */ 204 int /* error */
204 xfs_alloc_vextent( 205 xfs_alloc_vextent(
205 xfs_alloc_arg_t *args); /* allocation argument structure */ 206 xfs_alloc_arg_t *args); /* allocation argument structure */
206 207
207 /* 208 /*
208 * Free an extent. 209 * Free an extent.
209 */ 210 */
210 int /* error */ 211 int /* error */
211 xfs_free_extent( 212 xfs_free_extent(
212 struct xfs_trans *tp, /* transaction pointer */ 213 struct xfs_trans *tp, /* transaction pointer */
213 xfs_fsblock_t bno, /* starting block number of extent */ 214 xfs_fsblock_t bno, /* starting block number of extent */
214 xfs_extlen_t len); /* length of extent */ 215 xfs_extlen_t len); /* length of extent */
215 216
216 int /* error */ 217 int /* error */
217 xfs_alloc_lookup_le( 218 xfs_alloc_lookup_le(
218 struct xfs_btree_cur *cur, /* btree cursor */ 219 struct xfs_btree_cur *cur, /* btree cursor */
219 xfs_agblock_t bno, /* starting block of extent */ 220 xfs_agblock_t bno, /* starting block of extent */
220 xfs_extlen_t len, /* length of extent */ 221 xfs_extlen_t len, /* length of extent */
221 int *stat); /* success/failure */ 222 int *stat); /* success/failure */
222 223
223 int /* error */ 224 int /* error */
224 xfs_alloc_lookup_ge( 225 xfs_alloc_lookup_ge(
225 struct xfs_btree_cur *cur, /* btree cursor */ 226 struct xfs_btree_cur *cur, /* btree cursor */
226 xfs_agblock_t bno, /* starting block of extent */ 227 xfs_agblock_t bno, /* starting block of extent */
227 xfs_extlen_t len, /* length of extent */ 228 xfs_extlen_t len, /* length of extent */
228 int *stat); /* success/failure */ 229 int *stat); /* success/failure */
229 230
230 int /* error */ 231 int /* error */
231 xfs_alloc_get_rec( 232 xfs_alloc_get_rec(
232 struct xfs_btree_cur *cur, /* btree cursor */ 233 struct xfs_btree_cur *cur, /* btree cursor */
233 xfs_agblock_t *bno, /* output: starting block of extent */ 234 xfs_agblock_t *bno, /* output: starting block of extent */
234 xfs_extlen_t *len, /* output: length of extent */ 235 xfs_extlen_t *len, /* output: length of extent */
235 int *stat); /* output: success/failure */ 236 int *stat); /* output: success/failure */
236 237
237 #endif /* __XFS_ALLOC_H__ */ 238 #endif /* __XFS_ALLOC_H__ */
238 239
1 /* 1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it would be useful, 9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18 #include "xfs.h" 18 #include "xfs.h"
19 #include "xfs_fs.h" 19 #include "xfs_fs.h"
20 #include "xfs_types.h" 20 #include "xfs_types.h"
21 #include "xfs_bit.h" 21 #include "xfs_bit.h"
22 #include "xfs_log.h" 22 #include "xfs_log.h"
23 #include "xfs_inum.h" 23 #include "xfs_inum.h"
24 #include "xfs_trans.h" 24 #include "xfs_trans.h"
25 #include "xfs_sb.h" 25 #include "xfs_sb.h"
26 #include "xfs_ag.h" 26 #include "xfs_ag.h"
27 #include "xfs_dir2.h" 27 #include "xfs_dir2.h"
28 #include "xfs_da_btree.h" 28 #include "xfs_da_btree.h"
29 #include "xfs_bmap_btree.h" 29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h" 30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h" 31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dinode.h" 32 #include "xfs_dinode.h"
33 #include "xfs_inode.h" 33 #include "xfs_inode.h"
34 #include "xfs_btree.h" 34 #include "xfs_btree.h"
35 #include "xfs_mount.h" 35 #include "xfs_mount.h"
36 #include "xfs_itable.h" 36 #include "xfs_itable.h"
37 #include "xfs_inode_item.h" 37 #include "xfs_inode_item.h"
38 #include "xfs_extfree_item.h" 38 #include "xfs_extfree_item.h"
39 #include "xfs_alloc.h" 39 #include "xfs_alloc.h"
40 #include "xfs_bmap.h" 40 #include "xfs_bmap.h"
41 #include "xfs_rtalloc.h" 41 #include "xfs_rtalloc.h"
42 #include "xfs_error.h" 42 #include "xfs_error.h"
43 #include "xfs_attr_leaf.h" 43 #include "xfs_attr_leaf.h"
44 #include "xfs_quota.h" 44 #include "xfs_quota.h"
45 #include "xfs_trans_space.h" 45 #include "xfs_trans_space.h"
46 #include "xfs_buf_item.h" 46 #include "xfs_buf_item.h"
47 #include "xfs_filestream.h" 47 #include "xfs_filestream.h"
48 #include "xfs_vnodeops.h" 48 #include "xfs_vnodeops.h"
49 #include "xfs_trace.h" 49 #include "xfs_trace.h"
50 50
51 51
52 kmem_zone_t *xfs_bmap_free_item_zone; 52 kmem_zone_t *xfs_bmap_free_item_zone;
53 53
54 /* 54 /*
55 * Prototypes for internal bmap routines. 55 * Prototypes for internal bmap routines.
56 */ 56 */
57 57
58 #ifdef DEBUG 58 #ifdef DEBUG
59 STATIC void 59 STATIC void
60 xfs_bmap_check_leaf_extents( 60 xfs_bmap_check_leaf_extents(
61 struct xfs_btree_cur *cur, 61 struct xfs_btree_cur *cur,
62 struct xfs_inode *ip, 62 struct xfs_inode *ip,
63 int whichfork); 63 int whichfork);
64 #else 64 #else
65 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0) 65 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
66 #endif 66 #endif
67 67
68 68
69 /* 69 /*
70 * Called from xfs_bmap_add_attrfork to handle extents format files. 70 * Called from xfs_bmap_add_attrfork to handle extents format files.
71 */ 71 */
72 STATIC int /* error */ 72 STATIC int /* error */
73 xfs_bmap_add_attrfork_extents( 73 xfs_bmap_add_attrfork_extents(
74 xfs_trans_t *tp, /* transaction pointer */ 74 xfs_trans_t *tp, /* transaction pointer */
75 xfs_inode_t *ip, /* incore inode pointer */ 75 xfs_inode_t *ip, /* incore inode pointer */
76 xfs_fsblock_t *firstblock, /* first block allocated */ 76 xfs_fsblock_t *firstblock, /* first block allocated */
77 xfs_bmap_free_t *flist, /* blocks to free at commit */ 77 xfs_bmap_free_t *flist, /* blocks to free at commit */
78 int *flags); /* inode logging flags */ 78 int *flags); /* inode logging flags */
79 79
80 /* 80 /*
81 * Called from xfs_bmap_add_attrfork to handle local format files. 81 * Called from xfs_bmap_add_attrfork to handle local format files.
82 */ 82 */
83 STATIC int /* error */ 83 STATIC int /* error */
84 xfs_bmap_add_attrfork_local( 84 xfs_bmap_add_attrfork_local(
85 xfs_trans_t *tp, /* transaction pointer */ 85 xfs_trans_t *tp, /* transaction pointer */
86 xfs_inode_t *ip, /* incore inode pointer */ 86 xfs_inode_t *ip, /* incore inode pointer */
87 xfs_fsblock_t *firstblock, /* first block allocated */ 87 xfs_fsblock_t *firstblock, /* first block allocated */
88 xfs_bmap_free_t *flist, /* blocks to free at commit */ 88 xfs_bmap_free_t *flist, /* blocks to free at commit */
89 int *flags); /* inode logging flags */ 89 int *flags); /* inode logging flags */
90 90
91 /* 91 /*
92 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 92 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
93 * It figures out where to ask the underlying allocator to put the new extent. 93 * It figures out where to ask the underlying allocator to put the new extent.
94 */ 94 */
95 STATIC int /* error */ 95 STATIC int /* error */
96 xfs_bmap_alloc( 96 xfs_bmap_alloc(
97 xfs_bmalloca_t *ap); /* bmap alloc argument struct */ 97 xfs_bmalloca_t *ap); /* bmap alloc argument struct */
98 98
99 /* 99 /*
100 * Transform a btree format file with only one leaf node, where the 100 * Transform a btree format file with only one leaf node, where the
101 * extents list will fit in the inode, into an extents format file. 101 * extents list will fit in the inode, into an extents format file.
102 * Since the file extents are already in-core, all we have to do is 102 * Since the file extents are already in-core, all we have to do is
103 * give up the space for the btree root and pitch the leaf block. 103 * give up the space for the btree root and pitch the leaf block.
104 */ 104 */
105 STATIC int /* error */ 105 STATIC int /* error */
106 xfs_bmap_btree_to_extents( 106 xfs_bmap_btree_to_extents(
107 xfs_trans_t *tp, /* transaction pointer */ 107 xfs_trans_t *tp, /* transaction pointer */
108 xfs_inode_t *ip, /* incore inode pointer */ 108 xfs_inode_t *ip, /* incore inode pointer */
109 xfs_btree_cur_t *cur, /* btree cursor */ 109 xfs_btree_cur_t *cur, /* btree cursor */
110 int *logflagsp, /* inode logging flags */ 110 int *logflagsp, /* inode logging flags */
111 int whichfork); /* data or attr fork */ 111 int whichfork); /* data or attr fork */
112 112
113 /* 113 /*
114 * Remove the entry "free" from the free item list. Prev points to the 114 * Remove the entry "free" from the free item list. Prev points to the
115 * previous entry, unless "free" is the head of the list. 115 * previous entry, unless "free" is the head of the list.
116 */ 116 */
117 STATIC void 117 STATIC void
118 xfs_bmap_del_free( 118 xfs_bmap_del_free(
119 xfs_bmap_free_t *flist, /* free item list header */ 119 xfs_bmap_free_t *flist, /* free item list header */
120 xfs_bmap_free_item_t *prev, /* previous item on list, if any */ 120 xfs_bmap_free_item_t *prev, /* previous item on list, if any */
121 xfs_bmap_free_item_t *free); /* list item to be freed */ 121 xfs_bmap_free_item_t *free); /* list item to be freed */
122 122
123 /* 123 /*
124 * Convert an extents-format file into a btree-format file. 124 * Convert an extents-format file into a btree-format file.
125 * The new file will have a root block (in the inode) and a single child block. 125 * The new file will have a root block (in the inode) and a single child block.
126 */ 126 */
127 STATIC int /* error */ 127 STATIC int /* error */
128 xfs_bmap_extents_to_btree( 128 xfs_bmap_extents_to_btree(
129 xfs_trans_t *tp, /* transaction pointer */ 129 xfs_trans_t *tp, /* transaction pointer */
130 xfs_inode_t *ip, /* incore inode pointer */ 130 xfs_inode_t *ip, /* incore inode pointer */
131 xfs_fsblock_t *firstblock, /* first-block-allocated */ 131 xfs_fsblock_t *firstblock, /* first-block-allocated */
132 xfs_bmap_free_t *flist, /* blocks freed in xaction */ 132 xfs_bmap_free_t *flist, /* blocks freed in xaction */
133 xfs_btree_cur_t **curp, /* cursor returned to caller */ 133 xfs_btree_cur_t **curp, /* cursor returned to caller */
134 int wasdel, /* converting a delayed alloc */ 134 int wasdel, /* converting a delayed alloc */
135 int *logflagsp, /* inode logging flags */ 135 int *logflagsp, /* inode logging flags */
136 int whichfork); /* data or attr fork */ 136 int whichfork); /* data or attr fork */
137 137
138 /* 138 /*
139 * Convert a local file to an extents file. 139 * Convert a local file to an extents file.
140 * This code is sort of bogus, since the file data needs to get 140 * This code is sort of bogus, since the file data needs to get
141 * logged so it won't be lost. The bmap-level manipulations are ok, though. 141 * logged so it won't be lost. The bmap-level manipulations are ok, though.
142 */ 142 */
143 STATIC int /* error */ 143 STATIC int /* error */
144 xfs_bmap_local_to_extents( 144 xfs_bmap_local_to_extents(
145 xfs_trans_t *tp, /* transaction pointer */ 145 xfs_trans_t *tp, /* transaction pointer */
146 xfs_inode_t *ip, /* incore inode pointer */ 146 xfs_inode_t *ip, /* incore inode pointer */
147 xfs_fsblock_t *firstblock, /* first block allocated in xaction */ 147 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
148 xfs_extlen_t total, /* total blocks needed by transaction */ 148 xfs_extlen_t total, /* total blocks needed by transaction */
149 int *logflagsp, /* inode logging flags */ 149 int *logflagsp, /* inode logging flags */
150 int whichfork); /* data or attr fork */ 150 int whichfork); /* data or attr fork */
151 151
152 /* 152 /*
153 * Search the extents list for the inode, for the extent containing bno. 153 * Search the extents list for the inode, for the extent containing bno.
154 * If bno lies in a hole, point to the next entry. If bno lies past eof, 154 * If bno lies in a hole, point to the next entry. If bno lies past eof,
155 * *eofp will be set, and *prevp will contain the last entry (null if none). 155 * *eofp will be set, and *prevp will contain the last entry (null if none).
156 * Else, *lastxp will be set to the index of the found 156 * Else, *lastxp will be set to the index of the found
157 * entry; *gotp will contain the entry. 157 * entry; *gotp will contain the entry.
158 */ 158 */
159 STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */ 159 STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
160 xfs_bmap_search_extents( 160 xfs_bmap_search_extents(
161 xfs_inode_t *ip, /* incore inode pointer */ 161 xfs_inode_t *ip, /* incore inode pointer */
162 xfs_fileoff_t bno, /* block number searched for */ 162 xfs_fileoff_t bno, /* block number searched for */
163 int whichfork, /* data or attr fork */ 163 int whichfork, /* data or attr fork */
164 int *eofp, /* out: end of file found */ 164 int *eofp, /* out: end of file found */
165 xfs_extnum_t *lastxp, /* out: last extent index */ 165 xfs_extnum_t *lastxp, /* out: last extent index */
166 xfs_bmbt_irec_t *gotp, /* out: extent entry found */ 166 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
167 xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */ 167 xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */
168 168
169 /* 169 /*
170 * Compute the worst-case number of indirect blocks that will be used 170 * Compute the worst-case number of indirect blocks that will be used
171 * for ip's delayed extent of length "len". 171 * for ip's delayed extent of length "len".
172 */ 172 */
173 STATIC xfs_filblks_t 173 STATIC xfs_filblks_t
174 xfs_bmap_worst_indlen( 174 xfs_bmap_worst_indlen(
175 xfs_inode_t *ip, /* incore inode pointer */ 175 xfs_inode_t *ip, /* incore inode pointer */
176 xfs_filblks_t len); /* delayed extent length */ 176 xfs_filblks_t len); /* delayed extent length */
177 177
178 #ifdef DEBUG 178 #ifdef DEBUG
179 /* 179 /*
180 * Perform various validation checks on the values being returned 180 * Perform various validation checks on the values being returned
181 * from xfs_bmapi(). 181 * from xfs_bmapi().
182 */ 182 */
183 STATIC void 183 STATIC void
184 xfs_bmap_validate_ret( 184 xfs_bmap_validate_ret(
185 xfs_fileoff_t bno, 185 xfs_fileoff_t bno,
186 xfs_filblks_t len, 186 xfs_filblks_t len,
187 int flags, 187 int flags,
188 xfs_bmbt_irec_t *mval, 188 xfs_bmbt_irec_t *mval,
189 int nmap, 189 int nmap,
190 int ret_nmap); 190 int ret_nmap);
191 #else 191 #else
192 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) 192 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
193 #endif /* DEBUG */ 193 #endif /* DEBUG */
194 194
195 STATIC int 195 STATIC int
196 xfs_bmap_count_tree( 196 xfs_bmap_count_tree(
197 xfs_mount_t *mp, 197 xfs_mount_t *mp,
198 xfs_trans_t *tp, 198 xfs_trans_t *tp,
199 xfs_ifork_t *ifp, 199 xfs_ifork_t *ifp,
200 xfs_fsblock_t blockno, 200 xfs_fsblock_t blockno,
201 int levelin, 201 int levelin,
202 int *count); 202 int *count);
203 203
204 STATIC void 204 STATIC void
205 xfs_bmap_count_leaves( 205 xfs_bmap_count_leaves(
206 xfs_ifork_t *ifp, 206 xfs_ifork_t *ifp,
207 xfs_extnum_t idx, 207 xfs_extnum_t idx,
208 int numrecs, 208 int numrecs,
209 int *count); 209 int *count);
210 210
211 STATIC void 211 STATIC void
212 xfs_bmap_disk_count_leaves( 212 xfs_bmap_disk_count_leaves(
213 struct xfs_mount *mp, 213 struct xfs_mount *mp,
214 struct xfs_btree_block *block, 214 struct xfs_btree_block *block,
215 int numrecs, 215 int numrecs,
216 int *count); 216 int *count);
217 217
218 /* 218 /*
219 * Bmap internal routines. 219 * Bmap internal routines.
220 */ 220 */
221 221
222 STATIC int /* error */ 222 STATIC int /* error */
223 xfs_bmbt_lookup_eq( 223 xfs_bmbt_lookup_eq(
224 struct xfs_btree_cur *cur, 224 struct xfs_btree_cur *cur,
225 xfs_fileoff_t off, 225 xfs_fileoff_t off,
226 xfs_fsblock_t bno, 226 xfs_fsblock_t bno,
227 xfs_filblks_t len, 227 xfs_filblks_t len,
228 int *stat) /* success/failure */ 228 int *stat) /* success/failure */
229 { 229 {
230 cur->bc_rec.b.br_startoff = off; 230 cur->bc_rec.b.br_startoff = off;
231 cur->bc_rec.b.br_startblock = bno; 231 cur->bc_rec.b.br_startblock = bno;
232 cur->bc_rec.b.br_blockcount = len; 232 cur->bc_rec.b.br_blockcount = len;
233 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); 233 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
234 } 234 }
235 235
236 STATIC int /* error */ 236 STATIC int /* error */
237 xfs_bmbt_lookup_ge( 237 xfs_bmbt_lookup_ge(
238 struct xfs_btree_cur *cur, 238 struct xfs_btree_cur *cur,
239 xfs_fileoff_t off, 239 xfs_fileoff_t off,
240 xfs_fsblock_t bno, 240 xfs_fsblock_t bno,
241 xfs_filblks_t len, 241 xfs_filblks_t len,
242 int *stat) /* success/failure */ 242 int *stat) /* success/failure */
243 { 243 {
244 cur->bc_rec.b.br_startoff = off; 244 cur->bc_rec.b.br_startoff = off;
245 cur->bc_rec.b.br_startblock = bno; 245 cur->bc_rec.b.br_startblock = bno;
246 cur->bc_rec.b.br_blockcount = len; 246 cur->bc_rec.b.br_blockcount = len;
247 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); 247 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
248 } 248 }
249 249
250 /* 250 /*
251 * Check if the inode needs to be converted to btree format. 251 * Check if the inode needs to be converted to btree format.
252 */ 252 */
253 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) 253 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
254 { 254 {
255 return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 255 return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
256 XFS_IFORK_NEXTENTS(ip, whichfork) > 256 XFS_IFORK_NEXTENTS(ip, whichfork) >
257 XFS_IFORK_MAXEXT(ip, whichfork); 257 XFS_IFORK_MAXEXT(ip, whichfork);
258 } 258 }
259 259
260 /* 260 /*
261 * Check if the inode should be converted to extent format. 261 * Check if the inode should be converted to extent format.
262 */ 262 */
263 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork) 263 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
264 { 264 {
265 return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && 265 return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
266 XFS_IFORK_NEXTENTS(ip, whichfork) <= 266 XFS_IFORK_NEXTENTS(ip, whichfork) <=
267 XFS_IFORK_MAXEXT(ip, whichfork); 267 XFS_IFORK_MAXEXT(ip, whichfork);
268 } 268 }
269 269
270 /* 270 /*
271 * Update the record referred to by cur to the value given 271 * Update the record referred to by cur to the value given
272 * by [off, bno, len, state]. 272 * by [off, bno, len, state].
273 * This either works (return 0) or gets an EFSCORRUPTED error. 273 * This either works (return 0) or gets an EFSCORRUPTED error.
274 */ 274 */
275 STATIC int 275 STATIC int
276 xfs_bmbt_update( 276 xfs_bmbt_update(
277 struct xfs_btree_cur *cur, 277 struct xfs_btree_cur *cur,
278 xfs_fileoff_t off, 278 xfs_fileoff_t off,
279 xfs_fsblock_t bno, 279 xfs_fsblock_t bno,
280 xfs_filblks_t len, 280 xfs_filblks_t len,
281 xfs_exntst_t state) 281 xfs_exntst_t state)
282 { 282 {
283 union xfs_btree_rec rec; 283 union xfs_btree_rec rec;
284 284
285 xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state); 285 xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
286 return xfs_btree_update(cur, &rec); 286 return xfs_btree_update(cur, &rec);
287 } 287 }
288 288
289 /* 289 /*
290 * Called from xfs_bmap_add_attrfork to handle btree format files. 290 * Called from xfs_bmap_add_attrfork to handle btree format files.
291 */ 291 */
292 STATIC int /* error */ 292 STATIC int /* error */
293 xfs_bmap_add_attrfork_btree( 293 xfs_bmap_add_attrfork_btree(
294 xfs_trans_t *tp, /* transaction pointer */ 294 xfs_trans_t *tp, /* transaction pointer */
295 xfs_inode_t *ip, /* incore inode pointer */ 295 xfs_inode_t *ip, /* incore inode pointer */
296 xfs_fsblock_t *firstblock, /* first block allocated */ 296 xfs_fsblock_t *firstblock, /* first block allocated */
297 xfs_bmap_free_t *flist, /* blocks to free at commit */ 297 xfs_bmap_free_t *flist, /* blocks to free at commit */
298 int *flags) /* inode logging flags */ 298 int *flags) /* inode logging flags */
299 { 299 {
300 xfs_btree_cur_t *cur; /* btree cursor */ 300 xfs_btree_cur_t *cur; /* btree cursor */
301 int error; /* error return value */ 301 int error; /* error return value */
302 xfs_mount_t *mp; /* file system mount struct */ 302 xfs_mount_t *mp; /* file system mount struct */
303 int stat; /* newroot status */ 303 int stat; /* newroot status */
304 304
305 mp = ip->i_mount; 305 mp = ip->i_mount;
306 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) 306 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
307 *flags |= XFS_ILOG_DBROOT; 307 *flags |= XFS_ILOG_DBROOT;
308 else { 308 else {
309 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); 309 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
310 cur->bc_private.b.flist = flist; 310 cur->bc_private.b.flist = flist;
311 cur->bc_private.b.firstblock = *firstblock; 311 cur->bc_private.b.firstblock = *firstblock;
312 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) 312 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
313 goto error0; 313 goto error0;
314 /* must be at least one entry */ 314 /* must be at least one entry */
315 XFS_WANT_CORRUPTED_GOTO(stat == 1, error0); 315 XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
316 if ((error = xfs_btree_new_iroot(cur, flags, &stat))) 316 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
317 goto error0; 317 goto error0;
318 if (stat == 0) { 318 if (stat == 0) {
319 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 319 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
320 return XFS_ERROR(ENOSPC); 320 return XFS_ERROR(ENOSPC);
321 } 321 }
322 *firstblock = cur->bc_private.b.firstblock; 322 *firstblock = cur->bc_private.b.firstblock;
323 cur->bc_private.b.allocated = 0; 323 cur->bc_private.b.allocated = 0;
324 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 324 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
325 } 325 }
326 return 0; 326 return 0;
327 error0: 327 error0:
328 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 328 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
329 return error; 329 return error;
330 } 330 }
331 331
332 /* 332 /*
333 * Called from xfs_bmap_add_attrfork to handle extents format files. 333 * Called from xfs_bmap_add_attrfork to handle extents format files.
334 */ 334 */
335 STATIC int /* error */ 335 STATIC int /* error */
336 xfs_bmap_add_attrfork_extents( 336 xfs_bmap_add_attrfork_extents(
337 xfs_trans_t *tp, /* transaction pointer */ 337 xfs_trans_t *tp, /* transaction pointer */
338 xfs_inode_t *ip, /* incore inode pointer */ 338 xfs_inode_t *ip, /* incore inode pointer */
339 xfs_fsblock_t *firstblock, /* first block allocated */ 339 xfs_fsblock_t *firstblock, /* first block allocated */
340 xfs_bmap_free_t *flist, /* blocks to free at commit */ 340 xfs_bmap_free_t *flist, /* blocks to free at commit */
341 int *flags) /* inode logging flags */ 341 int *flags) /* inode logging flags */
342 { 342 {
343 xfs_btree_cur_t *cur; /* bmap btree cursor */ 343 xfs_btree_cur_t *cur; /* bmap btree cursor */
344 int error; /* error return value */ 344 int error; /* error return value */
345 345
346 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip)) 346 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
347 return 0; 347 return 0;
348 cur = NULL; 348 cur = NULL;
349 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0, 349 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
350 flags, XFS_DATA_FORK); 350 flags, XFS_DATA_FORK);
351 if (cur) { 351 if (cur) {
352 cur->bc_private.b.allocated = 0; 352 cur->bc_private.b.allocated = 0;
353 xfs_btree_del_cursor(cur, 353 xfs_btree_del_cursor(cur,
354 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 354 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
355 } 355 }
356 return error; 356 return error;
357 } 357 }
358 358
359 /* 359 /*
360 * Called from xfs_bmap_add_attrfork to handle local format files. 360 * Called from xfs_bmap_add_attrfork to handle local format files.
361 */ 361 */
362 STATIC int /* error */ 362 STATIC int /* error */
363 xfs_bmap_add_attrfork_local( 363 xfs_bmap_add_attrfork_local(
364 xfs_trans_t *tp, /* transaction pointer */ 364 xfs_trans_t *tp, /* transaction pointer */
365 xfs_inode_t *ip, /* incore inode pointer */ 365 xfs_inode_t *ip, /* incore inode pointer */
366 xfs_fsblock_t *firstblock, /* first block allocated */ 366 xfs_fsblock_t *firstblock, /* first block allocated */
367 xfs_bmap_free_t *flist, /* blocks to free at commit */ 367 xfs_bmap_free_t *flist, /* blocks to free at commit */
368 int *flags) /* inode logging flags */ 368 int *flags) /* inode logging flags */
369 { 369 {
370 xfs_da_args_t dargs; /* args for dir/attr code */ 370 xfs_da_args_t dargs; /* args for dir/attr code */
371 int error; /* error return value */ 371 int error; /* error return value */
372 xfs_mount_t *mp; /* mount structure pointer */ 372 xfs_mount_t *mp; /* mount structure pointer */
373 373
374 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) 374 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
375 return 0; 375 return 0;
376 if (S_ISDIR(ip->i_d.di_mode)) { 376 if (S_ISDIR(ip->i_d.di_mode)) {
377 mp = ip->i_mount; 377 mp = ip->i_mount;
378 memset(&dargs, 0, sizeof(dargs)); 378 memset(&dargs, 0, sizeof(dargs));
379 dargs.dp = ip; 379 dargs.dp = ip;
380 dargs.firstblock = firstblock; 380 dargs.firstblock = firstblock;
381 dargs.flist = flist; 381 dargs.flist = flist;
382 dargs.total = mp->m_dirblkfsbs; 382 dargs.total = mp->m_dirblkfsbs;
383 dargs.whichfork = XFS_DATA_FORK; 383 dargs.whichfork = XFS_DATA_FORK;
384 dargs.trans = tp; 384 dargs.trans = tp;
385 error = xfs_dir2_sf_to_block(&dargs); 385 error = xfs_dir2_sf_to_block(&dargs);
386 } else 386 } else
387 error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags, 387 error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
388 XFS_DATA_FORK); 388 XFS_DATA_FORK);
389 return error; 389 return error;
390 } 390 }
391 391
392 /* 392 /*
393 * Convert a delayed allocation to a real allocation. 393 * Convert a delayed allocation to a real allocation.
394 */ 394 */
395 STATIC int /* error */ 395 STATIC int /* error */
396 xfs_bmap_add_extent_delay_real( 396 xfs_bmap_add_extent_delay_real(
397 struct xfs_bmalloca *bma) 397 struct xfs_bmalloca *bma)
398 { 398 {
399 struct xfs_bmbt_irec *new = &bma->got; 399 struct xfs_bmbt_irec *new = &bma->got;
400 int diff; /* temp value */ 400 int diff; /* temp value */
401 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ 401 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
402 int error; /* error return value */ 402 int error; /* error return value */
403 int i; /* temp state */ 403 int i; /* temp state */
404 xfs_ifork_t *ifp; /* inode fork pointer */ 404 xfs_ifork_t *ifp; /* inode fork pointer */
405 xfs_fileoff_t new_endoff; /* end offset of new entry */ 405 xfs_fileoff_t new_endoff; /* end offset of new entry */
406 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 406 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
407 /* left is 0, right is 1, prev is 2 */ 407 /* left is 0, right is 1, prev is 2 */
408 int rval=0; /* return value (logging flags) */ 408 int rval=0; /* return value (logging flags) */
409 int state = 0;/* state bits, accessed thru macros */ 409 int state = 0;/* state bits, accessed thru macros */
410 xfs_filblks_t da_new; /* new count del alloc blocks used */ 410 xfs_filblks_t da_new; /* new count del alloc blocks used */
411 xfs_filblks_t da_old; /* old count del alloc blocks used */ 411 xfs_filblks_t da_old; /* old count del alloc blocks used */
412 xfs_filblks_t temp=0; /* value for da_new calculations */ 412 xfs_filblks_t temp=0; /* value for da_new calculations */
413 xfs_filblks_t temp2=0;/* value for da_new calculations */ 413 xfs_filblks_t temp2=0;/* value for da_new calculations */
414 int tmp_rval; /* partial logging flags */ 414 int tmp_rval; /* partial logging flags */
415 415
416 ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK); 416 ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
417 417
418 ASSERT(bma->idx >= 0); 418 ASSERT(bma->idx >= 0);
419 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); 419 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
420 ASSERT(!isnullstartblock(new->br_startblock)); 420 ASSERT(!isnullstartblock(new->br_startblock));
421 ASSERT(!bma->cur || 421 ASSERT(!bma->cur ||
422 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 422 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
423 423
424 XFS_STATS_INC(xs_add_exlist); 424 XFS_STATS_INC(xs_add_exlist);
425 425
426 #define LEFT r[0] 426 #define LEFT r[0]
427 #define RIGHT r[1] 427 #define RIGHT r[1]
428 #define PREV r[2] 428 #define PREV r[2]
429 429
430 /* 430 /*
431 * Set up a bunch of variables to make the tests simpler. 431 * Set up a bunch of variables to make the tests simpler.
432 */ 432 */
433 ep = xfs_iext_get_ext(ifp, bma->idx); 433 ep = xfs_iext_get_ext(ifp, bma->idx);
434 xfs_bmbt_get_all(ep, &PREV); 434 xfs_bmbt_get_all(ep, &PREV);
435 new_endoff = new->br_startoff + new->br_blockcount; 435 new_endoff = new->br_startoff + new->br_blockcount;
436 ASSERT(PREV.br_startoff <= new->br_startoff); 436 ASSERT(PREV.br_startoff <= new->br_startoff);
437 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 437 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
438 438
439 da_old = startblockval(PREV.br_startblock); 439 da_old = startblockval(PREV.br_startblock);
440 da_new = 0; 440 da_new = 0;
441 441
442 /* 442 /*
443 * Set flags determining what part of the previous delayed allocation 443 * Set flags determining what part of the previous delayed allocation
444 * extent is being replaced by a real allocation. 444 * extent is being replaced by a real allocation.
445 */ 445 */
446 if (PREV.br_startoff == new->br_startoff) 446 if (PREV.br_startoff == new->br_startoff)
447 state |= BMAP_LEFT_FILLING; 447 state |= BMAP_LEFT_FILLING;
448 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 448 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
449 state |= BMAP_RIGHT_FILLING; 449 state |= BMAP_RIGHT_FILLING;
450 450
451 /* 451 /*
452 * Check and set flags if this segment has a left neighbor. 452 * Check and set flags if this segment has a left neighbor.
453 * Don't set contiguous if the combined extent would be too large. 453 * Don't set contiguous if the combined extent would be too large.
454 */ 454 */
455 if (bma->idx > 0) { 455 if (bma->idx > 0) {
456 state |= BMAP_LEFT_VALID; 456 state |= BMAP_LEFT_VALID;
457 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT); 457 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
458 458
459 if (isnullstartblock(LEFT.br_startblock)) 459 if (isnullstartblock(LEFT.br_startblock))
460 state |= BMAP_LEFT_DELAY; 460 state |= BMAP_LEFT_DELAY;
461 } 461 }
462 462
463 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 463 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
464 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 464 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
465 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 465 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
466 LEFT.br_state == new->br_state && 466 LEFT.br_state == new->br_state &&
467 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 467 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
468 state |= BMAP_LEFT_CONTIG; 468 state |= BMAP_LEFT_CONTIG;
469 469
470 /* 470 /*
471 * Check and set flags if this segment has a right neighbor. 471 * Check and set flags if this segment has a right neighbor.
472 * Don't set contiguous if the combined extent would be too large. 472 * Don't set contiguous if the combined extent would be too large.
473 * Also check for all-three-contiguous being too large. 473 * Also check for all-three-contiguous being too large.
474 */ 474 */
475 if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { 475 if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
476 state |= BMAP_RIGHT_VALID; 476 state |= BMAP_RIGHT_VALID;
477 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT); 477 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
478 478
479 if (isnullstartblock(RIGHT.br_startblock)) 479 if (isnullstartblock(RIGHT.br_startblock))
480 state |= BMAP_RIGHT_DELAY; 480 state |= BMAP_RIGHT_DELAY;
481 } 481 }
482 482
483 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 483 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
484 new_endoff == RIGHT.br_startoff && 484 new_endoff == RIGHT.br_startoff &&
485 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 485 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
486 new->br_state == RIGHT.br_state && 486 new->br_state == RIGHT.br_state &&
487 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 487 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
488 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 488 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
489 BMAP_RIGHT_FILLING)) != 489 BMAP_RIGHT_FILLING)) !=
490 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 490 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
491 BMAP_RIGHT_FILLING) || 491 BMAP_RIGHT_FILLING) ||
492 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 492 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
493 <= MAXEXTLEN)) 493 <= MAXEXTLEN))
494 state |= BMAP_RIGHT_CONTIG; 494 state |= BMAP_RIGHT_CONTIG;
495 495
496 error = 0; 496 error = 0;
497 /* 497 /*
498 * Switch out based on the FILLING and CONTIG state bits. 498 * Switch out based on the FILLING and CONTIG state bits.
499 */ 499 */
500 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 500 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
501 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 501 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
502 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 502 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
503 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 503 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
504 /* 504 /*
505 * Filling in all of a previously delayed allocation extent. 505 * Filling in all of a previously delayed allocation extent.
506 * The left and right neighbors are both contiguous with new. 506 * The left and right neighbors are both contiguous with new.
507 */ 507 */
508 bma->idx--; 508 bma->idx--;
509 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 509 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
510 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 510 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
511 LEFT.br_blockcount + PREV.br_blockcount + 511 LEFT.br_blockcount + PREV.br_blockcount +
512 RIGHT.br_blockcount); 512 RIGHT.br_blockcount);
513 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 513 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
514 514
515 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state); 515 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
516 bma->ip->i_d.di_nextents--; 516 bma->ip->i_d.di_nextents--;
517 if (bma->cur == NULL) 517 if (bma->cur == NULL)
518 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 518 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
519 else { 519 else {
520 rval = XFS_ILOG_CORE; 520 rval = XFS_ILOG_CORE;
521 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 521 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
522 RIGHT.br_startblock, 522 RIGHT.br_startblock,
523 RIGHT.br_blockcount, &i); 523 RIGHT.br_blockcount, &i);
524 if (error) 524 if (error)
525 goto done; 525 goto done;
526 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 526 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
527 error = xfs_btree_delete(bma->cur, &i); 527 error = xfs_btree_delete(bma->cur, &i);
528 if (error) 528 if (error)
529 goto done; 529 goto done;
530 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 530 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
531 error = xfs_btree_decrement(bma->cur, 0, &i); 531 error = xfs_btree_decrement(bma->cur, 0, &i);
532 if (error) 532 if (error)
533 goto done; 533 goto done;
534 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 534 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
535 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 535 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
536 LEFT.br_startblock, 536 LEFT.br_startblock,
537 LEFT.br_blockcount + 537 LEFT.br_blockcount +
538 PREV.br_blockcount + 538 PREV.br_blockcount +
539 RIGHT.br_blockcount, LEFT.br_state); 539 RIGHT.br_blockcount, LEFT.br_state);
540 if (error) 540 if (error)
541 goto done; 541 goto done;
542 } 542 }
543 break; 543 break;
544 544
545 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 545 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
546 /* 546 /*
547 * Filling in all of a previously delayed allocation extent. 547 * Filling in all of a previously delayed allocation extent.
548 * The left neighbor is contiguous, the right is not. 548 * The left neighbor is contiguous, the right is not.
549 */ 549 */
550 bma->idx--; 550 bma->idx--;
551 551
552 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 552 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
553 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 553 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
554 LEFT.br_blockcount + PREV.br_blockcount); 554 LEFT.br_blockcount + PREV.br_blockcount);
555 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 555 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
556 556
557 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); 557 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
558 if (bma->cur == NULL) 558 if (bma->cur == NULL)
559 rval = XFS_ILOG_DEXT; 559 rval = XFS_ILOG_DEXT;
560 else { 560 else {
561 rval = 0; 561 rval = 0;
562 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, 562 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
563 LEFT.br_startblock, LEFT.br_blockcount, 563 LEFT.br_startblock, LEFT.br_blockcount,
564 &i); 564 &i);
565 if (error) 565 if (error)
566 goto done; 566 goto done;
567 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 567 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
568 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 568 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
569 LEFT.br_startblock, 569 LEFT.br_startblock,
570 LEFT.br_blockcount + 570 LEFT.br_blockcount +
571 PREV.br_blockcount, LEFT.br_state); 571 PREV.br_blockcount, LEFT.br_state);
572 if (error) 572 if (error)
573 goto done; 573 goto done;
574 } 574 }
575 break; 575 break;
576 576
577 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 577 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
578 /* 578 /*
579 * Filling in all of a previously delayed allocation extent. 579 * Filling in all of a previously delayed allocation extent.
580 * The right neighbor is contiguous, the left is not. 580 * The right neighbor is contiguous, the left is not.
581 */ 581 */
582 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 582 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
583 xfs_bmbt_set_startblock(ep, new->br_startblock); 583 xfs_bmbt_set_startblock(ep, new->br_startblock);
584 xfs_bmbt_set_blockcount(ep, 584 xfs_bmbt_set_blockcount(ep,
585 PREV.br_blockcount + RIGHT.br_blockcount); 585 PREV.br_blockcount + RIGHT.br_blockcount);
586 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 586 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
587 587
588 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); 588 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
589 if (bma->cur == NULL) 589 if (bma->cur == NULL)
590 rval = XFS_ILOG_DEXT; 590 rval = XFS_ILOG_DEXT;
591 else { 591 else {
592 rval = 0; 592 rval = 0;
593 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 593 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
594 RIGHT.br_startblock, 594 RIGHT.br_startblock,
595 RIGHT.br_blockcount, &i); 595 RIGHT.br_blockcount, &i);
596 if (error) 596 if (error)
597 goto done; 597 goto done;
598 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 598 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
599 error = xfs_bmbt_update(bma->cur, PREV.br_startoff, 599 error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
600 new->br_startblock, 600 new->br_startblock,
601 PREV.br_blockcount + 601 PREV.br_blockcount +
602 RIGHT.br_blockcount, PREV.br_state); 602 RIGHT.br_blockcount, PREV.br_state);
603 if (error) 603 if (error)
604 goto done; 604 goto done;
605 } 605 }
606 break; 606 break;
607 607
608 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 608 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
609 /* 609 /*
610 * Filling in all of a previously delayed allocation extent. 610 * Filling in all of a previously delayed allocation extent.
611 * Neither the left nor right neighbors are contiguous with 611 * Neither the left nor right neighbors are contiguous with
612 * the new one. 612 * the new one.
613 */ 613 */
614 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 614 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
615 xfs_bmbt_set_startblock(ep, new->br_startblock); 615 xfs_bmbt_set_startblock(ep, new->br_startblock);
616 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 616 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
617 617
618 bma->ip->i_d.di_nextents++; 618 bma->ip->i_d.di_nextents++;
619 if (bma->cur == NULL) 619 if (bma->cur == NULL)
620 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 620 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
621 else { 621 else {
622 rval = XFS_ILOG_CORE; 622 rval = XFS_ILOG_CORE;
623 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 623 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
624 new->br_startblock, new->br_blockcount, 624 new->br_startblock, new->br_blockcount,
625 &i); 625 &i);
626 if (error) 626 if (error)
627 goto done; 627 goto done;
628 XFS_WANT_CORRUPTED_GOTO(i == 0, done); 628 XFS_WANT_CORRUPTED_GOTO(i == 0, done);
629 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 629 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
630 error = xfs_btree_insert(bma->cur, &i); 630 error = xfs_btree_insert(bma->cur, &i);
631 if (error) 631 if (error)
632 goto done; 632 goto done;
633 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 633 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
634 } 634 }
635 break; 635 break;
636 636
637 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 637 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
638 /* 638 /*
639 * Filling in the first part of a previous delayed allocation. 639 * Filling in the first part of a previous delayed allocation.
640 * The left neighbor is contiguous. 640 * The left neighbor is contiguous.
641 */ 641 */
642 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_); 642 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
643 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1), 643 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
644 LEFT.br_blockcount + new->br_blockcount); 644 LEFT.br_blockcount + new->br_blockcount);
645 xfs_bmbt_set_startoff(ep, 645 xfs_bmbt_set_startoff(ep,
646 PREV.br_startoff + new->br_blockcount); 646 PREV.br_startoff + new->br_blockcount);
647 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_); 647 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
648 648
649 temp = PREV.br_blockcount - new->br_blockcount; 649 temp = PREV.br_blockcount - new->br_blockcount;
650 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 650 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
651 xfs_bmbt_set_blockcount(ep, temp); 651 xfs_bmbt_set_blockcount(ep, temp);
652 if (bma->cur == NULL) 652 if (bma->cur == NULL)
653 rval = XFS_ILOG_DEXT; 653 rval = XFS_ILOG_DEXT;
654 else { 654 else {
655 rval = 0; 655 rval = 0;
656 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, 656 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
657 LEFT.br_startblock, LEFT.br_blockcount, 657 LEFT.br_startblock, LEFT.br_blockcount,
658 &i); 658 &i);
659 if (error) 659 if (error)
660 goto done; 660 goto done;
661 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 661 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
662 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, 662 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
663 LEFT.br_startblock, 663 LEFT.br_startblock,
664 LEFT.br_blockcount + 664 LEFT.br_blockcount +
665 new->br_blockcount, 665 new->br_blockcount,
666 LEFT.br_state); 666 LEFT.br_state);
667 if (error) 667 if (error)
668 goto done; 668 goto done;
669 } 669 }
670 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 670 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
671 startblockval(PREV.br_startblock)); 671 startblockval(PREV.br_startblock));
672 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 672 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
673 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 673 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
674 674
675 bma->idx--; 675 bma->idx--;
676 break; 676 break;
677 677
678 case BMAP_LEFT_FILLING: 678 case BMAP_LEFT_FILLING:
679 /* 679 /*
680 * Filling in the first part of a previous delayed allocation. 680 * Filling in the first part of a previous delayed allocation.
681 * The left neighbor is not contiguous. 681 * The left neighbor is not contiguous.
682 */ 682 */
683 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 683 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
684 xfs_bmbt_set_startoff(ep, new_endoff); 684 xfs_bmbt_set_startoff(ep, new_endoff);
685 temp = PREV.br_blockcount - new->br_blockcount; 685 temp = PREV.br_blockcount - new->br_blockcount;
686 xfs_bmbt_set_blockcount(ep, temp); 686 xfs_bmbt_set_blockcount(ep, temp);
687 xfs_iext_insert(bma->ip, bma->idx, 1, new, state); 687 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
688 bma->ip->i_d.di_nextents++; 688 bma->ip->i_d.di_nextents++;
689 if (bma->cur == NULL) 689 if (bma->cur == NULL)
690 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 690 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
691 else { 691 else {
692 rval = XFS_ILOG_CORE; 692 rval = XFS_ILOG_CORE;
693 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 693 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
694 new->br_startblock, new->br_blockcount, 694 new->br_startblock, new->br_blockcount,
695 &i); 695 &i);
696 if (error) 696 if (error)
697 goto done; 697 goto done;
698 XFS_WANT_CORRUPTED_GOTO(i == 0, done); 698 XFS_WANT_CORRUPTED_GOTO(i == 0, done);
699 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 699 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
700 error = xfs_btree_insert(bma->cur, &i); 700 error = xfs_btree_insert(bma->cur, &i);
701 if (error) 701 if (error)
702 goto done; 702 goto done;
703 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 703 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
704 } 704 }
705 705
706 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { 706 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
707 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 707 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
708 bma->firstblock, bma->flist, 708 bma->firstblock, bma->flist,
709 &bma->cur, 1, &tmp_rval, XFS_DATA_FORK); 709 &bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
710 rval |= tmp_rval; 710 rval |= tmp_rval;
711 if (error) 711 if (error)
712 goto done; 712 goto done;
713 } 713 }
714 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 714 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
715 startblockval(PREV.br_startblock) - 715 startblockval(PREV.br_startblock) -
716 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 716 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
717 ep = xfs_iext_get_ext(ifp, bma->idx + 1); 717 ep = xfs_iext_get_ext(ifp, bma->idx + 1);
718 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 718 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
719 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 719 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
720 break; 720 break;
721 721
722 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 722 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
723 /* 723 /*
724 * Filling in the last part of a previous delayed allocation. 724 * Filling in the last part of a previous delayed allocation.
725 * The right neighbor is contiguous with the new allocation. 725 * The right neighbor is contiguous with the new allocation.
726 */ 726 */
727 temp = PREV.br_blockcount - new->br_blockcount; 727 temp = PREV.br_blockcount - new->br_blockcount;
728 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 728 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
729 xfs_bmbt_set_blockcount(ep, temp); 729 xfs_bmbt_set_blockcount(ep, temp);
730 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1), 730 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
731 new->br_startoff, new->br_startblock, 731 new->br_startoff, new->br_startblock,
732 new->br_blockcount + RIGHT.br_blockcount, 732 new->br_blockcount + RIGHT.br_blockcount,
733 RIGHT.br_state); 733 RIGHT.br_state);
734 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); 734 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
735 if (bma->cur == NULL) 735 if (bma->cur == NULL)
736 rval = XFS_ILOG_DEXT; 736 rval = XFS_ILOG_DEXT;
737 else { 737 else {
738 rval = 0; 738 rval = 0;
739 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, 739 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
740 RIGHT.br_startblock, 740 RIGHT.br_startblock,
741 RIGHT.br_blockcount, &i); 741 RIGHT.br_blockcount, &i);
742 if (error) 742 if (error)
743 goto done; 743 goto done;
744 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 744 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
745 error = xfs_bmbt_update(bma->cur, new->br_startoff, 745 error = xfs_bmbt_update(bma->cur, new->br_startoff,
746 new->br_startblock, 746 new->br_startblock,
747 new->br_blockcount + 747 new->br_blockcount +
748 RIGHT.br_blockcount, 748 RIGHT.br_blockcount,
749 RIGHT.br_state); 749 RIGHT.br_state);
750 if (error) 750 if (error)
751 goto done; 751 goto done;
752 } 752 }
753 753
754 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 754 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
755 startblockval(PREV.br_startblock)); 755 startblockval(PREV.br_startblock));
756 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 756 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
757 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 757 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
758 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 758 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
759 759
760 bma->idx++; 760 bma->idx++;
761 break; 761 break;
762 762
763 case BMAP_RIGHT_FILLING: 763 case BMAP_RIGHT_FILLING:
764 /* 764 /*
765 * Filling in the last part of a previous delayed allocation. 765 * Filling in the last part of a previous delayed allocation.
766 * The right neighbor is not contiguous. 766 * The right neighbor is not contiguous.
767 */ 767 */
768 temp = PREV.br_blockcount - new->br_blockcount; 768 temp = PREV.br_blockcount - new->br_blockcount;
769 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 769 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
770 xfs_bmbt_set_blockcount(ep, temp); 770 xfs_bmbt_set_blockcount(ep, temp);
771 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state); 771 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
772 bma->ip->i_d.di_nextents++; 772 bma->ip->i_d.di_nextents++;
773 if (bma->cur == NULL) 773 if (bma->cur == NULL)
774 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 774 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
775 else { 775 else {
776 rval = XFS_ILOG_CORE; 776 rval = XFS_ILOG_CORE;
777 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 777 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
778 new->br_startblock, new->br_blockcount, 778 new->br_startblock, new->br_blockcount,
779 &i); 779 &i);
780 if (error) 780 if (error)
781 goto done; 781 goto done;
782 XFS_WANT_CORRUPTED_GOTO(i == 0, done); 782 XFS_WANT_CORRUPTED_GOTO(i == 0, done);
783 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 783 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
784 error = xfs_btree_insert(bma->cur, &i); 784 error = xfs_btree_insert(bma->cur, &i);
785 if (error) 785 if (error)
786 goto done; 786 goto done;
787 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 787 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
788 } 788 }
789 789
790 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { 790 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
791 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 791 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
792 bma->firstblock, bma->flist, &bma->cur, 1, 792 bma->firstblock, bma->flist, &bma->cur, 1,
793 &tmp_rval, XFS_DATA_FORK); 793 &tmp_rval, XFS_DATA_FORK);
794 rval |= tmp_rval; 794 rval |= tmp_rval;
795 if (error) 795 if (error)
796 goto done; 796 goto done;
797 } 797 }
798 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 798 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
799 startblockval(PREV.br_startblock) - 799 startblockval(PREV.br_startblock) -
800 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 800 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
801 ep = xfs_iext_get_ext(ifp, bma->idx); 801 ep = xfs_iext_get_ext(ifp, bma->idx);
802 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); 802 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
803 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 803 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
804 804
805 bma->idx++; 805 bma->idx++;
806 break; 806 break;
807 807
808 case 0: 808 case 0:
809 /* 809 /*
810 * Filling in the middle part of a previous delayed allocation. 810 * Filling in the middle part of a previous delayed allocation.
811 * Contiguity is impossible here. 811 * Contiguity is impossible here.
812 * This case is avoided almost all the time. 812 * This case is avoided almost all the time.
813 * 813 *
814 * We start with a delayed allocation: 814 * We start with a delayed allocation:
815 * 815 *
816 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 816 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
817 * PREV @ idx 817 * PREV @ idx
818 * 818 *
819 * and we are allocating: 819 * and we are allocating:
820 * +rrrrrrrrrrrrrrrrr+ 820 * +rrrrrrrrrrrrrrrrr+
821 * new 821 * new
822 * 822 *
823 * and we set it up for insertion as: 823 * and we set it up for insertion as:
824 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 824 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
825 * new 825 * new
826 * PREV @ idx LEFT RIGHT 826 * PREV @ idx LEFT RIGHT
827 * inserted at idx + 1 827 * inserted at idx + 1
828 */ 828 */
829 temp = new->br_startoff - PREV.br_startoff; 829 temp = new->br_startoff - PREV.br_startoff;
830 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; 830 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
831 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_); 831 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
832 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ 832 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
833 LEFT = *new; 833 LEFT = *new;
834 RIGHT.br_state = PREV.br_state; 834 RIGHT.br_state = PREV.br_state;
835 RIGHT.br_startblock = nullstartblock( 835 RIGHT.br_startblock = nullstartblock(
836 (int)xfs_bmap_worst_indlen(bma->ip, temp2)); 836 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
837 RIGHT.br_startoff = new_endoff; 837 RIGHT.br_startoff = new_endoff;
838 RIGHT.br_blockcount = temp2; 838 RIGHT.br_blockcount = temp2;
839 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ 839 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
840 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state); 840 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
841 bma->ip->i_d.di_nextents++; 841 bma->ip->i_d.di_nextents++;
842 if (bma->cur == NULL) 842 if (bma->cur == NULL)
843 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 843 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
844 else { 844 else {
845 rval = XFS_ILOG_CORE; 845 rval = XFS_ILOG_CORE;
846 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, 846 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
847 new->br_startblock, new->br_blockcount, 847 new->br_startblock, new->br_blockcount,
848 &i); 848 &i);
849 if (error) 849 if (error)
850 goto done; 850 goto done;
851 XFS_WANT_CORRUPTED_GOTO(i == 0, done); 851 XFS_WANT_CORRUPTED_GOTO(i == 0, done);
852 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; 852 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
853 error = xfs_btree_insert(bma->cur, &i); 853 error = xfs_btree_insert(bma->cur, &i);
854 if (error) 854 if (error)
855 goto done; 855 goto done;
856 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 856 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
857 } 857 }
858 858
859 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { 859 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
860 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 860 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
861 bma->firstblock, bma->flist, &bma->cur, 861 bma->firstblock, bma->flist, &bma->cur,
862 1, &tmp_rval, XFS_DATA_FORK); 862 1, &tmp_rval, XFS_DATA_FORK);
863 rval |= tmp_rval; 863 rval |= tmp_rval;
864 if (error) 864 if (error)
865 goto done; 865 goto done;
866 } 866 }
867 temp = xfs_bmap_worst_indlen(bma->ip, temp); 867 temp = xfs_bmap_worst_indlen(bma->ip, temp);
868 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); 868 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
869 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - 869 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
870 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 870 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
871 if (diff > 0) { 871 if (diff > 0) {
872 error = xfs_icsb_modify_counters(bma->ip->i_mount, 872 error = xfs_icsb_modify_counters(bma->ip->i_mount,
873 XFS_SBS_FDBLOCKS, 873 XFS_SBS_FDBLOCKS,
874 -((int64_t)diff), 0); 874 -((int64_t)diff), 0);
875 ASSERT(!error); 875 ASSERT(!error);
876 if (error) 876 if (error)
877 goto done; 877 goto done;
878 } 878 }
879 879
880 ep = xfs_iext_get_ext(ifp, bma->idx); 880 ep = xfs_iext_get_ext(ifp, bma->idx);
881 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 881 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
882 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 882 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
883 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_); 883 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
884 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2), 884 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
885 nullstartblock((int)temp2)); 885 nullstartblock((int)temp2));
886 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_); 886 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
887 887
888 bma->idx++; 888 bma->idx++;
889 da_new = temp + temp2; 889 da_new = temp + temp2;
890 break; 890 break;
891 891
892 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 892 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
893 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 893 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
894 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 894 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
895 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 895 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
896 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 896 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
897 case BMAP_LEFT_CONTIG: 897 case BMAP_LEFT_CONTIG:
898 case BMAP_RIGHT_CONTIG: 898 case BMAP_RIGHT_CONTIG:
899 /* 899 /*
900 * These cases are all impossible. 900 * These cases are all impossible.
901 */ 901 */
902 ASSERT(0); 902 ASSERT(0);
903 } 903 }
904 904
905 /* convert to a btree if necessary */ 905 /* convert to a btree if necessary */
906 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { 906 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
907 int tmp_logflags; /* partial log flag return val */ 907 int tmp_logflags; /* partial log flag return val */
908 908
909 ASSERT(bma->cur == NULL); 909 ASSERT(bma->cur == NULL);
910 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 910 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
911 bma->firstblock, bma->flist, &bma->cur, 911 bma->firstblock, bma->flist, &bma->cur,
912 da_old > 0, &tmp_logflags, XFS_DATA_FORK); 912 da_old > 0, &tmp_logflags, XFS_DATA_FORK);
913 bma->logflags |= tmp_logflags; 913 bma->logflags |= tmp_logflags;
914 if (error) 914 if (error)
915 goto done; 915 goto done;
916 } 916 }
917 917
918 /* adjust for changes in reserved delayed indirect blocks */ 918 /* adjust for changes in reserved delayed indirect blocks */
919 if (da_old || da_new) { 919 if (da_old || da_new) {
920 temp = da_new; 920 temp = da_new;
921 if (bma->cur) 921 if (bma->cur)
922 temp += bma->cur->bc_private.b.allocated; 922 temp += bma->cur->bc_private.b.allocated;
923 ASSERT(temp <= da_old); 923 ASSERT(temp <= da_old);
924 if (temp < da_old) 924 if (temp < da_old)
925 xfs_icsb_modify_counters(bma->ip->i_mount, 925 xfs_icsb_modify_counters(bma->ip->i_mount,
926 XFS_SBS_FDBLOCKS, 926 XFS_SBS_FDBLOCKS,
927 (int64_t)(da_old - temp), 0); 927 (int64_t)(da_old - temp), 0);
928 } 928 }
929 929
930 /* clear out the allocated field, done with it now in any case. */ 930 /* clear out the allocated field, done with it now in any case. */
931 if (bma->cur) 931 if (bma->cur)
932 bma->cur->bc_private.b.allocated = 0; 932 bma->cur->bc_private.b.allocated = 0;
933 933
934 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK); 934 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
935 done: 935 done:
936 bma->logflags |= rval; 936 bma->logflags |= rval;
937 return error; 937 return error;
938 #undef LEFT 938 #undef LEFT
939 #undef RIGHT 939 #undef RIGHT
940 #undef PREV 940 #undef PREV
941 } 941 }
942 942
943 /* 943 /*
944 * Convert an unwritten allocation to a real allocation or vice versa. 944 * Convert an unwritten allocation to a real allocation or vice versa.
945 */ 945 */
946 STATIC int /* error */ 946 STATIC int /* error */
947 xfs_bmap_add_extent_unwritten_real( 947 xfs_bmap_add_extent_unwritten_real(
948 struct xfs_trans *tp, 948 struct xfs_trans *tp,
949 xfs_inode_t *ip, /* incore inode pointer */ 949 xfs_inode_t *ip, /* incore inode pointer */
950 xfs_extnum_t *idx, /* extent number to update/insert */ 950 xfs_extnum_t *idx, /* extent number to update/insert */
951 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 951 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
952 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 952 xfs_bmbt_irec_t *new, /* new data to add to file extents */
953 xfs_fsblock_t *first, /* pointer to firstblock variable */ 953 xfs_fsblock_t *first, /* pointer to firstblock variable */
954 xfs_bmap_free_t *flist, /* list of extents to be freed */ 954 xfs_bmap_free_t *flist, /* list of extents to be freed */
955 int *logflagsp) /* inode logging flags */ 955 int *logflagsp) /* inode logging flags */
956 { 956 {
957 xfs_btree_cur_t *cur; /* btree cursor */ 957 xfs_btree_cur_t *cur; /* btree cursor */
958 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ 958 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
959 int error; /* error return value */ 959 int error; /* error return value */
960 int i; /* temp state */ 960 int i; /* temp state */
961 xfs_ifork_t *ifp; /* inode fork pointer */ 961 xfs_ifork_t *ifp; /* inode fork pointer */
962 xfs_fileoff_t new_endoff; /* end offset of new entry */ 962 xfs_fileoff_t new_endoff; /* end offset of new entry */
963 xfs_exntst_t newext; /* new extent state */ 963 xfs_exntst_t newext; /* new extent state */
964 xfs_exntst_t oldext; /* old extent state */ 964 xfs_exntst_t oldext; /* old extent state */
965 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 965 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
966 /* left is 0, right is 1, prev is 2 */ 966 /* left is 0, right is 1, prev is 2 */
967 int rval=0; /* return value (logging flags) */ 967 int rval=0; /* return value (logging flags) */
968 int state = 0;/* state bits, accessed thru macros */ 968 int state = 0;/* state bits, accessed thru macros */
969 969
970 *logflagsp = 0; 970 *logflagsp = 0;
971 971
972 cur = *curp; 972 cur = *curp;
973 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 973 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
974 974
975 ASSERT(*idx >= 0); 975 ASSERT(*idx >= 0);
976 ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); 976 ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
977 ASSERT(!isnullstartblock(new->br_startblock)); 977 ASSERT(!isnullstartblock(new->br_startblock));
978 978
979 XFS_STATS_INC(xs_add_exlist); 979 XFS_STATS_INC(xs_add_exlist);
980 980
981 #define LEFT r[0] 981 #define LEFT r[0]
982 #define RIGHT r[1] 982 #define RIGHT r[1]
983 #define PREV r[2] 983 #define PREV r[2]
984 984
985 /* 985 /*
986 * Set up a bunch of variables to make the tests simpler. 986 * Set up a bunch of variables to make the tests simpler.
987 */ 987 */
988 error = 0; 988 error = 0;
989 ep = xfs_iext_get_ext(ifp, *idx); 989 ep = xfs_iext_get_ext(ifp, *idx);
990 xfs_bmbt_get_all(ep, &PREV); 990 xfs_bmbt_get_all(ep, &PREV);
991 newext = new->br_state; 991 newext = new->br_state;
992 oldext = (newext == XFS_EXT_UNWRITTEN) ? 992 oldext = (newext == XFS_EXT_UNWRITTEN) ?
993 XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 993 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
994 ASSERT(PREV.br_state == oldext); 994 ASSERT(PREV.br_state == oldext);
995 new_endoff = new->br_startoff + new->br_blockcount; 995 new_endoff = new->br_startoff + new->br_blockcount;
996 ASSERT(PREV.br_startoff <= new->br_startoff); 996 ASSERT(PREV.br_startoff <= new->br_startoff);
997 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 997 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
998 998
999 /* 999 /*
1000 * Set flags determining what part of the previous oldext allocation 1000 * Set flags determining what part of the previous oldext allocation
1001 * extent is being replaced by a newext allocation. 1001 * extent is being replaced by a newext allocation.
1002 */ 1002 */
1003 if (PREV.br_startoff == new->br_startoff) 1003 if (PREV.br_startoff == new->br_startoff)
1004 state |= BMAP_LEFT_FILLING; 1004 state |= BMAP_LEFT_FILLING;
1005 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 1005 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1006 state |= BMAP_RIGHT_FILLING; 1006 state |= BMAP_RIGHT_FILLING;
1007 1007
1008 /* 1008 /*
1009 * Check and set flags if this segment has a left neighbor. 1009 * Check and set flags if this segment has a left neighbor.
1010 * Don't set contiguous if the combined extent would be too large. 1010 * Don't set contiguous if the combined extent would be too large.
1011 */ 1011 */
1012 if (*idx > 0) { 1012 if (*idx > 0) {
1013 state |= BMAP_LEFT_VALID; 1013 state |= BMAP_LEFT_VALID;
1014 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT); 1014 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
1015 1015
1016 if (isnullstartblock(LEFT.br_startblock)) 1016 if (isnullstartblock(LEFT.br_startblock))
1017 state |= BMAP_LEFT_DELAY; 1017 state |= BMAP_LEFT_DELAY;
1018 } 1018 }
1019 1019
1020 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 1020 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1021 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 1021 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1022 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 1022 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1023 LEFT.br_state == newext && 1023 LEFT.br_state == newext &&
1024 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 1024 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1025 state |= BMAP_LEFT_CONTIG; 1025 state |= BMAP_LEFT_CONTIG;
1026 1026
1027 /* 1027 /*
1028 * Check and set flags if this segment has a right neighbor. 1028 * Check and set flags if this segment has a right neighbor.
1029 * Don't set contiguous if the combined extent would be too large. 1029 * Don't set contiguous if the combined extent would be too large.
1030 * Also check for all-three-contiguous being too large. 1030 * Also check for all-three-contiguous being too large.
1031 */ 1031 */
1032 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { 1032 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1033 state |= BMAP_RIGHT_VALID; 1033 state |= BMAP_RIGHT_VALID;
1034 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); 1034 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
1035 if (isnullstartblock(RIGHT.br_startblock)) 1035 if (isnullstartblock(RIGHT.br_startblock))
1036 state |= BMAP_RIGHT_DELAY; 1036 state |= BMAP_RIGHT_DELAY;
1037 } 1037 }
1038 1038
1039 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 1039 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1040 new_endoff == RIGHT.br_startoff && 1040 new_endoff == RIGHT.br_startoff &&
1041 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 1041 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1042 newext == RIGHT.br_state && 1042 newext == RIGHT.br_state &&
1043 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 1043 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1044 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 1044 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1045 BMAP_RIGHT_FILLING)) != 1045 BMAP_RIGHT_FILLING)) !=
1046 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 1046 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1047 BMAP_RIGHT_FILLING) || 1047 BMAP_RIGHT_FILLING) ||
1048 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 1048 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1049 <= MAXEXTLEN)) 1049 <= MAXEXTLEN))
1050 state |= BMAP_RIGHT_CONTIG; 1050 state |= BMAP_RIGHT_CONTIG;
1051 1051
1052 /* 1052 /*
1053 * Switch out based on the FILLING and CONTIG state bits. 1053 * Switch out based on the FILLING and CONTIG state bits.
1054 */ 1054 */
1055 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 1055 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1056 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 1056 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1057 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 1057 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1058 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1058 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1059 /* 1059 /*
1060 * Setting all of a previous oldext extent to newext. 1060 * Setting all of a previous oldext extent to newext.
1061 * The left and right neighbors are both contiguous with new. 1061 * The left and right neighbors are both contiguous with new.
1062 */ 1062 */
1063 --*idx; 1063 --*idx;
1064 1064
1065 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1065 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1066 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 1066 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1067 LEFT.br_blockcount + PREV.br_blockcount + 1067 LEFT.br_blockcount + PREV.br_blockcount +
1068 RIGHT.br_blockcount); 1068 RIGHT.br_blockcount);
1069 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1069 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1070 1070
1071 xfs_iext_remove(ip, *idx + 1, 2, state); 1071 xfs_iext_remove(ip, *idx + 1, 2, state);
1072 ip->i_d.di_nextents -= 2; 1072 ip->i_d.di_nextents -= 2;
1073 if (cur == NULL) 1073 if (cur == NULL)
1074 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1074 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1075 else { 1075 else {
1076 rval = XFS_ILOG_CORE; 1076 rval = XFS_ILOG_CORE;
1077 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, 1077 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1078 RIGHT.br_startblock, 1078 RIGHT.br_startblock,
1079 RIGHT.br_blockcount, &i))) 1079 RIGHT.br_blockcount, &i)))
1080 goto done; 1080 goto done;
1081 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1081 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1082 if ((error = xfs_btree_delete(cur, &i))) 1082 if ((error = xfs_btree_delete(cur, &i)))
1083 goto done; 1083 goto done;
1084 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1084 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1085 if ((error = xfs_btree_decrement(cur, 0, &i))) 1085 if ((error = xfs_btree_decrement(cur, 0, &i)))
1086 goto done; 1086 goto done;
1087 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1087 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1088 if ((error = xfs_btree_delete(cur, &i))) 1088 if ((error = xfs_btree_delete(cur, &i)))
1089 goto done; 1089 goto done;
1090 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1090 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1091 if ((error = xfs_btree_decrement(cur, 0, &i))) 1091 if ((error = xfs_btree_decrement(cur, 0, &i)))
1092 goto done; 1092 goto done;
1093 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1093 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1094 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 1094 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1095 LEFT.br_startblock, 1095 LEFT.br_startblock,
1096 LEFT.br_blockcount + PREV.br_blockcount + 1096 LEFT.br_blockcount + PREV.br_blockcount +
1097 RIGHT.br_blockcount, LEFT.br_state))) 1097 RIGHT.br_blockcount, LEFT.br_state)))
1098 goto done; 1098 goto done;
1099 } 1099 }
1100 break; 1100 break;
1101 1101
1102 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1102 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1103 /* 1103 /*
1104 * Setting all of a previous oldext extent to newext. 1104 * Setting all of a previous oldext extent to newext.
1105 * The left neighbor is contiguous, the right is not. 1105 * The left neighbor is contiguous, the right is not.
1106 */ 1106 */
1107 --*idx; 1107 --*idx;
1108 1108
1109 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1109 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1110 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), 1110 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1111 LEFT.br_blockcount + PREV.br_blockcount); 1111 LEFT.br_blockcount + PREV.br_blockcount);
1112 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1112 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1113 1113
1114 xfs_iext_remove(ip, *idx + 1, 1, state); 1114 xfs_iext_remove(ip, *idx + 1, 1, state);
1115 ip->i_d.di_nextents--; 1115 ip->i_d.di_nextents--;
1116 if (cur == NULL) 1116 if (cur == NULL)
1117 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1117 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1118 else { 1118 else {
1119 rval = XFS_ILOG_CORE; 1119 rval = XFS_ILOG_CORE;
1120 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 1120 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1121 PREV.br_startblock, PREV.br_blockcount, 1121 PREV.br_startblock, PREV.br_blockcount,
1122 &i))) 1122 &i)))
1123 goto done; 1123 goto done;
1124 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1124 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1125 if ((error = xfs_btree_delete(cur, &i))) 1125 if ((error = xfs_btree_delete(cur, &i)))
1126 goto done; 1126 goto done;
1127 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1127 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1128 if ((error = xfs_btree_decrement(cur, 0, &i))) 1128 if ((error = xfs_btree_decrement(cur, 0, &i)))
1129 goto done; 1129 goto done;
1130 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1130 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1131 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, 1131 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1132 LEFT.br_startblock, 1132 LEFT.br_startblock,
1133 LEFT.br_blockcount + PREV.br_blockcount, 1133 LEFT.br_blockcount + PREV.br_blockcount,
1134 LEFT.br_state))) 1134 LEFT.br_state)))
1135 goto done; 1135 goto done;
1136 } 1136 }
1137 break; 1137 break;
1138 1138
1139 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1139 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1140 /* 1140 /*
1141 * Setting all of a previous oldext extent to newext. 1141 * Setting all of a previous oldext extent to newext.
1142 * The right neighbor is contiguous, the left is not. 1142 * The right neighbor is contiguous, the left is not.
1143 */ 1143 */
1144 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1144 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1145 xfs_bmbt_set_blockcount(ep, 1145 xfs_bmbt_set_blockcount(ep,
1146 PREV.br_blockcount + RIGHT.br_blockcount); 1146 PREV.br_blockcount + RIGHT.br_blockcount);
1147 xfs_bmbt_set_state(ep, newext); 1147 xfs_bmbt_set_state(ep, newext);
1148 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1148 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1149 xfs_iext_remove(ip, *idx + 1, 1, state); 1149 xfs_iext_remove(ip, *idx + 1, 1, state);
1150 ip->i_d.di_nextents--; 1150 ip->i_d.di_nextents--;
1151 if (cur == NULL) 1151 if (cur == NULL)
1152 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1152 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1153 else { 1153 else {
1154 rval = XFS_ILOG_CORE; 1154 rval = XFS_ILOG_CORE;
1155 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, 1155 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1156 RIGHT.br_startblock, 1156 RIGHT.br_startblock,
1157 RIGHT.br_blockcount, &i))) 1157 RIGHT.br_blockcount, &i)))
1158 goto done; 1158 goto done;
1159 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1159 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1160 if ((error = xfs_btree_delete(cur, &i))) 1160 if ((error = xfs_btree_delete(cur, &i)))
1161 goto done; 1161 goto done;
1162 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1162 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1163 if ((error = xfs_btree_decrement(cur, 0, &i))) 1163 if ((error = xfs_btree_decrement(cur, 0, &i)))
1164 goto done; 1164 goto done;
1165 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1165 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1166 if ((error = xfs_bmbt_update(cur, new->br_startoff, 1166 if ((error = xfs_bmbt_update(cur, new->br_startoff,
1167 new->br_startblock, 1167 new->br_startblock,
1168 new->br_blockcount + RIGHT.br_blockcount, 1168 new->br_blockcount + RIGHT.br_blockcount,
1169 newext))) 1169 newext)))
1170 goto done; 1170 goto done;
1171 } 1171 }
1172 break; 1172 break;
1173 1173
1174 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 1174 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1175 /* 1175 /*
1176 * Setting all of a previous oldext extent to newext. 1176 * Setting all of a previous oldext extent to newext.
1177 * Neither the left nor right neighbors are contiguous with 1177 * Neither the left nor right neighbors are contiguous with
1178 * the new one. 1178 * the new one.
1179 */ 1179 */
1180 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1180 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1181 xfs_bmbt_set_state(ep, newext); 1181 xfs_bmbt_set_state(ep, newext);
1182 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1182 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1183 1183
1184 if (cur == NULL) 1184 if (cur == NULL)
1185 rval = XFS_ILOG_DEXT; 1185 rval = XFS_ILOG_DEXT;
1186 else { 1186 else {
1187 rval = 0; 1187 rval = 0;
1188 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 1188 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1189 new->br_startblock, new->br_blockcount, 1189 new->br_startblock, new->br_blockcount,
1190 &i))) 1190 &i)))
1191 goto done; 1191 goto done;
1192 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1192 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1193 if ((error = xfs_bmbt_update(cur, new->br_startoff, 1193 if ((error = xfs_bmbt_update(cur, new->br_startoff,
1194 new->br_startblock, new->br_blockcount, 1194 new->br_startblock, new->br_blockcount,
1195 newext))) 1195 newext)))
1196 goto done; 1196 goto done;
1197 } 1197 }
1198 break; 1198 break;
1199 1199
1200 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 1200 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1201 /* 1201 /*
1202 * Setting the first part of a previous oldext extent to newext. 1202 * Setting the first part of a previous oldext extent to newext.
1203 * The left neighbor is contiguous. 1203 * The left neighbor is contiguous.
1204 */ 1204 */
1205 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); 1205 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
1206 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1), 1206 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
1207 LEFT.br_blockcount + new->br_blockcount); 1207 LEFT.br_blockcount + new->br_blockcount);
1208 xfs_bmbt_set_startoff(ep, 1208 xfs_bmbt_set_startoff(ep,
1209 PREV.br_startoff + new->br_blockcount); 1209 PREV.br_startoff + new->br_blockcount);
1210 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); 1210 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
1211 1211
1212 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1212 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1213 xfs_bmbt_set_startblock(ep, 1213 xfs_bmbt_set_startblock(ep,
1214 new->br_startblock + new->br_blockcount); 1214 new->br_startblock + new->br_blockcount);
1215 xfs_bmbt_set_blockcount(ep, 1215 xfs_bmbt_set_blockcount(ep,
1216 PREV.br_blockcount - new->br_blockcount); 1216 PREV.br_blockcount - new->br_blockcount);
1217 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1217 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1218 1218
1219 --*idx; 1219 --*idx;
1220 1220
1221 if (cur == NULL) 1221 if (cur == NULL)
1222 rval = XFS_ILOG_DEXT; 1222 rval = XFS_ILOG_DEXT;
1223 else { 1223 else {
1224 rval = 0; 1224 rval = 0;
1225 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 1225 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1226 PREV.br_startblock, PREV.br_blockcount, 1226 PREV.br_startblock, PREV.br_blockcount,
1227 &i))) 1227 &i)))
1228 goto done; 1228 goto done;
1229 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1229 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1230 if ((error = xfs_bmbt_update(cur, 1230 if ((error = xfs_bmbt_update(cur,
1231 PREV.br_startoff + new->br_blockcount, 1231 PREV.br_startoff + new->br_blockcount,
1232 PREV.br_startblock + new->br_blockcount, 1232 PREV.br_startblock + new->br_blockcount,
1233 PREV.br_blockcount - new->br_blockcount, 1233 PREV.br_blockcount - new->br_blockcount,
1234 oldext))) 1234 oldext)))
1235 goto done; 1235 goto done;
1236 if ((error = xfs_btree_decrement(cur, 0, &i))) 1236 if ((error = xfs_btree_decrement(cur, 0, &i)))
1237 goto done; 1237 goto done;
1238 error = xfs_bmbt_update(cur, LEFT.br_startoff, 1238 error = xfs_bmbt_update(cur, LEFT.br_startoff,
1239 LEFT.br_startblock, 1239 LEFT.br_startblock,
1240 LEFT.br_blockcount + new->br_blockcount, 1240 LEFT.br_blockcount + new->br_blockcount,
1241 LEFT.br_state); 1241 LEFT.br_state);
1242 if (error) 1242 if (error)
1243 goto done; 1243 goto done;
1244 } 1244 }
1245 break; 1245 break;
1246 1246
1247 case BMAP_LEFT_FILLING: 1247 case BMAP_LEFT_FILLING:
1248 /* 1248 /*
1249 * Setting the first part of a previous oldext extent to newext. 1249 * Setting the first part of a previous oldext extent to newext.
1250 * The left neighbor is not contiguous. 1250 * The left neighbor is not contiguous.
1251 */ 1251 */
1252 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1252 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1253 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); 1253 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1254 xfs_bmbt_set_startoff(ep, new_endoff); 1254 xfs_bmbt_set_startoff(ep, new_endoff);
1255 xfs_bmbt_set_blockcount(ep, 1255 xfs_bmbt_set_blockcount(ep,
1256 PREV.br_blockcount - new->br_blockcount); 1256 PREV.br_blockcount - new->br_blockcount);
1257 xfs_bmbt_set_startblock(ep, 1257 xfs_bmbt_set_startblock(ep,
1258 new->br_startblock + new->br_blockcount); 1258 new->br_startblock + new->br_blockcount);
1259 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1259 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1260 1260
1261 xfs_iext_insert(ip, *idx, 1, new, state); 1261 xfs_iext_insert(ip, *idx, 1, new, state);
1262 ip->i_d.di_nextents++; 1262 ip->i_d.di_nextents++;
1263 if (cur == NULL) 1263 if (cur == NULL)
1264 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1264 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1265 else { 1265 else {
1266 rval = XFS_ILOG_CORE; 1266 rval = XFS_ILOG_CORE;
1267 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 1267 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1268 PREV.br_startblock, PREV.br_blockcount, 1268 PREV.br_startblock, PREV.br_blockcount,
1269 &i))) 1269 &i)))
1270 goto done; 1270 goto done;
1271 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1271 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1272 if ((error = xfs_bmbt_update(cur, 1272 if ((error = xfs_bmbt_update(cur,
1273 PREV.br_startoff + new->br_blockcount, 1273 PREV.br_startoff + new->br_blockcount,
1274 PREV.br_startblock + new->br_blockcount, 1274 PREV.br_startblock + new->br_blockcount,
1275 PREV.br_blockcount - new->br_blockcount, 1275 PREV.br_blockcount - new->br_blockcount,
1276 oldext))) 1276 oldext)))
1277 goto done; 1277 goto done;
1278 cur->bc_rec.b = *new; 1278 cur->bc_rec.b = *new;
1279 if ((error = xfs_btree_insert(cur, &i))) 1279 if ((error = xfs_btree_insert(cur, &i)))
1280 goto done; 1280 goto done;
1281 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1281 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1282 } 1282 }
1283 break; 1283 break;
1284 1284
1285 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1285 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1286 /* 1286 /*
1287 * Setting the last part of a previous oldext extent to newext. 1287 * Setting the last part of a previous oldext extent to newext.
1288 * The right neighbor is contiguous with the new allocation. 1288 * The right neighbor is contiguous with the new allocation.
1289 */ 1289 */
1290 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1290 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1291 xfs_bmbt_set_blockcount(ep, 1291 xfs_bmbt_set_blockcount(ep,
1292 PREV.br_blockcount - new->br_blockcount); 1292 PREV.br_blockcount - new->br_blockcount);
1293 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1293 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1294 1294
1295 ++*idx; 1295 ++*idx;
1296 1296
1297 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1297 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1298 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), 1298 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
1299 new->br_startoff, new->br_startblock, 1299 new->br_startoff, new->br_startblock,
1300 new->br_blockcount + RIGHT.br_blockcount, newext); 1300 new->br_blockcount + RIGHT.br_blockcount, newext);
1301 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1301 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1302 1302
1303 if (cur == NULL) 1303 if (cur == NULL)
1304 rval = XFS_ILOG_DEXT; 1304 rval = XFS_ILOG_DEXT;
1305 else { 1305 else {
1306 rval = 0; 1306 rval = 0;
1307 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 1307 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1308 PREV.br_startblock, 1308 PREV.br_startblock,
1309 PREV.br_blockcount, &i))) 1309 PREV.br_blockcount, &i)))
1310 goto done; 1310 goto done;
1311 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1311 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1312 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 1312 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1313 PREV.br_startblock, 1313 PREV.br_startblock,
1314 PREV.br_blockcount - new->br_blockcount, 1314 PREV.br_blockcount - new->br_blockcount,
1315 oldext))) 1315 oldext)))
1316 goto done; 1316 goto done;
1317 if ((error = xfs_btree_increment(cur, 0, &i))) 1317 if ((error = xfs_btree_increment(cur, 0, &i)))
1318 goto done; 1318 goto done;
1319 if ((error = xfs_bmbt_update(cur, new->br_startoff, 1319 if ((error = xfs_bmbt_update(cur, new->br_startoff,
1320 new->br_startblock, 1320 new->br_startblock,
1321 new->br_blockcount + RIGHT.br_blockcount, 1321 new->br_blockcount + RIGHT.br_blockcount,
1322 newext))) 1322 newext)))
1323 goto done; 1323 goto done;
1324 } 1324 }
1325 break; 1325 break;
1326 1326
1327 case BMAP_RIGHT_FILLING: 1327 case BMAP_RIGHT_FILLING:
1328 /* 1328 /*
1329 * Setting the last part of a previous oldext extent to newext. 1329 * Setting the last part of a previous oldext extent to newext.
1330 * The right neighbor is not contiguous. 1330 * The right neighbor is not contiguous.
1331 */ 1331 */
1332 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1332 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1333 xfs_bmbt_set_blockcount(ep, 1333 xfs_bmbt_set_blockcount(ep,
1334 PREV.br_blockcount - new->br_blockcount); 1334 PREV.br_blockcount - new->br_blockcount);
1335 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1335 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1336 1336
1337 ++*idx; 1337 ++*idx;
1338 xfs_iext_insert(ip, *idx, 1, new, state); 1338 xfs_iext_insert(ip, *idx, 1, new, state);
1339 1339
1340 ip->i_d.di_nextents++; 1340 ip->i_d.di_nextents++;
1341 if (cur == NULL) 1341 if (cur == NULL)
1342 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1342 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1343 else { 1343 else {
1344 rval = XFS_ILOG_CORE; 1344 rval = XFS_ILOG_CORE;
1345 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 1345 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1346 PREV.br_startblock, PREV.br_blockcount, 1346 PREV.br_startblock, PREV.br_blockcount,
1347 &i))) 1347 &i)))
1348 goto done; 1348 goto done;
1349 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1349 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1350 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, 1350 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1351 PREV.br_startblock, 1351 PREV.br_startblock,
1352 PREV.br_blockcount - new->br_blockcount, 1352 PREV.br_blockcount - new->br_blockcount,
1353 oldext))) 1353 oldext)))
1354 goto done; 1354 goto done;
1355 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 1355 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1356 new->br_startblock, new->br_blockcount, 1356 new->br_startblock, new->br_blockcount,
1357 &i))) 1357 &i)))
1358 goto done; 1358 goto done;
1359 XFS_WANT_CORRUPTED_GOTO(i == 0, done); 1359 XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1360 cur->bc_rec.b.br_state = XFS_EXT_NORM; 1360 cur->bc_rec.b.br_state = XFS_EXT_NORM;
1361 if ((error = xfs_btree_insert(cur, &i))) 1361 if ((error = xfs_btree_insert(cur, &i)))
1362 goto done; 1362 goto done;
1363 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1363 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1364 } 1364 }
1365 break; 1365 break;
1366 1366
1367 case 0: 1367 case 0:
1368 /* 1368 /*
1369 * Setting the middle part of a previous oldext extent to 1369 * Setting the middle part of a previous oldext extent to
1370 * newext. Contiguity is impossible here. 1370 * newext. Contiguity is impossible here.
1371 * One extent becomes three extents. 1371 * One extent becomes three extents.
1372 */ 1372 */
1373 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 1373 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1374 xfs_bmbt_set_blockcount(ep, 1374 xfs_bmbt_set_blockcount(ep,
1375 new->br_startoff - PREV.br_startoff); 1375 new->br_startoff - PREV.br_startoff);
1376 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 1376 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1377 1377
1378 r[0] = *new; 1378 r[0] = *new;
1379 r[1].br_startoff = new_endoff; 1379 r[1].br_startoff = new_endoff;
1380 r[1].br_blockcount = 1380 r[1].br_blockcount =
1381 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1381 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1382 r[1].br_startblock = new->br_startblock + new->br_blockcount; 1382 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1383 r[1].br_state = oldext; 1383 r[1].br_state = oldext;
1384 1384
1385 ++*idx; 1385 ++*idx;
1386 xfs_iext_insert(ip, *idx, 2, &r[0], state); 1386 xfs_iext_insert(ip, *idx, 2, &r[0], state);
1387 1387
1388 ip->i_d.di_nextents += 2; 1388 ip->i_d.di_nextents += 2;
1389 if (cur == NULL) 1389 if (cur == NULL)
1390 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1390 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1391 else { 1391 else {
1392 rval = XFS_ILOG_CORE; 1392 rval = XFS_ILOG_CORE;
1393 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, 1393 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1394 PREV.br_startblock, PREV.br_blockcount, 1394 PREV.br_startblock, PREV.br_blockcount,
1395 &i))) 1395 &i)))
1396 goto done; 1396 goto done;
1397 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1397 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1398 /* new right extent - oldext */ 1398 /* new right extent - oldext */
1399 if ((error = xfs_bmbt_update(cur, r[1].br_startoff, 1399 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1400 r[1].br_startblock, r[1].br_blockcount, 1400 r[1].br_startblock, r[1].br_blockcount,
1401 r[1].br_state))) 1401 r[1].br_state)))
1402 goto done; 1402 goto done;
1403 /* new left extent - oldext */ 1403 /* new left extent - oldext */
1404 cur->bc_rec.b = PREV; 1404 cur->bc_rec.b = PREV;
1405 cur->bc_rec.b.br_blockcount = 1405 cur->bc_rec.b.br_blockcount =
1406 new->br_startoff - PREV.br_startoff; 1406 new->br_startoff - PREV.br_startoff;
1407 if ((error = xfs_btree_insert(cur, &i))) 1407 if ((error = xfs_btree_insert(cur, &i)))
1408 goto done; 1408 goto done;
1409 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1409 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1410 /* 1410 /*
1411 * Reset the cursor to the position of the new extent 1411 * Reset the cursor to the position of the new extent
1412 * we are about to insert as we can't trust it after 1412 * we are about to insert as we can't trust it after
1413 * the previous insert. 1413 * the previous insert.
1414 */ 1414 */
1415 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 1415 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1416 new->br_startblock, new->br_blockcount, 1416 new->br_startblock, new->br_blockcount,
1417 &i))) 1417 &i)))
1418 goto done; 1418 goto done;
1419 XFS_WANT_CORRUPTED_GOTO(i == 0, done); 1419 XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1420 /* new middle extent - newext */ 1420 /* new middle extent - newext */
1421 cur->bc_rec.b.br_state = new->br_state; 1421 cur->bc_rec.b.br_state = new->br_state;
1422 if ((error = xfs_btree_insert(cur, &i))) 1422 if ((error = xfs_btree_insert(cur, &i)))
1423 goto done; 1423 goto done;
1424 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1424 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1425 } 1425 }
1426 break; 1426 break;
1427 1427
1428 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1428 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1429 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1429 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1430 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 1430 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1431 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1431 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1432 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1432 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1433 case BMAP_LEFT_CONTIG: 1433 case BMAP_LEFT_CONTIG:
1434 case BMAP_RIGHT_CONTIG: 1434 case BMAP_RIGHT_CONTIG:
1435 /* 1435 /*
1436 * These cases are all impossible. 1436 * These cases are all impossible.
1437 */ 1437 */
1438 ASSERT(0); 1438 ASSERT(0);
1439 } 1439 }
1440 1440
1441 /* convert to a btree if necessary */ 1441 /* convert to a btree if necessary */
1442 if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) { 1442 if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
1443 int tmp_logflags; /* partial log flag return val */ 1443 int tmp_logflags; /* partial log flag return val */
1444 1444
1445 ASSERT(cur == NULL); 1445 ASSERT(cur == NULL);
1446 error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, 1446 error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
1447 0, &tmp_logflags, XFS_DATA_FORK); 1447 0, &tmp_logflags, XFS_DATA_FORK);
1448 *logflagsp |= tmp_logflags; 1448 *logflagsp |= tmp_logflags;
1449 if (error) 1449 if (error)
1450 goto done; 1450 goto done;
1451 } 1451 }
1452 1452
1453 /* clear out the allocated field, done with it now in any case. */ 1453 /* clear out the allocated field, done with it now in any case. */
1454 if (cur) { 1454 if (cur) {
1455 cur->bc_private.b.allocated = 0; 1455 cur->bc_private.b.allocated = 0;
1456 *curp = cur; 1456 *curp = cur;
1457 } 1457 }
1458 1458
1459 xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK); 1459 xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
1460 done: 1460 done:
1461 *logflagsp |= rval; 1461 *logflagsp |= rval;
1462 return error; 1462 return error;
1463 #undef LEFT 1463 #undef LEFT
1464 #undef RIGHT 1464 #undef RIGHT
1465 #undef PREV 1465 #undef PREV
1466 } 1466 }
1467 1467
/*
 * Convert a hole to a delayed allocation.
 *
 * Insert the delalloc record @new at extent index @idx of the data fork,
 * merging it with delalloc neighbours where the pieces are contiguous and
 * the combined length stays within MAXEXTLEN.  Any reduction in the worst
 * case indirect block reservation caused by merging is returned to the
 * in-core free block counter.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			state;  /* state bits, accessed thru macros */
	xfs_filblks_t		temp=0;	/* temp for indirect calculations */

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	state = 0;
	/* delayed allocations are identified by a null start block */
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);

		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);

		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= MAXEXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		/* sum the indlen reservations carried by all three pieces */
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		/* the right neighbour was absorbed into the merged record */
		xfs_iext_remove(ip, *idx + 1, 1, state);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff,
			nullstartblock((int)newlen), temp, right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, *idx, 1, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		/*
		 * Merging shrank the worst case indirect block reservation,
		 * so return the now-excess blocks to the free space counter.
		 */
		xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
			(int64_t)(oldlen - newlen), 0);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
	}
}
1611 1611
1612 /* 1612 /*
1613 * Convert a hole to a real allocation. 1613 * Convert a hole to a real allocation.
1614 */ 1614 */
1615 STATIC int /* error */ 1615 STATIC int /* error */
1616 xfs_bmap_add_extent_hole_real( 1616 xfs_bmap_add_extent_hole_real(
1617 struct xfs_bmalloca *bma, 1617 struct xfs_bmalloca *bma,
1618 int whichfork) 1618 int whichfork)
1619 { 1619 {
1620 struct xfs_bmbt_irec *new = &bma->got; 1620 struct xfs_bmbt_irec *new = &bma->got;
1621 int error; /* error return value */ 1621 int error; /* error return value */
1622 int i; /* temp state */ 1622 int i; /* temp state */
1623 xfs_ifork_t *ifp; /* inode fork pointer */ 1623 xfs_ifork_t *ifp; /* inode fork pointer */
1624 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 1624 xfs_bmbt_irec_t left; /* left neighbor extent entry */
1625 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 1625 xfs_bmbt_irec_t right; /* right neighbor extent entry */
1626 int rval=0; /* return value (logging flags) */ 1626 int rval=0; /* return value (logging flags) */
1627 int state; /* state bits, accessed thru macros */ 1627 int state; /* state bits, accessed thru macros */
1628 1628
1629 ifp = XFS_IFORK_PTR(bma->ip, whichfork); 1629 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1630 1630
1631 ASSERT(bma->idx >= 0); 1631 ASSERT(bma->idx >= 0);
1632 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); 1632 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
1633 ASSERT(!isnullstartblock(new->br_startblock)); 1633 ASSERT(!isnullstartblock(new->br_startblock));
1634 ASSERT(!bma->cur || 1634 ASSERT(!bma->cur ||
1635 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 1635 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1636 1636
1637 XFS_STATS_INC(xs_add_exlist); 1637 XFS_STATS_INC(xs_add_exlist);
1638 1638
1639 state = 0; 1639 state = 0;
1640 if (whichfork == XFS_ATTR_FORK) 1640 if (whichfork == XFS_ATTR_FORK)
1641 state |= BMAP_ATTRFORK; 1641 state |= BMAP_ATTRFORK;
1642 1642
1643 /* 1643 /*
1644 * Check and set flags if this segment has a left neighbor. 1644 * Check and set flags if this segment has a left neighbor.
1645 */ 1645 */
1646 if (bma->idx > 0) { 1646 if (bma->idx > 0) {
1647 state |= BMAP_LEFT_VALID; 1647 state |= BMAP_LEFT_VALID;
1648 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left); 1648 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
1649 if (isnullstartblock(left.br_startblock)) 1649 if (isnullstartblock(left.br_startblock))
1650 state |= BMAP_LEFT_DELAY; 1650 state |= BMAP_LEFT_DELAY;
1651 } 1651 }
1652 1652
1653 /* 1653 /*
1654 * Check and set flags if this segment has a current value. 1654 * Check and set flags if this segment has a current value.
1655 * Not true if we're inserting into the "hole" at eof. 1655 * Not true if we're inserting into the "hole" at eof.
1656 */ 1656 */
1657 if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { 1657 if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1658 state |= BMAP_RIGHT_VALID; 1658 state |= BMAP_RIGHT_VALID;
1659 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right); 1659 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
1660 if (isnullstartblock(right.br_startblock)) 1660 if (isnullstartblock(right.br_startblock))
1661 state |= BMAP_RIGHT_DELAY; 1661 state |= BMAP_RIGHT_DELAY;
1662 } 1662 }
1663 1663
1664 /* 1664 /*
1665 * We're inserting a real allocation between "left" and "right". 1665 * We're inserting a real allocation between "left" and "right".
1666 * Set the contiguity flags. Don't let extents get too large. 1666 * Set the contiguity flags. Don't let extents get too large.
1667 */ 1667 */
1668 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 1668 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1669 left.br_startoff + left.br_blockcount == new->br_startoff && 1669 left.br_startoff + left.br_blockcount == new->br_startoff &&
1670 left.br_startblock + left.br_blockcount == new->br_startblock && 1670 left.br_startblock + left.br_blockcount == new->br_startblock &&
1671 left.br_state == new->br_state && 1671 left.br_state == new->br_state &&
1672 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 1672 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1673 state |= BMAP_LEFT_CONTIG; 1673 state |= BMAP_LEFT_CONTIG;
1674 1674
1675 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 1675 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1676 new->br_startoff + new->br_blockcount == right.br_startoff && 1676 new->br_startoff + new->br_blockcount == right.br_startoff &&
1677 new->br_startblock + new->br_blockcount == right.br_startblock && 1677 new->br_startblock + new->br_blockcount == right.br_startblock &&
1678 new->br_state == right.br_state && 1678 new->br_state == right.br_state &&
1679 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 1679 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1680 (!(state & BMAP_LEFT_CONTIG) || 1680 (!(state & BMAP_LEFT_CONTIG) ||
1681 left.br_blockcount + new->br_blockcount + 1681 left.br_blockcount + new->br_blockcount +
1682 right.br_blockcount <= MAXEXTLEN)) 1682 right.br_blockcount <= MAXEXTLEN))
1683 state |= BMAP_RIGHT_CONTIG; 1683 state |= BMAP_RIGHT_CONTIG;
1684 1684
1685 error = 0; 1685 error = 0;
1686 /* 1686 /*
1687 * Select which case we're in here, and implement it. 1687 * Select which case we're in here, and implement it.
1688 */ 1688 */
1689 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 1689 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1690 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1690 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1691 /* 1691 /*
1692 * New allocation is contiguous with real allocations on the 1692 * New allocation is contiguous with real allocations on the
1693 * left and on the right. 1693 * left and on the right.
1694 * Merge all three into a single extent record. 1694 * Merge all three into a single extent record.
1695 */ 1695 */
1696 --bma->idx; 1696 --bma->idx;
1697 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1697 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1698 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 1698 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1699 left.br_blockcount + new->br_blockcount + 1699 left.br_blockcount + new->br_blockcount +
1700 right.br_blockcount); 1700 right.br_blockcount);
1701 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1701 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1702 1702
1703 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); 1703 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1704 1704
1705 XFS_IFORK_NEXT_SET(bma->ip, whichfork, 1705 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
1706 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1); 1706 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
1707 if (bma->cur == NULL) { 1707 if (bma->cur == NULL) {
1708 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 1708 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
1709 } else { 1709 } else {
1710 rval = XFS_ILOG_CORE; 1710 rval = XFS_ILOG_CORE;
1711 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff, 1711 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
1712 right.br_startblock, right.br_blockcount, 1712 right.br_startblock, right.br_blockcount,
1713 &i); 1713 &i);
1714 if (error) 1714 if (error)
1715 goto done; 1715 goto done;
1716 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1716 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1717 error = xfs_btree_delete(bma->cur, &i); 1717 error = xfs_btree_delete(bma->cur, &i);
1718 if (error) 1718 if (error)
1719 goto done; 1719 goto done;
1720 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1720 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1721 error = xfs_btree_decrement(bma->cur, 0, &i); 1721 error = xfs_btree_decrement(bma->cur, 0, &i);
1722 if (error) 1722 if (error)
1723 goto done; 1723 goto done;
1724 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1724 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1725 error = xfs_bmbt_update(bma->cur, left.br_startoff, 1725 error = xfs_bmbt_update(bma->cur, left.br_startoff,
1726 left.br_startblock, 1726 left.br_startblock,
1727 left.br_blockcount + 1727 left.br_blockcount +
1728 new->br_blockcount + 1728 new->br_blockcount +
1729 right.br_blockcount, 1729 right.br_blockcount,
1730 left.br_state); 1730 left.br_state);
1731 if (error) 1731 if (error)
1732 goto done; 1732 goto done;
1733 } 1733 }
1734 break; 1734 break;
1735 1735
1736 case BMAP_LEFT_CONTIG: 1736 case BMAP_LEFT_CONTIG:
1737 /* 1737 /*
1738 * New allocation is contiguous with a real allocation 1738 * New allocation is contiguous with a real allocation
1739 * on the left. 1739 * on the left.
1740 * Merge the new allocation with the left neighbor. 1740 * Merge the new allocation with the left neighbor.
1741 */ 1741 */
1742 --bma->idx; 1742 --bma->idx;
1743 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1743 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1744 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), 1744 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1745 left.br_blockcount + new->br_blockcount); 1745 left.br_blockcount + new->br_blockcount);
1746 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1746 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1747 1747
1748 if (bma->cur == NULL) { 1748 if (bma->cur == NULL) {
1749 rval = xfs_ilog_fext(whichfork); 1749 rval = xfs_ilog_fext(whichfork);
1750 } else { 1750 } else {
1751 rval = 0; 1751 rval = 0;
1752 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff, 1752 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
1753 left.br_startblock, left.br_blockcount, 1753 left.br_startblock, left.br_blockcount,
1754 &i); 1754 &i);
1755 if (error) 1755 if (error)
1756 goto done; 1756 goto done;
1757 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1757 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1758 error = xfs_bmbt_update(bma->cur, left.br_startoff, 1758 error = xfs_bmbt_update(bma->cur, left.br_startoff,
1759 left.br_startblock, 1759 left.br_startblock,
1760 left.br_blockcount + 1760 left.br_blockcount +
1761 new->br_blockcount, 1761 new->br_blockcount,
1762 left.br_state); 1762 left.br_state);
1763 if (error) 1763 if (error)
1764 goto done; 1764 goto done;
1765 } 1765 }
1766 break; 1766 break;
1767 1767
1768 case BMAP_RIGHT_CONTIG: 1768 case BMAP_RIGHT_CONTIG:
1769 /* 1769 /*
1770 * New allocation is contiguous with a real allocation 1770 * New allocation is contiguous with a real allocation
1771 * on the right. 1771 * on the right.
1772 * Merge the new allocation with the right neighbor. 1772 * Merge the new allocation with the right neighbor.
1773 */ 1773 */
1774 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); 1774 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1775 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx), 1775 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
1776 new->br_startoff, new->br_startblock, 1776 new->br_startoff, new->br_startblock,
1777 new->br_blockcount + right.br_blockcount, 1777 new->br_blockcount + right.br_blockcount,
1778 right.br_state); 1778 right.br_state);
1779 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); 1779 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1780 1780
1781 if (bma->cur == NULL) { 1781 if (bma->cur == NULL) {
1782 rval = xfs_ilog_fext(whichfork); 1782 rval = xfs_ilog_fext(whichfork);
1783 } else { 1783 } else {
1784 rval = 0; 1784 rval = 0;
1785 error = xfs_bmbt_lookup_eq(bma->cur, 1785 error = xfs_bmbt_lookup_eq(bma->cur,
1786 right.br_startoff, 1786 right.br_startoff,
1787 right.br_startblock, 1787 right.br_startblock,
1788 right.br_blockcount, &i); 1788 right.br_blockcount, &i);
1789 if (error) 1789 if (error)
1790 goto done; 1790 goto done;
1791 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1791 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1792 error = xfs_bmbt_update(bma->cur, new->br_startoff, 1792 error = xfs_bmbt_update(bma->cur, new->br_startoff,
1793 new->br_startblock, 1793 new->br_startblock,
1794 new->br_blockcount + 1794 new->br_blockcount +
1795 right.br_blockcount, 1795 right.br_blockcount,
1796 right.br_state); 1796 right.br_state);
1797 if (error) 1797 if (error)
1798 goto done; 1798 goto done;
1799 } 1799 }
1800 break; 1800 break;
1801 1801
1802 case 0: 1802 case 0:
1803 /* 1803 /*
1804 * New allocation is not contiguous with another 1804 * New allocation is not contiguous with another
1805 * real allocation. 1805 * real allocation.
1806 * Insert a new entry. 1806 * Insert a new entry.
1807 */ 1807 */
1808 xfs_iext_insert(bma->ip, bma->idx, 1, new, state); 1808 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
1809 XFS_IFORK_NEXT_SET(bma->ip, whichfork, 1809 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
1810 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1); 1810 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
1811 if (bma->cur == NULL) { 1811 if (bma->cur == NULL) {
1812 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 1812 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
1813 } else { 1813 } else {
1814 rval = XFS_ILOG_CORE; 1814 rval = XFS_ILOG_CORE;
1815 error = xfs_bmbt_lookup_eq(bma->cur, 1815 error = xfs_bmbt_lookup_eq(bma->cur,
1816 new->br_startoff, 1816 new->br_startoff,
1817 new->br_startblock, 1817 new->br_startblock,
1818 new->br_blockcount, &i); 1818 new->br_blockcount, &i);
1819 if (error) 1819 if (error)
1820 goto done; 1820 goto done;
1821 XFS_WANT_CORRUPTED_GOTO(i == 0, done); 1821 XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1822 bma->cur->bc_rec.b.br_state = new->br_state; 1822 bma->cur->bc_rec.b.br_state = new->br_state;
1823 error = xfs_btree_insert(bma->cur, &i); 1823 error = xfs_btree_insert(bma->cur, &i);
1824 if (error) 1824 if (error)
1825 goto done; 1825 goto done;
1826 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1826 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1827 } 1827 }
1828 break; 1828 break;
1829 } 1829 }
1830 1830
1831 /* convert to a btree if necessary */ 1831 /* convert to a btree if necessary */
1832 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1832 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1833 int tmp_logflags; /* partial log flag return val */ 1833 int tmp_logflags; /* partial log flag return val */
1834 1834
1835 ASSERT(bma->cur == NULL); 1835 ASSERT(bma->cur == NULL);
1836 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1836 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1837 bma->firstblock, bma->flist, &bma->cur, 1837 bma->firstblock, bma->flist, &bma->cur,
1838 0, &tmp_logflags, whichfork); 1838 0, &tmp_logflags, whichfork);
1839 bma->logflags |= tmp_logflags; 1839 bma->logflags |= tmp_logflags;
1840 if (error) 1840 if (error)
1841 goto done; 1841 goto done;
1842 } 1842 }
1843 1843
1844 /* clear out the allocated field, done with it now in any case. */ 1844 /* clear out the allocated field, done with it now in any case. */
1845 if (bma->cur) 1845 if (bma->cur)
1846 bma->cur->bc_private.b.allocated = 0; 1846 bma->cur->bc_private.b.allocated = 0;
1847 1847
1848 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 1848 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1849 done: 1849 done:
1850 bma->logflags |= rval; 1850 bma->logflags |= rval;
1851 return error; 1851 return error;
1852 } 1852 }
1853 1853
/*
 * Adjust the size of the new extent based on di_extsize and rt extsize.
 *
 * Expand and/or slide the requested range (*offp, *lenp) so that it is
 * aligned to @extsz boundaries while still covering the original request,
 * and without overlapping the neighbouring extents described by @prevp
 * (previous) and @gotp (next).  A no-op when @convert is set (we are
 * overwriting an already-allocated unwritten extent) or when the request
 * already lies inside an existing extent.
 *
 * Returns 0 on success with *offp/*lenp updated, or EINVAL when a
 * realtime allocation cannot be trimmed to a multiple of the rt extent
 * size that still covers the original request.
 */
STATIC int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	temp = do_mod(orig_off, extsz);
	if (temp) {
		/* Grow the range backwards to the extsz boundary. */
		align_alen += temp;
		align_off -= temp;
	}
	/*
	 * Same adjustment for the end of the requested area.
	 */
	if ((temp = (align_alen % extsz))) {
		align_alen += extsz - temp;
	}
	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 * A hole on the left imposes no lower bound beyond its start.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}

	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return XFS_ERROR(EINVAL);
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return XFS_ERROR(EINVAL);
	} else {
		/* Non-rt path must always still cover the original range. */
		ASSERT(orig_off >= align_off);
		ASSERT(orig_end <= align_off + align_alen);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
2016 2016
/* Max gap-to-allocation-length ratio before we ignore an adjacent extent. */
#define XFS_ALLOC_GAP_UNITS	4

/*
 * Heuristically pick a starting disk block (ap->blkno) for an allocation,
 * based on the extents adjacent to the request:
 *
 *  - allocating at EOF after a real previous extent: start just past its
 *    last block, adjusted for any file-offset gap between them;
 *  - otherwise weigh both neighbours (previous/left and next/right) and
 *    choose the candidate with the smaller block-number gap.
 *
 * A candidate is discarded when it is not a valid on-disk address
 * (ISVALID) or when, for a non-realtime allocation with ap->firstblock
 * set, it lies in a different AG than *ap->firstblock.  If no candidate
 * survives, ap->blkno is left as the caller set it.
 */
STATIC void
xfs_bmap_adjacent(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_mount_t	*mp;		/* mount point structure */
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		rt;		/* true if inode is realtime */

/*
 * ISVALID(x, y): is block number x a valid on-disk address?  For
 * realtime it only has to be inside the rt device; otherwise it must be
 * in the same AG as y and within that AG's block count.
 */
#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)

	mp = ap->ip->i_mount;
	nullfb = *ap->firstblock == NULLFSBLOCK;
	/* Only user data allocations on a realtime inode go to the rt device. */
	rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	/*
	 * If allocating at eof, and there's a previous real block,
	 * try to use its last block as our starting point.
	 */
	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
	    !isnullstartblock(ap->prev.br_startblock) &&
	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
		    ap->prev.br_startblock)) {
		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
		/*
		 * Adjust for the gap between prevp and us.
		 */
		adjust = ap->offset -
			(ap->prev.br_startoff + ap->prev.br_blockcount);
		if (adjust &&
		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
			ap->blkno += adjust;
	}
	/*
	 * If not at eof, then compare the two neighbor blocks.
	 * Figure out whether either one gives us a good starting point,
	 * and pick the better one.
	 */
	else if (!ap->eof) {
		xfs_fsblock_t	gotbno;		/* right side block number */
		xfs_fsblock_t	gotdiff=0;	/* right side difference */
		xfs_fsblock_t	prevbno;	/* left side block number */
		xfs_fsblock_t	prevdiff=0;	/* left side difference */

		/*
		 * If there's a previous (left) block, select a requested
		 * start block based on it.
		 */
		if (ap->prev.br_startoff != NULLFILEOFF &&
		    !isnullstartblock(ap->prev.br_startblock) &&
		    (prevbno = ap->prev.br_startblock +
			       ap->prev.br_blockcount) &&
		    ISVALID(prevbno, ap->prev.br_startblock)) {
			/*
			 * Calculate gap to end of previous block.
			 */
			adjust = prevdiff = ap->offset -
				(ap->prev.br_startoff +
				 ap->prev.br_blockcount);
			/*
			 * Figure the startblock based on the previous block's
			 * end and the gap size.
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the end of the previous block.
			 */
			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(prevbno + prevdiff,
				    ap->prev.br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
				prevbno = NULLFSBLOCK;
		}
		/*
		 * No previous block or can't follow it, just default.
		 */
		else
			prevbno = NULLFSBLOCK;
		/*
		 * If there's a following (right) block, select a requested
		 * start block based on it.
		 */
		if (!isnullstartblock(ap->got.br_startblock)) {
			/*
			 * Calculate gap to start of next block.
			 */
			adjust = gotdiff = ap->got.br_startoff - ap->offset;
			/*
			 * Figure the startblock based on the next block's
			 * start and the gap size.
			 */
			gotbno = ap->got.br_startblock;
			/*
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the start of the next block
			 * offset by our length.
			 */
			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(gotbno - gotdiff, gotbno))
				gotbno -= adjust;
			else if (ISVALID(gotbno - ap->length, gotbno)) {
				gotbno -= ap->length;
				gotdiff += adjust - ap->length;
			} else
				gotdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
				gotbno = NULLFSBLOCK;
		}
		/*
		 * No next block, just default.
		 */
		else
			gotbno = NULLFSBLOCK;
		/*
		 * If both valid, pick the better one, else the only good
		 * one, else ap->blkno is already set (to 0 or the inode block).
		 */
		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
		else if (prevbno != NULLFSBLOCK)
			ap->blkno = prevbno;
		else if (gotbno != NULLFSBLOCK)
			ap->blkno = gotbno;
	}
#undef ISVALID
}
2166 2166
2167 STATIC int 2167 STATIC int
2168 xfs_bmap_rtalloc( 2168 xfs_bmap_rtalloc(
2169 xfs_bmalloca_t *ap) /* bmap alloc argument struct */ 2169 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2170 { 2170 {
2171 xfs_alloctype_t atype = 0; /* type for allocation routines */ 2171 xfs_alloctype_t atype = 0; /* type for allocation routines */
2172 int error; /* error return value */ 2172 int error; /* error return value */
2173 xfs_mount_t *mp; /* mount point structure */ 2173 xfs_mount_t *mp; /* mount point structure */
2174 xfs_extlen_t prod = 0; /* product factor for allocators */ 2174 xfs_extlen_t prod = 0; /* product factor for allocators */
2175 xfs_extlen_t ralen = 0; /* realtime allocation length */ 2175 xfs_extlen_t ralen = 0; /* realtime allocation length */
2176 xfs_extlen_t align; /* minimum allocation alignment */ 2176 xfs_extlen_t align; /* minimum allocation alignment */
2177 xfs_rtblock_t rtb; 2177 xfs_rtblock_t rtb;
2178 2178
2179 mp = ap->ip->i_mount; 2179 mp = ap->ip->i_mount;
2180 align = xfs_get_extsz_hint(ap->ip); 2180 align = xfs_get_extsz_hint(ap->ip);
2181 prod = align / mp->m_sb.sb_rextsize; 2181 prod = align / mp->m_sb.sb_rextsize;
2182 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 2182 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
2183 align, 1, ap->eof, 0, 2183 align, 1, ap->eof, 0,
2184 ap->conv, &ap->offset, &ap->length); 2184 ap->conv, &ap->offset, &ap->length);
2185 if (error) 2185 if (error)
2186 return error; 2186 return error;
2187 ASSERT(ap->length); 2187 ASSERT(ap->length);
2188 ASSERT(ap->length % mp->m_sb.sb_rextsize == 0); 2188 ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
2189 2189
2190 /* 2190 /*
2191 * If the offset & length are not perfectly aligned 2191 * If the offset & length are not perfectly aligned
2192 * then kill prod, it will just get us in trouble. 2192 * then kill prod, it will just get us in trouble.
2193 */ 2193 */
2194 if (do_mod(ap->offset, align) || ap->length % align) 2194 if (do_mod(ap->offset, align) || ap->length % align)
2195 prod = 1; 2195 prod = 1;
2196 /* 2196 /*
2197 * Set ralen to be the actual requested length in rtextents. 2197 * Set ralen to be the actual requested length in rtextents.
2198 */ 2198 */
2199 ralen = ap->length / mp->m_sb.sb_rextsize; 2199 ralen = ap->length / mp->m_sb.sb_rextsize;
2200 /* 2200 /*
2201 * If the old value was close enough to MAXEXTLEN that 2201 * If the old value was close enough to MAXEXTLEN that
2202 * we rounded up to it, cut it back so it's valid again. 2202 * we rounded up to it, cut it back so it's valid again.
2203 * Note that if it's a really large request (bigger than 2203 * Note that if it's a really large request (bigger than
2204 * MAXEXTLEN), we don't hear about that number, and can't 2204 * MAXEXTLEN), we don't hear about that number, and can't
2205 * adjust the starting point to match it. 2205 * adjust the starting point to match it.
2206 */ 2206 */
2207 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) 2207 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2208 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; 2208 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2209 2209
2210 /* 2210 /*
2211 * Lock out other modifications to the RT bitmap inode. 2211 * Lock out other modifications to the RT bitmap inode.
2212 */ 2212 */
2213 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); 2213 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
2214 xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL); 2214 xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
2215 2215
2216 /* 2216 /*
2217 * If it's an allocation to an empty file at offset 0, 2217 * If it's an allocation to an empty file at offset 0,
2218 * pick an extent that will space things out in the rt area. 2218 * pick an extent that will space things out in the rt area.
2219 */ 2219 */
2220 if (ap->eof && ap->offset == 0) { 2220 if (ap->eof && ap->offset == 0) {
2221 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */ 2221 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
2222 2222
2223 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); 2223 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2224 if (error) 2224 if (error)
2225 return error; 2225 return error;
2226 ap->blkno = rtx * mp->m_sb.sb_rextsize; 2226 ap->blkno = rtx * mp->m_sb.sb_rextsize;
2227 } else { 2227 } else {
2228 ap->blkno = 0; 2228 ap->blkno = 0;
2229 } 2229 }
2230 2230
2231 xfs_bmap_adjacent(ap); 2231 xfs_bmap_adjacent(ap);
2232 2232
2233 /* 2233 /*
2234 * Realtime allocation, done through xfs_rtallocate_extent. 2234 * Realtime allocation, done through xfs_rtallocate_extent.
2235 */ 2235 */
2236 atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; 2236 atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
2237 do_div(ap->blkno, mp->m_sb.sb_rextsize); 2237 do_div(ap->blkno, mp->m_sb.sb_rextsize);
2238 rtb = ap->blkno; 2238 rtb = ap->blkno;
2239 ap->length = ralen; 2239 ap->length = ralen;
2240 if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length, 2240 if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
2241 &ralen, atype, ap->wasdel, prod, &rtb))) 2241 &ralen, atype, ap->wasdel, prod, &rtb)))
2242 return error; 2242 return error;
2243 if (rtb == NULLFSBLOCK && prod > 1 && 2243 if (rtb == NULLFSBLOCK && prod > 1 &&
2244 (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, 2244 (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
2245 ap->length, &ralen, atype, 2245 ap->length, &ralen, atype,
2246 ap->wasdel, 1, &rtb))) 2246 ap->wasdel, 1, &rtb)))
2247 return error; 2247 return error;
2248 ap->blkno = rtb; 2248 ap->blkno = rtb;
2249 if (ap->blkno != NULLFSBLOCK) { 2249 if (ap->blkno != NULLFSBLOCK) {
2250 ap->blkno *= mp->m_sb.sb_rextsize; 2250 ap->blkno *= mp->m_sb.sb_rextsize;
2251 ralen *= mp->m_sb.sb_rextsize; 2251 ralen *= mp->m_sb.sb_rextsize;
2252 ap->length = ralen; 2252 ap->length = ralen;
2253 ap->ip->i_d.di_nblocks += ralen; 2253 ap->ip->i_d.di_nblocks += ralen;
2254 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 2254 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2255 if (ap->wasdel) 2255 if (ap->wasdel)
2256 ap->ip->i_delayed_blks -= ralen; 2256 ap->ip->i_delayed_blks -= ralen;
2257 /* 2257 /*
2258 * Adjust the disk quota also. This was reserved 2258 * Adjust the disk quota also. This was reserved
2259 * earlier. 2259 * earlier.
2260 */ 2260 */
2261 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 2261 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2262 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : 2262 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2263 XFS_TRANS_DQ_RTBCOUNT, (long) ralen); 2263 XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
2264 } else { 2264 } else {
2265 ap->length = 0; 2265 ap->length = 0;
2266 } 2266 }
2267 return 0; 2267 return 0;
2268 } 2268 }
2269 2269
2270 STATIC int 2270 STATIC int
2271 xfs_bmap_btalloc_nullfb( 2271 xfs_bmap_btalloc_nullfb(
2272 struct xfs_bmalloca *ap, 2272 struct xfs_bmalloca *ap,
2273 struct xfs_alloc_arg *args, 2273 struct xfs_alloc_arg *args,
2274 xfs_extlen_t *blen) 2274 xfs_extlen_t *blen)
2275 { 2275 {
2276 struct xfs_mount *mp = ap->ip->i_mount; 2276 struct xfs_mount *mp = ap->ip->i_mount;
2277 struct xfs_perag *pag; 2277 struct xfs_perag *pag;
2278 xfs_agnumber_t ag, startag; 2278 xfs_agnumber_t ag, startag;
2279 int notinit = 0; 2279 int notinit = 0;
2280 int error; 2280 int error;
2281 2281
2282 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) 2282 if (ap->userdata && xfs_inode_is_filestream(ap->ip))
2283 args->type = XFS_ALLOCTYPE_NEAR_BNO; 2283 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2284 else 2284 else
2285 args->type = XFS_ALLOCTYPE_START_BNO; 2285 args->type = XFS_ALLOCTYPE_START_BNO;
2286 args->total = ap->total; 2286 args->total = ap->total;
2287 2287
2288 /* 2288 /*
2289 * Search for an allocation group with a single extent large enough 2289 * Search for an allocation group with a single extent large enough
2290 * for the request. If one isn't found, then adjust the minimum 2290 * for the request. If one isn't found, then adjust the minimum
2291 * allocation size to the largest space found. 2291 * allocation size to the largest space found.
2292 */ 2292 */
2293 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 2293 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
2294 if (startag == NULLAGNUMBER) 2294 if (startag == NULLAGNUMBER)
2295 startag = ag = 0; 2295 startag = ag = 0;
2296 2296
2297 pag = xfs_perag_get(mp, ag); 2297 pag = xfs_perag_get(mp, ag);
2298 while (*blen < args->maxlen) { 2298 while (*blen < args->maxlen) {
2299 if (!pag->pagf_init) { 2299 if (!pag->pagf_init) {
2300 error = xfs_alloc_pagf_init(mp, args->tp, ag, 2300 error = xfs_alloc_pagf_init(mp, args->tp, ag,
2301 XFS_ALLOC_FLAG_TRYLOCK); 2301 XFS_ALLOC_FLAG_TRYLOCK);
2302 if (error) { 2302 if (error) {
2303 xfs_perag_put(pag); 2303 xfs_perag_put(pag);
2304 return error; 2304 return error;
2305 } 2305 }
2306 } 2306 }
2307 2307
2308 /* 2308 /*
2309 * See xfs_alloc_fix_freelist... 2309 * See xfs_alloc_fix_freelist...
2310 */ 2310 */
2311 if (pag->pagf_init) { 2311 if (pag->pagf_init) {
2312 xfs_extlen_t longest; 2312 xfs_extlen_t longest;
2313 longest = xfs_alloc_longest_free_extent(mp, pag); 2313 longest = xfs_alloc_longest_free_extent(mp, pag);
2314 if (*blen < longest) 2314 if (*blen < longest)
2315 *blen = longest; 2315 *blen = longest;
2316 } else 2316 } else
2317 notinit = 1; 2317 notinit = 1;
2318 2318
2319 if (xfs_inode_is_filestream(ap->ip)) { 2319 if (xfs_inode_is_filestream(ap->ip)) {
2320 if (*blen >= args->maxlen) 2320 if (*blen >= args->maxlen)
2321 break; 2321 break;
2322 2322
2323 if (ap->userdata) { 2323 if (ap->userdata) {
2324 /* 2324 /*
2325 * If startag is an invalid AG, we've 2325 * If startag is an invalid AG, we've
2326 * come here once before and 2326 * come here once before and
2327 * xfs_filestream_new_ag picked the 2327 * xfs_filestream_new_ag picked the
2328 * best currently available. 2328 * best currently available.
2329 * 2329 *
2330 * Don't continue looping, since we 2330 * Don't continue looping, since we
2331 * could loop forever. 2331 * could loop forever.
2332 */ 2332 */
2333 if (startag == NULLAGNUMBER) 2333 if (startag == NULLAGNUMBER)
2334 break; 2334 break;
2335 2335
2336 error = xfs_filestream_new_ag(ap, &ag); 2336 error = xfs_filestream_new_ag(ap, &ag);
2337 xfs_perag_put(pag); 2337 xfs_perag_put(pag);
2338 if (error) 2338 if (error)
2339 return error; 2339 return error;
2340 2340
2341 /* loop again to set 'blen'*/ 2341 /* loop again to set 'blen'*/
2342 startag = NULLAGNUMBER; 2342 startag = NULLAGNUMBER;
2343 pag = xfs_perag_get(mp, ag); 2343 pag = xfs_perag_get(mp, ag);
2344 continue; 2344 continue;
2345 } 2345 }
2346 } 2346 }
2347 if (++ag == mp->m_sb.sb_agcount) 2347 if (++ag == mp->m_sb.sb_agcount)
2348 ag = 0; 2348 ag = 0;
2349 if (ag == startag) 2349 if (ag == startag)
2350 break; 2350 break;
2351 xfs_perag_put(pag); 2351 xfs_perag_put(pag);
2352 pag = xfs_perag_get(mp, ag); 2352 pag = xfs_perag_get(mp, ag);
2353 } 2353 }
2354 xfs_perag_put(pag); 2354 xfs_perag_put(pag);
2355 2355
2356 /* 2356 /*
2357 * Since the above loop did a BUF_TRYLOCK, it is 2357 * Since the above loop did a BUF_TRYLOCK, it is
2358 * possible that there is space for this request. 2358 * possible that there is space for this request.
2359 */ 2359 */
2360 if (notinit || *blen < ap->minlen) 2360 if (notinit || *blen < ap->minlen)
2361 args->minlen = ap->minlen; 2361 args->minlen = ap->minlen;
2362 /* 2362 /*
2363 * If the best seen length is less than the request 2363 * If the best seen length is less than the request
2364 * length, use the best as the minimum. 2364 * length, use the best as the minimum.
2365 */ 2365 */
2366 else if (*blen < args->maxlen) 2366 else if (*blen < args->maxlen)
2367 args->minlen = *blen; 2367 args->minlen = *blen;
2368 /* 2368 /*
2369 * Otherwise we've seen an extent as big as maxlen, 2369 * Otherwise we've seen an extent as big as maxlen,
2370 * use that as the minimum. 2370 * use that as the minimum.
2371 */ 2371 */
2372 else 2372 else
2373 args->minlen = args->maxlen; 2373 args->minlen = args->maxlen;
2374 2374
2375 /* 2375 /*
2376 * set the failure fallback case to look in the selected 2376 * set the failure fallback case to look in the selected
2377 * AG as the stream may have moved. 2377 * AG as the stream may have moved.
2378 */ 2378 */
2379 if (xfs_inode_is_filestream(ap->ip)) 2379 if (xfs_inode_is_filestream(ap->ip))
2380 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 2380 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
2381 2381
2382 return 0; 2382 return 0;
2383 } 2383 }
2384 2384
2385 STATIC int 2385 STATIC int
2386 xfs_bmap_btalloc( 2386 xfs_bmap_btalloc(
2387 xfs_bmalloca_t *ap) /* bmap alloc argument struct */ 2387 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2388 { 2388 {
2389 xfs_mount_t *mp; /* mount point structure */ 2389 xfs_mount_t *mp; /* mount point structure */
2390 xfs_alloctype_t atype = 0; /* type for allocation routines */ 2390 xfs_alloctype_t atype = 0; /* type for allocation routines */
2391 xfs_extlen_t align; /* minimum allocation alignment */ 2391 xfs_extlen_t align; /* minimum allocation alignment */
2392 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 2392 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
2393 xfs_agnumber_t ag; 2393 xfs_agnumber_t ag;
2394 xfs_alloc_arg_t args; 2394 xfs_alloc_arg_t args;
2395 xfs_extlen_t blen; 2395 xfs_extlen_t blen;
2396 xfs_extlen_t nextminlen = 0; 2396 xfs_extlen_t nextminlen = 0;
2397 int nullfb; /* true if ap->firstblock isn't set */ 2397 int nullfb; /* true if ap->firstblock isn't set */
2398 int isaligned; 2398 int isaligned;
2399 int tryagain; 2399 int tryagain;
2400 int error; 2400 int error;
2401 2401
2402 ASSERT(ap->length); 2402 ASSERT(ap->length);
2403 2403
2404 mp = ap->ip->i_mount; 2404 mp = ap->ip->i_mount;
2405 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; 2405 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
2406 if (unlikely(align)) { 2406 if (unlikely(align)) {
2407 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 2407 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
2408 align, 0, ap->eof, 0, ap->conv, 2408 align, 0, ap->eof, 0, ap->conv,
2409 &ap->offset, &ap->length); 2409 &ap->offset, &ap->length);
2410 ASSERT(!error); 2410 ASSERT(!error);
2411 ASSERT(ap->length); 2411 ASSERT(ap->length);
2412 } 2412 }
2413 nullfb = *ap->firstblock == NULLFSBLOCK; 2413 nullfb = *ap->firstblock == NULLFSBLOCK;
2414 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); 2414 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
2415 if (nullfb) { 2415 if (nullfb) {
2416 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) { 2416 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
2417 ag = xfs_filestream_lookup_ag(ap->ip); 2417 ag = xfs_filestream_lookup_ag(ap->ip);
2418 ag = (ag != NULLAGNUMBER) ? ag : 0; 2418 ag = (ag != NULLAGNUMBER) ? ag : 0;
2419 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 2419 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
2420 } else { 2420 } else {
2421 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 2421 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2422 } 2422 }
2423 } else 2423 } else
2424 ap->blkno = *ap->firstblock; 2424 ap->blkno = *ap->firstblock;
2425 2425
2426 xfs_bmap_adjacent(ap); 2426 xfs_bmap_adjacent(ap);
2427 2427
2428 /* 2428 /*
2429 * If allowed, use ap->blkno; otherwise must use firstblock since 2429 * If allowed, use ap->blkno; otherwise must use firstblock since
2430 * it's in the right allocation group. 2430 * it's in the right allocation group.
2431 */ 2431 */
2432 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 2432 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
2433 ; 2433 ;
2434 else 2434 else
2435 ap->blkno = *ap->firstblock; 2435 ap->blkno = *ap->firstblock;
2436 /* 2436 /*
2437 * Normal allocation, done through xfs_alloc_vextent. 2437 * Normal allocation, done through xfs_alloc_vextent.
2438 */ 2438 */
2439 tryagain = isaligned = 0; 2439 tryagain = isaligned = 0;
2440 memset(&args, 0, sizeof(args)); 2440 memset(&args, 0, sizeof(args));
2441 args.tp = ap->tp; 2441 args.tp = ap->tp;
2442 args.mp = mp; 2442 args.mp = mp;
2443 args.fsbno = ap->blkno; 2443 args.fsbno = ap->blkno;
2444 args.stack_switch = ap->stack_switch;
2444 2445
2445 /* Trim the allocation back to the maximum an AG can fit. */ 2446 /* Trim the allocation back to the maximum an AG can fit. */
2446 args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp)); 2447 args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
2447 args.firstblock = *ap->firstblock; 2448 args.firstblock = *ap->firstblock;
2448 blen = 0; 2449 blen = 0;
2449 if (nullfb) { 2450 if (nullfb) {
2450 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 2451 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
2451 if (error) 2452 if (error)
2452 return error; 2453 return error;
2453 } else if (ap->flist->xbf_low) { 2454 } else if (ap->flist->xbf_low) {
2454 if (xfs_inode_is_filestream(ap->ip)) 2455 if (xfs_inode_is_filestream(ap->ip))
2455 args.type = XFS_ALLOCTYPE_FIRST_AG; 2456 args.type = XFS_ALLOCTYPE_FIRST_AG;
2456 else 2457 else
2457 args.type = XFS_ALLOCTYPE_START_BNO; 2458 args.type = XFS_ALLOCTYPE_START_BNO;
2458 args.total = args.minlen = ap->minlen; 2459 args.total = args.minlen = ap->minlen;
2459 } else { 2460 } else {
2460 args.type = XFS_ALLOCTYPE_NEAR_BNO; 2461 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2461 args.total = ap->total; 2462 args.total = ap->total;
2462 args.minlen = ap->minlen; 2463 args.minlen = ap->minlen;
2463 } 2464 }
2464 /* apply extent size hints if obtained earlier */ 2465 /* apply extent size hints if obtained earlier */
2465 if (unlikely(align)) { 2466 if (unlikely(align)) {
2466 args.prod = align; 2467 args.prod = align;
2467 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) 2468 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
2468 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2469 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2469 } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) { 2470 } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
2470 args.prod = 1; 2471 args.prod = 1;
2471 args.mod = 0; 2472 args.mod = 0;
2472 } else { 2473 } else {
2473 args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog; 2474 args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
2474 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) 2475 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
2475 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2476 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2476 } 2477 }
2477 /* 2478 /*
2478 * If we are not low on available data blocks, and the 2479 * If we are not low on available data blocks, and the
2479 * underlying logical volume manager is a stripe, and 2480 * underlying logical volume manager is a stripe, and
2480 * the file offset is zero then try to allocate data 2481 * the file offset is zero then try to allocate data
2481 * blocks on stripe unit boundary. 2482 * blocks on stripe unit boundary.
2482 * NOTE: ap->aeof is only set if the allocation length 2483 * NOTE: ap->aeof is only set if the allocation length
2483 * is >= the stripe unit and the allocation offset is 2484 * is >= the stripe unit and the allocation offset is
2484 * at the end of file. 2485 * at the end of file.
2485 */ 2486 */
2486 if (!ap->flist->xbf_low && ap->aeof) { 2487 if (!ap->flist->xbf_low && ap->aeof) {
2487 if (!ap->offset) { 2488 if (!ap->offset) {
2488 args.alignment = mp->m_dalign; 2489 args.alignment = mp->m_dalign;
2489 atype = args.type; 2490 atype = args.type;
2490 isaligned = 1; 2491 isaligned = 1;
2491 /* 2492 /*
2492 * Adjust for alignment 2493 * Adjust for alignment
2493 */ 2494 */
2494 if (blen > args.alignment && blen <= args.maxlen) 2495 if (blen > args.alignment && blen <= args.maxlen)
2495 args.minlen = blen - args.alignment; 2496 args.minlen = blen - args.alignment;
2496 args.minalignslop = 0; 2497 args.minalignslop = 0;
2497 } else { 2498 } else {
2498 /* 2499 /*
2499 * First try an exact bno allocation. 2500 * First try an exact bno allocation.
2500 * If it fails then do a near or start bno 2501 * If it fails then do a near or start bno
2501 * allocation with alignment turned on. 2502 * allocation with alignment turned on.
2502 */ 2503 */
2503 atype = args.type; 2504 atype = args.type;
2504 tryagain = 1; 2505 tryagain = 1;
2505 args.type = XFS_ALLOCTYPE_THIS_BNO; 2506 args.type = XFS_ALLOCTYPE_THIS_BNO;
2506 args.alignment = 1; 2507 args.alignment = 1;
2507 /* 2508 /*
2508 * Compute the minlen+alignment for the 2509 * Compute the minlen+alignment for the
2509 * next case. Set slop so that the value 2510 * next case. Set slop so that the value
2510 * of minlen+alignment+slop doesn't go up 2511 * of minlen+alignment+slop doesn't go up
2511 * between the calls. 2512 * between the calls.
2512 */ 2513 */
2513 if (blen > mp->m_dalign && blen <= args.maxlen) 2514 if (blen > mp->m_dalign && blen <= args.maxlen)
2514 nextminlen = blen - mp->m_dalign; 2515 nextminlen = blen - mp->m_dalign;
2515 else 2516 else
2516 nextminlen = args.minlen; 2517 nextminlen = args.minlen;
2517 if (nextminlen + mp->m_dalign > args.minlen + 1) 2518 if (nextminlen + mp->m_dalign > args.minlen + 1)
2518 args.minalignslop = 2519 args.minalignslop =
2519 nextminlen + mp->m_dalign - 2520 nextminlen + mp->m_dalign -
2520 args.minlen - 1; 2521 args.minlen - 1;
2521 else 2522 else
2522 args.minalignslop = 0; 2523 args.minalignslop = 0;
2523 } 2524 }
2524 } else { 2525 } else {
2525 args.alignment = 1; 2526 args.alignment = 1;
2526 args.minalignslop = 0; 2527 args.minalignslop = 0;
2527 } 2528 }
2528 args.minleft = ap->minleft; 2529 args.minleft = ap->minleft;
2529 args.wasdel = ap->wasdel; 2530 args.wasdel = ap->wasdel;
2530 args.isfl = 0; 2531 args.isfl = 0;
2531 args.userdata = ap->userdata; 2532 args.userdata = ap->userdata;
2532 if ((error = xfs_alloc_vextent(&args))) 2533 if ((error = xfs_alloc_vextent(&args)))
2533 return error; 2534 return error;
2534 if (tryagain && args.fsbno == NULLFSBLOCK) { 2535 if (tryagain && args.fsbno == NULLFSBLOCK) {
2535 /* 2536 /*
2536 * Exact allocation failed. Now try with alignment 2537 * Exact allocation failed. Now try with alignment
2537 * turned on. 2538 * turned on.
2538 */ 2539 */
2539 args.type = atype; 2540 args.type = atype;
2540 args.fsbno = ap->blkno; 2541 args.fsbno = ap->blkno;
2541 args.alignment = mp->m_dalign; 2542 args.alignment = mp->m_dalign;
2542 args.minlen = nextminlen; 2543 args.minlen = nextminlen;
2543 args.minalignslop = 0; 2544 args.minalignslop = 0;
2544 isaligned = 1; 2545 isaligned = 1;
2545 if ((error = xfs_alloc_vextent(&args))) 2546 if ((error = xfs_alloc_vextent(&args)))
2546 return error; 2547 return error;
2547 } 2548 }
2548 if (isaligned && args.fsbno == NULLFSBLOCK) { 2549 if (isaligned && args.fsbno == NULLFSBLOCK) {
2549 /* 2550 /*
2550 * allocation failed, so turn off alignment and 2551 * allocation failed, so turn off alignment and
2551 * try again. 2552 * try again.
2552 */ 2553 */
2553 args.type = atype; 2554 args.type = atype;
2554 args.fsbno = ap->blkno; 2555 args.fsbno = ap->blkno;
2555 args.alignment = 0; 2556 args.alignment = 0;
2556 if ((error = xfs_alloc_vextent(&args))) 2557 if ((error = xfs_alloc_vextent(&args)))
2557 return error; 2558 return error;
2558 } 2559 }
2559 if (args.fsbno == NULLFSBLOCK && nullfb && 2560 if (args.fsbno == NULLFSBLOCK && nullfb &&
2560 args.minlen > ap->minlen) { 2561 args.minlen > ap->minlen) {
2561 args.minlen = ap->minlen; 2562 args.minlen = ap->minlen;
2562 args.type = XFS_ALLOCTYPE_START_BNO; 2563 args.type = XFS_ALLOCTYPE_START_BNO;
2563 args.fsbno = ap->blkno; 2564 args.fsbno = ap->blkno;
2564 if ((error = xfs_alloc_vextent(&args))) 2565 if ((error = xfs_alloc_vextent(&args)))
2565 return error; 2566 return error;
2566 } 2567 }
2567 if (args.fsbno == NULLFSBLOCK && nullfb) { 2568 if (args.fsbno == NULLFSBLOCK && nullfb) {
2568 args.fsbno = 0; 2569 args.fsbno = 0;
2569 args.type = XFS_ALLOCTYPE_FIRST_AG; 2570 args.type = XFS_ALLOCTYPE_FIRST_AG;
2570 args.total = ap->minlen; 2571 args.total = ap->minlen;
2571 args.minleft = 0; 2572 args.minleft = 0;
2572 if ((error = xfs_alloc_vextent(&args))) 2573 if ((error = xfs_alloc_vextent(&args)))
2573 return error; 2574 return error;
2574 ap->flist->xbf_low = 1; 2575 ap->flist->xbf_low = 1;
2575 } 2576 }
2576 if (args.fsbno != NULLFSBLOCK) { 2577 if (args.fsbno != NULLFSBLOCK) {
2577 /* 2578 /*
2578 * check the allocation happened at the same or higher AG than 2579 * check the allocation happened at the same or higher AG than
2579 * the first block that was allocated. 2580 * the first block that was allocated.
2580 */ 2581 */
2581 ASSERT(*ap->firstblock == NULLFSBLOCK || 2582 ASSERT(*ap->firstblock == NULLFSBLOCK ||
2582 XFS_FSB_TO_AGNO(mp, *ap->firstblock) == 2583 XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
2583 XFS_FSB_TO_AGNO(mp, args.fsbno) || 2584 XFS_FSB_TO_AGNO(mp, args.fsbno) ||
2584 (ap->flist->xbf_low && 2585 (ap->flist->xbf_low &&
2585 XFS_FSB_TO_AGNO(mp, *ap->firstblock) < 2586 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
2586 XFS_FSB_TO_AGNO(mp, args.fsbno))); 2587 XFS_FSB_TO_AGNO(mp, args.fsbno)));
2587 2588
2588 ap->blkno = args.fsbno; 2589 ap->blkno = args.fsbno;
2589 if (*ap->firstblock == NULLFSBLOCK) 2590 if (*ap->firstblock == NULLFSBLOCK)
2590 *ap->firstblock = args.fsbno; 2591 *ap->firstblock = args.fsbno;
2591 ASSERT(nullfb || fb_agno == args.agno || 2592 ASSERT(nullfb || fb_agno == args.agno ||
2592 (ap->flist->xbf_low && fb_agno < args.agno)); 2593 (ap->flist->xbf_low && fb_agno < args.agno));
2593 ap->length = args.len; 2594 ap->length = args.len;
2594 ap->ip->i_d.di_nblocks += args.len; 2595 ap->ip->i_d.di_nblocks += args.len;
2595 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 2596 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2596 if (ap->wasdel) 2597 if (ap->wasdel)
2597 ap->ip->i_delayed_blks -= args.len; 2598 ap->ip->i_delayed_blks -= args.len;
2598 /* 2599 /*
2599 * Adjust the disk quota also. This was reserved 2600 * Adjust the disk quota also. This was reserved
2600 * earlier. 2601 * earlier.
2601 */ 2602 */
2602 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 2603 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2603 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : 2604 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2604 XFS_TRANS_DQ_BCOUNT, 2605 XFS_TRANS_DQ_BCOUNT,
2605 (long) args.len); 2606 (long) args.len);
2606 } else { 2607 } else {
2607 ap->blkno = NULLFSBLOCK; 2608 ap->blkno = NULLFSBLOCK;
2608 ap->length = 0; 2609 ap->length = 0;
2609 } 2610 }
2610 return 0; 2611 return 0;
2611 } 2612 }
2612 2613
2613 /* 2614 /*
2614 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 2615 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2615 * It figures out where to ask the underlying allocator to put the new extent. 2616 * It figures out where to ask the underlying allocator to put the new extent.
2616 */ 2617 */
2617 STATIC int 2618 STATIC int
2618 xfs_bmap_alloc( 2619 xfs_bmap_alloc(
2619 xfs_bmalloca_t *ap) /* bmap alloc argument struct */ 2620 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2620 { 2621 {
2621 if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata) 2622 if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
2622 return xfs_bmap_rtalloc(ap); 2623 return xfs_bmap_rtalloc(ap);
2623 return xfs_bmap_btalloc(ap); 2624 return xfs_bmap_btalloc(ap);
2624 } 2625 }
2625 2626
2626 /* 2627 /*
2627 * Transform a btree format file with only one leaf node, where the 2628 * Transform a btree format file with only one leaf node, where the
2628 * extents list will fit in the inode, into an extents format file. 2629 * extents list will fit in the inode, into an extents format file.
2629 * Since the file extents are already in-core, all we have to do is 2630 * Since the file extents are already in-core, all we have to do is
2630 * give up the space for the btree root and pitch the leaf block. 2631 * give up the space for the btree root and pitch the leaf block.
2631 */ 2632 */
2632 STATIC int /* error */ 2633 STATIC int /* error */
2633 xfs_bmap_btree_to_extents( 2634 xfs_bmap_btree_to_extents(
2634 xfs_trans_t *tp, /* transaction pointer */ 2635 xfs_trans_t *tp, /* transaction pointer */
2635 xfs_inode_t *ip, /* incore inode pointer */ 2636 xfs_inode_t *ip, /* incore inode pointer */
2636 xfs_btree_cur_t *cur, /* btree cursor */ 2637 xfs_btree_cur_t *cur, /* btree cursor */
2637 int *logflagsp, /* inode logging flags */ 2638 int *logflagsp, /* inode logging flags */
2638 int whichfork) /* data or attr fork */ 2639 int whichfork) /* data or attr fork */
2639 { 2640 {
2640 /* REFERENCED */ 2641 /* REFERENCED */
2641 struct xfs_btree_block *cblock;/* child btree block */ 2642 struct xfs_btree_block *cblock;/* child btree block */
2642 xfs_fsblock_t cbno; /* child block number */ 2643 xfs_fsblock_t cbno; /* child block number */
2643 xfs_buf_t *cbp; /* child block's buffer */ 2644 xfs_buf_t *cbp; /* child block's buffer */
2644 int error; /* error return value */ 2645 int error; /* error return value */
2645 xfs_ifork_t *ifp; /* inode fork data */ 2646 xfs_ifork_t *ifp; /* inode fork data */
2646 xfs_mount_t *mp; /* mount point structure */ 2647 xfs_mount_t *mp; /* mount point structure */
2647 __be64 *pp; /* ptr to block address */ 2648 __be64 *pp; /* ptr to block address */
2648 struct xfs_btree_block *rblock;/* root btree block */ 2649 struct xfs_btree_block *rblock;/* root btree block */
2649 2650
2650 mp = ip->i_mount; 2651 mp = ip->i_mount;
2651 ifp = XFS_IFORK_PTR(ip, whichfork); 2652 ifp = XFS_IFORK_PTR(ip, whichfork);
2652 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 2653 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2653 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 2654 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
2654 rblock = ifp->if_broot; 2655 rblock = ifp->if_broot;
2655 ASSERT(be16_to_cpu(rblock->bb_level) == 1); 2656 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
2656 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); 2657 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
2657 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1); 2658 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
2658 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes); 2659 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
2659 cbno = be64_to_cpu(*pp); 2660 cbno = be64_to_cpu(*pp);
2660 *logflagsp = 0; 2661 *logflagsp = 0;
2661 #ifdef DEBUG 2662 #ifdef DEBUG
2662 if ((error = xfs_btree_check_lptr(cur, cbno, 1))) 2663 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
2663 return error; 2664 return error;
2664 #endif 2665 #endif
2665 if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, 2666 if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
2666 XFS_BMAP_BTREE_REF))) 2667 XFS_BMAP_BTREE_REF)))
2667 return error; 2668 return error;
2668 cblock = XFS_BUF_TO_BLOCK(cbp); 2669 cblock = XFS_BUF_TO_BLOCK(cbp);
2669 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) 2670 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
2670 return error; 2671 return error;
2671 xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp); 2672 xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
2672 ip->i_d.di_nblocks--; 2673 ip->i_d.di_nblocks--;
2673 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 2674 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
2674 xfs_trans_binval(tp, cbp); 2675 xfs_trans_binval(tp, cbp);
2675 if (cur->bc_bufs[0] == cbp) 2676 if (cur->bc_bufs[0] == cbp)
2676 cur->bc_bufs[0] = NULL; 2677 cur->bc_bufs[0] = NULL;
2677 xfs_iroot_realloc(ip, -1, whichfork); 2678 xfs_iroot_realloc(ip, -1, whichfork);
2678 ASSERT(ifp->if_broot == NULL); 2679 ASSERT(ifp->if_broot == NULL);
2679 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); 2680 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
2680 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 2681 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
2681 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2682 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2682 return 0; 2683 return 0;
2683 } 2684 }
2684 2685
2685 /* 2686 /*
2686 * Called by xfs_bmapi to update file extent records and the btree 2687 * Called by xfs_bmapi to update file extent records and the btree
2687 * after removing space (or undoing a delayed allocation). 2688 * after removing space (or undoing a delayed allocation).
2688 */ 2689 */
2689 STATIC int /* error */ 2690 STATIC int /* error */
2690 xfs_bmap_del_extent( 2691 xfs_bmap_del_extent(
2691 xfs_inode_t *ip, /* incore inode pointer */ 2692 xfs_inode_t *ip, /* incore inode pointer */
2692 xfs_trans_t *tp, /* current transaction pointer */ 2693 xfs_trans_t *tp, /* current transaction pointer */
2693 xfs_extnum_t *idx, /* extent number to update/delete */ 2694 xfs_extnum_t *idx, /* extent number to update/delete */
2694 xfs_bmap_free_t *flist, /* list of extents to be freed */ 2695 xfs_bmap_free_t *flist, /* list of extents to be freed */
2695 xfs_btree_cur_t *cur, /* if null, not a btree */ 2696 xfs_btree_cur_t *cur, /* if null, not a btree */
2696 xfs_bmbt_irec_t *del, /* data to remove from extents */ 2697 xfs_bmbt_irec_t *del, /* data to remove from extents */
2697 int *logflagsp, /* inode logging flags */ 2698 int *logflagsp, /* inode logging flags */
2698 int whichfork) /* data or attr fork */ 2699 int whichfork) /* data or attr fork */
2699 { 2700 {
2700 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ 2701 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
2701 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ 2702 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
2702 xfs_fsblock_t del_endblock=0; /* first block past del */ 2703 xfs_fsblock_t del_endblock=0; /* first block past del */
2703 xfs_fileoff_t del_endoff; /* first offset past del */ 2704 xfs_fileoff_t del_endoff; /* first offset past del */
2704 int delay; /* current block is delayed allocated */ 2705 int delay; /* current block is delayed allocated */
2705 int do_fx; /* free extent at end of routine */ 2706 int do_fx; /* free extent at end of routine */
2706 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */ 2707 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
2707 int error; /* error return value */ 2708 int error; /* error return value */
2708 int flags; /* inode logging flags */ 2709 int flags; /* inode logging flags */
2709 xfs_bmbt_irec_t got; /* current extent entry */ 2710 xfs_bmbt_irec_t got; /* current extent entry */
2710 xfs_fileoff_t got_endoff; /* first offset past got */ 2711 xfs_fileoff_t got_endoff; /* first offset past got */
2711 int i; /* temp state */ 2712 int i; /* temp state */
2712 xfs_ifork_t *ifp; /* inode fork pointer */ 2713 xfs_ifork_t *ifp; /* inode fork pointer */
2713 xfs_mount_t *mp; /* mount structure */ 2714 xfs_mount_t *mp; /* mount structure */
2714 xfs_filblks_t nblks; /* quota/sb block count */ 2715 xfs_filblks_t nblks; /* quota/sb block count */
2715 xfs_bmbt_irec_t new; /* new record to be inserted */ 2716 xfs_bmbt_irec_t new; /* new record to be inserted */
2716 /* REFERENCED */ 2717 /* REFERENCED */
2717 uint qfield; /* quota field to update */ 2718 uint qfield; /* quota field to update */
2718 xfs_filblks_t temp; /* for indirect length calculations */ 2719 xfs_filblks_t temp; /* for indirect length calculations */
2719 xfs_filblks_t temp2; /* for indirect length calculations */ 2720 xfs_filblks_t temp2; /* for indirect length calculations */
2720 int state = 0; 2721 int state = 0;
2721 2722
2722 XFS_STATS_INC(xs_del_exlist); 2723 XFS_STATS_INC(xs_del_exlist);
2723 2724
2724 if (whichfork == XFS_ATTR_FORK) 2725 if (whichfork == XFS_ATTR_FORK)
2725 state |= BMAP_ATTRFORK; 2726 state |= BMAP_ATTRFORK;
2726 2727
2727 mp = ip->i_mount; 2728 mp = ip->i_mount;
2728 ifp = XFS_IFORK_PTR(ip, whichfork); 2729 ifp = XFS_IFORK_PTR(ip, whichfork);
2729 ASSERT((*idx >= 0) && (*idx < ifp->if_bytes / 2730 ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
2730 (uint)sizeof(xfs_bmbt_rec_t))); 2731 (uint)sizeof(xfs_bmbt_rec_t)));
2731 ASSERT(del->br_blockcount > 0); 2732 ASSERT(del->br_blockcount > 0);
2732 ep = xfs_iext_get_ext(ifp, *idx); 2733 ep = xfs_iext_get_ext(ifp, *idx);
2733 xfs_bmbt_get_all(ep, &got); 2734 xfs_bmbt_get_all(ep, &got);
2734 ASSERT(got.br_startoff <= del->br_startoff); 2735 ASSERT(got.br_startoff <= del->br_startoff);
2735 del_endoff = del->br_startoff + del->br_blockcount; 2736 del_endoff = del->br_startoff + del->br_blockcount;
2736 got_endoff = got.br_startoff + got.br_blockcount; 2737 got_endoff = got.br_startoff + got.br_blockcount;
2737 ASSERT(got_endoff >= del_endoff); 2738 ASSERT(got_endoff >= del_endoff);
2738 delay = isnullstartblock(got.br_startblock); 2739 delay = isnullstartblock(got.br_startblock);
2739 ASSERT(isnullstartblock(del->br_startblock) == delay); 2740 ASSERT(isnullstartblock(del->br_startblock) == delay);
2740 flags = 0; 2741 flags = 0;
2741 qfield = 0; 2742 qfield = 0;
2742 error = 0; 2743 error = 0;
2743 /* 2744 /*
2744 * If deleting a real allocation, must free up the disk space. 2745 * If deleting a real allocation, must free up the disk space.
2745 */ 2746 */
2746 if (!delay) { 2747 if (!delay) {
2747 flags = XFS_ILOG_CORE; 2748 flags = XFS_ILOG_CORE;
2748 /* 2749 /*
2749 * Realtime allocation. Free it and record di_nblocks update. 2750 * Realtime allocation. Free it and record di_nblocks update.
2750 */ 2751 */
2751 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 2752 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
2752 xfs_fsblock_t bno; 2753 xfs_fsblock_t bno;
2753 xfs_filblks_t len; 2754 xfs_filblks_t len;
2754 2755
2755 ASSERT(do_mod(del->br_blockcount, 2756 ASSERT(do_mod(del->br_blockcount,
2756 mp->m_sb.sb_rextsize) == 0); 2757 mp->m_sb.sb_rextsize) == 0);
2757 ASSERT(do_mod(del->br_startblock, 2758 ASSERT(do_mod(del->br_startblock,
2758 mp->m_sb.sb_rextsize) == 0); 2759 mp->m_sb.sb_rextsize) == 0);
2759 bno = del->br_startblock; 2760 bno = del->br_startblock;
2760 len = del->br_blockcount; 2761 len = del->br_blockcount;
2761 do_div(bno, mp->m_sb.sb_rextsize); 2762 do_div(bno, mp->m_sb.sb_rextsize);
2762 do_div(len, mp->m_sb.sb_rextsize); 2763 do_div(len, mp->m_sb.sb_rextsize);
2763 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 2764 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
2764 if (error) 2765 if (error)
2765 goto done; 2766 goto done;
2766 do_fx = 0; 2767 do_fx = 0;
2767 nblks = len * mp->m_sb.sb_rextsize; 2768 nblks = len * mp->m_sb.sb_rextsize;
2768 qfield = XFS_TRANS_DQ_RTBCOUNT; 2769 qfield = XFS_TRANS_DQ_RTBCOUNT;
2769 } 2770 }
2770 /* 2771 /*
2771 * Ordinary allocation. 2772 * Ordinary allocation.
2772 */ 2773 */
2773 else { 2774 else {
2774 do_fx = 1; 2775 do_fx = 1;
2775 nblks = del->br_blockcount; 2776 nblks = del->br_blockcount;
2776 qfield = XFS_TRANS_DQ_BCOUNT; 2777 qfield = XFS_TRANS_DQ_BCOUNT;
2777 } 2778 }
2778 /* 2779 /*
2779 * Set up del_endblock and cur for later. 2780 * Set up del_endblock and cur for later.
2780 */ 2781 */
2781 del_endblock = del->br_startblock + del->br_blockcount; 2782 del_endblock = del->br_startblock + del->br_blockcount;
2782 if (cur) { 2783 if (cur) {
2783 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, 2784 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
2784 got.br_startblock, got.br_blockcount, 2785 got.br_startblock, got.br_blockcount,
2785 &i))) 2786 &i)))
2786 goto done; 2787 goto done;
2787 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2788 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2788 } 2789 }
2789 da_old = da_new = 0; 2790 da_old = da_new = 0;
2790 } else { 2791 } else {
2791 da_old = startblockval(got.br_startblock); 2792 da_old = startblockval(got.br_startblock);
2792 da_new = 0; 2793 da_new = 0;
2793 nblks = 0; 2794 nblks = 0;
2794 do_fx = 0; 2795 do_fx = 0;
2795 } 2796 }
2796 /* 2797 /*
2797 * Set flag value to use in switch statement. 2798 * Set flag value to use in switch statement.
2798 * Left-contig is 2, right-contig is 1. 2799 * Left-contig is 2, right-contig is 1.
2799 */ 2800 */
2800 switch (((got.br_startoff == del->br_startoff) << 1) | 2801 switch (((got.br_startoff == del->br_startoff) << 1) |
2801 (got_endoff == del_endoff)) { 2802 (got_endoff == del_endoff)) {
2802 case 3: 2803 case 3:
2803 /* 2804 /*
2804 * Matches the whole extent. Delete the entry. 2805 * Matches the whole extent. Delete the entry.
2805 */ 2806 */
2806 xfs_iext_remove(ip, *idx, 1, 2807 xfs_iext_remove(ip, *idx, 1,
2807 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); 2808 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
2808 --*idx; 2809 --*idx;
2809 if (delay) 2810 if (delay)
2810 break; 2811 break;
2811 2812
2812 XFS_IFORK_NEXT_SET(ip, whichfork, 2813 XFS_IFORK_NEXT_SET(ip, whichfork,
2813 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2814 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2814 flags |= XFS_ILOG_CORE; 2815 flags |= XFS_ILOG_CORE;
2815 if (!cur) { 2816 if (!cur) {
2816 flags |= xfs_ilog_fext(whichfork); 2817 flags |= xfs_ilog_fext(whichfork);
2817 break; 2818 break;
2818 } 2819 }
2819 if ((error = xfs_btree_delete(cur, &i))) 2820 if ((error = xfs_btree_delete(cur, &i)))
2820 goto done; 2821 goto done;
2821 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2822 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2822 break; 2823 break;
2823 2824
2824 case 2: 2825 case 2:
2825 /* 2826 /*
2826 * Deleting the first part of the extent. 2827 * Deleting the first part of the extent.
2827 */ 2828 */
2828 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2829 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2829 xfs_bmbt_set_startoff(ep, del_endoff); 2830 xfs_bmbt_set_startoff(ep, del_endoff);
2830 temp = got.br_blockcount - del->br_blockcount; 2831 temp = got.br_blockcount - del->br_blockcount;
2831 xfs_bmbt_set_blockcount(ep, temp); 2832 xfs_bmbt_set_blockcount(ep, temp);
2832 if (delay) { 2833 if (delay) {
2833 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2834 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2834 da_old); 2835 da_old);
2835 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 2836 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2836 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2837 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2837 da_new = temp; 2838 da_new = temp;
2838 break; 2839 break;
2839 } 2840 }
2840 xfs_bmbt_set_startblock(ep, del_endblock); 2841 xfs_bmbt_set_startblock(ep, del_endblock);
2841 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2842 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2842 if (!cur) { 2843 if (!cur) {
2843 flags |= xfs_ilog_fext(whichfork); 2844 flags |= xfs_ilog_fext(whichfork);
2844 break; 2845 break;
2845 } 2846 }
2846 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, 2847 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
2847 got.br_blockcount - del->br_blockcount, 2848 got.br_blockcount - del->br_blockcount,
2848 got.br_state))) 2849 got.br_state)))
2849 goto done; 2850 goto done;
2850 break; 2851 break;
2851 2852
2852 case 1: 2853 case 1:
2853 /* 2854 /*
2854 * Deleting the last part of the extent. 2855 * Deleting the last part of the extent.
2855 */ 2856 */
2856 temp = got.br_blockcount - del->br_blockcount; 2857 temp = got.br_blockcount - del->br_blockcount;
2857 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2858 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2858 xfs_bmbt_set_blockcount(ep, temp); 2859 xfs_bmbt_set_blockcount(ep, temp);
2859 if (delay) { 2860 if (delay) {
2860 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2861 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2861 da_old); 2862 da_old);
2862 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 2863 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2863 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2864 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2864 da_new = temp; 2865 da_new = temp;
2865 break; 2866 break;
2866 } 2867 }
2867 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2868 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2868 if (!cur) { 2869 if (!cur) {
2869 flags |= xfs_ilog_fext(whichfork); 2870 flags |= xfs_ilog_fext(whichfork);
2870 break; 2871 break;
2871 } 2872 }
2872 if ((error = xfs_bmbt_update(cur, got.br_startoff, 2873 if ((error = xfs_bmbt_update(cur, got.br_startoff,
2873 got.br_startblock, 2874 got.br_startblock,
2874 got.br_blockcount - del->br_blockcount, 2875 got.br_blockcount - del->br_blockcount,
2875 got.br_state))) 2876 got.br_state)))
2876 goto done; 2877 goto done;
2877 break; 2878 break;
2878 2879
2879 case 0: 2880 case 0:
2880 /* 2881 /*
2881 * Deleting the middle of the extent. 2882 * Deleting the middle of the extent.
2882 */ 2883 */
2883 temp = del->br_startoff - got.br_startoff; 2884 temp = del->br_startoff - got.br_startoff;
2884 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); 2885 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2885 xfs_bmbt_set_blockcount(ep, temp); 2886 xfs_bmbt_set_blockcount(ep, temp);
2886 new.br_startoff = del_endoff; 2887 new.br_startoff = del_endoff;
2887 temp2 = got_endoff - del_endoff; 2888 temp2 = got_endoff - del_endoff;
2888 new.br_blockcount = temp2; 2889 new.br_blockcount = temp2;
2889 new.br_state = got.br_state; 2890 new.br_state = got.br_state;
2890 if (!delay) { 2891 if (!delay) {
2891 new.br_startblock = del_endblock; 2892 new.br_startblock = del_endblock;
2892 flags |= XFS_ILOG_CORE; 2893 flags |= XFS_ILOG_CORE;
2893 if (cur) { 2894 if (cur) {
2894 if ((error = xfs_bmbt_update(cur, 2895 if ((error = xfs_bmbt_update(cur,
2895 got.br_startoff, 2896 got.br_startoff,
2896 got.br_startblock, temp, 2897 got.br_startblock, temp,
2897 got.br_state))) 2898 got.br_state)))
2898 goto done; 2899 goto done;
2899 if ((error = xfs_btree_increment(cur, 0, &i))) 2900 if ((error = xfs_btree_increment(cur, 0, &i)))
2900 goto done; 2901 goto done;
2901 cur->bc_rec.b = new; 2902 cur->bc_rec.b = new;
2902 error = xfs_btree_insert(cur, &i); 2903 error = xfs_btree_insert(cur, &i);
2903 if (error && error != ENOSPC) 2904 if (error && error != ENOSPC)
2904 goto done; 2905 goto done;
2905 /* 2906 /*
2906 * If get no-space back from btree insert, 2907 * If get no-space back from btree insert,
2907 * it tried a split, and we have a zero 2908 * it tried a split, and we have a zero
2908 * block reservation. 2909 * block reservation.
2909 * Fix up our state and return the error. 2910 * Fix up our state and return the error.
2910 */ 2911 */
2911 if (error == ENOSPC) { 2912 if (error == ENOSPC) {
2912 /* 2913 /*
2913 * Reset the cursor, don't trust 2914 * Reset the cursor, don't trust
2914 * it after any insert operation. 2915 * it after any insert operation.
2915 */ 2916 */
2916 if ((error = xfs_bmbt_lookup_eq(cur, 2917 if ((error = xfs_bmbt_lookup_eq(cur,
2917 got.br_startoff, 2918 got.br_startoff,
2918 got.br_startblock, 2919 got.br_startblock,
2919 temp, &i))) 2920 temp, &i)))
2920 goto done; 2921 goto done;
2921 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2922 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2922 /* 2923 /*
2923 * Update the btree record back 2924 * Update the btree record back
2924 * to the original value. 2925 * to the original value.
2925 */ 2926 */
2926 if ((error = xfs_bmbt_update(cur, 2927 if ((error = xfs_bmbt_update(cur,
2927 got.br_startoff, 2928 got.br_startoff,
2928 got.br_startblock, 2929 got.br_startblock,
2929 got.br_blockcount, 2930 got.br_blockcount,
2930 got.br_state))) 2931 got.br_state)))
2931 goto done; 2932 goto done;
2932 /* 2933 /*
2933 * Reset the extent record back 2934 * Reset the extent record back
2934 * to the original value. 2935 * to the original value.
2935 */ 2936 */
2936 xfs_bmbt_set_blockcount(ep, 2937 xfs_bmbt_set_blockcount(ep,
2937 got.br_blockcount); 2938 got.br_blockcount);
2938 flags = 0; 2939 flags = 0;
2939 error = XFS_ERROR(ENOSPC); 2940 error = XFS_ERROR(ENOSPC);
2940 goto done; 2941 goto done;
2941 } 2942 }
2942 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 2943 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2943 } else 2944 } else
2944 flags |= xfs_ilog_fext(whichfork); 2945 flags |= xfs_ilog_fext(whichfork);
2945 XFS_IFORK_NEXT_SET(ip, whichfork, 2946 XFS_IFORK_NEXT_SET(ip, whichfork,
2946 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2947 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2947 } else { 2948 } else {
2948 ASSERT(whichfork == XFS_DATA_FORK); 2949 ASSERT(whichfork == XFS_DATA_FORK);
2949 temp = xfs_bmap_worst_indlen(ip, temp); 2950 temp = xfs_bmap_worst_indlen(ip, temp);
2950 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 2951 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2951 temp2 = xfs_bmap_worst_indlen(ip, temp2); 2952 temp2 = xfs_bmap_worst_indlen(ip, temp2);
2952 new.br_startblock = nullstartblock((int)temp2); 2953 new.br_startblock = nullstartblock((int)temp2);
2953 da_new = temp + temp2; 2954 da_new = temp + temp2;
2954 while (da_new > da_old) { 2955 while (da_new > da_old) {
2955 if (temp) { 2956 if (temp) {
2956 temp--; 2957 temp--;
2957 da_new--; 2958 da_new--;
2958 xfs_bmbt_set_startblock(ep, 2959 xfs_bmbt_set_startblock(ep,
2959 nullstartblock((int)temp)); 2960 nullstartblock((int)temp));
2960 } 2961 }
2961 if (da_new == da_old) 2962 if (da_new == da_old)
2962 break; 2963 break;
2963 if (temp2) { 2964 if (temp2) {
2964 temp2--; 2965 temp2--;
2965 da_new--; 2966 da_new--;
2966 new.br_startblock = 2967 new.br_startblock =
2967 nullstartblock((int)temp2); 2968 nullstartblock((int)temp2);
2968 } 2969 }
2969 } 2970 }
2970 } 2971 }
2971 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 2972 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2972 xfs_iext_insert(ip, *idx + 1, 1, &new, state); 2973 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
2973 ++*idx; 2974 ++*idx;
2974 break; 2975 break;
2975 } 2976 }
2976 /* 2977 /*
2977 * If we need to, add to list of extents to delete. 2978 * If we need to, add to list of extents to delete.
2978 */ 2979 */
2979 if (do_fx) 2980 if (do_fx)
2980 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist, 2981 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
2981 mp); 2982 mp);
2982 /* 2983 /*
2983 * Adjust inode # blocks in the file. 2984 * Adjust inode # blocks in the file.
2984 */ 2985 */
2985 if (nblks) 2986 if (nblks)
2986 ip->i_d.di_nblocks -= nblks; 2987 ip->i_d.di_nblocks -= nblks;
2987 /* 2988 /*
2988 * Adjust quota data. 2989 * Adjust quota data.
2989 */ 2990 */
2990 if (qfield) 2991 if (qfield)
2991 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 2992 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
2992 2993
2993 /* 2994 /*
2994 * Account for change in delayed indirect blocks. 2995 * Account for change in delayed indirect blocks.
2995 * Nothing to do for disk quota accounting here. 2996 * Nothing to do for disk quota accounting here.
2996 */ 2997 */
2997 ASSERT(da_old >= da_new); 2998 ASSERT(da_old >= da_new);
2998 if (da_old > da_new) { 2999 if (da_old > da_new) {
2999 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, 3000 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
3000 (int64_t)(da_old - da_new), 0); 3001 (int64_t)(da_old - da_new), 0);
3001 } 3002 }
3002 done: 3003 done:
3003 *logflagsp = flags; 3004 *logflagsp = flags;
3004 return error; 3005 return error;
3005 } 3006 }
3006 3007
3007 /* 3008 /*
3008 * Remove the entry "free" from the free item list. Prev points to the 3009 * Remove the entry "free" from the free item list. Prev points to the
3009 * previous entry, unless "free" is the head of the list. 3010 * previous entry, unless "free" is the head of the list.
3010 */ 3011 */
3011 STATIC void 3012 STATIC void
3012 xfs_bmap_del_free( 3013 xfs_bmap_del_free(
3013 xfs_bmap_free_t *flist, /* free item list header */ 3014 xfs_bmap_free_t *flist, /* free item list header */
3014 xfs_bmap_free_item_t *prev, /* previous item on list, if any */ 3015 xfs_bmap_free_item_t *prev, /* previous item on list, if any */
3015 xfs_bmap_free_item_t *free) /* list item to be freed */ 3016 xfs_bmap_free_item_t *free) /* list item to be freed */
3016 { 3017 {
3017 if (prev) 3018 if (prev)
3018 prev->xbfi_next = free->xbfi_next; 3019 prev->xbfi_next = free->xbfi_next;
3019 else 3020 else
3020 flist->xbf_first = free->xbfi_next; 3021 flist->xbf_first = free->xbfi_next;
3021 flist->xbf_count--; 3022 flist->xbf_count--;
3022 kmem_zone_free(xfs_bmap_free_item_zone, free); 3023 kmem_zone_free(xfs_bmap_free_item_zone, free);
3023 } 3024 }
3024 3025
3025 /* 3026 /*
3026 * Convert an extents-format file into a btree-format file. 3027 * Convert an extents-format file into a btree-format file.
3027 * The new file will have a root block (in the inode) and a single child block. 3028 * The new file will have a root block (in the inode) and a single child block.
3028 */ 3029 */
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 *
 * Returns 0 on success or a positive errno from xfs_alloc_vextent(); on
 * failure the incore root space and the cursor are torn down again, so the
 * fork is left as the caller found it.  On success *curp holds the new
 * cursor (caller must have passed *curp == NULL) and *logflagsp tells the
 * caller what to log.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	xfs_bmap_free_t		*flist,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
	block->bb_level = cpu_to_be16(1);
	block->bb_numrecs = cpu_to_be16(1);
	block->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
	block->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);

	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	mp = ip->i_mount;
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.flist = flist;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 * Allocation target depends on what we know: near the inode if no
	 * block has been picked yet, otherwise near/after *firstblock
	 * (START_BNO when we have already dropped to low-space mode).
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (flist->xbf_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.total = args.minleft = args.alignment = args.mod = args.isfl =
		args.minalignslop = 0;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		/* Undo the incore root expansion and free the cursor. */
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
	       (flist->xbf_low &&
		args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.  Only real extents are copied out;
	 * delayed allocations (null startblock) stay incore-only, which is
	 * why cnt can be smaller than nextents.
	 */
	ablock = XFS_BUF_TO_BLOCK(abp);
	ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
	ablock->bb_level = 0;
	ablock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
	ablock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
		be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}
3165 3166
3166 /* 3167 /*
3167 * Calculate the default attribute fork offset for newly created inodes. 3168 * Calculate the default attribute fork offset for newly created inodes.
3168 */ 3169 */
3169 uint 3170 uint
3170 xfs_default_attroffset( 3171 xfs_default_attroffset(
3171 struct xfs_inode *ip) 3172 struct xfs_inode *ip)
3172 { 3173 {
3173 struct xfs_mount *mp = ip->i_mount; 3174 struct xfs_mount *mp = ip->i_mount;
3174 uint offset; 3175 uint offset;
3175 3176
3176 if (mp->m_sb.sb_inodesize == 256) { 3177 if (mp->m_sb.sb_inodesize == 256) {
3177 offset = XFS_LITINO(mp) - 3178 offset = XFS_LITINO(mp) -
3178 XFS_BMDR_SPACE_CALC(MINABTPTRS); 3179 XFS_BMDR_SPACE_CALC(MINABTPTRS);
3179 } else { 3180 } else {
3180 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS); 3181 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
3181 } 3182 }
3182 3183
3183 ASSERT(offset < XFS_LITINO(mp)); 3184 ASSERT(offset < XFS_LITINO(mp));
3184 return offset; 3185 return offset;
3185 } 3186 }
3186 3187
3187 /* 3188 /*
3188 * Helper routine to reset inode di_forkoff field when switching 3189 * Helper routine to reset inode di_forkoff field when switching
3189 * attribute fork from local to extent format - we reset it where 3190 * attribute fork from local to extent format - we reset it where
3190 * possible to make space available for inline data fork extents. 3191 * possible to make space available for inline data fork extents.
3191 */ 3192 */
3192 STATIC void 3193 STATIC void
3193 xfs_bmap_forkoff_reset( 3194 xfs_bmap_forkoff_reset(
3194 xfs_mount_t *mp, 3195 xfs_mount_t *mp,
3195 xfs_inode_t *ip, 3196 xfs_inode_t *ip,
3196 int whichfork) 3197 int whichfork)
3197 { 3198 {
3198 if (whichfork == XFS_ATTR_FORK && 3199 if (whichfork == XFS_ATTR_FORK &&
3199 ip->i_d.di_format != XFS_DINODE_FMT_DEV && 3200 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
3200 ip->i_d.di_format != XFS_DINODE_FMT_UUID && 3201 ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
3201 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) { 3202 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
3202 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3; 3203 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
3203 3204
3204 if (dfl_forkoff > ip->i_d.di_forkoff) 3205 if (dfl_forkoff > ip->i_d.di_forkoff)
3205 ip->i_d.di_forkoff = dfl_forkoff; 3206 ip->i_d.di_forkoff = dfl_forkoff;
3206 } 3207 }
3207 } 3208 }
3208 3209
3209 /* 3210 /*
3210 * Convert a local file to an extents file. 3211 * Convert a local file to an extents file.
3211 * This code is out of bounds for data forks of regular files, 3212 * This code is out of bounds for data forks of regular files,
3212 * since the file data needs to get logged so things will stay consistent. 3213 * since the file data needs to get logged so things will stay consistent.
3213 * (The bmap-level manipulations are ok, though). 3214 * (The bmap-level manipulations are ok, though).
3214 */ 3215 */
3215 STATIC int /* error */ 3216 STATIC int /* error */
3216 xfs_bmap_local_to_extents( 3217 xfs_bmap_local_to_extents(
3217 xfs_trans_t *tp, /* transaction pointer */ 3218 xfs_trans_t *tp, /* transaction pointer */
3218 xfs_inode_t *ip, /* incore inode pointer */ 3219 xfs_inode_t *ip, /* incore inode pointer */
3219 xfs_fsblock_t *firstblock, /* first block allocated in xaction */ 3220 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
3220 xfs_extlen_t total, /* total blocks needed by transaction */ 3221 xfs_extlen_t total, /* total blocks needed by transaction */
3221 int *logflagsp, /* inode logging flags */ 3222 int *logflagsp, /* inode logging flags */
3222 int whichfork) /* data or attr fork */ 3223 int whichfork) /* data or attr fork */
3223 { 3224 {
3224 int error; /* error return value */ 3225 int error; /* error return value */
3225 int flags; /* logging flags returned */ 3226 int flags; /* logging flags returned */
3226 xfs_ifork_t *ifp; /* inode fork pointer */ 3227 xfs_ifork_t *ifp; /* inode fork pointer */
3227 3228
3228 /* 3229 /*
3229 * We don't want to deal with the case of keeping inode data inline yet. 3230 * We don't want to deal with the case of keeping inode data inline yet.
3230 * So sending the data fork of a regular inode is invalid. 3231 * So sending the data fork of a regular inode is invalid.
3231 */ 3232 */
3232 ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK)); 3233 ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
3233 ifp = XFS_IFORK_PTR(ip, whichfork); 3234 ifp = XFS_IFORK_PTR(ip, whichfork);
3234 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 3235 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3235 flags = 0; 3236 flags = 0;
3236 error = 0; 3237 error = 0;
3237 if (ifp->if_bytes) { 3238 if (ifp->if_bytes) {
3238 xfs_alloc_arg_t args; /* allocation arguments */ 3239 xfs_alloc_arg_t args; /* allocation arguments */
3239 xfs_buf_t *bp; /* buffer for extent block */ 3240 xfs_buf_t *bp; /* buffer for extent block */
3240 xfs_bmbt_rec_host_t *ep;/* extent record pointer */ 3241 xfs_bmbt_rec_host_t *ep;/* extent record pointer */
3241 3242
3242 memset(&args, 0, sizeof(args)); 3243 memset(&args, 0, sizeof(args));
3243 args.tp = tp; 3244 args.tp = tp;
3244 args.mp = ip->i_mount; 3245 args.mp = ip->i_mount;
3245 args.firstblock = *firstblock; 3246 args.firstblock = *firstblock;
3246 ASSERT((ifp->if_flags & 3247 ASSERT((ifp->if_flags &
3247 (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE); 3248 (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
3248 /* 3249 /*
3249 * Allocate a block. We know we need only one, since the 3250 * Allocate a block. We know we need only one, since the
3250 * file currently fits in an inode. 3251 * file currently fits in an inode.
3251 */ 3252 */
3252 if (*firstblock == NULLFSBLOCK) { 3253 if (*firstblock == NULLFSBLOCK) {
3253 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino); 3254 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
3254 args.type = XFS_ALLOCTYPE_START_BNO; 3255 args.type = XFS_ALLOCTYPE_START_BNO;
3255 } else { 3256 } else {
3256 args.fsbno = *firstblock; 3257 args.fsbno = *firstblock;
3257 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3258 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3258 } 3259 }
3259 args.total = total; 3260 args.total = total;
3260 args.mod = args.minleft = args.alignment = args.wasdel = 3261 args.mod = args.minleft = args.alignment = args.wasdel =
3261 args.isfl = args.minalignslop = 0; 3262 args.isfl = args.minalignslop = 0;
3262 args.minlen = args.maxlen = args.prod = 1; 3263 args.minlen = args.maxlen = args.prod = 1;
3263 if ((error = xfs_alloc_vextent(&args))) 3264 if ((error = xfs_alloc_vextent(&args)))
3264 goto done; 3265 goto done;
3265 /* 3266 /*
3266 * Can't fail, the space was reserved. 3267 * Can't fail, the space was reserved.
3267 */ 3268 */
3268 ASSERT(args.fsbno != NULLFSBLOCK); 3269 ASSERT(args.fsbno != NULLFSBLOCK);
3269 ASSERT(args.len == 1); 3270 ASSERT(args.len == 1);
3270 *firstblock = args.fsbno; 3271 *firstblock = args.fsbno;
3271 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); 3272 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
3272 memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); 3273 memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
3273 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); 3274 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
3274 xfs_bmap_forkoff_reset(args.mp, ip, whichfork); 3275 xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
3275 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); 3276 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
3276 xfs_iext_add(ifp, 0, 1); 3277 xfs_iext_add(ifp, 0, 1);
3277 ep = xfs_iext_get_ext(ifp, 0); 3278 ep = xfs_iext_get_ext(ifp, 0);
3278 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); 3279 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3279 trace_xfs_bmap_post_update(ip, 0, 3280 trace_xfs_bmap_post_update(ip, 0,
3280 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0, 3281 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
3281 _THIS_IP_); 3282 _THIS_IP_);
3282 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 3283 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3283 ip->i_d.di_nblocks = 1; 3284 ip->i_d.di_nblocks = 1;
3284 xfs_trans_mod_dquot_byino(tp, ip, 3285 xfs_trans_mod_dquot_byino(tp, ip,
3285 XFS_TRANS_DQ_BCOUNT, 1L); 3286 XFS_TRANS_DQ_BCOUNT, 1L);
3286 flags |= xfs_ilog_fext(whichfork); 3287 flags |= xfs_ilog_fext(whichfork);
3287 } else { 3288 } else {
3288 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); 3289 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
3289 xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork); 3290 xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
3290 } 3291 }
3291 ifp->if_flags &= ~XFS_IFINLINE; 3292 ifp->if_flags &= ~XFS_IFINLINE;
3292 ifp->if_flags |= XFS_IFEXTENTS; 3293 ifp->if_flags |= XFS_IFEXTENTS;
3293 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 3294 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3294 flags |= XFS_ILOG_CORE; 3295 flags |= XFS_ILOG_CORE;
3295 done: 3296 done:
3296 *logflagsp = flags; 3297 *logflagsp = flags;
3297 return error; 3298 return error;
3298 } 3299 }
3299 3300
/*
 * Search the extent records for the entry containing block bno.
 * If bno lies in a hole, point to the next entry.  If bno lies
 * past eof, *eofp will be set, and *prevp will contain the last
 * entry (null if none).  Else, *lastxp will be set to the index
 * of the found entry; *gotp will contain the entry.
 */
STATIC xfs_bmbt_rec_host_t *		/* pointer to found extent entry */
xfs_bmap_search_multi_extents(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number searched for */
	int		*eofp,		/* out: end of file found */
	xfs_extnum_t	*lastxp,	/* out: last extent index */
	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
	xfs_bmbt_irec_t	*prevp)		/* out: previous extent entry found */
{
	xfs_bmbt_rec_host_t *ep;		/* extent record pointer */
	xfs_extnum_t	lastx;		/* last extent index */

	/*
	 * Initialize the extent entry structure to catch access to
	 * uninitialized br_startblock field.  The 0xa5a5.. patterns are
	 * deliberate poison values so that a caller that reads *gotp on
	 * an eof/hole path shows up as an obviously bogus block number.
	 */
	gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
	gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
	gotp->br_state = XFS_EXT_INVALID;
#if XFS_BIG_BLKNOS
	gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
#else
	gotp->br_startblock = 0xffffa5a5;
#endif
	prevp->br_startoff = NULLFILEOFF;

	/* lastx returns the index of the extent at/after bno */
	ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
	if (lastx > 0) {
		/* there is an extent before the found index; report it */
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
	}
	/* if_bytes / sizeof(rec) is the number of in-core extent records */
	if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
		/* bno is within (or in a hole before) extent lastx */
		xfs_bmbt_get_all(ep, gotp);
		*eofp = 0;
	} else {
		/* bno is past the last extent: report prev as got, flag eof */
		if (lastx > 0) {
			*gotp = *prevp;
		}
		*eofp = 1;
		ep = NULL;
	}
	*lastxp = lastx;
	return ep;
}
3350 3351
/*
 * Search the extents list for the inode, for the extent containing bno.
 * If bno lies in a hole, point to the next entry.  If bno lies past eof,
 * *eofp will be set, and *prevp will contain the last entry (null if none).
 * Else, *lastxp will be set to the index of the found
 * entry; *gotp will contain the entry.
 */
STATIC xfs_bmbt_rec_host_t *                 /* pointer to found extent entry */
xfs_bmap_search_extents(
	xfs_inode_t     *ip,            /* incore inode pointer */
	xfs_fileoff_t   bno,            /* block number searched for */
	int             fork,           /* data or attr fork */
	int             *eofp,          /* out: end of file found */
	xfs_extnum_t    *lastxp,        /* out: last extent index */
	xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
	xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
{
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_bmbt_rec_host_t  *ep;            /* extent record pointer */

	XFS_STATS_INC(xs_look_exlist);
	ifp = XFS_IFORK_PTR(ip, fork);

	ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);

	/*
	 * A mapped extent starting at block zero is only legal for the data
	 * fork of a realtime inode; anywhere else it indicates on-disk
	 * corruption, so raise an alert and report "not found" to the
	 * caller rather than handing back a bogus mapping.
	 */
	if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
		     !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
		xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
				"Access to block zero in inode %llu "
				"start_block: %llx start_off: %llx "
				"blkcnt: %llx extent-state: %x lastx: %x\n",
				(unsigned long long)ip->i_ino,
				(unsigned long long)gotp->br_startblock,
				(unsigned long long)gotp->br_startoff,
				(unsigned long long)gotp->br_blockcount,
				gotp->br_state, *lastxp);
		*lastxp = NULLEXTNUM;
		*eofp = 1;
		return NULL;
	}
	return ep;
}
3393 3394
3394 /* 3395 /*
3395 * Compute the worst-case number of indirect blocks that will be used 3396 * Compute the worst-case number of indirect blocks that will be used
3396 * for ip's delayed extent of length "len". 3397 * for ip's delayed extent of length "len".
3397 */ 3398 */
3398 STATIC xfs_filblks_t 3399 STATIC xfs_filblks_t
3399 xfs_bmap_worst_indlen( 3400 xfs_bmap_worst_indlen(
3400 xfs_inode_t *ip, /* incore inode pointer */ 3401 xfs_inode_t *ip, /* incore inode pointer */
3401 xfs_filblks_t len) /* delayed extent length */ 3402 xfs_filblks_t len) /* delayed extent length */
3402 { 3403 {
3403 int level; /* btree level number */ 3404 int level; /* btree level number */
3404 int maxrecs; /* maximum record count at this level */ 3405 int maxrecs; /* maximum record count at this level */
3405 xfs_mount_t *mp; /* mount structure */ 3406 xfs_mount_t *mp; /* mount structure */
3406 xfs_filblks_t rval; /* return value */ 3407 xfs_filblks_t rval; /* return value */
3407 3408
3408 mp = ip->i_mount; 3409 mp = ip->i_mount;
3409 maxrecs = mp->m_bmap_dmxr[0]; 3410 maxrecs = mp->m_bmap_dmxr[0];
3410 for (level = 0, rval = 0; 3411 for (level = 0, rval = 0;
3411 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); 3412 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
3412 level++) { 3413 level++) {
3413 len += maxrecs - 1; 3414 len += maxrecs - 1;
3414 do_div(len, maxrecs); 3415 do_div(len, maxrecs);
3415 rval += len; 3416 rval += len;
3416 if (len == 1) 3417 if (len == 1)
3417 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - 3418 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
3418 level - 1; 3419 level - 1;
3419 if (level == 0) 3420 if (level == 0)
3420 maxrecs = mp->m_bmap_dmxr[1]; 3421 maxrecs = mp->m_bmap_dmxr[1];
3421 } 3422 }
3422 return rval; 3423 return rval;
3423 } 3424 }
3424 3425
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 *
 * Allocates and reserves a new transaction, takes the inode lock,
 * computes the attribute fork offset for the inode's data format,
 * attaches an in-core attr fork, converts any existing data-fork
 * mapping metadata as needed, and updates the superblock feature
 * bits (ATTR / ATTR2) the first time an attribute fork is created.
 * The error paths unwind in strict reverse order of acquisition.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	xfs_bmap_free_t		flist;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			committed;	/* xaction was committed */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	/* caller guarantees there is no attr fork yet */
	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
	tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
	blks = XFS_ADDAFORK_SPACE_RES(mp);
	if (rsvd)
		tp->t_flags |= XFS_TRANS_RESERVE;
	if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
		goto error0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error) {
		/* quota failure: drop lock and cancel reservation inline */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
		return error;
	}
	/* re-check under the lock: someone may have raced an attr fork in */
	if (XFS_IFORK_Q(ip))
		goto error1;
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}
	ASSERT(ip->i_d.di_anextents == 0);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/*
	 * Pick the attr fork offset (in 8-byte units) based on the data
	 * fork format.  ATTR2 (version 2) lets the fork offset float.
	 */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = XFS_ERROR(EINVAL);
		goto error1;
	}

	/* attach the in-core attribute fork */
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_bmap_init(&flist, &firstblock);
	/*
	 * Adding the fork may shrink the space available to the data
	 * fork; convert its mapping metadata accordingly.
	 */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&flist, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto error2;
	/*
	 * First attribute ever on this filesystem (or first ATTR2 use):
	 * set the superblock feature bits under m_sb_lock and log them.
	 */
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		__int64_t sbfields = 0;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			sbfields |= XFS_SB_VERSIONNUM;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
		}
		if (sbfields) {
			spin_unlock(&mp->m_sb_lock);
			xfs_mod_sb(tp, sbfields);
		} else
			spin_unlock(&mp->m_sb_lock);
	}

	error = xfs_bmap_finish(&tp, &flist, &committed);
	if (error)
		goto error2;
	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
error2:
	/* undo any queued extent frees */
	xfs_bmap_cancel(&flist);
error1:
	/* drop the inode lock taken above */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
error0:
	/* abort the (possibly dirty) transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
	return error;
}
3559 3560
3560 /* 3561 /*
3561 * Add the extent to the list of extents to be free at transaction end. 3562 * Add the extent to the list of extents to be free at transaction end.
3562 * The list is maintained sorted (by block number). 3563 * The list is maintained sorted (by block number).
3563 */ 3564 */
3564 /* ARGSUSED */ 3565 /* ARGSUSED */
3565 void 3566 void
3566 xfs_bmap_add_free( 3567 xfs_bmap_add_free(
3567 xfs_fsblock_t bno, /* fs block number of extent */ 3568 xfs_fsblock_t bno, /* fs block number of extent */
3568 xfs_filblks_t len, /* length of extent */ 3569 xfs_filblks_t len, /* length of extent */
3569 xfs_bmap_free_t *flist, /* list of extents */ 3570 xfs_bmap_free_t *flist, /* list of extents */
3570 xfs_mount_t *mp) /* mount point structure */ 3571 xfs_mount_t *mp) /* mount point structure */
3571 { 3572 {
3572 xfs_bmap_free_item_t *cur; /* current (next) element */ 3573 xfs_bmap_free_item_t *cur; /* current (next) element */
3573 xfs_bmap_free_item_t *new; /* new element */ 3574 xfs_bmap_free_item_t *new; /* new element */
3574 xfs_bmap_free_item_t *prev; /* previous element */ 3575 xfs_bmap_free_item_t *prev; /* previous element */
3575 #ifdef DEBUG 3576 #ifdef DEBUG
3576 xfs_agnumber_t agno; 3577 xfs_agnumber_t agno;
3577 xfs_agblock_t agbno; 3578 xfs_agblock_t agbno;
3578 3579
3579 ASSERT(bno != NULLFSBLOCK); 3580 ASSERT(bno != NULLFSBLOCK);
3580 ASSERT(len > 0); 3581 ASSERT(len > 0);
3581 ASSERT(len <= MAXEXTLEN); 3582 ASSERT(len <= MAXEXTLEN);
3582 ASSERT(!isnullstartblock(bno)); 3583 ASSERT(!isnullstartblock(bno));
3583 agno = XFS_FSB_TO_AGNO(mp, bno); 3584 agno = XFS_FSB_TO_AGNO(mp, bno);
3584 agbno = XFS_FSB_TO_AGBNO(mp, bno); 3585 agbno = XFS_FSB_TO_AGBNO(mp, bno);
3585 ASSERT(agno < mp->m_sb.sb_agcount); 3586 ASSERT(agno < mp->m_sb.sb_agcount);
3586 ASSERT(agbno < mp->m_sb.sb_agblocks); 3587 ASSERT(agbno < mp->m_sb.sb_agblocks);
3587 ASSERT(len < mp->m_sb.sb_agblocks); 3588 ASSERT(len < mp->m_sb.sb_agblocks);
3588 ASSERT(agbno + len <= mp->m_sb.sb_agblocks); 3589 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
3589 #endif 3590 #endif
3590 ASSERT(xfs_bmap_free_item_zone != NULL); 3591 ASSERT(xfs_bmap_free_item_zone != NULL);
3591 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); 3592 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
3592 new->xbfi_startblock = bno; 3593 new->xbfi_startblock = bno;
3593 new->xbfi_blockcount = (xfs_extlen_t)len; 3594 new->xbfi_blockcount = (xfs_extlen_t)len;
3594 for (prev = NULL, cur = flist->xbf_first; 3595 for (prev = NULL, cur = flist->xbf_first;
3595 cur != NULL; 3596 cur != NULL;
3596 prev = cur, cur = cur->xbfi_next) { 3597 prev = cur, cur = cur->xbfi_next) {
3597 if (cur->xbfi_startblock >= bno) 3598 if (cur->xbfi_startblock >= bno)
3598 break; 3599 break;
3599 } 3600 }
3600 if (prev) 3601 if (prev)
3601 prev->xbfi_next = new; 3602 prev->xbfi_next = new;
3602 else 3603 else
3603 flist->xbf_first = new; 3604 flist->xbf_first = new;
3604 new->xbfi_next = cur; 3605 new->xbfi_next = cur;
3605 flist->xbf_count++; 3606 flist->xbf_count++;
3606 } 3607 }
3607 3608
3608 /* 3609 /*
3609 * Compute and fill in the value of the maximum depth of a bmap btree 3610 * Compute and fill in the value of the maximum depth of a bmap btree
3610 * in this filesystem. Done once, during mount. 3611 * in this filesystem. Done once, during mount.
3611 */ 3612 */
3612 void 3613 void
3613 xfs_bmap_compute_maxlevels( 3614 xfs_bmap_compute_maxlevels(
3614 xfs_mount_t *mp, /* file system mount structure */ 3615 xfs_mount_t *mp, /* file system mount structure */
3615 int whichfork) /* data or attr fork */ 3616 int whichfork) /* data or attr fork */
3616 { 3617 {
3617 int level; /* btree level */ 3618 int level; /* btree level */
3618 uint maxblocks; /* max blocks at this level */ 3619 uint maxblocks; /* max blocks at this level */
3619 uint maxleafents; /* max leaf entries possible */ 3620 uint maxleafents; /* max leaf entries possible */
3620 int maxrootrecs; /* max records in root block */ 3621 int maxrootrecs; /* max records in root block */
3621 int minleafrecs; /* min records in leaf block */ 3622 int minleafrecs; /* min records in leaf block */
3622 int minnoderecs; /* min records in node block */ 3623 int minnoderecs; /* min records in node block */
3623 int sz; /* root block size */ 3624 int sz; /* root block size */
3624 3625
3625 /* 3626 /*
3626 * The maximum number of extents in a file, hence the maximum 3627 * The maximum number of extents in a file, hence the maximum
3627 * number of leaf entries, is controlled by the type of di_nextents 3628 * number of leaf entries, is controlled by the type of di_nextents
3628 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents 3629 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
3629 * (a signed 16-bit number, xfs_aextnum_t). 3630 * (a signed 16-bit number, xfs_aextnum_t).
3630 * 3631 *
3631 * Note that we can no longer assume that if we are in ATTR1 that 3632 * Note that we can no longer assume that if we are in ATTR1 that
3632 * the fork offset of all the inodes will be 3633 * the fork offset of all the inodes will be
3633 * (xfs_default_attroffset(ip) >> 3) because we could have mounted 3634 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
3634 * with ATTR2 and then mounted back with ATTR1, keeping the 3635 * with ATTR2 and then mounted back with ATTR1, keeping the
3635 * di_forkoff's fixed but probably at various positions. Therefore, 3636 * di_forkoff's fixed but probably at various positions. Therefore,
3636 * for both ATTR1 and ATTR2 we have to assume the worst case scenario 3637 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
3637 * of a minimum size available. 3638 * of a minimum size available.
3638 */ 3639 */
3639 if (whichfork == XFS_DATA_FORK) { 3640 if (whichfork == XFS_DATA_FORK) {
3640 maxleafents = MAXEXTNUM; 3641 maxleafents = MAXEXTNUM;
3641 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS); 3642 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
3642 } else { 3643 } else {
3643 maxleafents = MAXAEXTNUM; 3644 maxleafents = MAXAEXTNUM;
3644 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS); 3645 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
3645 } 3646 }
3646 maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0); 3647 maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0);
3647 minleafrecs = mp->m_bmap_dmnr[0]; 3648 minleafrecs = mp->m_bmap_dmnr[0];
3648 minnoderecs = mp->m_bmap_dmnr[1]; 3649 minnoderecs = mp->m_bmap_dmnr[1];
3649 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; 3650 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
3650 for (level = 1; maxblocks > 1; level++) { 3651 for (level = 1; maxblocks > 1; level++) {
3651 if (maxblocks <= maxrootrecs) 3652 if (maxblocks <= maxrootrecs)
3652 maxblocks = 1; 3653 maxblocks = 1;
3653 else 3654 else
3654 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; 3655 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
3655 } 3656 }
3656 mp->m_bm_maxlevels[whichfork] = level; 3657 mp->m_bm_maxlevels[whichfork] = level;
3657 } 3658 }
3658 3659
3659 /* 3660 /*
3660 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi 3661 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
3661 * caller. Frees all the extents that need freeing, which must be done 3662 * caller. Frees all the extents that need freeing, which must be done
3662 * last due to locking considerations. We never free any extents in 3663 * last due to locking considerations. We never free any extents in
3663 * the first transaction. 3664 * the first transaction.
3664 * 3665 *
3665 * Return 1 if the given transaction was committed and a new one 3666 * Return 1 if the given transaction was committed and a new one
3666 * started, and 0 otherwise in the committed parameter. 3667 * started, and 0 otherwise in the committed parameter.
3667 */ 3668 */
3668 int /* error */ 3669 int /* error */
3669 xfs_bmap_finish( 3670 xfs_bmap_finish(
3670 xfs_trans_t **tp, /* transaction pointer addr */ 3671 xfs_trans_t **tp, /* transaction pointer addr */
3671 xfs_bmap_free_t *flist, /* i/o: list extents to free */ 3672 xfs_bmap_free_t *flist, /* i/o: list extents to free */
3672 int *committed) /* xact committed or not */ 3673 int *committed) /* xact committed or not */
3673 { 3674 {
3674 xfs_efd_log_item_t *efd; /* extent free data */ 3675 xfs_efd_log_item_t *efd; /* extent free data */
3675 xfs_efi_log_item_t *efi; /* extent free intention */ 3676 xfs_efi_log_item_t *efi; /* extent free intention */
3676 int error; /* error return value */ 3677 int error; /* error return value */
3677 xfs_bmap_free_item_t *free; /* free extent item */ 3678 xfs_bmap_free_item_t *free; /* free extent item */
3678 unsigned int logres; /* new log reservation */ 3679 unsigned int logres; /* new log reservation */
3679 unsigned int logcount; /* new log count */ 3680 unsigned int logcount; /* new log count */
3680 xfs_mount_t *mp; /* filesystem mount structure */ 3681 xfs_mount_t *mp; /* filesystem mount structure */
3681 xfs_bmap_free_item_t *next; /* next item on free list */ 3682 xfs_bmap_free_item_t *next; /* next item on free list */
3682 xfs_trans_t *ntp; /* new transaction pointer */ 3683 xfs_trans_t *ntp; /* new transaction pointer */
3683 3684
3684 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); 3685 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
3685 if (flist->xbf_count == 0) { 3686 if (flist->xbf_count == 0) {
3686 *committed = 0; 3687 *committed = 0;
3687 return 0; 3688 return 0;
3688 } 3689 }
3689 ntp = *tp; 3690 ntp = *tp;
3690 efi = xfs_trans_get_efi(ntp, flist->xbf_count); 3691 efi = xfs_trans_get_efi(ntp, flist->xbf_count);
3691 for (free = flist->xbf_first; free; free = free->xbfi_next) 3692 for (free = flist->xbf_first; free; free = free->xbfi_next)
3692 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock, 3693 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
3693 free->xbfi_blockcount); 3694 free->xbfi_blockcount);
3694 logres = ntp->t_log_res; 3695 logres = ntp->t_log_res;
3695 logcount = ntp->t_log_count; 3696 logcount = ntp->t_log_count;
3696 ntp = xfs_trans_dup(*tp); 3697 ntp = xfs_trans_dup(*tp);
3697 error = xfs_trans_commit(*tp, 0); 3698 error = xfs_trans_commit(*tp, 0);
3698 *tp = ntp; 3699 *tp = ntp;
3699 *committed = 1; 3700 *committed = 1;
3700 /* 3701 /*
3701 * We have a new transaction, so we should return committed=1, 3702 * We have a new transaction, so we should return committed=1,
3702 * even though we're returning an error. 3703 * even though we're returning an error.
3703 */ 3704 */
3704 if (error) 3705 if (error)
3705 return error; 3706 return error;
3706 3707
3707 /* 3708 /*
3708 * transaction commit worked ok so we can drop the extra ticket 3709 * transaction commit worked ok so we can drop the extra ticket
3709 * reference that we gained in xfs_trans_dup() 3710 * reference that we gained in xfs_trans_dup()
3710 */ 3711 */
3711 xfs_log_ticket_put(ntp->t_ticket); 3712 xfs_log_ticket_put(ntp->t_ticket);
3712 3713
3713 if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES, 3714 if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
3714 logcount))) 3715 logcount)))
3715 return error; 3716 return error;
3716 efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count); 3717 efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
3717 for (free = flist->xbf_first; free != NULL; free = next) { 3718 for (free = flist->xbf_first; free != NULL; free = next) {
3718 next = free->xbfi_next; 3719 next = free->xbfi_next;
3719 if ((error = xfs_free_extent(ntp, free->xbfi_startblock, 3720 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
3720 free->xbfi_blockcount))) { 3721 free->xbfi_blockcount))) {
3721 /* 3722 /*
3722 * The bmap free list will be cleaned up at a 3723 * The bmap free list will be cleaned up at a
3723 * higher level. The EFI will be canceled when 3724 * higher level. The EFI will be canceled when
3724 * this transaction is aborted. 3725 * this transaction is aborted.
3725 * Need to force shutdown here to make sure it 3726 * Need to force shutdown here to make sure it
3726 * happens, since this transaction may not be 3727 * happens, since this transaction may not be
3727 * dirty yet. 3728 * dirty yet.
3728 */ 3729 */
3729 mp = ntp->t_mountp; 3730 mp = ntp->t_mountp;
3730 if (!XFS_FORCED_SHUTDOWN(mp)) 3731 if (!XFS_FORCED_SHUTDOWN(mp))
3731 xfs_force_shutdown(mp, 3732 xfs_force_shutdown(mp,
3732 (error == EFSCORRUPTED) ? 3733 (error == EFSCORRUPTED) ?
3733 SHUTDOWN_CORRUPT_INCORE : 3734 SHUTDOWN_CORRUPT_INCORE :
3734 SHUTDOWN_META_IO_ERROR); 3735 SHUTDOWN_META_IO_ERROR);
3735 return error; 3736 return error;
3736 } 3737 }
3737 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock, 3738 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
3738 free->xbfi_blockcount); 3739 free->xbfi_blockcount);
3739 xfs_bmap_del_free(flist, NULL, free); 3740 xfs_bmap_del_free(flist, NULL, free);
3740 } 3741 }
3741 return 0; 3742 return 0;
3742 } 3743 }
3743 3744
3744 /* 3745 /*
3745 * Free up any items left in the list. 3746 * Free up any items left in the list.
3746 */ 3747 */
3747 void 3748 void
3748 xfs_bmap_cancel( 3749 xfs_bmap_cancel(
3749 xfs_bmap_free_t *flist) /* list of bmap_free_items */ 3750 xfs_bmap_free_t *flist) /* list of bmap_free_items */
3750 { 3751 {
3751 xfs_bmap_free_item_t *free; /* free list item */ 3752 xfs_bmap_free_item_t *free; /* free list item */
3752 xfs_bmap_free_item_t *next; 3753 xfs_bmap_free_item_t *next;
3753 3754
3754 if (flist->xbf_count == 0) 3755 if (flist->xbf_count == 0)
3755 return; 3756 return;
3756 ASSERT(flist->xbf_first != NULL); 3757 ASSERT(flist->xbf_first != NULL);
3757 for (free = flist->xbf_first; free; free = next) { 3758 for (free = flist->xbf_first; free; free = next) {
3758 next = free->xbfi_next; 3759 next = free->xbfi_next;
3759 xfs_bmap_del_free(flist, NULL, free); 3760 xfs_bmap_del_free(flist, NULL, free);
3760 } 3761 }
3761 ASSERT(flist->xbf_count == 0); 3762 ASSERT(flist->xbf_count == 0);
3762 } 3763 }
3763 3764
/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 *
 * On entry *first_unused holds the lowest offset the caller will accept;
 * on success it is overwritten with the start of the found hole.
 */
int						/* error */
xfs_bmap_first_unused(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_extlen_t	len,			/* size of hole to find */
	xfs_fileoff_t	*first_unused,		/* unused block */
	int		whichfork)		/* data or attr fork */
{
	int		error;			/* error return value */
	int		idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_fileoff_t	lastaddr;		/* last block number seen */
	xfs_fileoff_t	lowest;			/* lowest useful block */
	xfs_fileoff_t	max;			/* starting useful block */
	xfs_fileoff_t	off;			/* offset for this block */
	xfs_extnum_t	nextents;		/* number of extent entries */

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	/* Local-format forks have no extents, so there is no hole to find. */
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	/* Make sure the in-core extent list is populated before scanning. */
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	lowest = *first_unused;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
		off = xfs_bmbt_get_startoff(ep);
		/*
		 * See if the hole before this extent will work.
		 */
		if (off >= lowest + len && off - max >= len) {
			*first_unused = max;
			return 0;
		}
		lastaddr = off + xfs_bmbt_get_blockcount(ep);
		/* Never report a hole below the caller's lower bound. */
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}
	/* No interior hole found: first unused space is past the last extent. */
	*first_unused = max;
	return 0;
}
3817 3818
/*
 * Returns the file-relative block number of the last block + 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_fileoff_t	*last_block,		/* last block */
	int		whichfork)		/* data or attr fork */
{
	xfs_fileoff_t	bno;			/* input file offset */
	int		eof;			/* hit end of file */
	xfs_bmbt_rec_host_t *ep;		/* pointer to last extent */
	int		error;			/* error return value */
	xfs_bmbt_irec_t	got;			/* current extent value */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	lastx;			/* last extent used */
	xfs_bmbt_irec_t	prev;			/* previous extent value */

	/* Only btree, extents and local formats are valid here. */
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
	       return XFS_ERROR(EIO);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*last_block = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	/* Read the extent list in from disk if it is not cached yet. */
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	bno = *last_block - 1;
	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
		&prev);
	/*
	 * If bno falls past EOF or in a hole, the answer is the end of the
	 * previous extent (or 0 if there is none).
	 */
	if (eof || xfs_bmbt_get_startoff(ep) > bno) {
		if (prev.br_startoff == NULLFILEOFF)
			*last_block = 0;
		else
			*last_block = prev.br_startoff + prev.br_blockcount;
	}
	/*
	 * Otherwise *last_block is already the right answer.
	 */
	return 0;
}
3866 3867
/*
 * Look up the last extent record in the given fork, reading the extent
 * list in from disk first if necessary.
 *
 * On success, *is_empty is set to 1 if the fork holds no extents;
 * otherwise *is_empty is 0 and the last record is copied into *rec.
 */
STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	int			error;
	int			nextents;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*is_empty = 1;
		return 0;
	}

	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
	*is_empty = 0;
	return 0;
}
3895 3896
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = 0;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	/* An empty fork leaves aeof at 0 per the comment above. */
	if (error || is_empty)
		return error;

	/*
	 * Check if we are allocation or past the last extent, or at least into
	 * the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
3929 3930
3930 /* 3931 /*
3931 * Check if the endoff is outside the last extent. If so the caller will grow 3932 * Check if the endoff is outside the last extent. If so the caller will grow
3932 * the allocation to a stripe unit boundary. All offsets are considered outside 3933 * the allocation to a stripe unit boundary. All offsets are considered outside
3933 * the end of file for an empty fork, so 1 is returned in *eof in that case. 3934 * the end of file for an empty fork, so 1 is returned in *eof in that case.
3934 */ 3935 */
3935 int 3936 int
3936 xfs_bmap_eof( 3937 xfs_bmap_eof(
3937 struct xfs_inode *ip, 3938 struct xfs_inode *ip,
3938 xfs_fileoff_t endoff, 3939 xfs_fileoff_t endoff,
3939 int whichfork, 3940 int whichfork,
3940 int *eof) 3941 int *eof)
3941 { 3942 {
3942 struct xfs_bmbt_irec rec; 3943 struct xfs_bmbt_irec rec;
3943 int error; 3944 int error;
3944 3945
3945 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof); 3946 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
3946 if (error || *eof) 3947 if (error || *eof)
3947 return error; 3948 return error;
3948 3949
3949 *eof = endoff >= rec.br_startoff + rec.br_blockcount; 3950 *eof = endoff >= rec.br_startoff + rec.br_blockcount;
3950 return 0; 3951 return 0;
3951 } 3952 }
3952 3953
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	/* Local-format forks have no extent records; report offset 0. */
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
	       return XFS_ERROR(EIO);

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	/* Empty fork leaves *last_block at 0. */
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
3985 3986
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	xfs_bmbt_rec_host_t *ep;	/* ptr to fork's extent */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */

#ifndef DEBUG
	/*
	 * Non-debug fast path: a one-block data fork implies the file size
	 * equals the filesystem block size, so just compare sizes.  The
	 * debug build falls through and cross-checks via the ASSERT below.
	 */
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_get_all(ep, &s);
	/* One extent qualifies only if it is the single block at offset 0. */
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}
4018 4019
4019 STATIC int 4020 STATIC int
4020 xfs_bmap_sanity_check( 4021 xfs_bmap_sanity_check(
4021 struct xfs_mount *mp, 4022 struct xfs_mount *mp,
4022 struct xfs_buf *bp, 4023 struct xfs_buf *bp,
4023 int level) 4024 int level)
4024 { 4025 {
4025 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); 4026 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4026 4027
4027 if (block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC) || 4028 if (block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC) ||
4028 be16_to_cpu(block->bb_level) != level || 4029 be16_to_cpu(block->bb_level) != level ||
4029 be16_to_cpu(block->bb_numrecs) == 0 || 4030 be16_to_cpu(block->bb_numrecs) == 0 ||
4030 be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0]) 4031 be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
4031 return 0; 4032 return 0;
4032 return 1; 4033 return 1;
4033 } 4034 }
4034 4035
/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_exntfmt_t		exntf;	/* XFS_EXTFMT_NOSTATE, if checking */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	/* Attr forks never carry unwritten-extent state bits. */
	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
					XFS_EXTFMT_INODE(ip);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF)))
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		XFS_WANT_CORRUPTED_GOTO(
			xfs_bmap_sanity_check(mp, bp, level),
			error0);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	i = 0;
	/*
	 * Loop over all leaf nodes. Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;
		xfs_extnum_t	start;

		num_recs = xfs_btree_get_numrecs(block);
		/* More on-disk records than in-core room means corruption. */
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		XFS_WANT_CORRUPTED_GOTO(
			xfs_bmap_sanity_check(mp, bp, 0),
			error0);
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		start = i;
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
		}
		if (exntf == XFS_EXTFMT_NOSTATE) {
			/*
			 * Check all attribute bmap btree records and
			 * any "older" data bmap btree records for a
			 * set bit in the "extent flag" position.
			 */
			if (unlikely(xfs_check_nostate_extents(ifp,
					start, num_recs))) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW,
						 ip->i_mount);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF)))
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
	ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return XFS_ERROR(EFSCORRUPTED);
}
4172 4173
4173 #ifdef DEBUG 4174 #ifdef DEBUG
/*
 * Add bmap trace insert entries for all the contents of the extent records.
 *
 * The caller-supplied cnt must match the number of in-core extent
 * records for the fork (checked by the ASSERT below).
 */
void
xfs_bmap_trace_exlist(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	cnt,		/* count of entries in the list */
	int		whichfork,	/* data or attr fork */
	unsigned long	caller_ip)	/* return address of tracing caller */
{
	xfs_extnum_t	idx;		/* extent record index */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		state = 0;	/* tracepoint state flags */

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
	for (idx = 0; idx < cnt; idx++)
		trace_xfs_extlist(ip, idx, whichfork, caller_ip);
}
4196 4197
4197 /* 4198 /*
4198 * Validate that the bmbt_irecs being returned from bmapi are valid 4199 * Validate that the bmbt_irecs being returned from bmapi are valid
4199 * given the callers original parameters. Specifically check the 4200 * given the callers original parameters. Specifically check the
4200 * ranges of the returned irecs to ensure that they only extent beyond 4201 * ranges of the returned irecs to ensure that they only extent beyond
4201 * the given parameters if the XFS_BMAPI_ENTIRE flag was set. 4202 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
4202 */ 4203 */
4203 STATIC void 4204 STATIC void
4204 xfs_bmap_validate_ret( 4205 xfs_bmap_validate_ret(
4205 xfs_fileoff_t bno, 4206 xfs_fileoff_t bno,
4206 xfs_filblks_t len, 4207 xfs_filblks_t len,
4207 int flags, 4208 int flags,
4208 xfs_bmbt_irec_t *mval, 4209 xfs_bmbt_irec_t *mval,
4209 int nmap, 4210 int nmap,
4210 int ret_nmap) 4211 int ret_nmap)
4211 { 4212 {
4212 int i; /* index to map values */ 4213 int i; /* index to map values */
4213 4214
4214 ASSERT(ret_nmap <= nmap); 4215 ASSERT(ret_nmap <= nmap);
4215 4216
4216 for (i = 0; i < ret_nmap; i++) { 4217 for (i = 0; i < ret_nmap; i++) {
4217 ASSERT(mval[i].br_blockcount > 0); 4218 ASSERT(mval[i].br_blockcount > 0);
4218 if (!(flags & XFS_BMAPI_ENTIRE)) { 4219 if (!(flags & XFS_BMAPI_ENTIRE)) {
4219 ASSERT(mval[i].br_startoff >= bno); 4220 ASSERT(mval[i].br_startoff >= bno);
4220 ASSERT(mval[i].br_blockcount <= len); 4221 ASSERT(mval[i].br_blockcount <= len);
4221 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= 4222 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4222 bno + len); 4223 bno + len);
4223 } else { 4224 } else {
4224 ASSERT(mval[i].br_startoff < bno + len); 4225 ASSERT(mval[i].br_startoff < bno + len);
4225 ASSERT(mval[i].br_startoff + mval[i].br_blockcount > 4226 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4226 bno); 4227 bno);
4227 } 4228 }
4228 ASSERT(i == 0 || 4229 ASSERT(i == 0 ||
4229 mval[i - 1].br_startoff + mval[i - 1].br_blockcount == 4230 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4230 mval[i].br_startoff); 4231 mval[i].br_startoff);
4231 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && 4232 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
4232 mval[i].br_startblock != HOLESTARTBLOCK); 4233 mval[i].br_startblock != HOLESTARTBLOCK);
4233 ASSERT(mval[i].br_state == XFS_EXT_NORM || 4234 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
4234 mval[i].br_state == XFS_EXT_UNWRITTEN); 4235 mval[i].br_state == XFS_EXT_UNWRITTEN);
4235 } 4236 }
4236 } 4237 }
4237 #endif /* DEBUG */ 4238 #endif /* DEBUG */
4238 4239
4239 4240
4240 /* 4241 /*
4241 * Trim the returned map to the required bounds 4242 * Trim the returned map to the required bounds
4242 */ 4243 */
4243 STATIC void 4244 STATIC void
4244 xfs_bmapi_trim_map( 4245 xfs_bmapi_trim_map(
4245 struct xfs_bmbt_irec *mval, 4246 struct xfs_bmbt_irec *mval,
4246 struct xfs_bmbt_irec *got, 4247 struct xfs_bmbt_irec *got,
4247 xfs_fileoff_t *bno, 4248 xfs_fileoff_t *bno,
4248 xfs_filblks_t len, 4249 xfs_filblks_t len,
4249 xfs_fileoff_t obno, 4250 xfs_fileoff_t obno,
4250 xfs_fileoff_t end, 4251 xfs_fileoff_t end,
4251 int n, 4252 int n,
4252 int flags) 4253 int flags)
4253 { 4254 {
4254 if ((flags & XFS_BMAPI_ENTIRE) || 4255 if ((flags & XFS_BMAPI_ENTIRE) ||
4255 got->br_startoff + got->br_blockcount <= obno) { 4256 got->br_startoff + got->br_blockcount <= obno) {
4256 *mval = *got; 4257 *mval = *got;
4257 if (isnullstartblock(got->br_startblock)) 4258 if (isnullstartblock(got->br_startblock))
4258 mval->br_startblock = DELAYSTARTBLOCK; 4259 mval->br_startblock = DELAYSTARTBLOCK;
4259 return; 4260 return;
4260 } 4261 }
4261 4262
4262 if (obno > *bno) 4263 if (obno > *bno)
4263 *bno = obno; 4264 *bno = obno;
4264 ASSERT((*bno >= obno) || (n == 0)); 4265 ASSERT((*bno >= obno) || (n == 0));
4265 ASSERT(*bno < end); 4266 ASSERT(*bno < end);
4266 mval->br_startoff = *bno; 4267 mval->br_startoff = *bno;
4267 if (isnullstartblock(got->br_startblock)) 4268 if (isnullstartblock(got->br_startblock))
4268 mval->br_startblock = DELAYSTARTBLOCK; 4269 mval->br_startblock = DELAYSTARTBLOCK;
4269 else 4270 else
4270 mval->br_startblock = got->br_startblock + 4271 mval->br_startblock = got->br_startblock +
4271 (*bno - got->br_startoff); 4272 (*bno - got->br_startoff);
4272 /* 4273 /*
4273 * Return the minimum of what we got and what we asked for for 4274 * Return the minimum of what we got and what we asked for for
4274 * the length. We can use the len variable here because it is 4275 * the length. We can use the len variable here because it is
4275 * modified below and we could have been there before coming 4276 * modified below and we could have been there before coming
4276 * here if the first part of the allocation didn't overlap what 4277 * here if the first part of the allocation didn't overlap what
4277 * was asked for. 4278 * was asked for.
4278 */ 4279 */
4279 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 4280 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
4280 got->br_blockcount - (*bno - got->br_startoff)); 4281 got->br_blockcount - (*bno - got->br_startoff));
4281 mval->br_state = got->br_state; 4282 mval->br_state = got->br_state;
4282 ASSERT(mval->br_blockcount <= len); 4283 ASSERT(mval->br_blockcount <= len);
4283 return; 4284 return;
4284 } 4285 }
4285 4286
4286 /* 4287 /*
4287 * Update and validate the extent map to return 4288 * Update and validate the extent map to return
4288 */ 4289 */
4289 STATIC void 4290 STATIC void
4290 xfs_bmapi_update_map( 4291 xfs_bmapi_update_map(
4291 struct xfs_bmbt_irec **map, 4292 struct xfs_bmbt_irec **map,
4292 xfs_fileoff_t *bno, 4293 xfs_fileoff_t *bno,
4293 xfs_filblks_t *len, 4294 xfs_filblks_t *len,
4294 xfs_fileoff_t obno, 4295 xfs_fileoff_t obno,
4295 xfs_fileoff_t end, 4296 xfs_fileoff_t end,
4296 int *n, 4297 int *n,
4297 int flags) 4298 int flags)
4298 { 4299 {
4299 xfs_bmbt_irec_t *mval = *map; 4300 xfs_bmbt_irec_t *mval = *map;
4300 4301
4301 ASSERT((flags & XFS_BMAPI_ENTIRE) || 4302 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4302 ((mval->br_startoff + mval->br_blockcount) <= end)); 4303 ((mval->br_startoff + mval->br_blockcount) <= end));
4303 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 4304 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
4304 (mval->br_startoff < obno)); 4305 (mval->br_startoff < obno));
4305 4306
4306 *bno = mval->br_startoff + mval->br_blockcount; 4307 *bno = mval->br_startoff + mval->br_blockcount;
4307 *len = end - *bno; 4308 *len = end - *bno;
4308 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 4309 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
4309 /* update previous map with new information */ 4310 /* update previous map with new information */
4310 ASSERT(mval->br_startblock == mval[-1].br_startblock); 4311 ASSERT(mval->br_startblock == mval[-1].br_startblock);
4311 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 4312 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
4312 ASSERT(mval->br_state == mval[-1].br_state); 4313 ASSERT(mval->br_state == mval[-1].br_state);
4313 mval[-1].br_blockcount = mval->br_blockcount; 4314 mval[-1].br_blockcount = mval->br_blockcount;
4314 mval[-1].br_state = mval->br_state; 4315 mval[-1].br_state = mval->br_state;
4315 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 4316 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
4316 mval[-1].br_startblock != DELAYSTARTBLOCK && 4317 mval[-1].br_startblock != DELAYSTARTBLOCK &&
4317 mval[-1].br_startblock != HOLESTARTBLOCK && 4318 mval[-1].br_startblock != HOLESTARTBLOCK &&
4318 mval->br_startblock == mval[-1].br_startblock + 4319 mval->br_startblock == mval[-1].br_startblock +
4319 mval[-1].br_blockcount && 4320 mval[-1].br_blockcount &&
4320 ((flags & XFS_BMAPI_IGSTATE) || 4321 ((flags & XFS_BMAPI_IGSTATE) ||
4321 mval[-1].br_state == mval->br_state)) { 4322 mval[-1].br_state == mval->br_state)) {
4322 ASSERT(mval->br_startoff == 4323 ASSERT(mval->br_startoff ==
4323 mval[-1].br_startoff + mval[-1].br_blockcount); 4324 mval[-1].br_startoff + mval[-1].br_blockcount);
4324 mval[-1].br_blockcount += mval->br_blockcount; 4325 mval[-1].br_blockcount += mval->br_blockcount;
4325 } else if (*n > 0 && 4326 } else if (*n > 0 &&
4326 mval->br_startblock == DELAYSTARTBLOCK && 4327 mval->br_startblock == DELAYSTARTBLOCK &&
4327 mval[-1].br_startblock == DELAYSTARTBLOCK && 4328 mval[-1].br_startblock == DELAYSTARTBLOCK &&
4328 mval->br_startoff == 4329 mval->br_startoff ==
4329 mval[-1].br_startoff + mval[-1].br_blockcount) { 4330 mval[-1].br_startoff + mval[-1].br_blockcount) {
4330 mval[-1].br_blockcount += mval->br_blockcount; 4331 mval[-1].br_blockcount += mval->br_blockcount;
4331 mval[-1].br_state = mval->br_state; 4332 mval[-1].br_state = mval->br_state;
4332 } else if (!((*n == 0) && 4333 } else if (!((*n == 0) &&
4333 ((mval->br_startoff + mval->br_blockcount) <= 4334 ((mval->br_startoff + mval->br_blockcount) <=
4334 obno))) { 4335 obno))) {
4335 mval++; 4336 mval++;
4336 (*n)++; 4337 (*n)++;
4337 } 4338 }
4338 *map = mval; 4339 *map = mval;
4339 } 4340 }
4340 4341
/*
 * Map file blocks to filesystem blocks without allocation.
 *
 * Read-only lookup: fills mval[] with up to *nmap mappings covering
 * [bno, bno + len).  Holes (and ranges past EOF) are returned as entries
 * with br_startblock == HOLESTARTBLOCK; delalloc extents come back as
 * DELAYSTARTBLOCK via xfs_bmapi_trim_map().  *nmap is updated to the
 * number of entries produced.  Returns 0 or an XFS error value.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,	/* starting file offset to map */
	xfs_filblks_t		len,	/* length to map */
	struct xfs_bmbt_irec	*mval,	/* output: map values */
	int			*nmap,	/* i/o: mval size/count */
	int			flags)	/* XFS_BMAPI_... */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got;	/* extent at or after bno */
	struct xfs_bmbt_irec	prev;	/* extent before bno, if any */
	xfs_fileoff_t		obno;	/* original request offset */
	xfs_fileoff_t		end;	/* one past last offset to map */
	xfs_extnum_t		lastx;	/* current extent record index */
	int			error;
	int			eof;	/* ran off the end of the list */
	int			n = 0;	/* mappings produced so far */
	int			whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
						XFS_ATTR_FORK : XFS_DATA_FORK;

	ASSERT(*nmap >= 1);
	/* Only lookup-compatible flags are valid here. */
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
			   XFS_BMAPI_IGSTATE)));

	/* The fork must be in extent or btree format to be mappable. */
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	XFS_STATS_INC(xs_blk_mapr);

	ifp = XFS_IFORK_PTR(ip, whichfork);

	/* Pull the extent list incore if needed (transactionless read). */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			return error;
	}

	xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
		else
			eof = 1;
	}
	*nmap = n;
	return 0;
}
4430 4431
/*
 * Reserve space for a delayed allocation extent and record it incore.
 *
 * Reserves quota first, then the in-core free space counters (the data
 * blocks themselves plus a worst-case estimate of indirect btree blocks),
 * and finally inserts a delalloc record covering the aligned range into
 * the data fork.  On failure the earlier reservations are unwound in
 * reverse order via the goto labels.  No transaction is used here.
 */
STATIC int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	xfs_fileoff_t		aoff,	/* requested allocation offset */
	xfs_filblks_t		len,	/* requested length */
	struct xfs_bmbt_irec	*got,	/* in: next extent; out: new record */
	struct xfs_bmbt_irec	*prev,	/* extent before aoff */
	xfs_extnum_t		*lastx,	/* in/out: extent record index */
	int			eof)	/* aoff is beyond the last extent */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_extlen_t		alen;	/* length actually reserved */
	xfs_extlen_t		indlen;	/* worst-case indirect blocks */
	char			rt = XFS_IS_REALTIME_INODE(ip);
	xfs_extlen_t		extsz;	/* extent size hint */
	int			error;

	/* Never reserve more than a single on-disk extent can hold. */
	alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);

	/* Figure out the extent size, adjust alen */
	extsz = xfs_get_extsz_hint(ip);
	if (extsz) {
		/*
		 * Make sure we don't exceed a single extent length when we
		 * align the extent by reducing length we are going to
		 * allocate by the maximum amount extent size aligment may
		 * require.
		 */
		alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1));
		error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/* Realtime free space is accounted in units of sb_rextsize. */
	if (rt)
		extsz = alen / mp->m_sb.sb_rextsize;

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks. This number gets adjusted later.  We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
			rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	if (rt) {
		error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
					  -((int64_t)extsz), 0);
	} else {
		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
						 -((int64_t)alen), 0);
	}

	if (error)
		goto out_unreserve_quota;

	/* Indirect blocks always come from the regular block counter. */
	error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
					 -((int64_t)indlen), 0);
	if (error)
		goto out_unreserve_blocks;


	ip->i_delayed_blks += alen;

	/* Build the delalloc record and insert it into the extent list. */
	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;
	xfs_bmap_add_extent_hole_delay(ip, lastx, got);

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
	 * might have merged it into one of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);

	ASSERT(got->br_startoff <= aoff);
	ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
	ASSERT(isnullstartblock(got->br_startblock));
	ASSERT(got->br_state == XFS_EXT_NORM);
	return 0;

out_unreserve_blocks:
	if (rt)
		xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0);
	else
		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
				XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
	return error;
}
4536 4537
/*
 * Map file blocks to filesystem blocks, adding delayed allocations as needed.
 *
 * Like xfs_bmapi_read() for the data fork, but every hole (or range past
 * EOF) encountered gets a delayed allocation reservation made for it via
 * xfs_bmapi_reserve_delalloc() before being returned.  A reservation
 * failure after some mappings were produced returns the partial result
 * with success; failure on the first mapping returns the error with
 * *nmap set to 0.
 */
int
xfs_bmapi_delay(
	struct xfs_inode	*ip,	/* incore inode */
	xfs_fileoff_t		bno,	/* starting file offs. mapped */
	xfs_filblks_t		len,	/* length to map in file */
	struct xfs_bmbt_irec	*mval,	/* output: map values */
	int			*nmap,	/* i/o: mval size/count */
	int			flags)	/* XFS_BMAPI_... */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	struct xfs_bmbt_irec	got;	/* current file extent record */
	struct xfs_bmbt_irec	prev;	/* previous file extent record */
	xfs_fileoff_t		obno;	/* old block number (offset) */
	xfs_fileoff_t		end;	/* end of mapped file region */
	xfs_extnum_t		lastx;	/* last useful extent number */
	int			eof;	/* we've hit the end of extents */
	int			n = 0;	/* current extent index */
	int			error = 0;

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	/* Only XFS_BMAPI_ENTIRE makes sense for delalloc mapping. */
	ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));

	/* The data fork must be in extent or btree format. */
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	XFS_STATS_INC(xs_blk_mapw);

	/* Pull the extent list incore if needed (transactionless read). */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* In a hole (or past EOF): reserve a delalloc extent. */
		if (eof || got.br_startoff > bno) {
			error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
							   &prev, &lastx, eof);
			if (error) {
				/* Partial results are still a success. */
				if (n == 0) {
					*nmap = 0;
					return error;
				}
				break;
			}
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		prev = got;
		if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
		else
			eof = 1;
	}

	*nmap = n;
	return 0;
}
4619 4620
4620 4621
4621 STATIC int 4622 STATIC int
4622 xfs_bmapi_allocate( 4623 xfs_bmapi_allocate(
4623 struct xfs_bmalloca *bma, 4624 struct xfs_bmalloca *bma,
4624 int flags) 4625 int flags)
4625 { 4626 {
4626 struct xfs_mount *mp = bma->ip->i_mount; 4627 struct xfs_mount *mp = bma->ip->i_mount;
4627 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ? 4628 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4628 XFS_ATTR_FORK : XFS_DATA_FORK; 4629 XFS_ATTR_FORK : XFS_DATA_FORK;
4629 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4630 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4630 int tmp_logflags = 0; 4631 int tmp_logflags = 0;
4631 int error; 4632 int error;
4632 int rt; 4633 int rt;
4633 4634
4634 ASSERT(bma->length > 0); 4635 ASSERT(bma->length > 0);
4635 4636
4636 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip); 4637 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
4637 4638
4638 /* 4639 /*
4639 * For the wasdelay case, we could also just allocate the stuff asked 4640 * For the wasdelay case, we could also just allocate the stuff asked
4640 * for in this bmap call but that wouldn't be as good. 4641 * for in this bmap call but that wouldn't be as good.
4641 */ 4642 */
4642 if (bma->wasdel) { 4643 if (bma->wasdel) {
4643 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4644 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4644 bma->offset = bma->got.br_startoff; 4645 bma->offset = bma->got.br_startoff;
4645 if (bma->idx != NULLEXTNUM && bma->idx) { 4646 if (bma->idx != NULLEXTNUM && bma->idx) {
4646 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), 4647 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4647 &bma->prev); 4648 &bma->prev);
4648 } 4649 }
4649 } else { 4650 } else {
4650 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); 4651 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4651 if (!bma->eof) 4652 if (!bma->eof)
4652 bma->length = XFS_FILBLKS_MIN(bma->length, 4653 bma->length = XFS_FILBLKS_MIN(bma->length,
4653 bma->got.br_startoff - bma->offset); 4654 bma->got.br_startoff - bma->offset);
4654 } 4655 }
4655 4656
4656 /* 4657 /*
4657 * Indicate if this is the first user data in the file, or just any 4658 * Indicate if this is the first user data in the file, or just any
4658 * user data. 4659 * user data.
4659 */ 4660 */
4660 if (!(flags & XFS_BMAPI_METADATA)) { 4661 if (!(flags & XFS_BMAPI_METADATA)) {
4661 bma->userdata = (bma->offset == 0) ? 4662 bma->userdata = (bma->offset == 0) ?
4662 XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA; 4663 XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
4663 } 4664 }
4664 4665
4665 bma->minlen = (flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4666 bma->minlen = (flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4666 4667
4667 /* 4668 /*
4668 * Only want to do the alignment at the eof if it is userdata and 4669 * Only want to do the alignment at the eof if it is userdata and
4669 * allocation length is larger than a stripe unit. 4670 * allocation length is larger than a stripe unit.
4670 */ 4671 */
4671 if (mp->m_dalign && bma->length >= mp->m_dalign && 4672 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4672 !(flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4673 !(flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4673 error = xfs_bmap_isaeof(bma, whichfork); 4674 error = xfs_bmap_isaeof(bma, whichfork);
4674 if (error) 4675 if (error)
4675 return error; 4676 return error;
4676 } 4677 }
4678
4679 if (flags & XFS_BMAPI_STACK_SWITCH)
4680 bma->stack_switch = 1;
4677 4681
4678 error = xfs_bmap_alloc(bma); 4682 error = xfs_bmap_alloc(bma);
4679 if (error) 4683 if (error)
4680 return error; 4684 return error;
4681 4685
4682 if (bma->flist->xbf_low) 4686 if (bma->flist->xbf_low)
4683 bma->minleft = 0; 4687 bma->minleft = 0;
4684 if (bma->cur) 4688 if (bma->cur)
4685 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4689 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4686 if (bma->blkno == NULLFSBLOCK) 4690 if (bma->blkno == NULLFSBLOCK)
4687 return 0; 4691 return 0;
4688 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4692 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4689 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4693 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4690 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4694 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4691 bma->cur->bc_private.b.flist = bma->flist; 4695 bma->cur->bc_private.b.flist = bma->flist;
4692 } 4696 }
4693 /* 4697 /*
4694 * Bump the number of extents we've allocated 4698 * Bump the number of extents we've allocated
4695 * in this call. 4699 * in this call.
4696 */ 4700 */
4697 bma->nallocs++; 4701 bma->nallocs++;
4698 4702
4699 if (bma->cur) 4703 if (bma->cur)
4700 bma->cur->bc_private.b.flags = 4704 bma->cur->bc_private.b.flags =
4701 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4705 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4702 4706
4703 bma->got.br_startoff = bma->offset; 4707 bma->got.br_startoff = bma->offset;
4704 bma->got.br_startblock = bma->blkno; 4708 bma->got.br_startblock = bma->blkno;
4705 bma->got.br_blockcount = bma->length; 4709 bma->got.br_blockcount = bma->length;
4706 bma->got.br_state = XFS_EXT_NORM; 4710 bma->got.br_state = XFS_EXT_NORM;
4707 4711
4708 /* 4712 /*
4709 * A wasdelay extent has been initialized, so shouldn't be flagged 4713 * A wasdelay extent has been initialized, so shouldn't be flagged
4710 * as unwritten. 4714 * as unwritten.
4711 */ 4715 */
4712 if (!bma->wasdel && (flags & XFS_BMAPI_PREALLOC) && 4716 if (!bma->wasdel && (flags & XFS_BMAPI_PREALLOC) &&
4713 xfs_sb_version_hasextflgbit(&mp->m_sb)) 4717 xfs_sb_version_hasextflgbit(&mp->m_sb))
4714 bma->got.br_state = XFS_EXT_UNWRITTEN; 4718 bma->got.br_state = XFS_EXT_UNWRITTEN;
4715 4719
4716 if (bma->wasdel) 4720 if (bma->wasdel)
4717 error = xfs_bmap_add_extent_delay_real(bma); 4721 error = xfs_bmap_add_extent_delay_real(bma);
4718 else 4722 else
4719 error = xfs_bmap_add_extent_hole_real(bma, whichfork); 4723 error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4720 4724
4721 bma->logflags |= tmp_logflags; 4725 bma->logflags |= tmp_logflags;
4722 if (error) 4726 if (error)
4723 return error; 4727 return error;
4724 4728
4725 /* 4729 /*
4726 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4730 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4727 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4731 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4728 * the neighbouring ones. 4732 * the neighbouring ones.
4729 */ 4733 */
4730 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got); 4734 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4731 4735
4732 ASSERT(bma->got.br_startoff <= bma->offset); 4736 ASSERT(bma->got.br_startoff <= bma->offset);
4733 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4737 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4734 bma->offset + bma->length); 4738 bma->offset + bma->length);
4735 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4739 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4736 bma->got.br_state == XFS_EXT_UNWRITTEN); 4740 bma->got.br_state == XFS_EXT_UNWRITTEN);
4737 return 0; 4741 return 0;
4738 } 4742 }
4739 4743
/*
 * Convert the extent state of a mapping returned by xfs_bmapi_write() if the
 * caller's flags require it: unwritten->real for a normal write over a
 * preallocated (unwritten) extent, or real->unwritten when both
 * XFS_BMAPI_PREALLOC and XFS_BMAPI_CONVERT are set.
 *
 * Returns 0 on success (or when no conversion is needed), EAGAIN when only
 * part of the requested range was converted and the caller must re-drive the
 * mapping loop, or a negative/positive XFS error code on failure.
 */
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	int			flags)
{
	int			whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
						XFS_ATTR_FORK : XFS_DATA_FORK;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/*
	 * check if we need to do unwritten->real conversion.  A PREALLOC
	 * request wants the extent left unwritten, so nothing to do.
	 */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/*
	 * check if we need to do real->unwritten conversion.  Only done when
	 * both PREALLOC and CONVERT are set; otherwise a normal extent stays
	 * as it is.
	 */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.  Lazily set up a
	 * btree cursor if the fork is in btree format and we don't have one
	 * yet, inheriting the caller's firstblock/free-list context.
	 */
	ASSERT(mval->br_blockcount <= len);
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.flist = bma->flist;
	}
	/* Flip the extent state to the opposite of what it is now. */
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
			&bma->cur, mval, bma->firstblock, bma->flist,
			&tmp_logflags);
	/*
	 * Accumulate log flags even on error so the caller still logs what
	 * was modified before the failure.
	 */
	bma->logflags |= tmp_logflags;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return EAGAIN;
	return 0;
}
4799 4803
/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Details behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * The returned value in "firstblock" from the first call in a transaction
 * must be remembered and presented to subsequent calls in "firstblock".
 * An upper bound for the number of blocks to be allocated is supplied to
 * the first call in "total"; if no allocation group has that many free
 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
 *
 * On return *nmap holds the number of mappings filled into mval.  Returns 0
 * on success or an XFS error code (EFSCORRUPTED, EIO, allocation errors).
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	int			flags,		/* XFS_BMAPI_... */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap,		/* i/o: mval size/count */
	struct xfs_bmap_free	*flist)		/* i/o: list extents to free */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmalloca	bma = { 0 };	/* args for xfs_bmap_alloc */
	xfs_fileoff_t		end;		/* end of mapped file region */
	int			eof;		/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
	int			whichfork;	/* data or attr fork */
	char			inhole;		/* current location is hole in file */
	char			wasdelay;	/* old extent was delayed */

#ifdef DEBUG
	/* Saved copies of the arguments for xfs_bmap_validate_ret() below. */
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
	ASSERT(tp != NULL);
	ASSERT(len > 0);

	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
		XFS_ATTR_FORK : XFS_DATA_FORK;

	/* Reject forks in an unexpected on-disk format (or injected error). */
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	ifp = XFS_IFORK_PTR(ip, whichfork);

	XFS_STATS_INC(xs_blk_mapw);

	/* A local-format fork must be converted to extents before mapping. */
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
						  &bma.logflags, whichfork);
		if (error)
			goto error0;
	}

	/*
	 * First allocation in this transaction (*firstblock unset): reserve
	 * enough free blocks for a potential full-height btree split.  On
	 * subsequent calls the AG is already pinned, so no extra reserve.
	 */
	if (*firstblock == NULLFSBLOCK) {
		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
			bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		else
			bma.minleft = 1;
	} else {
		bma.minleft = 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}

	xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
				&bma.prev);
	n = 0;
	end = bno + len;
	obno = bno;

	bma.tp = tp;
	bma.ip = ip;
	bma.total = total;
	bma.userdata = 0;
	bma.flist = flist;
	bma.firstblock = firstblock;

	/* Walk the requested range, allocating and/or converting as we go. */
	while (bno < end && n < *nmap) {
		inhole = eof || bma.got.br_startoff > bno;
		wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (inhole || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * check for 32-bit overflows and handle them here.
			 */
			if (len > (xfs_filblks_t)MAXEXTLEN)
				bma.length = MAXEXTLEN;
			else
				bma.length = len;

			ASSERT(len > 0);
			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma, flags);
			if (error)
				goto error0;
			/* NULLFSBLOCK means the allocator found no space. */
			if (bma.blkno == NULLFSBLOCK)
				break;
		}

		/* Deal with the allocated space we found. */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
							end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		/* EAGAIN: only part of the range converted, re-drive loop. */
		if (error == EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
					 &bma.got);
		} else
			eof = 1;
	}
	*nmap = n;

	/*
	 * Transform from btree to extents, give it cur.
	 */
	if (xfs_bmap_wants_extents(ip, whichfork)) {
		int		tmp_logflags = 0;

		ASSERT(bma.cur);
		error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
			&tmp_logflags, whichfork);
		bma.logflags |= tmp_logflags;
		if (error)
			goto error0;
	}

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_NEXTENTS(ip, whichfork) >
		XFS_IFORK_MAXEXT(ip, whichfork));
	error = 0;
error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		bma.logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		bma.logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log whatever the flags say, even if error.  Otherwise we might miss
	 * detecting a case where the data is changed, there's an error,
	 * and it's not logged so we don't shutdown when we should.
	 */
	if (bma.logflags)
		xfs_trans_log_inode(tp, ip, bma.logflags);

	if (bma.cur) {
		if (!error) {
			/*
			 * The cursor's firstblock must stay in the same AG as
			 * *firstblock, unless the free list recorded a
			 * low-space allocation (xbf_low) that may come from a
			 * higher AG.
			 */
			ASSERT(*firstblock == NULLFSBLOCK ||
			       XFS_FSB_TO_AGNO(mp, *firstblock) ==
			       XFS_FSB_TO_AGNO(mp,
				       bma.cur->bc_private.b.firstblock) ||
			       (flist->xbf_low &&
				XFS_FSB_TO_AGNO(mp, *firstblock) <
				XFS_FSB_TO_AGNO(mp,
					bma.cur->bc_private.b.firstblock)));
			*firstblock = bma.cur->bc_private.b.firstblock;
		}
		xfs_btree_del_cursor(bma.cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	if (!error)
		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
			orig_nmap, *nmap);
	return error;
}
5036 5040
5037 /* 5041 /*
5038 * Unmap (remove) blocks from a file. 5042 * Unmap (remove) blocks from a file.
5039 * If nexts is nonzero then the number of extents to remove is limited to 5043 * If nexts is nonzero then the number of extents to remove is limited to
5040 * that value. If not all extents in the block range can be removed then 5044 * that value. If not all extents in the block range can be removed then
5041 * *done is set. 5045 * *done is set.
5042 */ 5046 */
5043 int /* error */ 5047 int /* error */
5044 xfs_bunmapi( 5048 xfs_bunmapi(
5045 xfs_trans_t *tp, /* transaction pointer */ 5049 xfs_trans_t *tp, /* transaction pointer */
5046 struct xfs_inode *ip, /* incore inode */ 5050 struct xfs_inode *ip, /* incore inode */
5047 xfs_fileoff_t bno, /* starting offset to unmap */ 5051 xfs_fileoff_t bno, /* starting offset to unmap */
5048 xfs_filblks_t len, /* length to unmap in file */ 5052 xfs_filblks_t len, /* length to unmap in file */
5049 int flags, /* misc flags */ 5053 int flags, /* misc flags */
5050 xfs_extnum_t nexts, /* number of extents max */ 5054 xfs_extnum_t nexts, /* number of extents max */
5051 xfs_fsblock_t *firstblock, /* first allocated block 5055 xfs_fsblock_t *firstblock, /* first allocated block
5052 controls a.g. for allocs */ 5056 controls a.g. for allocs */
5053 xfs_bmap_free_t *flist, /* i/o: list extents to free */ 5057 xfs_bmap_free_t *flist, /* i/o: list extents to free */
5054 int *done) /* set if not done yet */ 5058 int *done) /* set if not done yet */
5055 { 5059 {
5056 xfs_btree_cur_t *cur; /* bmap btree cursor */ 5060 xfs_btree_cur_t *cur; /* bmap btree cursor */
5057 xfs_bmbt_irec_t del; /* extent being deleted */ 5061 xfs_bmbt_irec_t del; /* extent being deleted */
5058 int eof; /* is deleting at eof */ 5062 int eof; /* is deleting at eof */
5059 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 5063 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
5060 int error; /* error return value */ 5064 int error; /* error return value */
5061 xfs_extnum_t extno; /* extent number in list */ 5065 xfs_extnum_t extno; /* extent number in list */
5062 xfs_bmbt_irec_t got; /* current extent record */ 5066 xfs_bmbt_irec_t got; /* current extent record */
5063 xfs_ifork_t *ifp; /* inode fork pointer */ 5067 xfs_ifork_t *ifp; /* inode fork pointer */
5064 int isrt; /* freeing in rt area */ 5068 int isrt; /* freeing in rt area */
5065 xfs_extnum_t lastx; /* last extent index used */ 5069 xfs_extnum_t lastx; /* last extent index used */
5066 int logflags; /* transaction logging flags */ 5070 int logflags; /* transaction logging flags */
5067 xfs_extlen_t mod; /* rt extent offset */ 5071 xfs_extlen_t mod; /* rt extent offset */
5068 xfs_mount_t *mp; /* mount structure */ 5072 xfs_mount_t *mp; /* mount structure */
5069 xfs_extnum_t nextents; /* number of file extents */ 5073 xfs_extnum_t nextents; /* number of file extents */
5070 xfs_bmbt_irec_t prev; /* previous extent record */ 5074 xfs_bmbt_irec_t prev; /* previous extent record */
5071 xfs_fileoff_t start; /* first file offset deleted */ 5075 xfs_fileoff_t start; /* first file offset deleted */
5072 int tmp_logflags; /* partial logging flags */ 5076 int tmp_logflags; /* partial logging flags */
5073 int wasdel; /* was a delayed alloc extent */ 5077 int wasdel; /* was a delayed alloc extent */
5074 int whichfork; /* data or attribute fork */ 5078 int whichfork; /* data or attribute fork */
5075 xfs_fsblock_t sum; 5079 xfs_fsblock_t sum;
5076 5080
5077 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); 5081 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5078 5082
5079 whichfork = (flags & XFS_BMAPI_ATTRFORK) ? 5083 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5080 XFS_ATTR_FORK : XFS_DATA_FORK; 5084 XFS_ATTR_FORK : XFS_DATA_FORK;
5081 ifp = XFS_IFORK_PTR(ip, whichfork); 5085 ifp = XFS_IFORK_PTR(ip, whichfork);
5082 if (unlikely( 5086 if (unlikely(
5083 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5087 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5084 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5088 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5085 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5089 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5086 ip->i_mount); 5090 ip->i_mount);
5087 return XFS_ERROR(EFSCORRUPTED); 5091 return XFS_ERROR(EFSCORRUPTED);
5088 } 5092 }
5089 mp = ip->i_mount; 5093 mp = ip->i_mount;
5090 if (XFS_FORCED_SHUTDOWN(mp)) 5094 if (XFS_FORCED_SHUTDOWN(mp))
5091 return XFS_ERROR(EIO); 5095 return XFS_ERROR(EIO);
5092 5096
5093 ASSERT(len > 0); 5097 ASSERT(len > 0);
5094 ASSERT(nexts >= 0); 5098 ASSERT(nexts >= 0);
5095 5099
5096 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5100 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5097 (error = xfs_iread_extents(tp, ip, whichfork))) 5101 (error = xfs_iread_extents(tp, ip, whichfork)))
5098 return error; 5102 return error;
5099 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 5103 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5100 if (nextents == 0) { 5104 if (nextents == 0) {
5101 *done = 1; 5105 *done = 1;
5102 return 0; 5106 return 0;
5103 } 5107 }
5104 XFS_STATS_INC(xs_blk_unmap); 5108 XFS_STATS_INC(xs_blk_unmap);
5105 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5109 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5106 start = bno; 5110 start = bno;
5107 bno = start + len - 1; 5111 bno = start + len - 1;
5108 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, 5112 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5109 &prev); 5113 &prev);
5110 5114
5111 /* 5115 /*
5112 * Check to see if the given block number is past the end of the 5116 * Check to see if the given block number is past the end of the
5113 * file, back up to the last block if so... 5117 * file, back up to the last block if so...
5114 */ 5118 */
5115 if (eof) { 5119 if (eof) {
5116 ep = xfs_iext_get_ext(ifp, --lastx); 5120 ep = xfs_iext_get_ext(ifp, --lastx);
5117 xfs_bmbt_get_all(ep, &got); 5121 xfs_bmbt_get_all(ep, &got);
5118 bno = got.br_startoff + got.br_blockcount - 1; 5122 bno = got.br_startoff + got.br_blockcount - 1;
5119 } 5123 }
5120 logflags = 0; 5124 logflags = 0;
5121 if (ifp->if_flags & XFS_IFBROOT) { 5125 if (ifp->if_flags & XFS_IFBROOT) {
5122 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5126 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5123 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5127 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5124 cur->bc_private.b.firstblock = *firstblock; 5128 cur->bc_private.b.firstblock = *firstblock;
5125 cur->bc_private.b.flist = flist; 5129 cur->bc_private.b.flist = flist;
5126 cur->bc_private.b.flags = 0; 5130 cur->bc_private.b.flags = 0;
5127 } else 5131 } else
5128 cur = NULL; 5132 cur = NULL;
5129 5133
5130 if (isrt) { 5134 if (isrt) {
5131 /* 5135 /*
5132 * Synchronize by locking the bitmap inode. 5136 * Synchronize by locking the bitmap inode.
5133 */ 5137 */
5134 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); 5138 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
5135 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5139 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5136 } 5140 }
5137 5141
5138 extno = 0; 5142 extno = 0;
5139 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 && 5143 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5140 (nexts == 0 || extno < nexts)) { 5144 (nexts == 0 || extno < nexts)) {
5141 /* 5145 /*
5142 * Is the found extent after a hole in which bno lives? 5146 * Is the found extent after a hole in which bno lives?
5143 * Just back up to the previous extent, if so. 5147 * Just back up to the previous extent, if so.
5144 */ 5148 */
5145 if (got.br_startoff > bno) { 5149 if (got.br_startoff > bno) {
5146 if (--lastx < 0) 5150 if (--lastx < 0)
5147 break; 5151 break;
5148 ep = xfs_iext_get_ext(ifp, lastx); 5152 ep = xfs_iext_get_ext(ifp, lastx);
5149 xfs_bmbt_get_all(ep, &got); 5153 xfs_bmbt_get_all(ep, &got);
5150 } 5154 }
5151 /* 5155 /*
5152 * Is the last block of this extent before the range 5156 * Is the last block of this extent before the range
5153 * we're supposed to delete? If so, we're done. 5157 * we're supposed to delete? If so, we're done.
5154 */ 5158 */
5155 bno = XFS_FILEOFF_MIN(bno, 5159 bno = XFS_FILEOFF_MIN(bno,
5156 got.br_startoff + got.br_blockcount - 1); 5160 got.br_startoff + got.br_blockcount - 1);
5157 if (bno < start) 5161 if (bno < start)
5158 break; 5162 break;
5159 /* 5163 /*
5160 * Then deal with the (possibly delayed) allocated space 5164 * Then deal with the (possibly delayed) allocated space
5161 * we found. 5165 * we found.
5162 */ 5166 */
5163 ASSERT(ep != NULL); 5167 ASSERT(ep != NULL);
5164 del = got; 5168 del = got;
5165 wasdel = isnullstartblock(del.br_startblock); 5169 wasdel = isnullstartblock(del.br_startblock);
5166 if (got.br_startoff < start) { 5170 if (got.br_startoff < start) {
5167 del.br_startoff = start; 5171 del.br_startoff = start;
5168 del.br_blockcount -= start - got.br_startoff; 5172 del.br_blockcount -= start - got.br_startoff;
5169 if (!wasdel) 5173 if (!wasdel)
5170 del.br_startblock += start - got.br_startoff; 5174 del.br_startblock += start - got.br_startoff;
5171 } 5175 }
5172 if (del.br_startoff + del.br_blockcount > bno + 1) 5176 if (del.br_startoff + del.br_blockcount > bno + 1)
5173 del.br_blockcount = bno + 1 - del.br_startoff; 5177 del.br_blockcount = bno + 1 - del.br_startoff;
5174 sum = del.br_startblock + del.br_blockcount; 5178 sum = del.br_startblock + del.br_blockcount;
5175 if (isrt && 5179 if (isrt &&
5176 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) { 5180 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5177 /* 5181 /*
5178 * Realtime extent not lined up at the end. 5182 * Realtime extent not lined up at the end.
5179 * The extent could have been split into written 5183 * The extent could have been split into written
5180 * and unwritten pieces, or we could just be 5184 * and unwritten pieces, or we could just be
5181 * unmapping part of it. But we can't really 5185 * unmapping part of it. But we can't really
5182 * get rid of part of a realtime extent. 5186 * get rid of part of a realtime extent.
5183 */ 5187 */
5184 if (del.br_state == XFS_EXT_UNWRITTEN || 5188 if (del.br_state == XFS_EXT_UNWRITTEN ||
5185 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5189 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5186 /* 5190 /*
5187 * This piece is unwritten, or we're not 5191 * This piece is unwritten, or we're not
5188 * using unwritten extents. Skip over it. 5192 * using unwritten extents. Skip over it.
5189 */ 5193 */
5190 ASSERT(bno >= mod); 5194 ASSERT(bno >= mod);
5191 bno -= mod > del.br_blockcount ? 5195 bno -= mod > del.br_blockcount ?
5192 del.br_blockcount : mod; 5196 del.br_blockcount : mod;
5193 if (bno < got.br_startoff) { 5197 if (bno < got.br_startoff) {
5194 if (--lastx >= 0) 5198 if (--lastx >= 0)
5195 xfs_bmbt_get_all(xfs_iext_get_ext( 5199 xfs_bmbt_get_all(xfs_iext_get_ext(
5196 ifp, lastx), &got); 5200 ifp, lastx), &got);
5197 } 5201 }
5198 continue; 5202 continue;
5199 } 5203 }
5200 /* 5204 /*
5201 * It's written, turn it unwritten. 5205 * It's written, turn it unwritten.
5202 * This is better than zeroing it. 5206 * This is better than zeroing it.
5203 */ 5207 */
5204 ASSERT(del.br_state == XFS_EXT_NORM); 5208 ASSERT(del.br_state == XFS_EXT_NORM);
5205 ASSERT(xfs_trans_get_block_res(tp) > 0); 5209 ASSERT(xfs_trans_get_block_res(tp) > 0);
5206 /* 5210 /*
5207 * If this spans a realtime extent boundary, 5211 * If this spans a realtime extent boundary,
5208 * chop it back to the start of the one we end at. 5212 * chop it back to the start of the one we end at.
5209 */ 5213 */
5210 if (del.br_blockcount > mod) { 5214 if (del.br_blockcount > mod) {
5211 del.br_startoff += del.br_blockcount - mod; 5215 del.br_startoff += del.br_blockcount - mod;
5212 del.br_startblock += del.br_blockcount - mod; 5216 del.br_startblock += del.br_blockcount - mod;
5213 del.br_blockcount = mod; 5217 del.br_blockcount = mod;
5214 } 5218 }
5215 del.br_state = XFS_EXT_UNWRITTEN; 5219 del.br_state = XFS_EXT_UNWRITTEN;
5216 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5220 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5217 &lastx, &cur, &del, firstblock, flist, 5221 &lastx, &cur, &del, firstblock, flist,
5218 &logflags); 5222 &logflags);
5219 if (error) 5223 if (error)
5220 goto error0; 5224 goto error0;
5221 goto nodelete; 5225 goto nodelete;
5222 } 5226 }
5223 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) { 5227 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5224 /* 5228 /*
5225 * Realtime extent is lined up at the end but not 5229 * Realtime extent is lined up at the end but not
5226 * at the front. We'll get rid of full extents if 5230 * at the front. We'll get rid of full extents if
5227 * we can. 5231 * we can.
5228 */ 5232 */
5229 mod = mp->m_sb.sb_rextsize - mod; 5233 mod = mp->m_sb.sb_rextsize - mod;
5230 if (del.br_blockcount > mod) { 5234 if (del.br_blockcount > mod) {
5231 del.br_blockcount -= mod; 5235 del.br_blockcount -= mod;
5232 del.br_startoff += mod; 5236 del.br_startoff += mod;
5233 del.br_startblock += mod; 5237 del.br_startblock += mod;
5234 } else if ((del.br_startoff == start && 5238 } else if ((del.br_startoff == start &&
5235 (del.br_state == XFS_EXT_UNWRITTEN || 5239 (del.br_state == XFS_EXT_UNWRITTEN ||
5236 xfs_trans_get_block_res(tp) == 0)) || 5240 xfs_trans_get_block_res(tp) == 0)) ||
5237 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5241 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5238 /* 5242 /*
5239 * Can't make it unwritten. There isn't 5243 * Can't make it unwritten. There isn't
5240 * a full extent here so just skip it. 5244 * a full extent here so just skip it.
5241 */ 5245 */
5242 ASSERT(bno >= del.br_blockcount); 5246 ASSERT(bno >= del.br_blockcount);
5243 bno -= del.br_blockcount; 5247 bno -= del.br_blockcount;
5244 if (got.br_startoff > bno) { 5248 if (got.br_startoff > bno) {
5245 if (--lastx >= 0) { 5249 if (--lastx >= 0) {
5246 ep = xfs_iext_get_ext(ifp, 5250 ep = xfs_iext_get_ext(ifp,
5247 lastx); 5251 lastx);
5248 xfs_bmbt_get_all(ep, &got); 5252 xfs_bmbt_get_all(ep, &got);
5249 } 5253 }
5250 } 5254 }
5251 continue; 5255 continue;
5252 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5256 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5253 /* 5257 /*
5254 * This one is already unwritten. 5258 * This one is already unwritten.
5255 * It must have a written left neighbor. 5259 * It must have a written left neighbor.
5256 * Unwrite the killed part of that one and 5260 * Unwrite the killed part of that one and
5257 * try again. 5261 * try again.
5258 */ 5262 */
5259 ASSERT(lastx > 0); 5263 ASSERT(lastx > 0);
5260 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, 5264 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5261 lastx - 1), &prev); 5265 lastx - 1), &prev);
5262 ASSERT(prev.br_state == XFS_EXT_NORM); 5266 ASSERT(prev.br_state == XFS_EXT_NORM);
5263 ASSERT(!isnullstartblock(prev.br_startblock)); 5267 ASSERT(!isnullstartblock(prev.br_startblock));
5264 ASSERT(del.br_startblock == 5268 ASSERT(del.br_startblock ==
5265 prev.br_startblock + prev.br_blockcount); 5269 prev.br_startblock + prev.br_blockcount);
5266 if (prev.br_startoff < start) { 5270 if (prev.br_startoff < start) {
5267 mod = start - prev.br_startoff; 5271 mod = start - prev.br_startoff;
5268 prev.br_blockcount -= mod; 5272 prev.br_blockcount -= mod;
5269 prev.br_startblock += mod; 5273 prev.br_startblock += mod;
5270 prev.br_startoff = start; 5274 prev.br_startoff = start;
5271 } 5275 }
5272 prev.br_state = XFS_EXT_UNWRITTEN; 5276 prev.br_state = XFS_EXT_UNWRITTEN;
5273 lastx--; 5277 lastx--;
5274 error = xfs_bmap_add_extent_unwritten_real(tp, 5278 error = xfs_bmap_add_extent_unwritten_real(tp,
5275 ip, &lastx, &cur, &prev, 5279 ip, &lastx, &cur, &prev,
5276 firstblock, flist, &logflags); 5280 firstblock, flist, &logflags);
5277 if (error) 5281 if (error)
5278 goto error0; 5282 goto error0;
5279 goto nodelete; 5283 goto nodelete;
5280 } else { 5284 } else {
5281 ASSERT(del.br_state == XFS_EXT_NORM); 5285 ASSERT(del.br_state == XFS_EXT_NORM);
5282 del.br_state = XFS_EXT_UNWRITTEN; 5286 del.br_state = XFS_EXT_UNWRITTEN;
5283 error = xfs_bmap_add_extent_unwritten_real(tp, 5287 error = xfs_bmap_add_extent_unwritten_real(tp,
5284 ip, &lastx, &cur, &del, 5288 ip, &lastx, &cur, &del,
5285 firstblock, flist, &logflags); 5289 firstblock, flist, &logflags);
5286 if (error) 5290 if (error)
5287 goto error0; 5291 goto error0;
5288 goto nodelete; 5292 goto nodelete;
5289 } 5293 }
5290 } 5294 }
5291 if (wasdel) { 5295 if (wasdel) {
5292 ASSERT(startblockval(del.br_startblock) > 0); 5296 ASSERT(startblockval(del.br_startblock) > 0);
5293 /* Update realtime/data freespace, unreserve quota */ 5297 /* Update realtime/data freespace, unreserve quota */
5294 if (isrt) { 5298 if (isrt) {
5295 xfs_filblks_t rtexts; 5299 xfs_filblks_t rtexts;
5296 5300
5297 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); 5301 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5298 do_div(rtexts, mp->m_sb.sb_rextsize); 5302 do_div(rtexts, mp->m_sb.sb_rextsize);
5299 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, 5303 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5300 (int64_t)rtexts, 0); 5304 (int64_t)rtexts, 0);
5301 (void)xfs_trans_reserve_quota_nblks(NULL, 5305 (void)xfs_trans_reserve_quota_nblks(NULL,
5302 ip, -((long)del.br_blockcount), 0, 5306 ip, -((long)del.br_blockcount), 0,
5303 XFS_QMOPT_RES_RTBLKS); 5307 XFS_QMOPT_RES_RTBLKS);
5304 } else { 5308 } else {
5305 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, 5309 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
5306 (int64_t)del.br_blockcount, 0); 5310 (int64_t)del.br_blockcount, 0);
5307 (void)xfs_trans_reserve_quota_nblks(NULL, 5311 (void)xfs_trans_reserve_quota_nblks(NULL,
5308 ip, -((long)del.br_blockcount), 0, 5312 ip, -((long)del.br_blockcount), 0,
5309 XFS_QMOPT_RES_REGBLKS); 5313 XFS_QMOPT_RES_REGBLKS);
5310 } 5314 }
5311 ip->i_delayed_blks -= del.br_blockcount; 5315 ip->i_delayed_blks -= del.br_blockcount;
5312 if (cur) 5316 if (cur)
5313 cur->bc_private.b.flags |= 5317 cur->bc_private.b.flags |=
5314 XFS_BTCUR_BPRV_WASDEL; 5318 XFS_BTCUR_BPRV_WASDEL;
5315 } else if (cur) 5319 } else if (cur)
5316 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; 5320 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5317 /* 5321 /*
5318 * If it's the case where the directory code is running 5322 * If it's the case where the directory code is running
5319 * with no block reservation, and the deleted block is in 5323 * with no block reservation, and the deleted block is in
5320 * the middle of its extent, and the resulting insert 5324 * the middle of its extent, and the resulting insert
5321 * of an extent would cause transformation to btree format, 5325 * of an extent would cause transformation to btree format,
5322 * then reject it. The calling code will then swap 5326 * then reject it. The calling code will then swap
5323 * blocks around instead. 5327 * blocks around instead.
5324 * We have to do this now, rather than waiting for the 5328 * We have to do this now, rather than waiting for the
5325 * conversion to btree format, since the transaction 5329 * conversion to btree format, since the transaction
5326 * will be dirty. 5330 * will be dirty.
5327 */ 5331 */
5328 if (!wasdel && xfs_trans_get_block_res(tp) == 0 && 5332 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5329 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 5333 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5330 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */ 5334 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5331 XFS_IFORK_MAXEXT(ip, whichfork) && 5335 XFS_IFORK_MAXEXT(ip, whichfork) &&
5332 del.br_startoff > got.br_startoff && 5336 del.br_startoff > got.br_startoff &&
5333 del.br_startoff + del.br_blockcount < 5337 del.br_startoff + del.br_blockcount <
5334 got.br_startoff + got.br_blockcount) { 5338 got.br_startoff + got.br_blockcount) {
5335 error = XFS_ERROR(ENOSPC); 5339 error = XFS_ERROR(ENOSPC);
5336 goto error0; 5340 goto error0;
5337 } 5341 }
5338 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, 5342 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
5339 &tmp_logflags, whichfork); 5343 &tmp_logflags, whichfork);
5340 logflags |= tmp_logflags; 5344 logflags |= tmp_logflags;
5341 if (error) 5345 if (error)
5342 goto error0; 5346 goto error0;
5343 bno = del.br_startoff - 1; 5347 bno = del.br_startoff - 1;
5344 nodelete: 5348 nodelete:
5345 /* 5349 /*
5346 * If not done go on to the next (previous) record. 5350 * If not done go on to the next (previous) record.
5347 */ 5351 */
5348 if (bno != (xfs_fileoff_t)-1 && bno >= start) { 5352 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5349 if (lastx >= 0) { 5353 if (lastx >= 0) {
5350 ep = xfs_iext_get_ext(ifp, lastx); 5354 ep = xfs_iext_get_ext(ifp, lastx);
5351 if (xfs_bmbt_get_startoff(ep) > bno) { 5355 if (xfs_bmbt_get_startoff(ep) > bno) {
5352 if (--lastx >= 0) 5356 if (--lastx >= 0)
5353 ep = xfs_iext_get_ext(ifp, 5357 ep = xfs_iext_get_ext(ifp,
5354 lastx); 5358 lastx);
5355 } 5359 }
5356 xfs_bmbt_get_all(ep, &got); 5360 xfs_bmbt_get_all(ep, &got);
5357 } 5361 }
5358 extno++; 5362 extno++;
5359 } 5363 }
5360 } 5364 }
5361 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0; 5365 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5362 5366
5363 /* 5367 /*
5364 * Convert to a btree if necessary. 5368 * Convert to a btree if necessary.
5365 */ 5369 */
5366 if (xfs_bmap_needs_btree(ip, whichfork)) { 5370 if (xfs_bmap_needs_btree(ip, whichfork)) {
5367 ASSERT(cur == NULL); 5371 ASSERT(cur == NULL);
5368 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, 5372 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5369 &cur, 0, &tmp_logflags, whichfork); 5373 &cur, 0, &tmp_logflags, whichfork);
5370 logflags |= tmp_logflags; 5374 logflags |= tmp_logflags;
5371 if (error) 5375 if (error)
5372 goto error0; 5376 goto error0;
5373 } 5377 }
5374 /* 5378 /*
5375 * transform from btree to extents, give it cur 5379 * transform from btree to extents, give it cur
5376 */ 5380 */
5377 else if (xfs_bmap_wants_extents(ip, whichfork)) { 5381 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5378 ASSERT(cur != NULL); 5382 ASSERT(cur != NULL);
5379 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, 5383 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5380 whichfork); 5384 whichfork);
5381 logflags |= tmp_logflags; 5385 logflags |= tmp_logflags;
5382 if (error) 5386 if (error)
5383 goto error0; 5387 goto error0;
5384 } 5388 }
5385 /* 5389 /*
5386 * transform from extents to local? 5390 * transform from extents to local?
5387 */ 5391 */
5388 error = 0; 5392 error = 0;
5389 error0: 5393 error0:
5390 /* 5394 /*
5391 * Log everything. Do this after conversion, there's no point in 5395 * Log everything. Do this after conversion, there's no point in
5392 * logging the extent records if we've converted to btree format. 5396 * logging the extent records if we've converted to btree format.
5393 */ 5397 */
5394 if ((logflags & xfs_ilog_fext(whichfork)) && 5398 if ((logflags & xfs_ilog_fext(whichfork)) &&
5395 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5399 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5396 logflags &= ~xfs_ilog_fext(whichfork); 5400 logflags &= ~xfs_ilog_fext(whichfork);
5397 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5401 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5398 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5402 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5399 logflags &= ~xfs_ilog_fbroot(whichfork); 5403 logflags &= ~xfs_ilog_fbroot(whichfork);
5400 /* 5404 /*
5401 * Log inode even in the error case, if the transaction 5405 * Log inode even in the error case, if the transaction
5402 * is dirty we'll need to shut down the filesystem. 5406 * is dirty we'll need to shut down the filesystem.
5403 */ 5407 */
5404 if (logflags) 5408 if (logflags)
5405 xfs_trans_log_inode(tp, ip, logflags); 5409 xfs_trans_log_inode(tp, ip, logflags);
5406 if (cur) { 5410 if (cur) {
5407 if (!error) { 5411 if (!error) {
5408 *firstblock = cur->bc_private.b.firstblock; 5412 *firstblock = cur->bc_private.b.firstblock;
5409 cur->bc_private.b.allocated = 0; 5413 cur->bc_private.b.allocated = 0;
5410 } 5414 }
5411 xfs_btree_del_cursor(cur, 5415 xfs_btree_del_cursor(cur,
5412 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 5416 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5413 } 5417 }
5414 return error; 5418 return error;
5415 } 5419 }
5416 5420
5417 /* 5421 /*
5418 * returns 1 for success, 0 if we failed to map the extent. 5422 * returns 1 for success, 0 if we failed to map the extent.
5419 */ 5423 */
5420 STATIC int 5424 STATIC int
5421 xfs_getbmapx_fix_eof_hole( 5425 xfs_getbmapx_fix_eof_hole(
5422 xfs_inode_t *ip, /* xfs incore inode pointer */ 5426 xfs_inode_t *ip, /* xfs incore inode pointer */
5423 struct getbmapx *out, /* output structure */ 5427 struct getbmapx *out, /* output structure */
5424 int prealloced, /* this is a file with 5428 int prealloced, /* this is a file with
5425 * preallocated data space */ 5429 * preallocated data space */
5426 __int64_t end, /* last block requested */ 5430 __int64_t end, /* last block requested */
5427 xfs_fsblock_t startblock) 5431 xfs_fsblock_t startblock)
5428 { 5432 {
5429 __int64_t fixlen; 5433 __int64_t fixlen;
5430 xfs_mount_t *mp; /* file system mount point */ 5434 xfs_mount_t *mp; /* file system mount point */
5431 xfs_ifork_t *ifp; /* inode fork pointer */ 5435 xfs_ifork_t *ifp; /* inode fork pointer */
5432 xfs_extnum_t lastx; /* last extent pointer */ 5436 xfs_extnum_t lastx; /* last extent pointer */
5433 xfs_fileoff_t fileblock; 5437 xfs_fileoff_t fileblock;
5434 5438
5435 if (startblock == HOLESTARTBLOCK) { 5439 if (startblock == HOLESTARTBLOCK) {
5436 mp = ip->i_mount; 5440 mp = ip->i_mount;
5437 out->bmv_block = -1; 5441 out->bmv_block = -1;
5438 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip))); 5442 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
5439 fixlen -= out->bmv_offset; 5443 fixlen -= out->bmv_offset;
5440 if (prealloced && out->bmv_offset + out->bmv_length == end) { 5444 if (prealloced && out->bmv_offset + out->bmv_length == end) {
5441 /* Came to hole at EOF. Trim it. */ 5445 /* Came to hole at EOF. Trim it. */
5442 if (fixlen <= 0) 5446 if (fixlen <= 0)
5443 return 0; 5447 return 0;
5444 out->bmv_length = fixlen; 5448 out->bmv_length = fixlen;
5445 } 5449 }
5446 } else { 5450 } else {
5447 if (startblock == DELAYSTARTBLOCK) 5451 if (startblock == DELAYSTARTBLOCK)
5448 out->bmv_block = -2; 5452 out->bmv_block = -2;
5449 else 5453 else
5450 out->bmv_block = xfs_fsb_to_db(ip, startblock); 5454 out->bmv_block = xfs_fsb_to_db(ip, startblock);
5451 fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset); 5455 fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
5452 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 5456 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
5453 if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) && 5457 if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
5454 (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1)) 5458 (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
5455 out->bmv_oflags |= BMV_OF_LAST; 5459 out->bmv_oflags |= BMV_OF_LAST;
5456 } 5460 }
5457 5461
5458 return 1; 5462 return 1;
5459 } 5463 }
5460 5464
5461 /* 5465 /*
5462 * Get inode's extents as described in bmv, and format for output. 5466 * Get inode's extents as described in bmv, and format for output.
5463 * Calls formatter to fill the user's buffer until all extents 5467 * Calls formatter to fill the user's buffer until all extents
5464 * are mapped, until the passed-in bmv->bmv_count slots have 5468 * are mapped, until the passed-in bmv->bmv_count slots have
5465 * been filled, or until the formatter short-circuits the loop, 5469 * been filled, or until the formatter short-circuits the loop,
5466 * if it is tracking filled-in extents on its own. 5470 * if it is tracking filled-in extents on its own.
5467 */ 5471 */
5468 int /* error code */ 5472 int /* error code */
5469 xfs_getbmap( 5473 xfs_getbmap(
5470 xfs_inode_t *ip, 5474 xfs_inode_t *ip,
5471 struct getbmapx *bmv, /* user bmap structure */ 5475 struct getbmapx *bmv, /* user bmap structure */
5472 xfs_bmap_format_t formatter, /* format to user */ 5476 xfs_bmap_format_t formatter, /* format to user */
5473 void *arg) /* formatter arg */ 5477 void *arg) /* formatter arg */
5474 { 5478 {
5475 __int64_t bmvend; /* last block requested */ 5479 __int64_t bmvend; /* last block requested */
5476 int error = 0; /* return value */ 5480 int error = 0; /* return value */
5477 __int64_t fixlen; /* length for -1 case */ 5481 __int64_t fixlen; /* length for -1 case */
5478 int i; /* extent number */ 5482 int i; /* extent number */
5479 int lock; /* lock state */ 5483 int lock; /* lock state */
5480 xfs_bmbt_irec_t *map; /* buffer for user's data */ 5484 xfs_bmbt_irec_t *map; /* buffer for user's data */
5481 xfs_mount_t *mp; /* file system mount point */ 5485 xfs_mount_t *mp; /* file system mount point */
5482 int nex; /* # of user extents can do */ 5486 int nex; /* # of user extents can do */
5483 int nexleft; /* # of user extents left */ 5487 int nexleft; /* # of user extents left */
5484 int subnex; /* # of bmapi's can do */ 5488 int subnex; /* # of bmapi's can do */
5485 int nmap; /* number of map entries */ 5489 int nmap; /* number of map entries */
5486 struct getbmapx *out; /* output structure */ 5490 struct getbmapx *out; /* output structure */
5487 int whichfork; /* data or attr fork */ 5491 int whichfork; /* data or attr fork */
5488 int prealloced; /* this is a file with 5492 int prealloced; /* this is a file with
5489 * preallocated data space */ 5493 * preallocated data space */
5490 int iflags; /* interface flags */ 5494 int iflags; /* interface flags */
5491 int bmapi_flags; /* flags for xfs_bmapi */ 5495 int bmapi_flags; /* flags for xfs_bmapi */
5492 int cur_ext = 0; 5496 int cur_ext = 0;
5493 5497
5494 mp = ip->i_mount; 5498 mp = ip->i_mount;
5495 iflags = bmv->bmv_iflags; 5499 iflags = bmv->bmv_iflags;
5496 whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; 5500 whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
5497 5501
5498 if (whichfork == XFS_ATTR_FORK) { 5502 if (whichfork == XFS_ATTR_FORK) {
5499 if (XFS_IFORK_Q(ip)) { 5503 if (XFS_IFORK_Q(ip)) {
5500 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && 5504 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
5501 ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE && 5505 ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
5502 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) 5506 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
5503 return XFS_ERROR(EINVAL); 5507 return XFS_ERROR(EINVAL);
5504 } else if (unlikely( 5508 } else if (unlikely(
5505 ip->i_d.di_aformat != 0 && 5509 ip->i_d.di_aformat != 0 &&
5506 ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) { 5510 ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
5507 XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW, 5511 XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
5508 ip->i_mount); 5512 ip->i_mount);
5509 return XFS_ERROR(EFSCORRUPTED); 5513 return XFS_ERROR(EFSCORRUPTED);
5510 } 5514 }
5511 5515
5512 prealloced = 0; 5516 prealloced = 0;
5513 fixlen = 1LL << 32; 5517 fixlen = 1LL << 32;
5514 } else { 5518 } else {
5515 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && 5519 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5516 ip->i_d.di_format != XFS_DINODE_FMT_BTREE && 5520 ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5517 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5521 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5518 return XFS_ERROR(EINVAL); 5522 return XFS_ERROR(EINVAL);
5519 5523
5520 if (xfs_get_extsz_hint(ip) || 5524 if (xfs_get_extsz_hint(ip) ||
5521 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ 5525 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5522 prealloced = 1; 5526 prealloced = 1;
5523 fixlen = mp->m_super->s_maxbytes; 5527 fixlen = mp->m_super->s_maxbytes;
5524 } else { 5528 } else {
5525 prealloced = 0; 5529 prealloced = 0;
5526 fixlen = XFS_ISIZE(ip); 5530 fixlen = XFS_ISIZE(ip);
5527 } 5531 }
5528 } 5532 }
5529 5533
5530 if (bmv->bmv_length == -1) { 5534 if (bmv->bmv_length == -1) {
5531 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen)); 5535 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
5532 bmv->bmv_length = 5536 bmv->bmv_length =
5533 max_t(__int64_t, fixlen - bmv->bmv_offset, 0); 5537 max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
5534 } else if (bmv->bmv_length == 0) { 5538 } else if (bmv->bmv_length == 0) {
5535 bmv->bmv_entries = 0; 5539 bmv->bmv_entries = 0;
5536 return 0; 5540 return 0;
5537 } else if (bmv->bmv_length < 0) { 5541 } else if (bmv->bmv_length < 0) {
5538 return XFS_ERROR(EINVAL); 5542 return XFS_ERROR(EINVAL);
5539 } 5543 }
5540 5544
5541 nex = bmv->bmv_count - 1; 5545 nex = bmv->bmv_count - 1;
5542 if (nex <= 0) 5546 if (nex <= 0)
5543 return XFS_ERROR(EINVAL); 5547 return XFS_ERROR(EINVAL);
5544 bmvend = bmv->bmv_offset + bmv->bmv_length; 5548 bmvend = bmv->bmv_offset + bmv->bmv_length;
5545 5549
5546 5550
5547 if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx)) 5551 if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
5548 return XFS_ERROR(ENOMEM); 5552 return XFS_ERROR(ENOMEM);
5549 out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL); 5553 out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
5550 if (!out) { 5554 if (!out) {
5551 out = kmem_zalloc_large(bmv->bmv_count * 5555 out = kmem_zalloc_large(bmv->bmv_count *
5552 sizeof(struct getbmapx)); 5556 sizeof(struct getbmapx));
5553 if (!out) 5557 if (!out)
5554 return XFS_ERROR(ENOMEM); 5558 return XFS_ERROR(ENOMEM);
5555 } 5559 }
5556 5560
5557 xfs_ilock(ip, XFS_IOLOCK_SHARED); 5561 xfs_ilock(ip, XFS_IOLOCK_SHARED);
5558 if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) { 5562 if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
5559 if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) { 5563 if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
5560 error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF); 5564 error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
5561 if (error) 5565 if (error)
5562 goto out_unlock_iolock; 5566 goto out_unlock_iolock;
5563 } 5567 }
5564 /* 5568 /*
5565 * even after flushing the inode, there can still be delalloc 5569 * even after flushing the inode, there can still be delalloc
5566 * blocks on the inode beyond EOF due to speculative 5570 * blocks on the inode beyond EOF due to speculative
5567 * preallocation. These are not removed until the release 5571 * preallocation. These are not removed until the release
5568 * function is called or the inode is inactivated. Hence we 5572 * function is called or the inode is inactivated. Hence we
5569 * cannot assert here that ip->i_delayed_blks == 0. 5573 * cannot assert here that ip->i_delayed_blks == 0.
5570 */ 5574 */
5571 } 5575 }
5572 5576
5573 lock = xfs_ilock_map_shared(ip); 5577 lock = xfs_ilock_map_shared(ip);
5574 5578
5575 /* 5579 /*
5576 * Don't let nex be bigger than the number of extents 5580 * Don't let nex be bigger than the number of extents
5577 * we can have assuming alternating holes and real extents. 5581 * we can have assuming alternating holes and real extents.
5578 */ 5582 */
5579 if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) 5583 if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
5580 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; 5584 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
5581 5585
5582 bmapi_flags = xfs_bmapi_aflag(whichfork); 5586 bmapi_flags = xfs_bmapi_aflag(whichfork);
5583 if (!(iflags & BMV_IF_PREALLOC)) 5587 if (!(iflags & BMV_IF_PREALLOC))
5584 bmapi_flags |= XFS_BMAPI_IGSTATE; 5588 bmapi_flags |= XFS_BMAPI_IGSTATE;
5585 5589
5586 /* 5590 /*
5587 * Allocate enough space to handle "subnex" maps at a time. 5591 * Allocate enough space to handle "subnex" maps at a time.
5588 */ 5592 */
5589 error = ENOMEM; 5593 error = ENOMEM;
5590 subnex = 16; 5594 subnex = 16;
5591 map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS); 5595 map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
5592 if (!map) 5596 if (!map)
5593 goto out_unlock_ilock; 5597 goto out_unlock_ilock;
5594 5598
5595 bmv->bmv_entries = 0; 5599 bmv->bmv_entries = 0;
5596 5600
5597 if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 && 5601 if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
5598 (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) { 5602 (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
5599 error = 0; 5603 error = 0;
5600 goto out_free_map; 5604 goto out_free_map;
5601 } 5605 }
5602 5606
5603 nexleft = nex; 5607 nexleft = nex;
5604 5608
5605 do { 5609 do {
5606 nmap = (nexleft > subnex) ? subnex : nexleft; 5610 nmap = (nexleft > subnex) ? subnex : nexleft;
5607 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), 5611 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5608 XFS_BB_TO_FSB(mp, bmv->bmv_length), 5612 XFS_BB_TO_FSB(mp, bmv->bmv_length),
5609 map, &nmap, bmapi_flags); 5613 map, &nmap, bmapi_flags);
5610 if (error) 5614 if (error)
5611 goto out_free_map; 5615 goto out_free_map;
5612 ASSERT(nmap <= subnex); 5616 ASSERT(nmap <= subnex);
5613 5617
5614 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) { 5618 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5615 out[cur_ext].bmv_oflags = 0; 5619 out[cur_ext].bmv_oflags = 0;
5616 if (map[i].br_state == XFS_EXT_UNWRITTEN) 5620 if (map[i].br_state == XFS_EXT_UNWRITTEN)
5617 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; 5621 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
5618 else if (map[i].br_startblock == DELAYSTARTBLOCK) 5622 else if (map[i].br_startblock == DELAYSTARTBLOCK)
5619 out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC; 5623 out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
5620 out[cur_ext].bmv_offset = 5624 out[cur_ext].bmv_offset =
5621 XFS_FSB_TO_BB(mp, map[i].br_startoff); 5625 XFS_FSB_TO_BB(mp, map[i].br_startoff);
5622 out[cur_ext].bmv_length = 5626 out[cur_ext].bmv_length =
5623 XFS_FSB_TO_BB(mp, map[i].br_blockcount); 5627 XFS_FSB_TO_BB(mp, map[i].br_blockcount);
5624 out[cur_ext].bmv_unused1 = 0; 5628 out[cur_ext].bmv_unused1 = 0;
5625 out[cur_ext].bmv_unused2 = 0; 5629 out[cur_ext].bmv_unused2 = 0;
5626 5630
5627 /* 5631 /*
5628 * delayed allocation extents that start beyond EOF can 5632 * delayed allocation extents that start beyond EOF can
5629 * occur due to speculative EOF allocation when the 5633 * occur due to speculative EOF allocation when the
5630 * delalloc extent is larger than the largest freespace 5634 * delalloc extent is larger than the largest freespace
5631 * extent at conversion time. These extents cannot be 5635 * extent at conversion time. These extents cannot be
5632 * converted by data writeback, so can exist here even 5636 * converted by data writeback, so can exist here even
5633 * if we are not supposed to be finding delalloc 5637 * if we are not supposed to be finding delalloc
5634 * extents. 5638 * extents.
5635 */ 5639 */
5636 if (map[i].br_startblock == DELAYSTARTBLOCK && 5640 if (map[i].br_startblock == DELAYSTARTBLOCK &&
5637 map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) 5641 map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
5638 ASSERT((iflags & BMV_IF_DELALLOC) != 0); 5642 ASSERT((iflags & BMV_IF_DELALLOC) != 0);
5639 5643
5640 if (map[i].br_startblock == HOLESTARTBLOCK && 5644 if (map[i].br_startblock == HOLESTARTBLOCK &&
5641 whichfork == XFS_ATTR_FORK) { 5645 whichfork == XFS_ATTR_FORK) {
5642 /* came to the end of attribute fork */ 5646 /* came to the end of attribute fork */
5643 out[cur_ext].bmv_oflags |= BMV_OF_LAST; 5647 out[cur_ext].bmv_oflags |= BMV_OF_LAST;
5644 goto out_free_map; 5648 goto out_free_map;
5645 } 5649 }
5646 5650
5647 if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext], 5651 if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
5648 prealloced, bmvend, 5652 prealloced, bmvend,
5649 map[i].br_startblock)) 5653 map[i].br_startblock))
5650 goto out_free_map; 5654 goto out_free_map;
5651 5655
5652 bmv->bmv_offset = 5656 bmv->bmv_offset =
5653 out[cur_ext].bmv_offset + 5657 out[cur_ext].bmv_offset +
5654 out[cur_ext].bmv_length; 5658 out[cur_ext].bmv_length;
5655 bmv->bmv_length = 5659 bmv->bmv_length =
5656 max_t(__int64_t, 0, bmvend - bmv->bmv_offset); 5660 max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
5657 5661
5658 /* 5662 /*
5659 * In case we don't want to return the hole, 5663 * In case we don't want to return the hole,
5660 * don't increase cur_ext so that we can reuse 5664 * don't increase cur_ext so that we can reuse
5661 * it in the next loop. 5665 * it in the next loop.
5662 */ 5666 */
5663 if ((iflags & BMV_IF_NO_HOLES) && 5667 if ((iflags & BMV_IF_NO_HOLES) &&
5664 map[i].br_startblock == HOLESTARTBLOCK) { 5668 map[i].br_startblock == HOLESTARTBLOCK) {
5665 memset(&out[cur_ext], 0, sizeof(out[cur_ext])); 5669 memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
5666 continue; 5670 continue;
5667 } 5671 }
5668 5672
5669 nexleft--; 5673 nexleft--;
5670 bmv->bmv_entries++; 5674 bmv->bmv_entries++;
5671 cur_ext++; 5675 cur_ext++;
5672 } 5676 }
5673 } while (nmap && nexleft && bmv->bmv_length); 5677 } while (nmap && nexleft && bmv->bmv_length);
5674 5678
5675 out_free_map: 5679 out_free_map:
5676 kmem_free(map); 5680 kmem_free(map);
5677 out_unlock_ilock: 5681 out_unlock_ilock:
5678 xfs_iunlock_map_shared(ip, lock); 5682 xfs_iunlock_map_shared(ip, lock);
5679 out_unlock_iolock: 5683 out_unlock_iolock:
5680 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 5684 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
5681 5685
5682 for (i = 0; i < cur_ext; i++) { 5686 for (i = 0; i < cur_ext; i++) {
5683 int full = 0; /* user array is full */ 5687 int full = 0; /* user array is full */
5684 5688
5685 /* format results & advance arg */ 5689 /* format results & advance arg */
5686 error = formatter(&arg, &out[i], &full); 5690 error = formatter(&arg, &out[i], &full);
5687 if (error || full) 5691 if (error || full)
5688 break; 5692 break;
5689 } 5693 }
5690 5694
5691 if (is_vmalloc_addr(out)) 5695 if (is_vmalloc_addr(out))
5692 kmem_free_large(out); 5696 kmem_free_large(out);
5693 else 5697 else
5694 kmem_free(out); 5698 kmem_free(out);
5695 return error; 5699 return error;
5696 } 5700 }
5697 5701
5698 #ifdef DEBUG 5702 #ifdef DEBUG
/*
 * Search for the buffer holding disk address "bno" (callers pass
 * XFS_FSB_TO_DADDR of an fsblock): first in the cursor's path buffers,
 * then among the buf log items attached to the cursor's transaction.
 * Returns NULL when no cursor is supplied or the buffer is not held.
 * DEBUG-only helper.
 */
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	/* First check the buffers pinned by the cursor itself. */
	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
5728 5732
/*
 * Sanity-check one bmap btree node block: keys must be strictly
 * increasing, and no two child pointers may reference the same block.
 * "root" selects the incore-root pointer layout (sz = if_broot_bytes);
 * otherwise the on-disk layout is used.  Panics on failure, so this is
 * only compiled under DEBUG.
 */
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	/* This is a node block, never a leaf. */
	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		/* Keys must be in strictly ascending startoff order. */
		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}
5776 5780
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves.
 */

STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0; /* bp read here, must brelse */

	/* Nothing to check unless the fork is in btree format. */
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (bp) {
			bp_release = 0;
		} else {
			bp_release = 1;
		}
		if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
				XFS_BMAP_BTREE_REF)))
			goto error_norelse;
		block = XFS_BUF_TO_BLOCK(bp);
		XFS_WANT_CORRUPTED_GOTO(
			xfs_bmap_sanity_check(mp, bp, level),
			error0);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
		/* Release only buffers we read ourselves, not cursor-held. */
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (bp) {
			bp_release = 0;
		} else {
			bp_release = 1;
		}
		if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
				XFS_BMAP_BTREE_REF)))
			goto error_norelse;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	if (bp_release) {
		bp_release = 0;
		xfs_trans_brelse(NULL, bp);
	}
	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}
5941 #endif 5945 #endif
5942 5946
/*
 * Count fsblocks of the given fork.
 *
 * tp:		transaction pointer (may be used for btree block reads)
 * ip:		incore inode
 * whichfork:	data or attr fork
 * count:	out - count of blocks in the fork
 *
 * Extents-format forks are summed directly from the incore extent list;
 * btree-format forks are walked via xfs_bmap_count_tree().  Returns 0 or
 * EFSCORRUPTED if the tree walk fails.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	return 0;
}
5990 5994
/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 *
 * Every block read (including right-sibling scans at each level) adds one
 * to *count; leaf-level records additionally contribute their extent
 * blockcounts via xfs_bmap_disk_count_leaves().  Returns 0 on success or
 * a read error / EFSCORRUPTED from a deeper level.
 */
STATIC int					/* error */
xfs_bmap_count_tree(
	xfs_mount_t		*mp,		/* file system mount point */
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_ifork_t		*ifp,		/* inode fork pointer */
	xfs_fsblock_t		blockno,	/* file system block number */
	int			levelin,	/* level in btree */
	int			*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
				0, &nbp, XFS_BMAP_BTREE_REF)))
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF)))
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}
6061 6065
6062 /* 6066 /*
6063 * Count leaf blocks given a range of extent records. 6067 * Count leaf blocks given a range of extent records.
6064 */ 6068 */
6065 STATIC void 6069 STATIC void
6066 xfs_bmap_count_leaves( 6070 xfs_bmap_count_leaves(
6067 xfs_ifork_t *ifp, 6071 xfs_ifork_t *ifp,
6068 xfs_extnum_t idx, 6072 xfs_extnum_t idx,
6069 int numrecs, 6073 int numrecs,
6070 int *count) 6074 int *count)
6071 { 6075 {
6072 int b; 6076 int b;
6073 6077
6074 for (b = 0; b < numrecs; b++) { 6078 for (b = 0; b < numrecs; b++) {
6075 xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b); 6079 xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
6076 *count += xfs_bmbt_get_blockcount(frp); 6080 *count += xfs_bmbt_get_blockcount(frp);
6077 } 6081 }
6078 } 6082 }
6079 6083
6080 /* 6084 /*
6081 * Count leaf blocks given a range of extent records originally 6085 * Count leaf blocks given a range of extent records originally
6082 * in btree format. 6086 * in btree format.
6083 */ 6087 */
6084 STATIC void 6088 STATIC void
6085 xfs_bmap_disk_count_leaves( 6089 xfs_bmap_disk_count_leaves(
6086 struct xfs_mount *mp, 6090 struct xfs_mount *mp,
6087 struct xfs_btree_block *block, 6091 struct xfs_btree_block *block,
6088 int numrecs, 6092 int numrecs,
6089 int *count) 6093 int *count)
6090 { 6094 {
6091 int b; 6095 int b;
6092 xfs_bmbt_rec_t *frp; 6096 xfs_bmbt_rec_t *frp;
6093 6097
6094 for (b = 1; b <= numrecs; b++) { 6098 for (b = 1; b <= numrecs; b++) {
6095 frp = XFS_BMBT_REC_ADDR(mp, block, b); 6099 frp = XFS_BMBT_REC_ADDR(mp, block, b);
6096 *count += xfs_bmbt_disk_get_blockcount(frp); 6100 *count += xfs_bmbt_disk_get_blockcount(frp);
6097 } 6101 }
6098 } 6102 }
6099 6103
6100 /* 6104 /*
6101 * dead simple method of punching delalyed allocation blocks from a range in 6105 * dead simple method of punching delalyed allocation blocks from a range in
6102 * the inode. Walks a block at a time so will be slow, but is only executed in 6106 * the inode. Walks a block at a time so will be slow, but is only executed in
6103 * rare error cases so the overhead is not critical. This will alays punch out 6107 * rare error cases so the overhead is not critical. This will alays punch out
6104 * both the start and end blocks, even if the ranges only partially overlap 6108 * both the start and end blocks, even if the ranges only partially overlap
6105 * them, so it is up to the caller to ensure that partial blocks are not 6109 * them, so it is up to the caller to ensure that partial blocks are not
6106 * passed in. 6110 * passed in.
6107 */ 6111 */
6108 int 6112 int
6109 xfs_bmap_punch_delalloc_range( 6113 xfs_bmap_punch_delalloc_range(
6110 struct xfs_inode *ip, 6114 struct xfs_inode *ip,
6111 xfs_fileoff_t start_fsb, 6115 xfs_fileoff_t start_fsb,
6112 xfs_fileoff_t length) 6116 xfs_fileoff_t length)
6113 { 6117 {
6114 xfs_fileoff_t remaining = length; 6118 xfs_fileoff_t remaining = length;
6115 int error = 0; 6119 int error = 0;
6116 6120
6117 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 6121 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6118 6122
6119 do { 6123 do {
6120 int done; 6124 int done;
6121 xfs_bmbt_irec_t imap; 6125 xfs_bmbt_irec_t imap;
6122 int nimaps = 1; 6126 int nimaps = 1;
6123 xfs_fsblock_t firstblock; 6127 xfs_fsblock_t firstblock;
6124 xfs_bmap_free_t flist; 6128 xfs_bmap_free_t flist;
6125 6129
6126 /* 6130 /*
6127 * Map the range first and check that it is a delalloc extent 6131 * Map the range first and check that it is a delalloc extent
6128 * before trying to unmap the range. Otherwise we will be 6132 * before trying to unmap the range. Otherwise we will be
6129 * trying to remove a real extent (which requires a 6133 * trying to remove a real extent (which requires a
6130 * transaction) or a hole, which is probably a bad idea... 6134 * transaction) or a hole, which is probably a bad idea...
6131 */ 6135 */
6132 error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps, 6136 error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
6133 XFS_BMAPI_ENTIRE); 6137 XFS_BMAPI_ENTIRE);
6134 6138
6135 if (error) { 6139 if (error) {
6136 /* something screwed, just bail */ 6140 /* something screwed, just bail */
6137 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 6141 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
6138 xfs_alert(ip->i_mount, 6142 xfs_alert(ip->i_mount,
6139 "Failed delalloc mapping lookup ino %lld fsb %lld.", 6143 "Failed delalloc mapping lookup ino %lld fsb %lld.",
6140 ip->i_ino, start_fsb); 6144 ip->i_ino, start_fsb);
6141 } 6145 }
6142 break; 6146 break;
6143 } 6147 }
6144 if (!nimaps) { 6148 if (!nimaps) {
6145 /* nothing there */ 6149 /* nothing there */
6146 goto next_block; 6150 goto next_block;
6147 } 6151 }
6148 if (imap.br_startblock != DELAYSTARTBLOCK) { 6152 if (imap.br_startblock != DELAYSTARTBLOCK) {
6149 /* been converted, ignore */ 6153 /* been converted, ignore */
6150 goto next_block; 6154 goto next_block;
6151 } 6155 }
6152 WARN_ON(imap.br_blockcount == 0); 6156 WARN_ON(imap.br_blockcount == 0);
6153 6157
6154 /* 6158 /*
6155 * Note: while we initialise the firstblock/flist pair, they 6159 * Note: while we initialise the firstblock/flist pair, they
6156 * should never be used because blocks should never be 6160 * should never be used because blocks should never be
6157 * allocated or freed for a delalloc extent and hence we need 6161 * allocated or freed for a delalloc extent and hence we need
6158 * don't cancel or finish them after the xfs_bunmapi() call. 6162 * don't cancel or finish them after the xfs_bunmapi() call.
6159 */ 6163 */
6160 xfs_bmap_init(&flist, &firstblock); 6164 xfs_bmap_init(&flist, &firstblock);
6161 error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock, 6165 error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
6162 &flist, &done); 6166 &flist, &done);
6163 if (error) 6167 if (error)
6164 break; 6168 break;
6165 6169
6166 ASSERT(!flist.xbf_count && !flist.xbf_first); 6170 ASSERT(!flist.xbf_count && !flist.xbf_first);
6167 next_block: 6171 next_block:
6168 start_fsb++; 6172 start_fsb++;
6169 remaining--; 6173 remaining--;
6170 } while(remaining > 0); 6174 } while(remaining > 0);
6171 6175
6172 return error; 6176 return error;
6173 } 6177 }
6174 6178
6175 /* 6179 /*
6176 * Convert the given file system block to a disk block. We have to treat it 6180 * Convert the given file system block to a disk block. We have to treat it
6177 * differently based on whether the file is a real time file or not, because the 6181 * differently based on whether the file is a real time file or not, because the
6178 * bmap code does. 6182 * bmap code does.
6179 */ 6183 */
6180 xfs_daddr_t 6184 xfs_daddr_t
6181 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) 6185 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
6182 { 6186 {
6183 return (XFS_IS_REALTIME_INODE(ip) ? \ 6187 return (XFS_IS_REALTIME_INODE(ip) ? \
6184 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \ 6188 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
6185 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb))); 6189 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
6186 } 6190 }
6187 6191
1 /* 1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it would be useful, 9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18 #ifndef __XFS_BMAP_H__ 18 #ifndef __XFS_BMAP_H__
19 #define __XFS_BMAP_H__ 19 #define __XFS_BMAP_H__
20 20
21 struct getbmap; 21 struct getbmap;
22 struct xfs_bmbt_irec; 22 struct xfs_bmbt_irec;
23 struct xfs_ifork; 23 struct xfs_ifork;
24 struct xfs_inode; 24 struct xfs_inode;
25 struct xfs_mount; 25 struct xfs_mount;
26 struct xfs_trans; 26 struct xfs_trans;
27 27
28 extern kmem_zone_t *xfs_bmap_free_item_zone; 28 extern kmem_zone_t *xfs_bmap_free_item_zone;
29 29
/*
 * List of extents to be freed "later".
 * The list is kept sorted on xbf_startblock.
 * One entry describes a single contiguous extent.
 */
typedef struct xfs_bmap_free_item
{
	xfs_fsblock_t		xbfi_startblock;/* starting fs block number */
	xfs_extlen_t		xbfi_blockcount;/* number of blocks in extent */
	struct xfs_bmap_free_item *xbfi_next;	/* link to next entry */
} xfs_bmap_free_item_t;
40 40
/*
 * Header for free extent list.
 *
 * xbf_low is used by the allocator to activate the lowspace algorithm -
 * when free space is running low the extent allocator may choose to
 * allocate an extent from an AG without leaving sufficient space for
 * a btree split when inserting the new extent. In this case the allocator
 * will enable the lowspace algorithm which is supposed to allow further
 * allocations (such as btree splits and newroots) to allocate from
 * sequential AGs. In order to avoid locking AGs out of order the lowspace
 * algorithm will start searching for free space from AG 0. If the correct
 * transaction reservations have been made then this algorithm will eventually
 * find all the space it needs.
 */
typedef	struct xfs_bmap_free
{
	xfs_bmap_free_item_t	*xbf_first;	/* list of to-be-free extents */
	int			xbf_count;	/* count of items on list */
	int			xbf_low;	/* alloc in low mode */
} xfs_bmap_free_t;
61 61
#define XFS_BMAP_MAX_NMAP	4

/*
 * Flags for xfs_bmapi_*
 */
#define XFS_BMAPI_ENTIRE	0x001	/* return entire extent, not trimmed */
#define XFS_BMAPI_METADATA	0x002	/* mapping metadata not user data */
#define XFS_BMAPI_ATTRFORK	0x004	/* use attribute fork not data */
#define XFS_BMAPI_PREALLOC	0x008	/* preallocation op: unwritten space */
#define XFS_BMAPI_IGSTATE	0x010	/* Ignore state - */
					/* combine contig. space */
#define XFS_BMAPI_CONTIG	0x020	/* must allocate only one extent */
/*
 * unwritten extent conversion - this needs write cache flushing and no
 * additional allocation alignments. When specified with XFS_BMAPI_PREALLOC
 * it converts from written to unwritten, otherwise convert from unwritten
 * to written.
 */
#define XFS_BMAPI_CONVERT	0x040
/* allocation in a limited-stack context may need to switch stacks */
#define XFS_BMAPI_STACK_SWITCH	0x080

/* flag/name table used when formatting the flags as text */
#define XFS_BMAPI_FLAGS \
	{ XFS_BMAPI_ENTIRE,	"ENTIRE" }, \
	{ XFS_BMAPI_METADATA,	"METADATA" }, \
	{ XFS_BMAPI_ATTRFORK,	"ATTRFORK" }, \
	{ XFS_BMAPI_PREALLOC,	"PREALLOC" }, \
	{ XFS_BMAPI_IGSTATE,	"IGSTATE" }, \
	{ XFS_BMAPI_CONTIG,	"CONTIG" }, \
	{ XFS_BMAPI_CONVERT,	"CONVERT" }, \
	{ XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
89 91
90 92
91 static inline int xfs_bmapi_aflag(int w) 93 static inline int xfs_bmapi_aflag(int w)
92 { 94 {
93 return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0); 95 return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
94 } 96 }
95 97
/*
 * Special values for xfs_bmbt_irec_t br_startblock field.
 * DELAYSTARTBLOCK flags a delayed allocation; HOLESTARTBLOCK flags a hole.
 */
#define DELAYSTARTBLOCK		((xfs_fsblock_t)-1LL)
#define HOLESTARTBLOCK		((xfs_fsblock_t)-2LL)
101 103
102 static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp) 104 static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
103 { 105 {
104 ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \ 106 ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \
105 (flp)->xbf_low = 0, *(fbp) = NULLFSBLOCK); 107 (flp)->xbf_low = 0, *(fbp) = NULLFSBLOCK);
106 } 108 }
107 109
/*
 * Argument structure for xfs_bmap_alloc.
 */
typedef struct xfs_bmalloca {
	xfs_fsblock_t		*firstblock; /* i/o first block allocated */
	struct xfs_bmap_free	*flist;	/* bmap freelist */
	struct xfs_trans	*tp;	/* transaction pointer */
	struct xfs_inode	*ip;	/* incore inode pointer */
	struct xfs_bmbt_irec	prev;	/* extent before the new one */
	struct xfs_bmbt_irec	got;	/* extent after, or delayed */

	xfs_fileoff_t		offset;	/* offset in file filling in */
	xfs_extlen_t		length;	/* i/o length asked/allocated */
	xfs_fsblock_t		blkno;	/* starting block of new extent */

	struct xfs_btree_cur	*cur;	/* btree cursor */
	xfs_extnum_t		idx;	/* current extent index */
	int			nallocs;/* number of extents alloc'd */
	int			logflags;/* flags for transaction logging */

	xfs_extlen_t		total;	/* total blocks needed for xaction */
	xfs_extlen_t		minlen;	/* minimum allocation size (blocks) */
	xfs_extlen_t		minleft; /* amount must be left after alloc */
	char			eof;	/* set if allocating past last extent */
	char			wasdel;	/* replacing a delayed allocation */
	char			userdata;/* set if is user data */
	char			aeof;	/* allocated space at eof */
	char			conv;	/* overwriting unwritten extents */
	char			stack_switch; /* set when XFS_BMAPI_STACK_SWITCH
					       * asks for allocation on a
					       * separate stack */
} xfs_bmalloca_t;
137 140
/*
 * Flags for xfs_bmap_add_extent*.
 */
#define BMAP_LEFT_CONTIG	(1 << 0)
#define BMAP_RIGHT_CONTIG	(1 << 1)
#define BMAP_LEFT_FILLING	(1 << 2)
#define BMAP_RIGHT_FILLING	(1 << 3)
#define BMAP_LEFT_DELAY		(1 << 4)
#define BMAP_RIGHT_DELAY	(1 << 5)
#define BMAP_LEFT_VALID		(1 << 6)
#define BMAP_RIGHT_VALID	(1 << 7)
#define BMAP_ATTRFORK		(1 << 8)

/* flag/name table used when formatting the extent flags as text */
#define XFS_BMAP_EXT_FLAGS \
	{ BMAP_LEFT_CONTIG,	"LC" }, \
	{ BMAP_RIGHT_CONTIG,	"RC" }, \
	{ BMAP_LEFT_FILLING,	"LF" }, \
	{ BMAP_RIGHT_FILLING,	"RF" }, \
	{ BMAP_ATTRFORK,	"ATTR" }
157 160
/*
 * Extent-list tracing, compiled in only for kernel DEBUG builds.
 *
 * Bug fix: the original tested defined(__KERNEL) — a macro that is never
 * defined anywhere — so the DEBUG tracing branch could never be enabled.
 * The kernel build defines __KERNEL__ (double trailing underscore).
 */
#if defined(__KERNEL__) && defined(DEBUG)
void	xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
		int whichfork, unsigned long caller_ip);
#define	XFS_BMAP_TRACE_EXLIST(ip,c,w)	\
	xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
#else
#define	XFS_BMAP_TRACE_EXLIST(ip,c,w)
#endif
166 169
/*
 * Core bmap interfaces; kernel-only ones follow under #ifdef __KERNEL__.
 */
int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void	xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
		struct xfs_bmap_free *flist, struct xfs_mount *mp);
void	xfs_bmap_cancel(struct xfs_bmap_free *flist);
void	xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int	xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
int	xfs_bmap_last_before(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t *last_block, int whichfork);
int	xfs_bmap_last_offset(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t *unused, int whichfork);
int	xfs_bmap_one_block(struct xfs_inode *ip, int whichfork);
int	xfs_bmap_read_extents(struct xfs_trans *tp, struct xfs_inode *ip,
		int whichfork);
int	xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
		xfs_filblks_t len, struct xfs_bmbt_irec *mval,
		int *nmap, int flags);
int	xfs_bmapi_delay(struct xfs_inode *ip, xfs_fileoff_t bno,
		xfs_filblks_t len, struct xfs_bmbt_irec *mval,
		int *nmap, int flags);
int	xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t bno, xfs_filblks_t len, int flags,
		xfs_fsblock_t *firstblock, xfs_extlen_t total,
		struct xfs_bmbt_irec *mval, int *nmap,
		struct xfs_bmap_free *flist);
int	xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t bno, xfs_filblks_t len, int flags,
		xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
		struct xfs_bmap_free *flist, int *done);
int	xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
		xfs_extnum_t num);
uint	xfs_default_attroffset(struct xfs_inode *ip);
199 202
#ifdef __KERNEL__
/* Kernel-only interfaces. */

/* bmap to userspace formatter - copy to user & advance pointer */
typedef int (*xfs_bmap_format_t)(void **, struct getbmapx *, int *);

int	xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
		int *committed);
int	xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
		xfs_bmap_format_t formatter, void *arg);
int	xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
		int whichfork, int *eof);
int	xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
		int whichfork, int *count);
int	xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
		xfs_fileoff_t start_fsb, xfs_fileoff_t length);

xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);

#endif	/* __KERNEL__ */
218 221
219 #endif /* __XFS_BMAP_H__ */ 222 #endif /* __XFS_BMAP_H__ */
220 223
1 /* 1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it would be useful, 9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18 #include "xfs.h" 18 #include "xfs.h"
19 #include "xfs_fs.h" 19 #include "xfs_fs.h"
20 #include "xfs_log.h" 20 #include "xfs_log.h"
21 #include "xfs_trans.h" 21 #include "xfs_trans.h"
22 #include "xfs_sb.h" 22 #include "xfs_sb.h"
23 #include "xfs_ag.h" 23 #include "xfs_ag.h"
24 #include "xfs_alloc.h" 24 #include "xfs_alloc.h"
25 #include "xfs_quota.h" 25 #include "xfs_quota.h"
26 #include "xfs_mount.h" 26 #include "xfs_mount.h"
27 #include "xfs_bmap_btree.h" 27 #include "xfs_bmap_btree.h"
28 #include "xfs_alloc_btree.h" 28 #include "xfs_alloc_btree.h"
29 #include "xfs_ialloc_btree.h" 29 #include "xfs_ialloc_btree.h"
30 #include "xfs_dinode.h" 30 #include "xfs_dinode.h"
31 #include "xfs_inode.h" 31 #include "xfs_inode.h"
32 #include "xfs_inode_item.h" 32 #include "xfs_inode_item.h"
33 #include "xfs_btree.h" 33 #include "xfs_btree.h"
34 #include "xfs_bmap.h" 34 #include "xfs_bmap.h"
35 #include "xfs_rtalloc.h" 35 #include "xfs_rtalloc.h"
36 #include "xfs_error.h" 36 #include "xfs_error.h"
37 #include "xfs_itable.h" 37 #include "xfs_itable.h"
38 #include "xfs_attr.h" 38 #include "xfs_attr.h"
39 #include "xfs_buf_item.h" 39 #include "xfs_buf_item.h"
40 #include "xfs_trans_space.h" 40 #include "xfs_trans_space.h"
41 #include "xfs_utils.h" 41 #include "xfs_utils.h"
42 #include "xfs_iomap.h" 42 #include "xfs_iomap.h"
43 #include "xfs_trace.h" 43 #include "xfs_trace.h"
44 44
45 45
/* Round a byte offset down to the mount's write I/O size boundary. */
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
/* Maximum number of mappings handled at once by the write paths. */
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
49 49
50 STATIC int 50 STATIC int
51 xfs_iomap_eof_align_last_fsb( 51 xfs_iomap_eof_align_last_fsb(
52 xfs_mount_t *mp, 52 xfs_mount_t *mp,
53 xfs_inode_t *ip, 53 xfs_inode_t *ip,
54 xfs_extlen_t extsize, 54 xfs_extlen_t extsize,
55 xfs_fileoff_t *last_fsb) 55 xfs_fileoff_t *last_fsb)
56 { 56 {
57 xfs_fileoff_t new_last_fsb = 0; 57 xfs_fileoff_t new_last_fsb = 0;
58 xfs_extlen_t align = 0; 58 xfs_extlen_t align = 0;
59 int eof, error; 59 int eof, error;
60 60
61 if (!XFS_IS_REALTIME_INODE(ip)) { 61 if (!XFS_IS_REALTIME_INODE(ip)) {
62 /* 62 /*
63 * Round up the allocation request to a stripe unit 63 * Round up the allocation request to a stripe unit
64 * (m_dalign) boundary if the file size is >= stripe unit 64 * (m_dalign) boundary if the file size is >= stripe unit
65 * size, and we are allocating past the allocation eof. 65 * size, and we are allocating past the allocation eof.
66 * 66 *
67 * If mounted with the "-o swalloc" option the alignment is 67 * If mounted with the "-o swalloc" option the alignment is
68 * increased from the strip unit size to the stripe width. 68 * increased from the strip unit size to the stripe width.
69 */ 69 */
70 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 70 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
71 align = mp->m_swidth; 71 align = mp->m_swidth;
72 else if (mp->m_dalign) 72 else if (mp->m_dalign)
73 align = mp->m_dalign; 73 align = mp->m_dalign;
74 74
75 if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align)) 75 if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
76 new_last_fsb = roundup_64(*last_fsb, align); 76 new_last_fsb = roundup_64(*last_fsb, align);
77 } 77 }
78 78
79 /* 79 /*
80 * Always round up the allocation request to an extent boundary 80 * Always round up the allocation request to an extent boundary
81 * (when file on a real-time subvolume or has di_extsize hint). 81 * (when file on a real-time subvolume or has di_extsize hint).
82 */ 82 */
83 if (extsize) { 83 if (extsize) {
84 if (new_last_fsb) 84 if (new_last_fsb)
85 align = roundup_64(new_last_fsb, extsize); 85 align = roundup_64(new_last_fsb, extsize);
86 else 86 else
87 align = extsize; 87 align = extsize;
88 new_last_fsb = roundup_64(*last_fsb, align); 88 new_last_fsb = roundup_64(*last_fsb, align);
89 } 89 }
90 90
91 if (new_last_fsb) { 91 if (new_last_fsb) {
92 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); 92 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
93 if (error) 93 if (error)
94 return error; 94 return error;
95 if (eof) 95 if (eof)
96 *last_fsb = new_last_fsb; 96 *last_fsb = new_last_fsb;
97 } 97 }
98 return 0; 98 return 0;
99 } 99 }
100 100
101 STATIC int 101 STATIC int
102 xfs_alert_fsblock_zero( 102 xfs_alert_fsblock_zero(
103 xfs_inode_t *ip, 103 xfs_inode_t *ip,
104 xfs_bmbt_irec_t *imap) 104 xfs_bmbt_irec_t *imap)
105 { 105 {
106 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, 106 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
107 "Access to block zero in inode %llu " 107 "Access to block zero in inode %llu "
108 "start_block: %llx start_off: %llx " 108 "start_block: %llx start_off: %llx "
109 "blkcnt: %llx extent-state: %x\n", 109 "blkcnt: %llx extent-state: %x\n",
110 (unsigned long long)ip->i_ino, 110 (unsigned long long)ip->i_ino,
111 (unsigned long long)imap->br_startblock, 111 (unsigned long long)imap->br_startblock,
112 (unsigned long long)imap->br_startoff, 112 (unsigned long long)imap->br_startoff,
113 (unsigned long long)imap->br_blockcount, 113 (unsigned long long)imap->br_blockcount,
114 imap->br_state); 114 imap->br_state);
115 return EFSCORRUPTED; 115 return EFSCORRUPTED;
116 } 116 }
117 117
/*
 * Allocate real blocks for a direct I/O write.
 *
 * Reserves space (and quota) in a new transaction, maps the requested
 * range with xfs_bmapi_write() and commits.  On success *imap describes
 * the allocated extent; the imap contents passed in by the caller are
 * overwritten.  Returns 0 or an errno value via XFS_ERROR().
 */
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/* extending the file: round the end up for alignment */
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return XFS_ERROR(error);
	} else {
		/* writing into a hole: don't allocate past the caller's map */
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	/* round the reservation up to the extent size hint, if any */
	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and setup the transaction
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), resrtextents,
			XFS_TRANS_PERM_LOG_RES,
			XFS_WRITE_LOG_COUNT);
	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return XFS_ERROR(error);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	bmapi_flag = 0;
	if (offset < XFS_ISIZE(ip) || extsz)
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flag,
				&firstfsb, 0, imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = XFS_ERROR(ENOSPC);
		goto out_unlock;
	}

	/* a zero start block on the data device indicates corruption */
	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}
254 254
255 /* 255 /*
256 * If the caller is doing a write at the end of the file, then extend the 256 * If the caller is doing a write at the end of the file, then extend the
257 * allocation out to the file system's write iosize. We clean up any extra 257 * allocation out to the file system's write iosize. We clean up any extra
258 * space left over when the file is closed in xfs_inactive(). 258 * space left over when the file is closed in xfs_inactive().
259 * 259 *
260 * If we find we already have delalloc preallocation beyond EOF, don't do more 260 * If we find we already have delalloc preallocation beyond EOF, don't do more
 261 * preallocation as it is not needed. 261 * preallocation as it is not needed.
262 */ 262 */
263 STATIC int 263 STATIC int
264 xfs_iomap_eof_want_preallocate( 264 xfs_iomap_eof_want_preallocate(
265 xfs_mount_t *mp, 265 xfs_mount_t *mp,
266 xfs_inode_t *ip, 266 xfs_inode_t *ip,
267 xfs_off_t offset, 267 xfs_off_t offset,
268 size_t count, 268 size_t count,
269 xfs_bmbt_irec_t *imap, 269 xfs_bmbt_irec_t *imap,
270 int nimaps, 270 int nimaps,
271 int *prealloc) 271 int *prealloc)
272 { 272 {
273 xfs_fileoff_t start_fsb; 273 xfs_fileoff_t start_fsb;
274 xfs_filblks_t count_fsb; 274 xfs_filblks_t count_fsb;
275 xfs_fsblock_t firstblock; 275 xfs_fsblock_t firstblock;
276 int n, error, imaps; 276 int n, error, imaps;
277 int found_delalloc = 0; 277 int found_delalloc = 0;
278 278
279 *prealloc = 0; 279 *prealloc = 0;
280 if (offset + count <= XFS_ISIZE(ip)) 280 if (offset + count <= XFS_ISIZE(ip))
281 return 0; 281 return 0;
282 282
283 /* 283 /*
284 * If there are any real blocks past eof, then don't 284 * If there are any real blocks past eof, then don't
285 * do any speculative allocation. 285 * do any speculative allocation.
286 */ 286 */
287 start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1))); 287 start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
288 count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); 288 count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
289 while (count_fsb > 0) { 289 while (count_fsb > 0) {
290 imaps = nimaps; 290 imaps = nimaps;
291 firstblock = NULLFSBLOCK; 291 firstblock = NULLFSBLOCK;
292 error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps, 292 error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
293 0); 293 0);
294 if (error) 294 if (error)
295 return error; 295 return error;
296 for (n = 0; n < imaps; n++) { 296 for (n = 0; n < imaps; n++) {
297 if ((imap[n].br_startblock != HOLESTARTBLOCK) && 297 if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
298 (imap[n].br_startblock != DELAYSTARTBLOCK)) 298 (imap[n].br_startblock != DELAYSTARTBLOCK))
299 return 0; 299 return 0;
300 start_fsb += imap[n].br_blockcount; 300 start_fsb += imap[n].br_blockcount;
301 count_fsb -= imap[n].br_blockcount; 301 count_fsb -= imap[n].br_blockcount;
302 302
303 if (imap[n].br_startblock == DELAYSTARTBLOCK) 303 if (imap[n].br_startblock == DELAYSTARTBLOCK)
304 found_delalloc = 1; 304 found_delalloc = 1;
305 } 305 }
306 } 306 }
307 if (!found_delalloc) 307 if (!found_delalloc)
308 *prealloc = 1; 308 *prealloc = 1;
309 return 0; 309 return 0;
310 } 310 }
311 311
312 /* 312 /*
313 * If we don't have a user specified preallocation size, dynamically increase 313 * If we don't have a user specified preallocation size, dynamically increase
314 * the preallocation size as the size of the file grows. Cap the maximum size 314 * the preallocation size as the size of the file grows. Cap the maximum size
315 * at a single extent or less if the filesystem is near full. The closer the 315 * at a single extent or less if the filesystem is near full. The closer the
 316 * filesystem is to full, the smaller the maximum preallocation. 316 * filesystem is to full, the smaller the maximum preallocation.
317 */ 317 */
318 STATIC xfs_fsblock_t 318 STATIC xfs_fsblock_t
319 xfs_iomap_prealloc_size( 319 xfs_iomap_prealloc_size(
320 struct xfs_mount *mp, 320 struct xfs_mount *mp,
321 struct xfs_inode *ip) 321 struct xfs_inode *ip)
322 { 322 {
323 xfs_fsblock_t alloc_blocks = 0; 323 xfs_fsblock_t alloc_blocks = 0;
324 324
325 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { 325 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
326 int shift = 0; 326 int shift = 0;
327 int64_t freesp; 327 int64_t freesp;
328 328
329 /* 329 /*
330 * rounddown_pow_of_two() returns an undefined result 330 * rounddown_pow_of_two() returns an undefined result
331 * if we pass in alloc_blocks = 0. Hence the "+ 1" to 331 * if we pass in alloc_blocks = 0. Hence the "+ 1" to
332 * ensure we always pass in a non-zero value. 332 * ensure we always pass in a non-zero value.
333 */ 333 */
334 alloc_blocks = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)) + 1; 334 alloc_blocks = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)) + 1;
335 alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN, 335 alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
336 rounddown_pow_of_two(alloc_blocks)); 336 rounddown_pow_of_two(alloc_blocks));
337 337
338 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); 338 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
339 freesp = mp->m_sb.sb_fdblocks; 339 freesp = mp->m_sb.sb_fdblocks;
340 if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { 340 if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
341 shift = 2; 341 shift = 2;
342 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) 342 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
343 shift++; 343 shift++;
344 if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT]) 344 if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
345 shift++; 345 shift++;
346 if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT]) 346 if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
347 shift++; 347 shift++;
348 if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT]) 348 if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
349 shift++; 349 shift++;
350 } 350 }
351 if (shift) 351 if (shift)
352 alloc_blocks >>= shift; 352 alloc_blocks >>= shift;
353 } 353 }
354 354
355 if (alloc_blocks < mp->m_writeio_blocks) 355 if (alloc_blocks < mp->m_writeio_blocks)
356 alloc_blocks = mp->m_writeio_blocks; 356 alloc_blocks = mp->m_writeio_blocks;
357 357
358 return alloc_blocks; 358 return alloc_blocks;
359 } 359 }
360 360
/*
 * Reserve a delayed allocation extent covering the byte range
 * [offset, offset + count) and return the first resulting mapping in
 * *ret_imap.
 *
 * If the write extends the file, speculative preallocation beyond EOF may
 * be included (sized by xfs_iomap_prealloc_size()).  On ENOSPC/EDQUOT the
 * reservation is retried once without the EOF preallocation.
 *
 * The caller must hold the inode ilock exclusively.
 *
 * Returns 0 on success or a positive XFS error code.
 */
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	/* does this write extend EOF and hence want speculative prealloc? */
	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks = xfs_iomap_prealloc_size(mp, ip);

		/* round the end of the write up to I/O alignment, then
		 * extend the mapping by the preallocation size */
		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	/* ENOSPC/EDQUOT are handled below via the nimaps == 0 retry path */
	switch (error) {
	case 0:
	case ENOSPC:
	case EDQUOT:
		break;
	default:
		return XFS_ERROR(error);
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return XFS_ERROR(error ? error : ENOSPC);
	}

	/* a zero startblock on a non-realtime inode indicates corruption */
	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	*ret_imap = imap[0];
	return 0;
}
456 456
/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 *
 * On success *imap is overwritten with the mapping that covers @offset.
 * Returns 0 on success or a positive XFS error code.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file. Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in. The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			/* allow dipping into the reserved block pool */
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before block on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				/* clamp to EOF; nothing left means the range
				 * was truncated away underneath us */
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb,
						XFS_BMAPI_STACK_SWITCH,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
623 625
/*
 * Convert the unwritten extents backing the byte range
 * [offset, offset + count) to written extents, logging an updated inode
 * size as each chunk is converted.
 *
 * Called without the inode ilock held; the lock is taken and dropped
 * around each conversion transaction.
 *
 * Returns 0 on success or a positive XFS error code.
 */
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	/* round the range out to whole filesystem blocks */
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits. We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS--we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, resblks,
				XFS_WRITE_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				  XFS_BMAPI_CONVERT, &firstfsb,
				  1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go. We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		/* a zero startblock on a non-realtime inode is corruption */
		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		/* advance past the chunk just converted and keep going */
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
747 749