Commit 85c0b2ab5e69ca6133380ead1c50e0840d136b39

Authored by Dave Chinner
Committed by Felix Blyakher
1 parent 1da1daed81

xfs: factor out inode initialisation

Factor out code to initialize new inode clusters into a function of its own.
This keeps xfs_ialloc_ag_alloc smaller and better structured and enables a
future inode cluster initialization transaction.  Also initialize the agno
variable earlier in xfs_ialloc_ag_alloc to avoid repeated byte swaps.
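
To make the buffer geometry computed by the new xfs_ialloc_inode_init() helper
easier to follow, here is a small standalone sketch of the
blks_per_cluster/nbufs/ninodes calculation shown in the diff below.  The block,
cluster and inode sizes used here are example assumptions for illustration,
not values taken from the patch.

    #include <stdio.h>

    int main(void)
    {
            /* Example geometry -- assumed values, not from the patch. */
            unsigned int blocksize = 4096;       /* sb_blocksize */
            unsigned int cluster_size = 8192;    /* XFS_INODE_CLUSTER_SIZE(mp) */
            unsigned int inodesize = 256;        /* sb_inodesize */
            unsigned int inopblock = blocksize / inodesize; /* sb_inopblock */
            unsigned int length = 64;            /* blocks in the new inode chunk */
            unsigned int blks_per_cluster, nbufs, ninodes;

            if (blocksize >= cluster_size) {
                    /* One buffer per filesystem block. */
                    blks_per_cluster = 1;
                    nbufs = length;
                    ninodes = inopblock;
            } else {
                    /* One buffer per inode cluster, spanning several blocks. */
                    blks_per_cluster = cluster_size / blocksize;
                    nbufs = length / blks_per_cluster;
                    ninodes = blks_per_cluster * inopblock;
            }

            printf("blks_per_cluster=%u nbufs=%u ninodes=%u\n",
                   blks_per_cluster, nbufs, ninodes);
            return 0;
    }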

[hch:  The original patch is from Dave's unpublished inode create
 transaction patch series, with some modifications by me to apply stand-alone]

Signed-off-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Felix Blyakher <felixb@sgi.com>

Showing 1 changed file with 95 additions and 80 deletions

... ... @@ -153,6 +153,87 @@
153 153 }
154 154  
155 155 /*
  156 + * Initialise a new set of inodes.
  157 + */
  158 +STATIC void
  159 +xfs_ialloc_inode_init(
  160 + struct xfs_mount *mp,
  161 + struct xfs_trans *tp,
  162 + xfs_agnumber_t agno,
  163 + xfs_agblock_t agbno,
  164 + xfs_agblock_t length,
  165 + unsigned int gen)
  166 +{
  167 + struct xfs_buf *fbuf;
  168 + struct xfs_dinode *free;
  169 + int blks_per_cluster, nbufs, ninodes;
  170 + int version;
  171 + int i, j;
  172 + xfs_daddr_t d;
  173 +
  174 + /*
  175 + * Loop over the new block(s), filling in the inodes.
  176 + * For small block sizes, manipulate the inodes in buffers
  177 + * which are multiples of the blocks size.
  178 + */
  179 + if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
  180 + blks_per_cluster = 1;
  181 + nbufs = length;
  182 + ninodes = mp->m_sb.sb_inopblock;
  183 + } else {
  184 + blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
  185 + mp->m_sb.sb_blocksize;
  186 + nbufs = length / blks_per_cluster;
  187 + ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
  188 + }
  189 +
  190 + /*
  191 + * Figure out what version number to use in the inodes we create.
  192 + * If the superblock version has caught up to the one that supports
  193 + * the new inode format, then use the new inode version. Otherwise
  194 + * use the old version so that old kernels will continue to be
  195 + * able to use the file system.
  196 + */
  197 + if (xfs_sb_version_hasnlink(&mp->m_sb))
  198 + version = 2;
  199 + else
  200 + version = 1;
  201 +
  202 + for (j = 0; j < nbufs; j++) {
  203 + /*
  204 + * Get the block.
  205 + */
  206 + d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
  207 + fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
  208 + mp->m_bsize * blks_per_cluster,
  209 + XFS_BUF_LOCK);
  210 + ASSERT(fbuf);
  211 + ASSERT(!XFS_BUF_GETERROR(fbuf));
  212 +
  213 + /*
  214 + * Initialize all inodes in this buffer and then log them.
  215 + *
  216 + * XXX: It would be much better if we had just one transaction
  217 + * to log a whole cluster of inodes instead of all the
  218 + * individual transactions causing a lot of log traffic.
  219 + */
  220 + xfs_biozero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog);
  221 + for (i = 0; i < ninodes; i++) {
  222 + int ioffset = i << mp->m_sb.sb_inodelog;
  223 + uint isize = sizeof(struct xfs_dinode);
  224 +
  225 + free = xfs_make_iptr(mp, fbuf, i);
  226 + free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
  227 + free->di_version = version;
  228 + free->di_gen = cpu_to_be32(gen);
  229 + free->di_next_unlinked = cpu_to_be32(NULLAGINO);
  230 + xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1);
  231 + }
  232 + xfs_trans_inode_alloc_buf(tp, fbuf);
  233 + }
  234 +}
  235 +
  236 +/*
156 237 * Allocate new inodes in the allocation group specified by agbp.
157 238 * Return 0 for success, else error code.
158 239 */
159 240  
160 241  
161 242  
162 243  
163 244  
... ... @@ -164,24 +245,15 @@
164 245 {
165 246 xfs_agi_t *agi; /* allocation group header */
166 247 xfs_alloc_arg_t args; /* allocation argument structure */
167   - int blks_per_cluster; /* fs blocks per inode cluster */
168 248 xfs_btree_cur_t *cur; /* inode btree cursor */
169   - xfs_daddr_t d; /* disk addr of buffer */
170 249 xfs_agnumber_t agno;
171 250 int error;
172   - xfs_buf_t *fbuf; /* new free inodes' buffer */
173   - xfs_dinode_t *free; /* new free inode structure */
174   - int i; /* inode counter */
175   - int j; /* block counter */
176   - int nbufs; /* num bufs of new inodes */
  251 + int i;
177 252 xfs_agino_t newino; /* new first inode's number */
178 253 xfs_agino_t newlen; /* new number of inodes */
179   - int ninodes; /* num inodes per buf */
180 254 xfs_agino_t thisino; /* current inode number, for loop */
181   - int version; /* inode version number to use */
182 255 int isaligned = 0; /* inode allocation at stripe unit */
183 256 /* boundary */
184   - unsigned int gen;
185 257  
186 258 args.tp = tp;
187 259 args.mp = tp->t_mountp;
188 260  
... ... @@ -202,12 +274,12 @@
202 274 */
203 275 agi = XFS_BUF_TO_AGI(agbp);
204 276 newino = be32_to_cpu(agi->agi_newino);
  277 + agno = be32_to_cpu(agi->agi_seqno);
205 278 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
206 279 XFS_IALLOC_BLOCKS(args.mp);
207 280 if (likely(newino != NULLAGINO &&
208 281 (args.agbno < be32_to_cpu(agi->agi_length)))) {
209   - args.fsbno = XFS_AGB_TO_FSB(args.mp,
210   - be32_to_cpu(agi->agi_seqno), args.agbno);
  282 + args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
211 283 args.type = XFS_ALLOCTYPE_THIS_BNO;
212 284 args.mod = args.total = args.wasdel = args.isfl =
213 285 args.userdata = args.minalignslop = 0;
... ... @@ -258,8 +330,7 @@
258 330 * For now, just allocate blocks up front.
259 331 */
260 332 args.agbno = be32_to_cpu(agi->agi_root);
261   - args.fsbno = XFS_AGB_TO_FSB(args.mp,
262   - be32_to_cpu(agi->agi_seqno), args.agbno);
  333 + args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
263 334 /*
264 335 * Allocate a fixed-size extent of inodes.
265 336 */
... ... @@ -282,8 +353,7 @@
282 353 if (isaligned && args.fsbno == NULLFSBLOCK) {
283 354 args.type = XFS_ALLOCTYPE_NEAR_BNO;
284 355 args.agbno = be32_to_cpu(agi->agi_root);
285   - args.fsbno = XFS_AGB_TO_FSB(args.mp,
286   - be32_to_cpu(agi->agi_seqno), args.agbno);
  356 + args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
287 357 args.alignment = xfs_ialloc_cluster_alignment(&args);
288 358 if ((error = xfs_alloc_vextent(&args)))
289 359 return error;
290 360  
291 361  
292 362  
293 363  
294 364  
... ... @@ -294,85 +364,30 @@
294 364 return 0;
295 365 }
296 366 ASSERT(args.len == args.minlen);
297   - /*
298   - * Convert the results.
299   - */
300   - newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
301   - /*
302   - * Loop over the new block(s), filling in the inodes.
303   - * For small block sizes, manipulate the inodes in buffers
304   - * which are multiples of the blocks size.
305   - */
306   - if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {
307   - blks_per_cluster = 1;
308   - nbufs = (int)args.len;
309   - ninodes = args.mp->m_sb.sb_inopblock;
310   - } else {
311   - blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /
312   - args.mp->m_sb.sb_blocksize;
313   - nbufs = (int)args.len / blks_per_cluster;
314   - ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;
315   - }
316   - /*
317   - * Figure out what version number to use in the inodes we create.
318   - * If the superblock version has caught up to the one that supports
319   - * the new inode format, then use the new inode version. Otherwise
320   - * use the old version so that old kernels will continue to be
321   - * able to use the file system.
322   - */
323   - if (xfs_sb_version_hasnlink(&args.mp->m_sb))
324   - version = 2;
325   - else
326   - version = 1;
327 367  
328 368 /*
  369 + * Stamp and write the inode buffers.
  370 + *
329 371 * Seed the new inode cluster with a random generation number. This
330 372 * prevents short-term reuse of generation numbers if a chunk is
331 373 * freed and then immediately reallocated. We use random numbers
332 374 * rather than a linear progression to prevent the next generation
333 375 * number from being easily guessable.
334 376 */
335   - gen = random32();
336   - for (j = 0; j < nbufs; j++) {
337   - /*
338   - * Get the block.
339   - */
340   - d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno),
341   - args.agbno + (j * blks_per_cluster));
342   - fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
343   - args.mp->m_bsize * blks_per_cluster,
344   - XFS_BUF_LOCK);
345   - ASSERT(fbuf);
346   - ASSERT(!XFS_BUF_GETERROR(fbuf));
  377 + xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno, args.len,
  378 + random32());
347 379  
348   - /*
349   - * Initialize all inodes in this buffer and then log them.
350   - *
351   - * XXX: It would be much better if we had just one transaction to
352   - * log a whole cluster of inodes instead of all the individual
353   - * transactions causing a lot of log traffic.
354   - */
355   - xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
356   - for (i = 0; i < ninodes; i++) {
357   - int ioffset = i << args.mp->m_sb.sb_inodelog;
358   - uint isize = sizeof(struct xfs_dinode);
359   -
360   - free = xfs_make_iptr(args.mp, fbuf, i);
361   - free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
362   - free->di_version = version;
363   - free->di_gen = cpu_to_be32(gen);
364   - free->di_next_unlinked = cpu_to_be32(NULLAGINO);
365   - xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1);
366   - }
367   - xfs_trans_inode_alloc_buf(tp, fbuf);
368   - }
  380 + /*
  381 + * Convert the results.
  382 + */
  383 + newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
369 384 be32_add_cpu(&agi->agi_count, newlen);
370 385 be32_add_cpu(&agi->agi_freecount, newlen);
371   - agno = be32_to_cpu(agi->agi_seqno);
372 386 down_read(&args.mp->m_peraglock);
373 387 args.mp->m_perag[agno].pagi_freecount += newlen;
374 388 up_read(&args.mp->m_peraglock);
375 389 agi->agi_newino = cpu_to_be32(newino);
  390 +
376 391 /*
377 392 * Insert records describing the new inode chunk into the btree.
378 393 */