Commit 661b99e95fa3652417931e28b54b64941bdd56f0

Authored by Linus Torvalds

Merge tag 'xfs-for-linus-3.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs

Pull xfs fixes from Dave Chinner:
 "This update fixes a warning in the new pagecache_isize_extended() and
  updates some related comments, another fix for zero-range
  misbehaviour, and an unforntuately large set of fixes for regressions
  in the bulkstat code.

  The bulkstat fixes are large but necessary.  I wouldn't normally push
  such a rework for a -rcX update, but right now xfsdump can silently
  create incomplete dumps on 3.17 and it's possible that even xfsrestore
  won't notice that the dumps were incomplete.  Hence we need to get
  this update into 3.17-stable kernels ASAP.

  In more detail, the refactoring work I committed in 3.17 has exposed a
  major hole in our QA coverage.  With both xfsdump (the major user of
  bulkstat) and xfsrestore silently ignoring missing files in the
  dump/restore process, incomplete dumps were going unnoticed if they
  were being triggered.  Many of the dump/restore filesets were so small
  that they didn't even have a chance of triggering the loop iteration
  bugs we introduced in 3.17, so we didn't exercise the code
  sufficiently, either.

  We have already taken steps to improve QA coverage in xfstests to
  avoid this happening again, and I've done a lot of manual verification
  of dump/restore on very large data sets (tens of millions of inodes)
  over the past week to verify that this patch set results in bulkstat
  behaving the same way as it does on 3.16.

  Unfortunately, the fixes are not exactly simple - in tracking down the
  problem, historic API warts were discovered (e.g. xfsdump has been
  working around a 20-year-old bug in the bulkstat API for the past 10
  years) and that complicated the process of diagnosing and fixing the
  problems.  i.e. we had to fix bugs in the code as well as discover and
  re-introduce the userspace-visible API bugs that we unwittingly "fixed"
  in 3.17 that xfsdump relied on to work correctly.

  Summary:

   - fix incorrect warnings about i_mutex locking in pagecache_isize_extended()
     and update comments to match the expected locking
   - another zero-range bug fix for stray file size updates
   - a bunch of fixes for regressions in the bulkstat code introduced in
     3.17"

* tag 'xfs-for-linus-3.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs:
  xfs: track bulkstat progress by agino
  xfs: bulkstat error handling is broken
  xfs: bulkstat main loop logic is a mess
  xfs: bulkstat chunk-formatter has issues
  xfs: bulkstat chunk formatting cursor is broken
  xfs: bulkstat btree walk doesn't terminate
  mm: Fix comment before truncate_setsize()
  xfs: rework zero range to prevent invalid i_size updates
  mm: Remove false WARN_ON from pagecache_isize_extended()
  xfs: Check error during inode btree iteration in xfs_bulkstat()
  xfs: bulkstat doesn't release AGI buffer on error
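
The two mm: patches above concern how filesystems order i_size updates
against page cache fixups on a size change. Below is a hedged sketch of
that call pattern, loosely following what ext4-style setattr paths do;
only truncate_setsize() and pagecache_isize_extended() are the real mm
helpers, the myfs_* names are hypothetical:

	#include <linux/fs.h>
	#include <linux/mm.h>	/* truncate_setsize(), pagecache_isize_extended() */

	static void myfs_truncate_blocks(struct inode *inode, loff_t newsize);	/* hypothetical */

	/*
	 * Sketch only: the caller must hold whatever lock serializes
	 * truncates and writes -- usually i_mutex, but e.g. XFS uses a
	 * different lock, which is why the i_mutex WARN_ON is dropped
	 * from pagecache_isize_extended() in this series.
	 */
	static int myfs_setattr_size(struct inode *inode, loff_t newsize)
	{
		loff_t oldsize = i_size_read(inode);

		if (newsize > oldsize) {
			/* Extending: publish the new size first, then let
			 * the mm helper deal with the partial page at the
			 * old EOF. */
			i_size_write(inode, newsize);
			pagecache_isize_extended(inode, oldsize, newsize);
		} else {
			/* Shrinking: truncate_setsize() must run before any
			 * filesystem-specific block truncation, per the
			 * updated comment above truncate_setsize(). */
			truncate_setsize(inode, newsize);
			myfs_truncate_blocks(inode, newsize);
		}
		return 0;
	}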

Showing 4 changed files

fs/xfs/xfs_bmap_util.c
... ... @@ -1338,7 +1338,10 @@
1338 1338 goto out;
1339 1339 }
1340 1340  
1341   -
  1341 +/*
  1342 + * Preallocate and zero a range of a file. This mechanism has the allocation
  1343 + * semantics of fallocate and in addition converts data in the range to zeroes.
  1344 + */
1342 1345 int
1343 1346 xfs_zero_file_space(
1344 1347 struct xfs_inode *ip,
... ... @@ -1346,65 +1349,30 @@
1346 1349 xfs_off_t len)
1347 1350 {
1348 1351 struct xfs_mount *mp = ip->i_mount;
1349   - uint granularity;
1350   - xfs_off_t start_boundary;
1351   - xfs_off_t end_boundary;
  1352 + uint blksize;
1352 1353 int error;
1353 1354  
1354 1355 trace_xfs_zero_file_space(ip);
1355 1356  
1356   - granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
  1357 + blksize = 1 << mp->m_sb.sb_blocklog;
1357 1358  
1358 1359 /*
1359   - * Round the range of extents we are going to convert inwards. If the
1360   - * offset is aligned, then it doesn't get changed so we zero from the
1361   - * start of the block offset points to.
  1360 + * Punch a hole and prealloc the range. We use hole punch rather than
  1361 + * unwritten extent conversion for two reasons:
  1362 + *
  1363 + * 1.) Hole punch handles partial block zeroing for us.
  1364 + *
  1365 + * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
  1366 + * by virtue of the hole punch.
1362 1367 */
1363   - start_boundary = round_up(offset, granularity);
1364   - end_boundary = round_down(offset + len, granularity);
  1368 + error = xfs_free_file_space(ip, offset, len);
  1369 + if (error)
  1370 + goto out;
1365 1371  
1366   - ASSERT(start_boundary >= offset);
1367   - ASSERT(end_boundary <= offset + len);
1368   -
1369   - if (start_boundary < end_boundary - 1) {
1370   - /*
1371   - * Writeback the range to ensure any inode size updates due to
1372   - * appending writes make it to disk (otherwise we could just
1373   - * punch out the delalloc blocks).
1374   - */
1375   - error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1376   - start_boundary, end_boundary - 1);
1377   - if (error)
1378   - goto out;
1379   - truncate_pagecache_range(VFS_I(ip), start_boundary,
1380   - end_boundary - 1);
1381   -
1382   - /* convert the blocks */
1383   - error = xfs_alloc_file_space(ip, start_boundary,
1384   - end_boundary - start_boundary - 1,
1385   - XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
1386   - if (error)
1387   - goto out;
1388   -
1389   - /* We've handled the interior of the range, now for the edges */
1390   - if (start_boundary != offset) {
1391   - error = xfs_iozero(ip, offset, start_boundary - offset);
1392   - if (error)
1393   - goto out;
1394   - }
1395   -
1396   - if (end_boundary != offset + len)
1397   - error = xfs_iozero(ip, end_boundary,
1398   - offset + len - end_boundary);
1399   -
1400   - } else {
1401   - /*
1402   - * It's either a sub-granularity range or the range spanned lies
1403   - * partially across two adjacent blocks.
1404   - */
1405   - error = xfs_iozero(ip, offset, len);
1406   - }
1407   -
  1372 + error = xfs_alloc_file_space(ip, round_down(offset, blksize),
  1373 + round_up(offset + len, blksize) -
  1374 + round_down(offset, blksize),
  1375 + XFS_BMAPI_PREALLOC);
1408 1376 out:
1409 1377 return error;
1410 1378  
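
The hunk above reworks how XFS implements the zero range operation, which
userspace reaches through fallocate(2) with FALLOC_FL_ZERO_RANGE. A
minimal sketch of exercising that path (the zero_range() wrapper name is
hypothetical):

	#define _GNU_SOURCE
	#include <fcntl.h>		/* fallocate() */
	#include <stdio.h>
	#include <linux/falloc.h>	/* FALLOC_FL_ZERO_RANGE, FALLOC_FL_KEEP_SIZE */

	/* Zero [offset, offset + len) without changing the file size;
	 * ranges past EOF with KEEP_SIZE are where the stray i_size
	 * updates fixed by this series could show up. */
	static int zero_range(int fd, off_t offset, off_t len)
	{
		if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			      offset, len) < 0) {
			perror("fallocate(FALLOC_FL_ZERO_RANGE)");
			return -1;
		}
		return 0;
	}
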
fs/xfs/xfs_itable.c
... ... @@ -236,8 +236,10 @@
236 236 XFS_WANT_CORRUPTED_RETURN(stat == 1);
237 237  
238 238 /* Check if the record contains the inode in request */
239   - if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
240   - return -EINVAL;
  239 + if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
  240 + *icount = 0;
  241 + return 0;
  242 + }
241 243  
242 244 idx = agino - irec->ir_startino + 1;
243 245 if (idx < XFS_INODES_PER_CHUNK &&
... ... @@ -262,75 +264,76 @@
262 264  
263 265 #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
264 266  
  267 +struct xfs_bulkstat_agichunk {
  268 + char __user **ac_ubuffer;/* pointer into user's buffer */
  269 + int ac_ubleft; /* bytes left in user's buffer */
  270 + int ac_ubelem; /* spaces used in user's buffer */
  271 +};
  272 +
265 273 /*
266 274 * Process inodes in chunk with a pointer to a formatter function
267 275 * that will iget the inode and fill in the appropriate structure.
268 276 */
269   -int
  277 +static int
270 278 xfs_bulkstat_ag_ichunk(
271 279 struct xfs_mount *mp,
272 280 xfs_agnumber_t agno,
273 281 struct xfs_inobt_rec_incore *irbp,
274 282 bulkstat_one_pf formatter,
275 283 size_t statstruct_size,
276   - struct xfs_bulkstat_agichunk *acp)
  284 + struct xfs_bulkstat_agichunk *acp,
  285 + xfs_agino_t *last_agino)
277 286 {
278   - xfs_ino_t lastino = acp->ac_lastino;
279 287 char __user **ubufp = acp->ac_ubuffer;
280   - int ubleft = acp->ac_ubleft;
281   - int ubelem = acp->ac_ubelem;
282   - int chunkidx, clustidx;
  288 + int chunkidx;
283 289 int error = 0;
284   - xfs_agino_t agino;
  290 + xfs_agino_t agino = irbp->ir_startino;
285 291  
286   - for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
287   - XFS_BULKSTAT_UBLEFT(ubleft) &&
288   - irbp->ir_freecount < XFS_INODES_PER_CHUNK;
289   - chunkidx++, clustidx++, agino++) {
290   - int fmterror; /* bulkstat formatter result */
  292 + for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
  293 + chunkidx++, agino++) {
  294 + int fmterror;
291 295 int ubused;
292   - xfs_ino_t ino = XFS_AGINO_TO_INO(mp, agno, agino);
293 296  
294   - ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
  297 + /* inode won't fit in buffer, we are done */
  298 + if (acp->ac_ubleft < statstruct_size)
  299 + break;
295 300  
296 301 /* Skip if this inode is free */
297   - if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
298   - lastino = ino;
  302 + if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
299 303 continue;
300   - }
301 304  
302   - /*
303   - * Count used inodes as free so we can tell when the
304   - * chunk is used up.
305   - */
306   - irbp->ir_freecount++;
307   -
308 305 /* Get the inode and fill in a single buffer */
309 306 ubused = statstruct_size;
310   - error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
311   - if (fmterror == BULKSTAT_RV_NOTHING) {
312   - if (error && error != -ENOENT && error != -EINVAL) {
313   - ubleft = 0;
314   - break;
315   - }
316   - lastino = ino;
317   - continue;
318   - }
319   - if (fmterror == BULKSTAT_RV_GIVEUP) {
320   - ubleft = 0;
  307 + error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
  308 + *ubufp, acp->ac_ubleft, &ubused, &fmterror);
  309 +
  310 + if (fmterror == BULKSTAT_RV_GIVEUP ||
  311 + (error && error != -ENOENT && error != -EINVAL)) {
  312 + acp->ac_ubleft = 0;
321 313 ASSERT(error);
322 314 break;
323 315 }
324   - if (*ubufp)
325   - *ubufp += ubused;
326   - ubleft -= ubused;
327   - ubelem++;
328   - lastino = ino;
  316 +
  317 + /* be careful not to leak error if at end of chunk */
  318 + if (fmterror == BULKSTAT_RV_NOTHING || error) {
  319 + error = 0;
  320 + continue;
  321 + }
  322 +
  323 + *ubufp += ubused;
  324 + acp->ac_ubleft -= ubused;
  325 + acp->ac_ubelem++;
329 326 }
330 327  
331   - acp->ac_lastino = lastino;
332   - acp->ac_ubleft = ubleft;
333   - acp->ac_ubelem = ubelem;
  328 + /*
  329 + * Post-update *last_agino. At this point, agino will always point one
  330 + * inode past the last inode we processed successfully. Hence we
  331 + * subtract that inode when setting the *last_agino cursor so that we
  332 + * return the correct cookie to userspace. On the next bulkstat call,
  333 + * the inode under the lastino cookie will be skipped as we have already
  334 + * processed it here.
  335 + */
  336 + *last_agino = agino - 1;
334 337  
335 338 return error;
336 339 }
337 340  
... ... @@ -353,45 +356,33 @@
353 356 xfs_agino_t agino; /* inode # in allocation group */
354 357 xfs_agnumber_t agno; /* allocation group number */
355 358 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
356   - int end_of_ag; /* set if we've seen the ag end */
357   - int error; /* error code */
358   - int fmterror;/* bulkstat formatter result */
359   - int i; /* loop index */
360   - int icount; /* count of inodes good in irbuf */
361 359 size_t irbsize; /* size of irec buffer in bytes */
362   - xfs_ino_t ino; /* inode number (filesystem) */
363   - xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
364 360 xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
365   - xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
366   - xfs_ino_t lastino; /* last inode number returned */
367 361 int nirbuf; /* size of irbuf */
368   - int rval; /* return value error code */
369   - int tmp; /* result value from btree calls */
370 362 int ubcount; /* size of user's buffer */
371   - int ubleft; /* bytes left in user's buffer */
372   - char __user *ubufp; /* pointer into user's buffer */
373   - int ubelem; /* spaces used in user's buffer */
  363 + struct xfs_bulkstat_agichunk ac;
  364 + int error = 0;
374 365  
375 366 /*
376 367 * Get the last inode value, see if there's nothing to do.
377 368 */
378   - ino = (xfs_ino_t)*lastinop;
379   - lastino = ino;
380   - agno = XFS_INO_TO_AGNO(mp, ino);
381   - agino = XFS_INO_TO_AGINO(mp, ino);
  369 + agno = XFS_INO_TO_AGNO(mp, *lastinop);
  370 + agino = XFS_INO_TO_AGINO(mp, *lastinop);
382 371 if (agno >= mp->m_sb.sb_agcount ||
383   - ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
  372 + *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
384 373 *done = 1;
385 374 *ubcountp = 0;
386 375 return 0;
387 376 }
388 377  
389 378 ubcount = *ubcountp; /* statstruct's */
390   - ubleft = ubcount * statstruct_size; /* bytes */
391   - *ubcountp = ubelem = 0;
  379 + ac.ac_ubuffer = &ubuffer;
  380 + ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
  381 + ac.ac_ubelem = 0;
  382 +
  383 + *ubcountp = 0;
392 384 *done = 0;
393   - fmterror = 0;
394   - ubufp = ubuffer;
  385 +
395 386 irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
396 387 if (!irbuf)
397 388 return -ENOMEM;
... ... @@ -402,9 +393,13 @@
402 393 * Loop over the allocation groups, starting from the last
403 394 * inode returned; 0 means start of the allocation group.
404 395 */
405   - rval = 0;
406   - while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
407   - cond_resched();
  396 + while (agno < mp->m_sb.sb_agcount) {
  397 + struct xfs_inobt_rec_incore *irbp = irbuf;
  398 + struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf;
  399 + bool end_of_ag = false;
  400 + int icount = 0;
  401 + int stat;
  402 +
408 403 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
409 404 if (error)
410 405 break;
... ... @@ -414,10 +409,6 @@
414 409 */
415 410 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
416 411 XFS_BTNUM_INO);
417   - irbp = irbuf;
418   - irbufend = irbuf + nirbuf;
419   - end_of_ag = 0;
420   - icount = 0;
421 412 if (agino > 0) {
422 413 /*
423 414 * In the middle of an allocation group, we need to get
... ... @@ -427,22 +418,23 @@
427 418  
428 419 error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
429 420 if (error)
430   - break;
  421 + goto del_cursor;
431 422 if (icount) {
432 423 irbp->ir_startino = r.ir_startino;
433 424 irbp->ir_freecount = r.ir_freecount;
434 425 irbp->ir_free = r.ir_free;
435 426 irbp++;
436   - agino = r.ir_startino + XFS_INODES_PER_CHUNK;
437 427 }
438 428 /* Increment to the next record */
439   - error = xfs_btree_increment(cur, 0, &tmp);
  429 + error = xfs_btree_increment(cur, 0, &stat);
440 430 } else {
441 431 /* Start of ag. Lookup the first inode chunk */
442   - error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
  432 + error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
443 433 }
444   - if (error)
445   - break;
  434 + if (error || stat == 0) {
  435 + end_of_ag = true;
  436 + goto del_cursor;
  437 + }
446 438  
447 439 /*
448 440 * Loop through inode btree records in this ag,
... ... @@ -451,10 +443,10 @@
451 443 while (irbp < irbufend && icount < ubcount) {
452 444 struct xfs_inobt_rec_incore r;
453 445  
454   - error = xfs_inobt_get_rec(cur, &r, &i);
455   - if (error || i == 0) {
456   - end_of_ag = 1;
457   - break;
  446 + error = xfs_inobt_get_rec(cur, &r, &stat);
  447 + if (error || stat == 0) {
  448 + end_of_ag = true;
  449 + goto del_cursor;
458 450 }
... ... @@ -469,77 +461,79 @@
469 461 irbp++;
470 462 icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
471 463 }
472   - /*
473   - * Set agino to after this chunk and bump the cursor.
474   - */
475   - agino = r.ir_startino + XFS_INODES_PER_CHUNK;
476   - error = xfs_btree_increment(cur, 0, &tmp);
  464 + error = xfs_btree_increment(cur, 0, &stat);
  465 + if (error || stat == 0) {
  466 + end_of_ag = true;
  467 + goto del_cursor;
  468 + }
477 469 cond_resched();
478 470 }
  471 +
479 472 /*
480   - * Drop the btree buffers and the agi buffer.
481   - * We can't hold any of the locks these represent
482   - * when calling iget.
  473 + * Drop the btree buffers and the agi buffer as we can't hold any
  474 + * of the locks these represent when calling iget. If there is a
  475 + * pending error, then we are done.
483 476 */
  477 +del_cursor:
484 478 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
485 479 xfs_buf_relse(agbp);
  480 + if (error)
  481 + break;
486 482 /*
487   - * Now format all the good inodes into the user's buffer.
  483 + * Now format all the good inodes into the user's buffer. The
  484 + * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
  485 + * for the next loop iteration.
488 486 */
489 487 irbufend = irbp;
490 488 for (irbp = irbuf;
491   - irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
492   - struct xfs_bulkstat_agichunk ac;
493   -
494   - ac.ac_lastino = lastino;
495   - ac.ac_ubuffer = &ubuffer;
496   - ac.ac_ubleft = ubleft;
497   - ac.ac_ubelem = ubelem;
  489 + irbp < irbufend && ac.ac_ubleft >= statstruct_size;
  490 + irbp++) {
498 491 error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
499   - formatter, statstruct_size, &ac);
  492 + formatter, statstruct_size, &ac,
  493 + &agino);
500 494 if (error)
501   - rval = error;
  495 + break;
502 496  
503   - lastino = ac.ac_lastino;
504   - ubleft = ac.ac_ubleft;
505   - ubelem = ac.ac_ubelem;
506   -
507 497 cond_resched();
508 498 }
  499 +
509 500 /*
510   - * Set up for the next loop iteration.
  501 + * If we've run out of space or had a formatting error, we
  502 + * are now done
511 503 */
512   - if (XFS_BULKSTAT_UBLEFT(ubleft)) {
513   - if (end_of_ag) {
514   - agno++;
515   - agino = 0;
516   - } else
517   - agino = XFS_INO_TO_AGINO(mp, lastino);
518   - } else
  504 + if (ac.ac_ubleft < statstruct_size || error)
519 505 break;
  506 +
  507 + if (end_of_ag) {
  508 + agno++;
  509 + agino = 0;
  510 + }
520 511 }
521 512 /*
522 513 * Done, we're either out of filesystem or space to put the data.
523 514 */
524 515 kmem_free(irbuf);
525   - *ubcountp = ubelem;
  516 + *ubcountp = ac.ac_ubelem;
  517 +
526 518 /*
527   - * Found some inodes, return them now and return the error next time.
  519 + * We found some inodes, so clear the error status and return them.
  520 + * The lastino pointer will point directly at the inode that triggered
  521 + * any error that occurred, so on the next call the error will be
  522 + * triggered again and propagated to userspace as there will be no
  523 + * formatted inodes in the buffer.
528 524 */
529   - if (ubelem)
530   - rval = 0;
531   - if (agno >= mp->m_sb.sb_agcount) {
532   - /*
533   - * If we ran out of filesystem, mark lastino as off
534   - * the end of the filesystem, so the next call
535   - * will return immediately.
536   - */
537   - *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
  525 + if (ac.ac_ubelem)
  526 + error = 0;
  527 +
  528 + /*
  529 + * If we ran out of filesystem, lastino will point off the end of
  530 + * the filesystem so the next call will return immediately.
  531 + */
  532 + *lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
  533 + if (agno >= mp->m_sb.sb_agcount)
538 534 *done = 1;
539   - } else
540   - *lastinop = (xfs_ino_t)lastino;
541 535  
542   - return rval;
  536 + return error;
543 537 }
544 538  
545 539 int
fs/xfs/xfs_itable.h
... ... @@ -30,22 +30,6 @@
30 30 int *ubused,
31 31 int *stat);
32 32  
33   -struct xfs_bulkstat_agichunk {
34   - xfs_ino_t ac_lastino; /* last inode returned */
35   - char __user **ac_ubuffer;/* pointer into user's buffer */
36   - int ac_ubleft; /* bytes left in user's buffer */
37   - int ac_ubelem; /* spaces used in user's buffer */
38   -};
39   -
40   -int
41   -xfs_bulkstat_ag_ichunk(
42   - struct xfs_mount *mp,
43   - xfs_agnumber_t agno,
44   - struct xfs_inobt_rec_incore *irbp,
45   - bulkstat_one_pf formatter,
46   - size_t statstruct_size,
47   - struct xfs_bulkstat_agichunk *acp);
48   -
49 33 /*
50 34 * Values for stat return value.
51 35 */
mm/truncate.c
... ... @@ -715,8 +715,9 @@
715 715 * necessary) to @newsize. It will typically be called from the filesystem's
716 716 * setattr function when ATTR_SIZE is passed in.
717 717 *
718   - * Must be called with inode_mutex held and before all filesystem specific
719   - * block truncation has been performed.
  718 + * Must be called with a lock serializing truncates and writes (generally
  719 + * i_mutex but e.g. xfs uses a different lock) and before all filesystem
  720 + * specific block truncation has been performed.
720 721 */
721 722 void truncate_setsize(struct inode *inode, loff_t newsize)
722 723 {
... ... @@ -755,7 +756,6 @@
755 756 struct page *page;
756 757 pgoff_t index;
757 758  
758   - WARN_ON(!mutex_is_locked(&inode->i_mutex));
759 759 WARN_ON(to > inode->i_size);
760 760  
761 761 if (from >= to || bsize == PAGE_CACHE_SIZE)