Commit 5f79ed685fc6122018c4b5826e2e5bdb7bc6f109

Authored by Christoph Hellwig
Committed by Felix Blyakher
1 parent b9ec9068d7

xfs: a couple getbmap cleanups

- reshuffle various conditionals for data vs attr fork to make the code
  more readable
- do fine-grained goto-based error handling (see the sketch after this list)
- exit early from conditionals instead of keeping a long else branch around
- allow kmem_alloc to fail
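
The cleanups follow two recurring kernel patterns: unwind through stacked goto labels so that each exit path releases exactly what has already been acquired, and treat the allocation as fallible (KM_MAYFAIL) instead of sleeping until it succeeds (KM_SLEEP). The sketch below is not the XFS code itself, just a minimal userspace illustration of that shape; demo_getmap(), struct demo_map and the inject_error flag are invented names, with calloc() and a pthread mutex standing in for kmem_alloc() and the inode locks.

/*
 * Minimal userspace sketch (hypothetical names, not kernel code) of the
 * goto-based unwinding and "allocation may fail" patterns used above.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_map {
        long    start;
        long    len;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_getmap(size_t nmaps, int inject_error)
{
        struct demo_map *map;
        size_t          i;
        int             error;

        if (nmaps == 0)
                return EINVAL;          /* nothing acquired yet: plain return */

        pthread_mutex_lock(&demo_lock); /* first resource, like xfs_ilock() */

        error = ENOMEM;
        map = calloc(nmaps, sizeof(*map));      /* second resource, may fail */
        if (!map)
                goto out_unlock;        /* only the lock needs undoing */

        error = 0;
        for (i = 0; i < nmaps; i++) {
                if (inject_error) {     /* e.g. the formatter callback failed */
                        error = EFAULT;
                        goto out_free_map;      /* map and lock need undoing */
                }
                map[i].start = (long)i;
                map[i].len = 1;
        }

 out_free_map:
        free(map);
 out_unlock:
        pthread_mutex_unlock(&demo_lock);
        return error;
}

int main(void)
{
        printf("success path returns %d\n", demo_getmap(16, 0));
        printf("error path returns   %d\n", demo_getmap(16, 1));
        return 0;
}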

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
Reviewed-by: Felix Blyakher <felixb@sgi.com>
Signed-off-by: Felix Blyakher <felixb@sgi.com>

Showing 1 changed file with 79 additions and 83 deletions

@@ -5880,7 +5880,7 @@
         void            *arg)           /* formatter arg */
 {
         __int64_t       bmvend;         /* last block requested */
-        int             error;          /* return value */
+        int             error = 0;      /* return value */
         __int64_t       fixlen;         /* length for -1 case */
         int             i;              /* extent number */
         int             lock;           /* lock state */
@@ -5899,30 +5899,8 @@
 
         mp = ip->i_mount;
         iflags = bmv->bmv_iflags;
-
         whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
 
-        /* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not
-         * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ
-         * bit is set for the file, generate a read event in order
-         * that the DMAPI application may do its thing before we return
-         * the extents. Usually this means restoring user file data to
-         * regions of the file that look like holes.
-         *
-         * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
-         * BMV_IF_NO_DMAPI_READ so that read events are generated.
-         * If this were not true, callers of ioctl( XFS_IOC_GETBMAP )
-         * could misinterpret holes in a DMAPI file as true holes,
-         * when in fact they may represent offline user data.
-         */
-        if ((iflags & BMV_IF_NO_DMAPI_READ) == 0 &&
-            DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
-            whichfork == XFS_DATA_FORK) {
-                error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
-                if (error)
-                        return XFS_ERROR(error);
-        }
-
         if (whichfork == XFS_ATTR_FORK) {
                 if (XFS_IFORK_Q(ip)) {
                         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
@@ -5936,11 +5914,37 @@
                                                  ip->i_mount);
                         return XFS_ERROR(EFSCORRUPTED);
                 }
-        } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-                   ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-                   ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
-                return XFS_ERROR(EINVAL);
-        if (whichfork == XFS_DATA_FORK) {
+
+                prealloced = 0;
+                fixlen = 1LL << 32;
+        } else {
+                /*
+                 * If the BMV_IF_NO_DMAPI_READ interface bit specified, do
+                 * not generate a DMAPI read event. Otherwise, if the
+                 * DM_EVENT_READ bit is set for the file, generate a read
+                 * event in order that the DMAPI application may do its thing
+                 * before we return the extents. Usually this means restoring
+                 * user file data to regions of the file that look like holes.
+                 *
+                 * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
+                 * BMV_IF_NO_DMAPI_READ so that read events are generated.
+                 * If this were not true, callers of ioctl(XFS_IOC_GETBMAP)
+                 * could misinterpret holes in a DMAPI file as true holes,
+                 * when in fact they may represent offline user data.
+                 */
+                if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
+                    !(iflags & BMV_IF_NO_DMAPI_READ)) {
+                        error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip,
+                                              0, 0, 0, NULL);
+                        if (error)
+                                return XFS_ERROR(error);
+                }
+
+                if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
+                    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
+                    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+                        return XFS_ERROR(EINVAL);
+
                 if (xfs_get_extsz_hint(ip) ||
                     ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
                         prealloced = 1;
@@ -5949,43 +5953,35 @@
                         prealloced = 0;
                         fixlen = ip->i_size;
                 }
-        } else {
-                prealloced = 0;
-                fixlen = 1LL << 32;
         }
 
         if (bmv->bmv_length == -1) {
                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
-                bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset),
-                                        (__int64_t)0);
-        } else if (bmv->bmv_length < 0)
-                return XFS_ERROR(EINVAL);
-        if (bmv->bmv_length == 0) {
+                bmv->bmv_length =
+                        max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
+        } else if (bmv->bmv_length == 0) {
                 bmv->bmv_entries = 0;
                 return 0;
+        } else if (bmv->bmv_length < 0) {
+                return XFS_ERROR(EINVAL);
         }
+
         nex = bmv->bmv_count - 1;
         if (nex <= 0)
                 return XFS_ERROR(EINVAL);
         bmvend = bmv->bmv_offset + bmv->bmv_length;
 
         xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
-        if (((iflags & BMV_IF_DELALLOC) == 0) &&
-            (whichfork == XFS_DATA_FORK) &&
-            (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) {
-                /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
-                error = xfs_flush_pages(ip, (xfs_off_t)0,
-                                        -1, 0, FI_REMAPF);
-                if (error) {
-                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-                        return error;
+        if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
+                if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
+                        error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
+                        if (error)
+                                goto out_unlock_iolock;
                 }
+
+                ASSERT(ip->i_delayed_blks == 0);
         }
 
-        ASSERT(whichfork == XFS_ATTR_FORK || (iflags & BMV_IF_DELALLOC) ||
-                ip->i_delayed_blks == 0);
-
         lock = xfs_ilock_map_shared(ip);
 
         /*
@@ -5995,23 +5991,25 @@
         if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
                 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
 
-        bmapi_flags = xfs_bmapi_aflag(whichfork) |
-                        ((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE);
+        bmapi_flags = xfs_bmapi_aflag(whichfork);
+        if (!(iflags & BMV_IF_PREALLOC))
+                bmapi_flags |= XFS_BMAPI_IGSTATE;
 
         /*
          * Allocate enough space to handle "subnex" maps at a time.
          */
+        error = ENOMEM;
         subnex = 16;
-        map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
+        map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL);
+        if (!map)
+                goto out_unlock_ilock;
 
         bmv->bmv_entries = 0;
 
-        if ((XFS_IFORK_NEXTENTS(ip, whichfork) == 0)) {
-                if (((iflags & BMV_IF_DELALLOC) == 0) ||
-                    whichfork == XFS_ATTR_FORK) {
-                        error = 0;
-                        goto unlock_and_return;
-                }
+        if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
+            (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
+                error = 0;
+                goto out_free_map;
         }
 
         nexleft = nex;
@@ -6023,10 +6021,12 @@
                                 bmapi_flags, NULL, 0, map, &nmap,
                                 NULL, NULL);
                 if (error)
-                        goto unlock_and_return;
+                        goto out_free_map;
                 ASSERT(nmap <= subnex);
 
                 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+                        int full = 0;   /* user array is full */
+
                         out.bmv_oflags = 0;
                         if (map[i].br_state == XFS_EXT_UNWRITTEN)
                                 out.bmv_oflags |= BMV_OF_PREALLOC;
@@ -6041,36 +6041,32 @@
                             whichfork == XFS_ATTR_FORK) {
                                 /* came to the end of attribute fork */
                                 out.bmv_oflags |= BMV_OF_LAST;
-                                goto unlock_and_return;
-                        } else {
-                                int full = 0;   /* user array is full */
+                                goto out_free_map;
+                        }
 
-                                if (!xfs_getbmapx_fix_eof_hole(ip, &out,
-                                                        prealloced, bmvend,
-                                                        map[i].br_startblock)) {
-                                        goto unlock_and_return;
-                                }
+                        if (!xfs_getbmapx_fix_eof_hole(ip, &out, prealloced,
+                                        bmvend, map[i].br_startblock))
+                                goto out_free_map;
 
-                                /* format results & advance arg */
-                                error = formatter(&arg, &out, &full);
-                                if (error || full)
-                                        goto unlock_and_return;
-                                nexleft--;
-                                bmv->bmv_offset =
-                                        out.bmv_offset + out.bmv_length;
-                                bmv->bmv_length = MAX((__int64_t)0,
-                                        (__int64_t)(bmvend - bmv->bmv_offset));
-                                bmv->bmv_entries++;
-                        }
+                        /* format results & advance arg */
+                        error = formatter(&arg, &out, &full);
+                        if (error || full)
+                                goto out_free_map;
+                        nexleft--;
+                        bmv->bmv_offset =
+                                out.bmv_offset + out.bmv_length;
+                        bmv->bmv_length =
+                                max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
+                        bmv->bmv_entries++;
                 }
         } while (nmap && nexleft && bmv->bmv_length);
 
-unlock_and_return:
+ out_free_map:
+        kmem_free(map);
+ out_unlock_ilock:
         xfs_iunlock_map_shared(ip, lock);
+ out_unlock_iolock:
         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
-        kmem_free(map);
-
         return error;
 }
 