Commit 99600051b04bc4ec8bd4d16a8bf993ca54042db6

Authored by Namjae Jeon
Committed by Jan Kara
1 parent 9734c971aa

udf: add extent cache support in case of file reading

This patch implements extent caching for file reads.
Currently, while reading a file, UDF walks the extent metadata
serially from the start of the file, which takes a long time when the
file has many extents. Caching the last accessed extent improves
metadata read time: instead of walking the file's metadata from the
beginning, we resume from the cached extent.

This patch considerably reduces the CPU time spent in kernel mode.
For example, while reading a 10.9 GB file using dd:
Time before applying patch:
11677022208 bytes (10.9GB) copied, 1529.748921 seconds, 7.3MB/s
real    25m 29.85s
user    0m 12.41s
sys     15m 34.75s

Time after applying patch:
11677022208 bytes (10.9GB) copied, 1469.338231 seconds, 7.6MB/s
real    24m 29.44s
user    0m 15.73s
sys     3m 27.61s

[JK: Fix bh refcounting issues, simplify initialization]

Signed-off-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Ashish Sangwan <a.sangwan@samsung.com>
Signed-off-by: Bonggil Bak <bgbak@samsung.com>
Signed-off-by: Jan Kara <jack@suse.cz>

Showing 4 changed files with 98 additions and 11 deletions

fs/udf/inode.c
... ... @@ -67,7 +67,75 @@
67 67 struct extent_position *);
68 68 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
69 69  
  70 +static void __udf_clear_extent_cache(struct inode *inode)
  71 +{
  72 + struct udf_inode_info *iinfo = UDF_I(inode);
70 73  
  74 + if (iinfo->cached_extent.lstart != -1) {
  75 + brelse(iinfo->cached_extent.epos.bh);
  76 + iinfo->cached_extent.lstart = -1;
  77 + }
  78 +}
  79 +
  80 +/* Invalidate extent cache */
  81 +static void udf_clear_extent_cache(struct inode *inode)
  82 +{
  83 + struct udf_inode_info *iinfo = UDF_I(inode);
  84 +
  85 + spin_lock(&iinfo->i_extent_cache_lock);
  86 + __udf_clear_extent_cache(inode);
  87 + spin_unlock(&iinfo->i_extent_cache_lock);
  88 +}
  89 +
  90 +/* Return contents of extent cache */
  91 +static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
  92 + loff_t *lbcount, struct extent_position *pos)
  93 +{
  94 + struct udf_inode_info *iinfo = UDF_I(inode);
  95 + int ret = 0;
  96 +
  97 + spin_lock(&iinfo->i_extent_cache_lock);
  98 + if ((iinfo->cached_extent.lstart <= bcount) &&
  99 + (iinfo->cached_extent.lstart != -1)) {
  100 + /* Cache hit */
  101 + *lbcount = iinfo->cached_extent.lstart;
  102 + memcpy(pos, &iinfo->cached_extent.epos,
  103 + sizeof(struct extent_position));
  104 + if (pos->bh)
  105 + get_bh(pos->bh);
  106 + ret = 1;
  107 + }
  108 + spin_unlock(&iinfo->i_extent_cache_lock);
  109 + return ret;
  110 +}
  111 +
  112 +/* Add extent to extent cache */
  113 +static void udf_update_extent_cache(struct inode *inode, loff_t estart,
  114 + struct extent_position *pos, int next_epos)
  115 +{
  116 + struct udf_inode_info *iinfo = UDF_I(inode);
  117 +
  118 + spin_lock(&iinfo->i_extent_cache_lock);
  119 + /* Invalidate previously cached extent */
  120 + __udf_clear_extent_cache(inode);
  121 + if (pos->bh)
  122 + get_bh(pos->bh);
  123 + memcpy(&iinfo->cached_extent.epos, pos,
  124 + sizeof(struct extent_position));
  125 + iinfo->cached_extent.lstart = estart;
  126 + if (next_epos)
  127 + switch (iinfo->i_alloc_type) {
  128 + case ICBTAG_FLAG_AD_SHORT:
  129 + iinfo->cached_extent.epos.offset -=
  130 + sizeof(struct short_ad);
  131 + break;
  132 + case ICBTAG_FLAG_AD_LONG:
  133 + iinfo->cached_extent.epos.offset -=
  134 + sizeof(struct long_ad);
  135 + }
  136 + spin_unlock(&iinfo->i_extent_cache_lock);
  137 +}
  138 +
71 139 void udf_evict_inode(struct inode *inode)
72 140 {
73 141 struct udf_inode_info *iinfo = UDF_I(inode);
... ... @@ -90,6 +158,7 @@
90 158 }
91 159 kfree(iinfo->i_ext.i_data);
92 160 iinfo->i_ext.i_data = NULL;
  161 + udf_clear_extent_cache(inode);
93 162 if (want_delete) {
94 163 udf_free_inode(inode);
95 164 }
... ... @@ -105,6 +174,7 @@
105 174 truncate_pagecache(inode, to, isize);
106 175 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
107 176 down_write(&iinfo->i_data_sem);
  177 + udf_clear_extent_cache(inode);
108 178 udf_truncate_extents(inode);
109 179 up_write(&iinfo->i_data_sem);
110 180 }
... ... @@ -372,7 +442,7 @@
372 442 iinfo->i_next_alloc_goal++;
373 443 }
374 444  
375   -
  445 + udf_clear_extent_cache(inode);
376 446 phys = inode_getblk(inode, block, &err, &new);
377 447 if (!phys)
378 448 goto abort;
... ... @@ -1171,6 +1241,7 @@
1171 1241 } else {
1172 1242 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1173 1243 down_write(&iinfo->i_data_sem);
  1244 + udf_clear_extent_cache(inode);
1174 1245 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
1175 1246 0x00, bsize - newsize -
1176 1247 udf_file_entry_alloc_offset(inode));
... ... @@ -1184,6 +1255,7 @@
1184 1255 if (err)
1185 1256 return err;
1186 1257 down_write(&iinfo->i_data_sem);
  1258 + udf_clear_extent_cache(inode);
1187 1259 truncate_setsize(inode, newsize);
1188 1260 udf_truncate_extents(inode);
1189 1261 up_write(&iinfo->i_data_sem);
1190 1262  
... ... @@ -2156,11 +2228,12 @@
2156 2228 struct udf_inode_info *iinfo;
2157 2229  
2158 2230 iinfo = UDF_I(inode);
2159   - pos->offset = 0;
2160   - pos->block = iinfo->i_location;
2161   - pos->bh = NULL;
  2231 + if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
  2232 + pos->offset = 0;
  2233 + pos->block = iinfo->i_location;
  2234 + pos->bh = NULL;
  2235 + }
2162 2236 *elen = 0;
2163   -
2164 2237 do {
2165 2238 etype = udf_next_aext(inode, pos, eloc, elen, 1);
2166 2239 if (etype == -1) {
... ... @@ -2170,7 +2243,8 @@
2170 2243 }
2171 2244 lbcount += *elen;
2172 2245 } while (lbcount <= bcount);
2173   -
  2246 + /* update extent cache */
  2247 + udf_update_extent_cache(inode, lbcount - *elen, pos, 1);
2174 2248 *offset = (bcount + *elen - lbcount) >> blocksize_bits;
2175 2249  
2176 2250 return etype;
fs/udf/super.c
... ... @@ -134,6 +134,8 @@
134 134 ei->i_next_alloc_goal = 0;
135 135 ei->i_strat4096 = 0;
136 136 init_rwsem(&ei->i_data_sem);
  137 + ei->cached_extent.lstart = -1;
  138 + spin_lock_init(&ei->i_extent_cache_lock);
137 139  
138 140 return &ei->vfs_inode;
139 141 }
fs/udf/udf_i.h
1 1 #ifndef _UDF_I_H
2 2 #define _UDF_I_H
3 3  
  4 +struct extent_position {
  5 + struct buffer_head *bh;
  6 + uint32_t offset;
  7 + struct kernel_lb_addr block;
  8 +};
  9 +
  10 +struct udf_ext_cache {
  11 + /* Extent position */
  12 + struct extent_position epos;
  13 + /* Start logical offset in bytes */
  14 + loff_t lstart;
  15 +};
  16 +
4 17 /*
5 18 * The i_data_sem and i_mutex serve for protection of allocation information
6 19 * of a regular files and symlinks. This includes all extents belonging to
... ... @@ -35,6 +48,9 @@
35 48 __u8 *i_data;
36 49 } i_ext;
37 50 struct rw_semaphore i_data_sem;
  51 + struct udf_ext_cache cached_extent;
  52 + /* Spinlock for protecting extent cache */
  53 + spinlock_t i_extent_cache_lock;
38 54 struct inode vfs_inode;
39 55 };
40 56  
fs/udf/udfdecl.h
... ... @@ -113,11 +113,6 @@
113 113 uint8_t u_len;
114 114 };
115 115  
116   -struct extent_position {
117   - struct buffer_head *bh;
118   - uint32_t offset;
119   - struct kernel_lb_addr block;
120   -};
121 116  
122 117 /* super.c */
123 118