Commit 60c52e3a72fda10e82f38b6f979956eb2dcb3d4e

Authored by Peng Tao
Committed by Trond Myklebust
1 parent 74a6eeb44c

pnfsblock: cleanup bl_mark_sectors_init

It does not need to operate on partially initialized blocks;
the writeback code takes care of that.

Signed-off-by: Peng Tao <peng_tao@emc.com>
Signed-off-by: Benny Halevy <bhalevy@tonian.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>

Showing 3 changed files with 8 additions and 77 deletions

fs/nfs/blocklayout/blocklayout.c
... ... @@ -577,8 +577,7 @@
577 577 unlock_page(page);
578 578  
579 579 ret = bl_mark_sectors_init(be->be_inval, isect,
580   - PAGE_CACHE_SECTORS,
581   - NULL);
  580 + PAGE_CACHE_SECTORS);
582 581 if (unlikely(ret)) {
583 582 dprintk("%s bl_mark_sectors_init fail %d\n",
584 583 __func__, ret);
... ... @@ -627,8 +626,7 @@
627 626 }
628 627 if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
629 628 ret = bl_mark_sectors_init(be->be_inval, isect,
630   - PAGE_CACHE_SECTORS,
631   - NULL);
  629 + PAGE_CACHE_SECTORS);
632 630 if (unlikely(ret)) {
633 631 dprintk("%s bl_mark_sectors_init fail %d\n",
634 632 __func__, ret);
fs/nfs/blocklayout/blocklayout.h
... ... @@ -186,8 +186,7 @@
186 186 bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
187 187 struct pnfs_block_extent **cow_read);
188 188 int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
189   - sector_t offset, sector_t length,
190   - sector_t **pages);
  189 + sector_t offset, sector_t length);
191 190 void bl_put_extent(struct pnfs_block_extent *be);
192 191 struct pnfs_block_extent *bl_alloc_extent(void);
193 192 int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect);
fs/nfs/blocklayout/extents.c
... ... @@ -174,33 +174,6 @@
174 174 return status;
175 175 }
176 176  
177   -static void set_needs_init(sector_t *array, sector_t offset)
178   -{
179   - sector_t *p = array;
180   -
181   - dprintk("%s enter\n", __func__);
182   - if (!p)
183   - return;
184   - while (*p < offset)
185   - p++;
186   - if (*p == offset)
187   - return;
188   - else if (*p == ~0) {
189   - *p++ = offset;
190   - *p = ~0;
191   - return;
192   - } else {
193   - sector_t *save = p;
194   - dprintk("%s Adding %llu\n", __func__, (u64)offset);
195   - while (*p != ~0)
196   - p++;
197   - p++;
198   - memmove(save + 1, save, (char *)p - (char *)save);
199   - *save = offset;
200   - return;
201   - }
202   -}
203   -
204 177 /* We are relying on page lock to serialize this */
205 178 int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect)
206 179 {
... ... @@ -256,28 +229,15 @@
256 229  
257 230 /* Marks sectors in [offest, offset_length) as having been initialized.
258 231 * All lengths are step-aligned, where step is min(pagesize, blocksize).
259   - * Notes where partial block is initialized, and helps prepare it for
260   - * complete initialization later.
  232 + * Currently assumes offset is page-aligned
261 233 */
262   -/* Currently assumes offset is page-aligned */
263 234 int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
264   - sector_t offset, sector_t length,
265   - sector_t **pages)
  235 + sector_t offset, sector_t length)
266 236 {
267   - sector_t s, start, end;
268   - sector_t *array = NULL; /* Pages to mark */
  237 + sector_t start, end;
269 238  
270 239 dprintk("%s(offset=%llu,len=%llu) enter\n",
271 240 __func__, (u64)offset, (u64)length);
272   - s = max((sector_t) 3,
273   - 2 * (marks->im_block_size / (PAGE_CACHE_SECTORS)));
274   - dprintk("%s set max=%llu\n", __func__, (u64)s);
275   - if (pages) {
276   - array = kmalloc(s * sizeof(sector_t), GFP_NOFS);
277   - if (!array)
278   - goto outerr;
279   - array[0] = ~0;
280   - }
281 241  
282 242 start = normalize(offset, marks->im_block_size);
283 243 end = normalize_up(offset + length, marks->im_block_size);
... ... @@ -285,41 +245,15 @@
285 245 goto outerr;
286 246  
287 247 spin_lock(&marks->im_lock);
288   -
289   - for (s = normalize_up(start, PAGE_CACHE_SECTORS);
290   - s < offset; s += PAGE_CACHE_SECTORS) {
291   - dprintk("%s pre-area pages\n", __func__);
292   - /* Portion of used block is not initialized */
293   - if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED))
294   - set_needs_init(array, s);
295   - }
296 248 if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length))
297 249 goto out_unlock;
298   - for (s = normalize_up(offset + length, PAGE_CACHE_SECTORS);
299   - s < end; s += PAGE_CACHE_SECTORS) {
300   - dprintk("%s post-area pages\n", __func__);
301   - if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED))
302   - set_needs_init(array, s);
303   - }
304   -
305 250 spin_unlock(&marks->im_lock);
306 251  
307   - if (pages) {
308   - if (array[0] == ~0) {
309   - kfree(array);
310   - *pages = NULL;
311   - } else
312   - *pages = array;
313   - }
314 252 return 0;
315 253  
316   - out_unlock:
  254 +out_unlock:
317 255 spin_unlock(&marks->im_lock);
318   - outerr:
319   - if (pages) {
320   - kfree(array);
321   - *pages = NULL;
322   - }
  256 +outerr:
323 257 return -ENOMEM;
324 258 }
325 259
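
For context on what the removed bookkeeping did: the deleted set_needs_init() helper recorded the page offsets of partially initialized blocks by inserting them, sorted and de-duplicated, into a ~0-terminated scratch array that bl_mark_sectors_init() handed back through its pages argument. The standalone userspace sketch below reproduces that insertion logic, adapted from the removed hunk in fs/nfs/blocklayout/extents.c (dprintk calls dropped, ~0 spelled as SENTINEL); the SENTINEL macro, the fixed array sizing, and main() are a hypothetical harness for illustration only, not kernel code.

/*
 * Userspace sketch (illustration only) of the sorted-insert bookkeeping
 * that this cleanup deletes.  set_needs_init() is adapted from the
 * removed hunk; the harness around it is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t sector_t;
#define SENTINEL (~(sector_t)0)        /* the kernel code wrote this as ~0 */

/* Sorted, de-duplicated insert into a SENTINEL-terminated array. */
static void set_needs_init(sector_t *array, sector_t offset)
{
        sector_t *p = array;

        if (!p)
                return;
        while (*p < offset)            /* skip entries below offset */
                p++;
        if (*p == offset)              /* already recorded */
                return;
        if (*p == SENTINEL) {          /* hit the terminator: append */
                *p++ = offset;
                *p = SENTINEL;
                return;
        }
        /* insert in the middle: shift the tail, sentinel included, up by one */
        {
                sector_t *save = p;

                while (*p != SENTINEL)
                        p++;
                p++;                   /* the move includes the sentinel slot */
                memmove(save + 1, save, (char *)p - (char *)save);
                *save = offset;
        }
}

int main(void)
{
        /*
         * The deleted caller sized its kmalloc as
         * max(3, 2 * (im_block_size / PAGE_CACHE_SECTORS)) entries;
         * eight slots is plenty for this demonstration.
         */
        sector_t array[8] = { SENTINEL };

        set_needs_init(array, 16);
        set_needs_init(array, 8);
        set_needs_init(array, 16);     /* duplicate is ignored */
        set_needs_init(array, 24);

        for (sector_t *p = array; *p != SENTINEL; p++)
                printf("page at sector %llu needs init\n",
                       (unsigned long long)*p);
        return 0;
}

This collection step is exactly what the patch drops: bl_mark_sectors_init() no longer gathers these offsets for its caller, since, per the commit message, the writeback path already handles partially initialized blocks.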