Commit 4899f9c852564ce7b6d0ca932ac6674bf471fd28
Committed by Linus Torvalds
1 parent a20fa20c54
nfs: convert to new aops
[akpm@linux-foundation.org: fix against git-nfs]
[peterz@infradead.org: fix against git-nfs]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
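For context, the conversion swaps one pair of address_space_operations hooks for another. A rough sketch of the two interfaces, with signatures as declared in struct address_space_operations around this kernel series (not part of this change):

	/*
	 * Old interface: the generic write path allocates and locks the
	 * page itself, then calls into the filesystem before and after
	 * copying data from user space.
	 */
	int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
	int (*commit_write)(struct file *, struct page *, unsigned, unsigned);

	/*
	 * New interface: the filesystem finds and locks the page in
	 * ->write_begin, and must unlock and release it in ->write_end,
	 * returning the number of bytes it accepted.
	 */
	int (*write_begin)(struct file *, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata);
	int (*write_end)(struct file *, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct page *page, void *fsdata);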
Showing 1 changed file with 60 additions and 20 deletions
fs/nfs/file.c
@@ -306,27 +306,50 @@
 }
 
 /*
- * This does the "real" work of the write. The generic routine has
- * allocated the page, locked it, done all the page alignment stuff
- * calculations etc. Now we should just copy the data from user
- * space and write it back to the real medium..
+ * This does the "real" work of the write. We must allocate and lock the
+ * page to be sent back to the generic routine, which then copies the
+ * data from user space.
  *
  * If the writer ends up delaying the write, the writer needs to
  * increment the page use counts until he is done with the page.
  */
-static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int nfs_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata)
 {
-	return nfs_flush_incompatible(file, page);
+	int ret;
+	pgoff_t index;
+	struct page *page;
+	index = pos >> PAGE_CACHE_SHIFT;
+
+	page = __grab_cache_page(mapping, index);
+	if (!page)
+		return -ENOMEM;
+	*pagep = page;
+
+	ret = nfs_flush_incompatible(file, page);
+	if (ret) {
+		unlock_page(page);
+		page_cache_release(page);
+	}
+	return ret;
 }
 
-static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int nfs_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata)
 {
-	long status;
+	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+	int status;
 
 	lock_kernel();
-	status = nfs_updatepage(file, page, offset, to-offset);
+	status = nfs_updatepage(file, page, offset, copied);
 	unlock_kernel();
-	return status;
+
+	unlock_page(page);
+	page_cache_release(page);
+
+	return status < 0 ? status : copied;
 }
 
 static void nfs_invalidate_page(struct page *page, unsigned long offset)
@@ -354,8 +377,8 @@
 	.set_page_dirty = __set_page_dirty_nobuffers,
 	.writepage = nfs_writepage,
 	.writepages = nfs_writepages,
-	.prepare_write = nfs_prepare_write,
-	.commit_write = nfs_commit_write,
+	.write_begin = nfs_write_begin,
+	.write_end = nfs_write_end,
 	.invalidatepage = nfs_invalidate_page,
 	.releasepage = nfs_release_page,
 #ifdef CONFIG_NFS_DIRECTIO
@@ -369,18 +392,35 @@
 	struct file *filp = vma->vm_file;
 	unsigned pagelen;
 	int ret = -EINVAL;
+	void *fsdata;
+	struct address_space *mapping;
+	loff_t offset;
 
 	lock_page(page);
-	if (page->mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping)
-		goto out_unlock;
+	mapping = page->mapping;
+	if (mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping) {
+		unlock_page(page);
+		return -EINVAL;
+	}
 	pagelen = nfs_page_length(page);
-	if (pagelen == 0)
-		goto out_unlock;
-	ret = nfs_prepare_write(filp, page, 0, pagelen);
-	if (!ret)
-		ret = nfs_commit_write(filp, page, 0, pagelen);
-out_unlock:
+	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	unlock_page(page);
+
+	/*
+	 * we can use mapping after releasing the page lock, because:
+	 * we hold mmap_sem on the fault path, which should pin the vma
+	 * which should pin the file, which pins the dentry which should
+	 * hold a reference on inode.
+	 */
+
+	if (pagelen) {
+		struct page *page2 = NULL;
+		ret = nfs_write_begin(filp, mapping, offset, pagelen,
+				0, &page2, &fsdata);
+		if (!ret)
+			ret = nfs_write_end(filp, mapping, offset, pagelen,
+					pagelen, page2, fsdata);
+	}
 	return ret;
 }
 
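For reference, a simplified sketch of how a generic buffered-write loop drives the new hooks, loosely modelled on the mm/filemap.c path of this series; fault handling and short-copy retries are omitted, and the function name perform_write_sketch is illustrative only. It shows why nfs_write_begin must return a locked page and why nfs_write_end must unlock and release it:

	/*
	 * Sketch of the generic buffered-write loop (not the actual
	 * mm/filemap.c code): write_begin, copy from user space,
	 * write_end, repeat until the iterator is drained.
	 */
	static ssize_t perform_write_sketch(struct file *file,
					    struct iov_iter *i, loff_t pos)
	{
		struct address_space *mapping = file->f_mapping;
		const struct address_space_operations *a_ops = mapping->a_ops;
		ssize_t written = 0;

		while (iov_iter_count(i)) {
			struct page *page;
			void *fsdata;
			unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
			unsigned bytes = min_t(unsigned long,
					PAGE_CACHE_SIZE - offset,
					iov_iter_count(i));
			unsigned copied;
			int status;

			/* Filesystem finds and locks the page (nfs_write_begin). */
			status = a_ops->write_begin(file, mapping, pos, bytes,
						    0, &page, &fsdata);
			if (status)
				return status;

			/*
			 * Copy user data into the locked page; the real code
			 * does this with pagefaults disabled and retries
			 * short copies.
			 */
			copied = iov_iter_copy_from_user_atomic(page, i,
								offset, bytes);

			/* Filesystem commits, unlocks and releases the page
			 * (nfs_write_end). */
			status = a_ops->write_end(file, mapping, pos, bytes,
						  copied, page, fsdata);
			if (status < 0)
				return status;

			iov_iter_advance(i, copied);
			pos += copied;
			written += copied;
		}
		return written;
	}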