Commit 9994b62b5621f88828d442fcd03fe3ce4c43344b
Committed by Trond Myklebust
1 parent: ad1e396829
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
NFS: remove NFS_PAGE_TAG_LOCKED
The last real use of this tag was removed by commit 7f2f12d963
("NFS: Simplify nfs_wb_page()").

Signed-off-by: Fred Isaman <iisaman@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
3 changed files with 10 additions and 45 deletions
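Editor's note: nfs_lock_request_dontget() and nfs_unlock_request(), which this patch uses in place of the removed helpers, operate purely on the request's PG_BUSY bit and never touch the radix tree, which is why the NFS_PAGE_TAG_LOCKED bookkeeping can go away. The fragment below is a simplified sketch of those primitives, assuming the usual test_and_set_bit()/wake_up_bit() implementation suggested by the header comment in the last hunk; the example_ names are invented for illustration, and reference counting is omitted (the real nfs_unlock_request() in this tree ends by calling nfs_release_request(), as the first fs/nfs/pagelist.c hunk shows). The authoritative definitions live in include/linux/nfs_page.h and fs/nfs/pagelist.c.

#include <linux/nfs_page.h>	/* struct nfs_page, PG_BUSY */
#include <linux/bitops.h>
#include <linux/wait.h>

/* Non-blocking trylock: returns 1 if we now own PG_BUSY, 0 if it was already held. */
static inline int example_lock_request_dontget(struct nfs_page *req)
{
	return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}

/* Unlock: drop PG_BUSY and wake anyone sleeping in nfs_wait_on_request().
 * (Reference-count handling of the real helper is intentionally left out.) */
static inline void example_unlock_request(struct nfs_page *req)
{
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}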
fs/nfs/pagelist.c

@@ -107,36 +107,6 @@
 	nfs_release_request(req);
 }
 
-/**
- * nfs_set_page_tag_locked - Tag a request as locked
- * @req:
- */
-int nfs_set_page_tag_locked(struct nfs_page *req)
-{
-	if (!nfs_lock_request_dontget(req))
-		return 0;
-	if (test_bit(PG_MAPPED, &req->wb_flags))
-		radix_tree_tag_set(&NFS_I(req->wb_context->dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
-	return 1;
-}
-
-/**
- * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
- */
-void nfs_clear_page_tag_locked(struct nfs_page *req)
-{
-	if (test_bit(PG_MAPPED, &req->wb_flags)) {
-		struct inode *inode = req->wb_context->dentry->d_inode;
-		struct nfs_inode *nfsi = NFS_I(inode);
-
-		spin_lock(&inode->i_lock);
-		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
-		nfs_unlock_request(req);
-		spin_unlock(&inode->i_lock);
-	} else
-		nfs_unlock_request(req);
-}
-
 /*
  * nfs_clear_request - Free up all resources allocated to the request
  * @req:
@@ -469,7 +439,7 @@
 			if (req->wb_index > idx_end)
 				goto out;
 			idx_start = req->wb_index + 1;
-			if (nfs_set_page_tag_locked(req)) {
+			if (nfs_lock_request_dontget(req)) {
 				kref_get(&req->wb_kref);
 				radix_tree_tag_clear(&nfsi->nfs_page_tree,
 						req->wb_index, tag);
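Editor's note: the nfs_scan_list() hunk above is the one spot that still mixes the request trylock with radix-tree work: each tagged request is locked with nfs_lock_request_dontget(), referenced, and has its scan tag cleared; no second LOCKED tag is set any more. A condensed, hypothetical rendering of that per-request step (the helper name and signature are invented, and the caller is assumed to hold inode->i_lock as the real loop does) is:

#include <linux/nfs_fs.h>	/* struct nfs_inode, nfs_page_tree */
#include <linux/nfs_page.h>
#include <linux/radix-tree.h>
#include <linux/kref.h>

/* Hypothetical condensation of the per-request step in nfs_scan_list()
 * after this patch: trylock, take a reference, clear the scan tag, collect.
 * Returns 1 if the request was moved to *dst, 0 if it was busy. */
static int example_scan_one(struct nfs_inode *nfsi, struct nfs_page *req,
			    struct list_head *dst, int tag)
{
	if (!nfs_lock_request_dontget(req))
		return 0;		/* locked elsewhere; skip it this pass */
	kref_get(&req->wb_kref);
	radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, tag);
	nfs_list_add_request(req, dst);
	return 1;
}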
fs/nfs/write.c

@@ -236,10 +236,10 @@
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL)
 			break;
-		if (nfs_set_page_tag_locked(req))
+		if (nfs_lock_request_dontget(req))
 			break;
 		/* Note: If we hold the page lock, as is the case in nfs_writepage,
-		 *	 then the call to nfs_set_page_tag_locked() will always
+		 *	 then the call to nfs_lock_request_dontget() will always
 		 *	 succeed provided that someone hasn't already marked the
 		 *	 request as dirty (in which case we don't care).
 		 */
@@ -397,8 +397,6 @@
 	set_page_private(req->wb_page, (unsigned long)req);
 	nfsi->npages++;
 	kref_get(&req->wb_kref);
-	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
-			NFS_PAGE_TAG_LOCKED);
 	spin_unlock(&inode->i_lock);
 	radix_tree_preload_end();
 out:
@@ -604,7 +602,7 @@
 		    || end < req->wb_offset)
 			goto out_flushme;
 
-		if (nfs_set_page_tag_locked(req))
+		if (nfs_lock_request_dontget(req))
 			break;
 
 		/* The request is locked, so wait and then retry */
@@ -684,7 +682,7 @@
 	nfs_grow_file(page, offset, count);
 	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
 	nfs_mark_request_dirty(req);
-	nfs_clear_page_tag_locked(req);
+	nfs_unlock_request(req);
 	return 0;
 }
 
@@ -777,7 +775,7 @@
 
 	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
 		nfs_inode_remove_request(req);
-	nfs_clear_page_tag_locked(req);
+	nfs_unlock_request(req);
 	nfs_end_page_writeback(page);
 }
 
@@ -925,7 +923,7 @@
 	struct page *page = req->wb_page;
 
 	nfs_mark_request_dirty(req);
-	nfs_clear_page_tag_locked(req);
+	nfs_unlock_request(req);
 	nfs_end_page_writeback(page);
 }
 
@@ -1199,7 +1197,7 @@
 	remove_request:
 		nfs_inode_remove_request(req);
 	next:
-		nfs_clear_page_tag_locked(req);
+		nfs_unlock_request(req);
 		nfs_end_page_writeback(page);
 	}
 	nfs_writedata_release(calldata);
@@ -1411,7 +1409,7 @@
 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
 			     BDI_RECLAIMABLE);
-		nfs_clear_page_tag_locked(req);
+		nfs_unlock_request(req);
 	}
 }
 EXPORT_SYMBOL_GPL(nfs_retry_commit);
@@ -1486,7 +1484,7 @@
 		dprintk(" mismatch\n");
 		nfs_mark_request_dirty(req);
 	next:
-		nfs_clear_page_tag_locked(req);
+		nfs_unlock_request(req);
 	}
 }
 EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
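Editor's note: every fs/nfs/write.c hunk above converges on the same completion ordering: finish the request bookkeeping while PG_BUSY is still held, drop it with nfs_unlock_request(), and only then end page writeback. For instance, the function touched by the @@ -925,7 +923,7 @@ hunk (which appears to be nfs_redirty_request(); its header is outside the hunk, so the name is an assumption) reduces after this patch to the form below, reconstructed from that hunk. The helpers it calls are static to fs/nfs/write.c, so the sketch only makes sense in that file.

/* Reconstructed post-patch body of the redirty path in fs/nfs/write.c. */
static void nfs_redirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	nfs_mark_request_dirty(req);	/* request is still locked (PG_BUSY) */
	nfs_unlock_request(req);	/* unlock before ending writeback */
	nfs_end_page_writeback(page);
}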
include/linux/nfs_page.h

@@ -21,7 +21,6 @@
 /*
  * Valid flags for the radix tree
  */
-#define NFS_PAGE_TAG_LOCKED	0
 #define NFS_PAGE_TAG_COMMIT	1
 
 /*
@@ -106,8 +105,6 @@
 				struct nfs_page *req);
 extern int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
-extern int nfs_set_page_tag_locked(struct nfs_page *req);
-extern void nfs_clear_page_tag_locked(struct nfs_page *req);
 
 /*
  * Lock the page of an asynchronous request without getting a new reference
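Editor's note: with NFS_PAGE_TAG_LOCKED gone, the locking surface left in include/linux/nfs_page.h is just the non-blocking lock, nfs_unlock_request() and nfs_wait_on_request(). A hypothetical caller (the function name and the -EBUSY policy are invented for this sketch) would use it like this; note that in this tree nfs_unlock_request() also drops a reference on its way out (see the nfs_release_request() call at the top of the first fs/nfs/pagelist.c hunk), so the request reference is handed over here.

#include <linux/errno.h>
#include <linux/nfs_page.h>

/* Hypothetical caller showing the post-patch pattern: trylock, operate,
 * unlock. The caller is assumed to already hold a reference (wb_kref);
 * a blocking caller would instead loop over nfs_wait_on_request(). */
static int example_with_locked_request(struct nfs_page *req)
{
	if (!nfs_lock_request_dontget(req))
		return -EBUSY;		/* PG_BUSY is held by someone else */

	/* ... inspect or update the locked request here ... */

	nfs_unlock_request(req);	/* unlocks, wakes waiters, drops the ref */
	return 0;
}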