Commit 587142f85f796cf0b823dd3080e815f02ff6b952
1 parent 4e56e082dd
Exists in master and in 7 other branches
NFS: Replace NFS_I(inode)->req_lock with inode->i_lock
There is no justification for keeping a special spinlock for the exclusive use of the NFS writeback code.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
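In practice the change is mechanical: every site that previously took the NFS-private nfsi->req_lock now takes the i_lock that the VFS already embeds in struct inode, and the dedicated req_lock field can be dropped from struct nfs_inode (hence the removed spin_lock_init() below). A minimal before/after sketch of the pattern, based on nfs_page_find_request() from the diff below, with everything except the locking omitted:

/* Before: a spinlock private to the NFS writeback code. */
static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
	struct nfs_page *req;

	spin_lock(&nfsi->req_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(&nfsi->req_lock);
	return req;
}

/* After: the same critical section under the generic inode->i_lock. */
static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(&inode->i_lock);
	return req;
}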
Showing 4 changed files with 46 additions and 51 deletions
fs/nfs/inode.c
@@ -1154,7 +1154,6 @@
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
 	inode_init_once(&nfsi->vfs_inode);
-	spin_lock_init(&nfsi->req_lock);
 	INIT_LIST_HEAD(&nfsi->open_files);
 	INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
 	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
fs/nfs/pagelist.c
@@ -126,12 +126,13 @@
  */
 void nfs_clear_page_tag_locked(struct nfs_page *req)
 {
-	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
+	struct inode *inode = req->wb_context->path.dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
 
 	if (req->wb_page != NULL) {
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 	}
 	nfs_unlock_request(req);
 }
@@ -390,7 +391,7 @@
  * If the number of requests is set to 0, the entire address_space
  * starting at index idx_start, is scanned.
  * The requests are *not* checked to ensure that they form a contiguous set.
- * You must be holding the inode's req_lock when calling this function
+ * You must be holding the inode's i_lock when calling this function
  */
 int nfs_scan_list(struct nfs_inode *nfsi,
 		struct list_head *dst, pgoff_t idx_start,
@@ -430,7 +431,7 @@
 			}
 		}
 		/* for latency reduction */
-		cond_resched_lock(&nfsi->req_lock);
+		cond_resched_lock(&nfsi->vfs_inode.i_lock);
 	}
 out:
 	return res;
fs/nfs/write.c
@@ -124,12 +124,12 @@
 
 static struct nfs_page *nfs_page_find_request(struct page *page)
 {
+	struct inode *inode = page->mapping->host;
 	struct nfs_page *req = NULL;
-	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
 
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	return req;
 }
 
@@ -251,16 +251,16 @@
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 				struct page *page)
 {
+	struct inode *inode = page->mapping->host;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req;
-	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
-	spinlock_t *req_lock = &nfsi->req_lock;
 	int ret;
 
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	for(;;) {
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL) {
-			spin_unlock(req_lock);
+			spin_unlock(&inode->i_lock);
 			return 1;
 		}
 		if (nfs_lock_request_dontget(req))
@@ -270,28 +270,28 @@
 		 * succeed provided that someone hasn't already marked the
 		 * request as dirty (in which case we don't care).
 		 */
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		ret = nfs_wait_on_request(req);
 		nfs_release_request(req);
 		if (ret != 0)
 			return ret;
-		spin_lock(req_lock);
+		spin_lock(&inode->i_lock);
 	}
 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
 		/* This request is marked for commit */
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_unlock_request(req);
 		nfs_pageio_complete(pgio);
 		return 1;
 	}
 	if (nfs_set_page_writeback(page) != 0) {
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		BUG();
 	}
 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
 			NFS_PAGE_TAG_LOCKED);
 	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	nfs_pageio_add_request(pgio, req);
 	return ret;
 }
@@ -412,7 +412,7 @@
 
 	BUG_ON (!NFS_WBACK_BUSY(req));
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
@@ -420,11 +420,11 @@
 		__set_page_dirty_nobuffers(req->wb_page);
 	nfsi->npages--;
 	if (!nfsi->npages) {
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_end_data_update(inode);
 		iput(inode);
 	} else
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 	nfs_clear_request(req);
 	nfs_release_request(req);
 }
@@ -458,13 +458,13 @@
 	struct inode *inode = req->wb_context->path.dentry->d_inode;
 	struct nfs_inode *nfsi = NFS_I(inode);
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	nfsi->ncommit++;
 	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
 	radix_tree_tag_set(&nfsi->nfs_page_tree,
 			req->wb_index,
 			NFS_PAGE_TAG_COMMIT);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
@@ -534,10 +534,10 @@
 		BUG_ON(!NFS_WBACK_BUSY(req));
 
 		kref_get(&req->wb_kref);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		error = nfs_wait_on_request(req);
 		nfs_release_request(req);
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		if (error < 0)
 			return error;
 		res++;
@@ -602,7 +602,6 @@
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req, *new = NULL;
 	pgoff_t rqend, end;
 
@@ -612,13 +611,13 @@
 		/* Loop over all inode entries and see if we find
 		 * A request for the page we wish to update
 		 */
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		req = nfs_page_find_request_locked(page);
 		if (req) {
 			if (!nfs_lock_request_dontget(req)) {
 				int error;
 
-				spin_unlock(&nfsi->req_lock);
+				spin_unlock(&inode->i_lock);
 				error = nfs_wait_on_request(req);
 				nfs_release_request(req);
 				if (error < 0) {
@@ -628,7 +627,7 @@
 				}
 				continue;
 			}
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			if (new)
 				nfs_release_request(new);
 			break;
@@ -639,14 +638,14 @@
 			nfs_lock_request_dontget(new);
 			error = nfs_inode_add_request(inode, new);
 			if (error) {
-				spin_unlock(&nfsi->req_lock);
+				spin_unlock(&inode->i_lock);
 				nfs_unlock_request(new);
 				return ERR_PTR(error);
 			}
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			return new;
 		}
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 
 		new = nfs_create_request(ctx, inode, page, offset, bytes);
 		if (IS_ERR(new))
@@ -974,9 +973,9 @@
 	}
 
 	if (nfs_write_need_commit(data)) {
-		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+		struct inode *inode = page->mapping->host;
 
-		spin_lock(req_lock);
+		spin_lock(&inode->i_lock);
 		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
 			/* Do nothing we need to resend the writes */
 		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
@@ -987,7 +986,7 @@
 			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
 			dprintk(" server reboot detected\n");
 		}
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 	} else
 		dprintk(" OK\n");
 
@@ -1277,13 +1276,12 @@
 
 int nfs_commit_inode(struct inode *inode, int how)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
 	LIST_HEAD(head);
 	int res;
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	res = nfs_scan_commit(inode, &head, 0, 0);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	if (res) {
 		int error = nfs_commit_list(inode, &head, how);
 		if (error < 0)
@@ -1301,7 +1299,6 @@
 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
 	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	pgoff_t idx_start, idx_end;
 	unsigned int npages = 0;
 	LIST_HEAD(head);
@@ -1323,7 +1320,7 @@
 		}
 	}
 	how &= ~FLUSH_NOCOMMIT;
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	do {
 		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
 		if (ret != 0)
@@ -1334,18 +1331,19 @@
 		if (pages == 0)
 			break;
 		if (how & FLUSH_INVALIDATE) {
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			nfs_cancel_commit_list(&head);
 			ret = pages;
-			spin_lock(&nfsi->req_lock);
+			spin_lock(&inode->i_lock);
 			continue;
 		}
 		pages += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		ret = nfs_commit_list(inode, &head, how);
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
+
 	} while (ret >= 0);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	return ret;
 }
 
@@ -1439,7 +1437,6 @@
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode;
-	spinlock_t *req_lock;
 	struct nfs_page *req;
 	int ret;
 
@@ -1448,18 +1445,17 @@
 	inode = mapping->host;
 	if (!inode)
 		goto out_raced;
-	req_lock = &NFS_I(inode)->req_lock;
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
 	if (req != NULL) {
 		/* Mark any existing write requests for flushing */
 		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_release_request(req);
 		return ret;
 	}
 	ret = __set_page_dirty_nobuffers(page);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	return ret;
 out_raced:
 	return !TestSetPageDirty(page);