Commit 8535b2be5181fc3019e4150567ef53210fe3b04f
Parent: 712a433866
NFSv4: Don't use GFP_KERNEL allocations in state recovery
We do not want to have the state recovery thread kick off and wait for a
memory reclaim, since that may deadlock when the writebacks end up waiting
for the state recovery thread to complete. The safe thing is therefore to
use GFP_NOFS in all open, close, delegation return, lock, etc. operations
that may be called by the state recovery thread.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
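The patch follows one pattern throughout: allocation sites that can be reached from the state recovery thread either switch to GFP_NOFS directly or gain a gfp_t parameter so the caller can choose, with recovery and close paths passing GFP_NOFS and ordinary open/lock paths keeping GFP_KERNEL. The sketch below illustrates that pattern in isolation; the structure and function names are invented for the example and are not part of the NFS client code.

#include <linux/slab.h>
#include <linux/gfp.h>

/* Hypothetical state object allocated on both the normal I/O path and the
 * recovery path (stands in for nfs4_opendata, nfs_seqid, etc.). */
struct demo_state {
        int seqid;
};

/* Take the gfp mask from the caller instead of hard-coding GFP_KERNEL, so
 * the caller decides whether the allocation may enter filesystem reclaim. */
static struct demo_state *demo_state_alloc(gfp_t gfp_mask)
{
        return kzalloc(sizeof(struct demo_state), gfp_mask);
}

/* Ordinary syscall path: waiting on filesystem writeback is safe. */
static struct demo_state *demo_open(void)
{
        return demo_state_alloc(GFP_KERNEL);
}

/* State recovery path: writeback may itself be waiting for recovery to
 * complete, so forbid the allocator from recursing into the filesystem. */
static struct demo_state *demo_recover(void)
{
        return demo_state_alloc(GFP_NOFS);
}

This mirrors, for example, the new gfp_mask arguments to nfs_alloc_seqid() and nfs4_do_close() in the hunks below.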
Showing 4 changed files with 39 additions and 35 deletions
fs/nfs/delegation.c
@@ -213,7 +213,7 @@
         struct nfs_delegation *freeme = NULL;
         int status = 0;
 
-        delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
+        delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
         if (delegation == NULL)
                 return -ENOMEM;
         memcpy(delegation->stateid.data, res->delegation.data,
fs/nfs/nfs4_fs.h
@@ -213,7 +213,7 @@
 extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
+extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait);
 extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
 extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
@@ -286,7 +286,7 @@
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
 extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
 
-extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter);
+extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
 extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
 extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
 extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
fs/nfs/nfs4proc.c
@@ -717,17 +717,18 @@
 
 static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
                 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
-                const struct iattr *attrs)
+                const struct iattr *attrs,
+                gfp_t gfp_mask)
 {
         struct dentry *parent = dget_parent(path->dentry);
         struct inode *dir = parent->d_inode;
         struct nfs_server *server = NFS_SERVER(dir);
         struct nfs4_opendata *p;
 
-        p = kzalloc(sizeof(*p), GFP_KERNEL);
+        p = kzalloc(sizeof(*p), gfp_mask);
         if (p == NULL)
                 goto err;
-        p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+        p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
         if (p->o_arg.seqid == NULL)
                 goto err_free;
         path_get(path);
@@ -1063,7 +1064,7 @@
 {
         struct nfs4_opendata *opendata;
 
-        opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL);
+        opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL, GFP_NOFS);
         if (opendata == NULL)
                 return ERR_PTR(-ENOMEM);
         opendata->state = state;
@@ -1651,7 +1652,7 @@
         if (path->dentry->d_inode != NULL)
                 nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode);
         status = -ENOMEM;
-        opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr);
+        opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr, GFP_KERNEL);
         if (opendata == NULL)
                 goto err_put_state_owner;
 
@@ -1926,7 +1927,7 @@
  *
  * NOTE: Caller must be holding the sp->so_owner semaphore!
  */
-int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
+int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait)
 {
         struct nfs_server *server = NFS_SERVER(state->inode);
         struct nfs4_closedata *calldata;
@@ -1945,7 +1946,7 @@
         };
         int status = -ENOMEM;
 
-        calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
+        calldata = kzalloc(sizeof(*calldata), gfp_mask);
         if (calldata == NULL)
                 goto out;
         calldata->inode = state->inode;
@@ -1953,7 +1954,7 @@
         calldata->arg.fh = NFS_FH(state->inode);
         calldata->arg.stateid = &state->open_stateid;
         /* Serialization for the sequence id */
-        calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
+        calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
         if (calldata->arg.seqid == NULL)
                 goto out_free_calldata;
         calldata->arg.fmode = 0;
@@ -3704,7 +3705,7 @@
         };
         int status = 0;
 
-        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        data = kzalloc(sizeof(*data), GFP_NOFS);
         if (data == NULL)
                 return -ENOMEM;
         data->args.fhandle = &data->fh;
@@ -3860,7 +3861,7 @@
         struct nfs4_unlockdata *p;
         struct inode *inode = lsp->ls_state->inode;
 
-        p = kzalloc(sizeof(*p), GFP_KERNEL);
+        p = kzalloc(sizeof(*p), GFP_NOFS);
         if (p == NULL)
                 return NULL;
         p->arg.fh = NFS_FH(inode);
@@ -3998,7 +3999,7 @@
         if (test_bit(NFS_DELEGATED_STATE, &state->flags))
                 goto out;
         lsp = request->fl_u.nfs4_fl.owner;
-        seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+        seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
         status = -ENOMEM;
         if (seqid == NULL)
                 goto out;
@@ -4026,22 +4027,23 @@
 };
 
 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
-                struct nfs_open_context *ctx, struct nfs4_lock_state *lsp)
+                struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
+                gfp_t gfp_mask)
 {
         struct nfs4_lockdata *p;
         struct inode *inode = lsp->ls_state->inode;
         struct nfs_server *server = NFS_SERVER(inode);
 
-        p = kzalloc(sizeof(*p), GFP_KERNEL);
+        p = kzalloc(sizeof(*p), gfp_mask);
         if (p == NULL)
                 return NULL;
 
         p->arg.fh = NFS_FH(inode);
         p->arg.fl = &p->fl;
-        p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid);
+        p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
         if (p->arg.open_seqid == NULL)
                 goto out_free;
-        p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+        p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
         if (p->arg.lock_seqid == NULL)
                 goto out_free_seqid;
         p->arg.lock_stateid = &lsp->ls_stateid;
@@ -4195,7 +4197,8 @@
 
         dprintk("%s: begin!\n", __func__);
         data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
-                        fl->fl_u.nfs4_fl.owner);
+                        fl->fl_u.nfs4_fl.owner,
+                        recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
         if (data == NULL)
                 return -ENOMEM;
         if (IS_SETLKW(cmd))
@@ -4684,7 +4687,7 @@
         if (max_reqs != tbl->max_slots) {
                 ret = -ENOMEM;
                 new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
-                                GFP_KERNEL);
+                                GFP_NOFS);
                 if (!new)
                         goto out;
                 ret = 0;
@@ -4749,7 +4752,7 @@
 
         dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
 
-        slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
+        slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
         if (!slot)
                 goto out;
         ret = 0;
@@ -4798,7 +4801,7 @@
         struct nfs4_session *session;
         struct nfs4_slot_table *tbl;
 
-        session = kzalloc(sizeof(struct nfs4_session), GFP_KERNEL);
+        session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
         if (!session)
                 return NULL;
 
@@ -5142,8 +5145,8 @@
 
         if (!atomic_inc_not_zero(&clp->cl_count))
                 return -EIO;
-        args = kzalloc(sizeof(*args), GFP_KERNEL);
-        res = kzalloc(sizeof(*res), GFP_KERNEL);
+        args = kzalloc(sizeof(*args), GFP_NOFS);
+        res = kzalloc(sizeof(*res), GFP_NOFS);
         if (!args || !res) {
                 kfree(args);
                 kfree(res);
@@ -5244,7 +5247,7 @@
         int status = -ENOMEM;
 
         dprintk("--> %s\n", __func__);
-        calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
+        calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
         if (calldata == NULL)
                 goto out;
         calldata->clp = clp;
fs/nfs/nfs4state.c
@@ -366,7 +366,7 @@
 {
         struct nfs4_state_owner *sp;
 
-        sp = kzalloc(sizeof(*sp),GFP_KERNEL);
+        sp = kzalloc(sizeof(*sp),GFP_NOFS);
         if (!sp)
                 return NULL;
         spin_lock_init(&sp->so_lock);
@@ -440,7 +440,7 @@
 {
         struct nfs4_state *state;
 
-        state = kzalloc(sizeof(*state), GFP_KERNEL);
+        state = kzalloc(sizeof(*state), GFP_NOFS);
         if (!state)
                 return NULL;
         atomic_set(&state->count, 1);
@@ -542,7 +542,8 @@
 /*
  * Close the current file.
  */
-static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
+static void __nfs4_close(struct path *path, struct nfs4_state *state,
+                fmode_t fmode, gfp_t gfp_mask, int wait)
 {
         struct nfs4_state_owner *owner = state->owner;
         int call_close = 0;
@@ -583,17 +584,17 @@
                 nfs4_put_open_state(state);
                 nfs4_put_state_owner(owner);
         } else
-                nfs4_do_close(path, state, wait);
+                nfs4_do_close(path, state, gfp_mask, wait);
 }
 
 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
 {
-        __nfs4_close(path, state, fmode, 0);
+        __nfs4_close(path, state, fmode, GFP_NOFS, 0);
 }
 
 void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
 {
-        __nfs4_close(path, state, fmode, 1);
+        __nfs4_close(path, state, fmode, GFP_KERNEL, 1);
 }
 
 /*
@@ -623,7 +624,7 @@
         struct nfs4_lock_state *lsp;
         struct nfs_client *clp = state->owner->so_client;
 
-        lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
+        lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
         if (lsp == NULL)
                 return NULL;
         rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
@@ -759,11 +760,11 @@
         nfs4_put_lock_state(lsp);
 }
 
-struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
 {
         struct nfs_seqid *new;
 
-        new = kmalloc(sizeof(*new), GFP_KERNEL);
+        new = kmalloc(sizeof(*new), gfp_mask);
         if (new != NULL) {
                 new->sequence = counter;
                 INIT_LIST_HEAD(&new->list);
@@ -1352,7 +1353,7 @@
 
         nfs4_begin_drain_session(clp);
         new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
-                        GFP_KERNEL);
+                        GFP_NOFS);
         if (!new)
                 return -ENOMEM;
 