Commit bf6c8a81481397e0c53b0a91941d945c785356be
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
Merge tag 'nfs-for-3.8-4' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
Pull NFS client bugfixes from Trond Myklebust:

 - Error reporting in nfs_xdev_mount incorrectly maps all errors to ENOMEM
 - Fix an NFSv4 refcounting issue
 - Fix a mount failure when the server reboots during NFSv4 trunking discovery
 - NFSv4.1 mounts may need to run the lease recovery thread.
 - Don't silently fail setattr() requests on mountpoints
 - Fix a SUNRPC socket/transport livelock and priority queue issue
 - We must handle NFS4ERR_DELAY when resetting the NFSv4.1 session.

* tag 'nfs-for-3.8-4' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFSv4.1: Handle NFS4ERR_DELAY when resetting the NFSv4.1 session
  SUNRPC: When changing the queue priority, ensure that we change the owner
  NFS: Don't silently fail setattr() requests on mountpoints
  NFSv4.1: Ensure that nfs41_walk_client_list() does start lease recovery
  NFSv4: Fix NFSv4 trunking discovery
  NFSv4: Fix NFSv4 reference counting for trunked sessions
  NFS: Fix error reporting in nfs_xdev_mount
Showing 5 changed files Side-by-side Diff
fs/nfs/namespace.c
... | ... | @@ -177,11 +177,31 @@ |
177 | 177 | return mnt; |
178 | 178 | } |
179 | 179 | |
180 | +static int | |
181 | +nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) | |
182 | +{ | |
183 | + if (NFS_FH(dentry->d_inode)->size != 0) | |
184 | + return nfs_getattr(mnt, dentry, stat); | |
185 | + generic_fillattr(dentry->d_inode, stat); | |
186 | + return 0; | |
187 | +} | |
188 | + | |
189 | +static int | |
190 | +nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr) | |
191 | +{ | |
192 | + if (NFS_FH(dentry->d_inode)->size != 0) | |
193 | + return nfs_setattr(dentry, attr); | |
194 | + return -EACCES; | |
195 | +} | |
196 | + | |
180 | 197 | const struct inode_operations nfs_mountpoint_inode_operations = { |
181 | 198 | .getattr = nfs_getattr, |
199 | + .setattr = nfs_setattr, | |
182 | 200 | }; |
183 | 201 | |
184 | 202 | const struct inode_operations nfs_referral_inode_operations = { |
203 | + .getattr = nfs_namespace_getattr, | |
204 | + .setattr = nfs_namespace_setattr, | |
185 | 205 | }; |
186 | 206 | |
187 | 207 | static void nfs_expire_automounts(struct work_struct *work) |
fs/nfs/nfs4client.c
... | ... | @@ -236,11 +236,10 @@ |
236 | 236 | error = nfs4_discover_server_trunking(clp, &old); |
237 | 237 | if (error < 0) |
238 | 238 | goto error; |
239 | + nfs_put_client(clp); | |
239 | 240 | if (clp != old) { |
240 | 241 | clp->cl_preserve_clid = true; |
241 | - nfs_put_client(clp); | |
242 | 242 | clp = old; |
243 | - atomic_inc(&clp->cl_count); | |
244 | 243 | } |
245 | 244 | |
246 | 245 | return clp; |
... | ... | @@ -306,7 +305,7 @@ |
306 | 305 | .clientid = new->cl_clientid, |
307 | 306 | .confirm = new->cl_confirm, |
308 | 307 | }; |
309 | - int status; | |
308 | + int status = -NFS4ERR_STALE_CLIENTID; | |
310 | 309 | |
311 | 310 | spin_lock(&nn->nfs_client_lock); |
312 | 311 | list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { |
313 | 312 | |
314 | 313 | |
315 | 314 | |
316 | 315 | |
317 | 316 | |
318 | 317 | |
319 | 318 | |
320 | 319 | |
... | ... | @@ -332,40 +331,33 @@ |
332 | 331 | |
333 | 332 | if (prev) |
334 | 333 | nfs_put_client(prev); |
334 | + prev = pos; | |
335 | 335 | |
336 | 336 | status = nfs4_proc_setclientid_confirm(pos, &clid, cred); |
337 | - if (status == 0) { | |
337 | + switch (status) { | |
338 | + case -NFS4ERR_STALE_CLIENTID: | |
339 | + break; | |
340 | + case 0: | |
338 | 341 | nfs4_swap_callback_idents(pos, new); |
339 | 342 | |
340 | - nfs_put_client(pos); | |
343 | + prev = NULL; | |
341 | 344 | *result = pos; |
342 | 345 | dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", |
343 | 346 | __func__, pos, atomic_read(&pos->cl_count)); |
344 | - return 0; | |
347 | + default: | |
348 | + goto out; | |
345 | 349 | } |
346 | - if (status != -NFS4ERR_STALE_CLIENTID) { | |
347 | - nfs_put_client(pos); | |
348 | - dprintk("NFS: <-- %s status = %d, no result\n", | |
349 | - __func__, status); | |
350 | - return status; | |
351 | - } | |
352 | 350 | |
353 | 351 | spin_lock(&nn->nfs_client_lock); |
354 | - prev = pos; | |
355 | 352 | } |
353 | + spin_unlock(&nn->nfs_client_lock); | |
356 | 354 | |
357 | - /* | |
358 | - * No matching nfs_client found. This should be impossible, | |
359 | - * because the new nfs_client has already been added to | |
360 | - * nfs_client_list by nfs_get_client(). | |
361 | - * | |
362 | - * Don't BUG(), since the caller is holding a mutex. | |
363 | - */ | |
355 | + /* No match found. The server lost our clientid */ | |
356 | +out: | |
364 | 357 | if (prev) |
365 | 358 | nfs_put_client(prev); |
366 | - spin_unlock(&nn->nfs_client_lock); | |
367 | - pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); | |
368 | - return -NFS4ERR_STALE_CLIENTID; | |
359 | + dprintk("NFS: <-- %s status = %d\n", __func__, status); | |
360 | + return status; | |
369 | 361 | } |
370 | 362 | |
371 | 363 | #ifdef CONFIG_NFS_V4_1 |
... | ... | @@ -432,7 +424,7 @@ |
432 | 424 | { |
433 | 425 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); |
434 | 426 | struct nfs_client *pos, *n, *prev = NULL; |
435 | - int error; | |
427 | + int status = -NFS4ERR_STALE_CLIENTID; | |
436 | 428 | |
437 | 429 | spin_lock(&nn->nfs_client_lock); |
438 | 430 | list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { |
439 | 431 | |
440 | 432 | |
... | ... | @@ -448,14 +440,17 @@ |
448 | 440 | nfs_put_client(prev); |
449 | 441 | prev = pos; |
450 | 442 | |
451 | - error = nfs_wait_client_init_complete(pos); | |
452 | - if (error < 0) { | |
443 | + nfs4_schedule_lease_recovery(pos); | |
444 | + status = nfs_wait_client_init_complete(pos); | |
445 | + if (status < 0) { | |
453 | 446 | nfs_put_client(pos); |
454 | 447 | spin_lock(&nn->nfs_client_lock); |
455 | 448 | continue; |
456 | 449 | } |
457 | - | |
450 | + status = pos->cl_cons_state; | |
458 | 451 | spin_lock(&nn->nfs_client_lock); |
452 | + if (status < 0) | |
453 | + continue; | |
459 | 454 | } |
460 | 455 | |
461 | 456 | if (pos->rpc_ops != new->rpc_ops) |
... | ... | @@ -473,6 +468,7 @@ |
473 | 468 | if (!nfs4_match_serverowners(pos, new)) |
474 | 469 | continue; |
475 | 470 | |
471 | + atomic_inc(&pos->cl_count); | |
476 | 472 | spin_unlock(&nn->nfs_client_lock); |
477 | 473 | dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", |
478 | 474 | __func__, pos, atomic_read(&pos->cl_count)); |
479 | 475 | |
... | ... | @@ -481,16 +477,10 @@ |
481 | 477 | return 0; |
482 | 478 | } |
483 | 479 | |
484 | - /* | |
485 | - * No matching nfs_client found. This should be impossible, | |
486 | - * because the new nfs_client has already been added to | |
487 | - * nfs_client_list by nfs_get_client(). | |
488 | - * | |
489 | - * Don't BUG(), since the caller is holding a mutex. | |
490 | - */ | |
480 | + /* No matching nfs_client found. */ | |
491 | 481 | spin_unlock(&nn->nfs_client_lock); |
492 | - pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); | |
493 | - return -NFS4ERR_STALE_CLIENTID; | |
482 | + dprintk("NFS: <-- %s status = %d\n", __func__, status); | |
483 | + return status; | |
494 | 484 | } |
495 | 485 | #endif /* CONFIG_NFS_V4_1 */ |
496 | 486 |
fs/nfs/nfs4state.c
... | ... | @@ -136,16 +136,11 @@ |
136 | 136 | clp->cl_confirm = clid.confirm; |
137 | 137 | |
138 | 138 | status = nfs40_walk_client_list(clp, result, cred); |
139 | - switch (status) { | |
140 | - case -NFS4ERR_STALE_CLIENTID: | |
141 | - set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | |
142 | - case 0: | |
139 | + if (status == 0) { | |
143 | 140 | /* Sustain the lease, even if it's empty. If the clientid4 |
144 | 141 | * goes stale it's of no use for trunking discovery. */ |
145 | 142 | nfs4_schedule_state_renewal(*result); |
146 | - break; | |
147 | 143 | } |
148 | - | |
149 | 144 | out: |
150 | 145 | return status; |
151 | 146 | } |
... | ... | @@ -1863,6 +1858,7 @@ |
1863 | 1858 | case -ETIMEDOUT: |
1864 | 1859 | case -EAGAIN: |
1865 | 1860 | ssleep(1); |
1861 | + case -NFS4ERR_STALE_CLIENTID: | |
1866 | 1862 | dprintk("NFS: %s after status %d, retrying\n", |
1867 | 1863 | __func__, status); |
1868 | 1864 | goto again; |
... | ... | @@ -2022,8 +2018,18 @@ |
2022 | 2018 | nfs4_begin_drain_session(clp); |
2023 | 2019 | cred = nfs4_get_exchange_id_cred(clp); |
2024 | 2020 | status = nfs4_proc_destroy_session(clp->cl_session, cred); |
2025 | - if (status && status != -NFS4ERR_BADSESSION && | |
2026 | - status != -NFS4ERR_DEADSESSION) { | |
2021 | + switch (status) { | |
2022 | + case 0: | |
2023 | + case -NFS4ERR_BADSESSION: | |
2024 | + case -NFS4ERR_DEADSESSION: | |
2025 | + break; | |
2026 | + case -NFS4ERR_BACK_CHAN_BUSY: | |
2027 | + case -NFS4ERR_DELAY: | |
2028 | + set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | |
2029 | + status = 0; | |
2030 | + ssleep(1); | |
2031 | + goto out; | |
2032 | + default: | |
2027 | 2033 | status = nfs4_recovery_handle_error(clp, status); |
2028 | 2034 | goto out; |
2029 | 2035 | } |
fs/nfs/super.c
... | ... | @@ -2589,27 +2589,23 @@ |
2589 | 2589 | struct nfs_server *server; |
2590 | 2590 | struct dentry *mntroot = ERR_PTR(-ENOMEM); |
2591 | 2591 | struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod; |
2592 | - int error; | |
2593 | 2592 | |
2594 | - dprintk("--> nfs_xdev_mount_common()\n"); | |
2593 | + dprintk("--> nfs_xdev_mount()\n"); | |
2595 | 2594 | |
2596 | 2595 | mount_info.mntfh = mount_info.cloned->fh; |
2597 | 2596 | |
2598 | 2597 | /* create a new volume representation */ |
2599 | 2598 | server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor); |
2600 | - if (IS_ERR(server)) { | |
2601 | - error = PTR_ERR(server); | |
2602 | - goto out_err; | |
2603 | - } | |
2604 | 2599 | |
2605 | - mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, nfs_mod); | |
2606 | - dprintk("<-- nfs_xdev_mount_common() = 0\n"); | |
2607 | -out: | |
2608 | - return mntroot; | |
2600 | + if (IS_ERR(server)) | |
2601 | + mntroot = ERR_CAST(server); | |
2602 | + else | |
2603 | + mntroot = nfs_fs_mount_common(server, flags, | |
2604 | + dev_name, &mount_info, nfs_mod); | |
2609 | 2605 | |
2610 | -out_err: | |
2611 | - dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error); | |
2612 | - goto out; | |
2606 | + dprintk("<-- nfs_xdev_mount() = %ld\n", | |
2607 | + IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L); | |
2608 | + return mntroot; | |
2613 | 2609 | } |
2614 | 2610 | |
2615 | 2611 | #if IS_ENABLED(CONFIG_NFS_V4) |
net/sunrpc/sched.c
... | ... | @@ -98,9 +98,25 @@ |
98 | 98 | list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); |
99 | 99 | } |
100 | 100 | |
101 | +static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue) | |
102 | +{ | |
103 | + struct list_head *q = &queue->tasks[queue->priority]; | |
104 | + struct rpc_task *task; | |
105 | + | |
106 | + if (!list_empty(q)) { | |
107 | + task = list_first_entry(q, struct rpc_task, u.tk_wait.list); | |
108 | + if (task->tk_owner == queue->owner) | |
109 | + list_move_tail(&task->u.tk_wait.list, q); | |
110 | + } | |
111 | +} | |
112 | + | |
101 | 113 | static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) |
102 | 114 | { |
103 | - queue->priority = priority; | |
115 | + if (queue->priority != priority) { | |
116 | + /* Fairness: rotate the list when changing priority */ | |
117 | + rpc_rotate_queue_owner(queue); | |
118 | + queue->priority = priority; | |
119 | + } | |
104 | 120 | } |
105 | 121 | |
106 | 122 | static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) |