Commit a12587b00388d1694933252e97abca237bc3a6b8
Exists in
master
and in
6 other branches
Merge tag 'nfs-for-3.3-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
NFS client bugfixes and cleanups for Linux 3.3 (pull 2) * tag 'nfs-for-3.3-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: pnfsblock: alloc short extent before submit bio pnfsblock: remove rpc_call_ops from struct parallel_io pnfsblock: move find lock page logic out of bl_write_pagelist pnfsblock: cleanup bl_mark_sectors_init pnfsblock: limit bio page count pnfsblock: don't spinlock when freeing block_dev pnfsblock: clean up _add_entry pnfsblock: set read/write tk_status to pnfs_error pnfsblock: acquire im_lock in _preload_range NFS4: fix compile warnings in nfs4proc.c nfs: check for integer overflow in decode_devicenotify_args() NFS: cleanup endian type in decode_ds_addr() NFS: add an endian notation
Showing 7 changed files Inline Diff
fs/nfs/blocklayout/blocklayout.c
1 | /* | 1 | /* |
2 | * linux/fs/nfs/blocklayout/blocklayout.c | 2 | * linux/fs/nfs/blocklayout/blocklayout.c |
3 | * | 3 | * |
4 | * Module for the NFSv4.1 pNFS block layout driver. | 4 | * Module for the NFSv4.1 pNFS block layout driver. |
5 | * | 5 | * |
6 | * Copyright (c) 2006 The Regents of the University of Michigan. | 6 | * Copyright (c) 2006 The Regents of the University of Michigan. |
7 | * All rights reserved. | 7 | * All rights reserved. |
8 | * | 8 | * |
9 | * Andy Adamson <andros@citi.umich.edu> | 9 | * Andy Adamson <andros@citi.umich.edu> |
10 | * Fred Isaman <iisaman@umich.edu> | 10 | * Fred Isaman <iisaman@umich.edu> |
11 | * | 11 | * |
12 | * permission is granted to use, copy, create derivative works and | 12 | * permission is granted to use, copy, create derivative works and |
13 | * redistribute this software and such derivative works for any purpose, | 13 | * redistribute this software and such derivative works for any purpose, |
14 | * so long as the name of the university of michigan is not used in | 14 | * so long as the name of the university of michigan is not used in |
15 | * any advertising or publicity pertaining to the use or distribution | 15 | * any advertising or publicity pertaining to the use or distribution |
16 | * of this software without specific, written prior authorization. if | 16 | * of this software without specific, written prior authorization. if |
17 | * the above copyright notice or any other identification of the | 17 | * the above copyright notice or any other identification of the |
18 | * university of michigan is included in any copy of any portion of | 18 | * university of michigan is included in any copy of any portion of |
19 | * this software, then the disclaimer below must also be included. | 19 | * this software, then the disclaimer below must also be included. |
20 | * | 20 | * |
21 | * this software is provided as is, without representation from the | 21 | * this software is provided as is, without representation from the |
22 | * university of michigan as to its fitness for any purpose, and without | 22 | * university of michigan as to its fitness for any purpose, and without |
23 | * warranty by the university of michigan of any kind, either express | 23 | * warranty by the university of michigan of any kind, either express |
24 | * or implied, including without limitation the implied warranties of | 24 | * or implied, including without limitation the implied warranties of |
25 | * merchantability and fitness for a particular purpose. the regents | 25 | * merchantability and fitness for a particular purpose. the regents |
26 | * of the university of michigan shall not be liable for any damages, | 26 | * of the university of michigan shall not be liable for any damages, |
27 | * including special, indirect, incidental, or consequential damages, | 27 | * including special, indirect, incidental, or consequential damages, |
28 | * with respect to any claim arising out or in connection with the use | 28 | * with respect to any claim arising out or in connection with the use |
29 | * of the software, even if it has been or is hereafter advised of the | 29 | * of the software, even if it has been or is hereafter advised of the |
30 | * possibility of such damages. | 30 | * possibility of such damages. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/mount.h> | 35 | #include <linux/mount.h> |
36 | #include <linux/namei.h> | 36 | #include <linux/namei.h> |
37 | #include <linux/bio.h> /* struct bio */ | 37 | #include <linux/bio.h> /* struct bio */ |
38 | #include <linux/buffer_head.h> /* various write calls */ | 38 | #include <linux/buffer_head.h> /* various write calls */ |
39 | #include <linux/prefetch.h> | 39 | #include <linux/prefetch.h> |
40 | 40 | ||
41 | #include "blocklayout.h" | 41 | #include "blocklayout.h" |
42 | 42 | ||
43 | #define NFSDBG_FACILITY NFSDBG_PNFS_LD | 43 | #define NFSDBG_FACILITY NFSDBG_PNFS_LD |
44 | 44 | ||
45 | MODULE_LICENSE("GPL"); | 45 | MODULE_LICENSE("GPL"); |
46 | MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>"); | 46 | MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>"); |
47 | MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); | 47 | MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); |
48 | 48 | ||
49 | struct dentry *bl_device_pipe; | 49 | struct dentry *bl_device_pipe; |
50 | wait_queue_head_t bl_wq; | 50 | wait_queue_head_t bl_wq; |
51 | 51 | ||
52 | static void print_page(struct page *page) | 52 | static void print_page(struct page *page) |
53 | { | 53 | { |
54 | dprintk("PRINTPAGE page %p\n", page); | 54 | dprintk("PRINTPAGE page %p\n", page); |
55 | dprintk(" PagePrivate %d\n", PagePrivate(page)); | 55 | dprintk(" PagePrivate %d\n", PagePrivate(page)); |
56 | dprintk(" PageUptodate %d\n", PageUptodate(page)); | 56 | dprintk(" PageUptodate %d\n", PageUptodate(page)); |
57 | dprintk(" PageError %d\n", PageError(page)); | 57 | dprintk(" PageError %d\n", PageError(page)); |
58 | dprintk(" PageDirty %d\n", PageDirty(page)); | 58 | dprintk(" PageDirty %d\n", PageDirty(page)); |
59 | dprintk(" PageReferenced %d\n", PageReferenced(page)); | 59 | dprintk(" PageReferenced %d\n", PageReferenced(page)); |
60 | dprintk(" PageLocked %d\n", PageLocked(page)); | 60 | dprintk(" PageLocked %d\n", PageLocked(page)); |
61 | dprintk(" PageWriteback %d\n", PageWriteback(page)); | 61 | dprintk(" PageWriteback %d\n", PageWriteback(page)); |
62 | dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page)); | 62 | dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page)); |
63 | dprintk("\n"); | 63 | dprintk("\n"); |
64 | } | 64 | } |
65 | 65 | ||
66 | /* Given the be associated with isect, determine if page data needs to be | 66 | /* Given the be associated with isect, determine if page data needs to be |
67 | * initialized. | 67 | * initialized. |
68 | */ | 68 | */ |
69 | static int is_hole(struct pnfs_block_extent *be, sector_t isect) | 69 | static int is_hole(struct pnfs_block_extent *be, sector_t isect) |
70 | { | 70 | { |
71 | if (be->be_state == PNFS_BLOCK_NONE_DATA) | 71 | if (be->be_state == PNFS_BLOCK_NONE_DATA) |
72 | return 1; | 72 | return 1; |
73 | else if (be->be_state != PNFS_BLOCK_INVALID_DATA) | 73 | else if (be->be_state != PNFS_BLOCK_INVALID_DATA) |
74 | return 0; | 74 | return 0; |
75 | else | 75 | else |
76 | return !bl_is_sector_init(be->be_inval, isect); | 76 | return !bl_is_sector_init(be->be_inval, isect); |
77 | } | 77 | } |
78 | 78 | ||
79 | /* Given the be associated with isect, determine if page data can be | 79 | /* Given the be associated with isect, determine if page data can be |
80 | * written to disk. | 80 | * written to disk. |
81 | */ | 81 | */ |
82 | static int is_writable(struct pnfs_block_extent *be, sector_t isect) | 82 | static int is_writable(struct pnfs_block_extent *be, sector_t isect) |
83 | { | 83 | { |
84 | return (be->be_state == PNFS_BLOCK_READWRITE_DATA || | 84 | return (be->be_state == PNFS_BLOCK_READWRITE_DATA || |
85 | be->be_state == PNFS_BLOCK_INVALID_DATA); | 85 | be->be_state == PNFS_BLOCK_INVALID_DATA); |
86 | } | 86 | } |
87 | 87 | ||
88 | /* The data we are handed might be spread across several bios. We need | 88 | /* The data we are handed might be spread across several bios. We need |
89 | * to track when the last one is finished. | 89 | * to track when the last one is finished. |
90 | */ | 90 | */ |
91 | struct parallel_io { | 91 | struct parallel_io { |
92 | struct kref refcnt; | 92 | struct kref refcnt; |
93 | struct rpc_call_ops call_ops; | 93 | void (*pnfs_callback) (void *data, int num_se); |
94 | void (*pnfs_callback) (void *data); | ||
95 | void *data; | 94 | void *data; |
95 | int bse_count; | ||
96 | }; | 96 | }; |
97 | 97 | ||
98 | static inline struct parallel_io *alloc_parallel(void *data) | 98 | static inline struct parallel_io *alloc_parallel(void *data) |
99 | { | 99 | { |
100 | struct parallel_io *rv; | 100 | struct parallel_io *rv; |
101 | 101 | ||
102 | rv = kmalloc(sizeof(*rv), GFP_NOFS); | 102 | rv = kmalloc(sizeof(*rv), GFP_NOFS); |
103 | if (rv) { | 103 | if (rv) { |
104 | rv->data = data; | 104 | rv->data = data; |
105 | kref_init(&rv->refcnt); | 105 | kref_init(&rv->refcnt); |
106 | rv->bse_count = 0; | ||
106 | } | 107 | } |
107 | return rv; | 108 | return rv; |
108 | } | 109 | } |
109 | 110 | ||
110 | static inline void get_parallel(struct parallel_io *p) | 111 | static inline void get_parallel(struct parallel_io *p) |
111 | { | 112 | { |
112 | kref_get(&p->refcnt); | 113 | kref_get(&p->refcnt); |
113 | } | 114 | } |
114 | 115 | ||
115 | static void destroy_parallel(struct kref *kref) | 116 | static void destroy_parallel(struct kref *kref) |
116 | { | 117 | { |
117 | struct parallel_io *p = container_of(kref, struct parallel_io, refcnt); | 118 | struct parallel_io *p = container_of(kref, struct parallel_io, refcnt); |
118 | 119 | ||
119 | dprintk("%s enter\n", __func__); | 120 | dprintk("%s enter\n", __func__); |
120 | p->pnfs_callback(p->data); | 121 | p->pnfs_callback(p->data, p->bse_count); |
121 | kfree(p); | 122 | kfree(p); |
122 | } | 123 | } |
123 | 124 | ||
124 | static inline void put_parallel(struct parallel_io *p) | 125 | static inline void put_parallel(struct parallel_io *p) |
125 | { | 126 | { |
126 | kref_put(&p->refcnt, destroy_parallel); | 127 | kref_put(&p->refcnt, destroy_parallel); |
127 | } | 128 | } |
128 | 129 | ||
129 | static struct bio * | 130 | static struct bio * |
130 | bl_submit_bio(int rw, struct bio *bio) | 131 | bl_submit_bio(int rw, struct bio *bio) |
131 | { | 132 | { |
132 | if (bio) { | 133 | if (bio) { |
133 | get_parallel(bio->bi_private); | 134 | get_parallel(bio->bi_private); |
134 | dprintk("%s submitting %s bio %u@%llu\n", __func__, | 135 | dprintk("%s submitting %s bio %u@%llu\n", __func__, |
135 | rw == READ ? "read" : "write", | 136 | rw == READ ? "read" : "write", |
136 | bio->bi_size, (unsigned long long)bio->bi_sector); | 137 | bio->bi_size, (unsigned long long)bio->bi_sector); |
137 | submit_bio(rw, bio); | 138 | submit_bio(rw, bio); |
138 | } | 139 | } |
139 | return NULL; | 140 | return NULL; |
140 | } | 141 | } |
141 | 142 | ||
142 | static struct bio *bl_alloc_init_bio(int npg, sector_t isect, | 143 | static struct bio *bl_alloc_init_bio(int npg, sector_t isect, |
143 | struct pnfs_block_extent *be, | 144 | struct pnfs_block_extent *be, |
144 | void (*end_io)(struct bio *, int err), | 145 | void (*end_io)(struct bio *, int err), |
145 | struct parallel_io *par) | 146 | struct parallel_io *par) |
146 | { | 147 | { |
147 | struct bio *bio; | 148 | struct bio *bio; |
148 | 149 | ||
150 | npg = min(npg, BIO_MAX_PAGES); | ||
149 | bio = bio_alloc(GFP_NOIO, npg); | 151 | bio = bio_alloc(GFP_NOIO, npg); |
150 | if (!bio) | 152 | if (!bio && (current->flags & PF_MEMALLOC)) { |
151 | return NULL; | 153 | while (!bio && (npg /= 2)) |
154 | bio = bio_alloc(GFP_NOIO, npg); | ||
155 | } | ||
152 | 156 | ||
153 | bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; | 157 | if (bio) { |
154 | bio->bi_bdev = be->be_mdev; | 158 | bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; |
155 | bio->bi_end_io = end_io; | 159 | bio->bi_bdev = be->be_mdev; |
156 | bio->bi_private = par; | 160 | bio->bi_end_io = end_io; |
161 | bio->bi_private = par; | ||
162 | } | ||
157 | return bio; | 163 | return bio; |
158 | } | 164 | } |
159 | 165 | ||
160 | static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw, | 166 | static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw, |
161 | sector_t isect, struct page *page, | 167 | sector_t isect, struct page *page, |
162 | struct pnfs_block_extent *be, | 168 | struct pnfs_block_extent *be, |
163 | void (*end_io)(struct bio *, int err), | 169 | void (*end_io)(struct bio *, int err), |
164 | struct parallel_io *par) | 170 | struct parallel_io *par) |
165 | { | 171 | { |
166 | retry: | 172 | retry: |
167 | if (!bio) { | 173 | if (!bio) { |
168 | bio = bl_alloc_init_bio(npg, isect, be, end_io, par); | 174 | bio = bl_alloc_init_bio(npg, isect, be, end_io, par); |
169 | if (!bio) | 175 | if (!bio) |
170 | return ERR_PTR(-ENOMEM); | 176 | return ERR_PTR(-ENOMEM); |
171 | } | 177 | } |
172 | if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { | 178 | if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { |
173 | bio = bl_submit_bio(rw, bio); | 179 | bio = bl_submit_bio(rw, bio); |
174 | goto retry; | 180 | goto retry; |
175 | } | 181 | } |
176 | return bio; | 182 | return bio; |
177 | } | 183 | } |
178 | 184 | ||
179 | /* This is basically copied from mpage_end_io_read */ | 185 | /* This is basically copied from mpage_end_io_read */ |
180 | static void bl_end_io_read(struct bio *bio, int err) | 186 | static void bl_end_io_read(struct bio *bio, int err) |
181 | { | 187 | { |
182 | struct parallel_io *par = bio->bi_private; | 188 | struct parallel_io *par = bio->bi_private; |
183 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 189 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
184 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | 190 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; |
185 | struct nfs_read_data *rdata = (struct nfs_read_data *)par->data; | 191 | struct nfs_read_data *rdata = (struct nfs_read_data *)par->data; |
186 | 192 | ||
187 | do { | 193 | do { |
188 | struct page *page = bvec->bv_page; | 194 | struct page *page = bvec->bv_page; |
189 | 195 | ||
190 | if (--bvec >= bio->bi_io_vec) | 196 | if (--bvec >= bio->bi_io_vec) |
191 | prefetchw(&bvec->bv_page->flags); | 197 | prefetchw(&bvec->bv_page->flags); |
192 | if (uptodate) | 198 | if (uptodate) |
193 | SetPageUptodate(page); | 199 | SetPageUptodate(page); |
194 | } while (bvec >= bio->bi_io_vec); | 200 | } while (bvec >= bio->bi_io_vec); |
195 | if (!uptodate) { | 201 | if (!uptodate) { |
196 | if (!rdata->pnfs_error) | 202 | if (!rdata->pnfs_error) |
197 | rdata->pnfs_error = -EIO; | 203 | rdata->pnfs_error = -EIO; |
198 | pnfs_set_lo_fail(rdata->lseg); | 204 | pnfs_set_lo_fail(rdata->lseg); |
199 | } | 205 | } |
200 | bio_put(bio); | 206 | bio_put(bio); |
201 | put_parallel(par); | 207 | put_parallel(par); |
202 | } | 208 | } |
203 | 209 | ||
204 | static void bl_read_cleanup(struct work_struct *work) | 210 | static void bl_read_cleanup(struct work_struct *work) |
205 | { | 211 | { |
206 | struct rpc_task *task; | 212 | struct rpc_task *task; |
207 | struct nfs_read_data *rdata; | 213 | struct nfs_read_data *rdata; |
208 | dprintk("%s enter\n", __func__); | 214 | dprintk("%s enter\n", __func__); |
209 | task = container_of(work, struct rpc_task, u.tk_work); | 215 | task = container_of(work, struct rpc_task, u.tk_work); |
210 | rdata = container_of(task, struct nfs_read_data, task); | 216 | rdata = container_of(task, struct nfs_read_data, task); |
211 | pnfs_ld_read_done(rdata); | 217 | pnfs_ld_read_done(rdata); |
212 | } | 218 | } |
213 | 219 | ||
214 | static void | 220 | static void |
215 | bl_end_par_io_read(void *data) | 221 | bl_end_par_io_read(void *data, int unused) |
216 | { | 222 | { |
217 | struct nfs_read_data *rdata = data; | 223 | struct nfs_read_data *rdata = data; |
218 | 224 | ||
225 | rdata->task.tk_status = rdata->pnfs_error; | ||
219 | INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup); | 226 | INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup); |
220 | schedule_work(&rdata->task.u.tk_work); | 227 | schedule_work(&rdata->task.u.tk_work); |
221 | } | 228 | } |
222 | 229 | ||
223 | /* We don't want normal .rpc_call_done callback used, so we replace it | ||
224 | * with this stub. | ||
225 | */ | ||
226 | static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata) | ||
227 | { | ||
228 | return; | ||
229 | } | ||
230 | |||
231 | static enum pnfs_try_status | 230 | static enum pnfs_try_status |
232 | bl_read_pagelist(struct nfs_read_data *rdata) | 231 | bl_read_pagelist(struct nfs_read_data *rdata) |
233 | { | 232 | { |
234 | int i, hole; | 233 | int i, hole; |
235 | struct bio *bio = NULL; | 234 | struct bio *bio = NULL; |
236 | struct pnfs_block_extent *be = NULL, *cow_read = NULL; | 235 | struct pnfs_block_extent *be = NULL, *cow_read = NULL; |
237 | sector_t isect, extent_length = 0; | 236 | sector_t isect, extent_length = 0; |
238 | struct parallel_io *par; | 237 | struct parallel_io *par; |
239 | loff_t f_offset = rdata->args.offset; | 238 | loff_t f_offset = rdata->args.offset; |
240 | size_t count = rdata->args.count; | 239 | size_t count = rdata->args.count; |
241 | struct page **pages = rdata->args.pages; | 240 | struct page **pages = rdata->args.pages; |
242 | int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT; | 241 | int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT; |
243 | 242 | ||
244 | dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__, | 243 | dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__, |
245 | rdata->npages, f_offset, count); | 244 | rdata->npages, f_offset, count); |
246 | 245 | ||
247 | par = alloc_parallel(rdata); | 246 | par = alloc_parallel(rdata); |
248 | if (!par) | 247 | if (!par) |
249 | goto use_mds; | 248 | goto use_mds; |
250 | par->call_ops = *rdata->mds_ops; | ||
251 | par->call_ops.rpc_call_done = bl_rpc_do_nothing; | ||
252 | par->pnfs_callback = bl_end_par_io_read; | 249 | par->pnfs_callback = bl_end_par_io_read; |
253 | /* At this point, we can no longer jump to use_mds */ | 250 | /* At this point, we can no longer jump to use_mds */ |
254 | 251 | ||
255 | isect = (sector_t) (f_offset >> SECTOR_SHIFT); | 252 | isect = (sector_t) (f_offset >> SECTOR_SHIFT); |
256 | /* Code assumes extents are page-aligned */ | 253 | /* Code assumes extents are page-aligned */ |
257 | for (i = pg_index; i < rdata->npages; i++) { | 254 | for (i = pg_index; i < rdata->npages; i++) { |
258 | if (!extent_length) { | 255 | if (!extent_length) { |
259 | /* We've used up the previous extent */ | 256 | /* We've used up the previous extent */ |
260 | bl_put_extent(be); | 257 | bl_put_extent(be); |
261 | bl_put_extent(cow_read); | 258 | bl_put_extent(cow_read); |
262 | bio = bl_submit_bio(READ, bio); | 259 | bio = bl_submit_bio(READ, bio); |
263 | /* Get the next one */ | 260 | /* Get the next one */ |
264 | be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg), | 261 | be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg), |
265 | isect, &cow_read); | 262 | isect, &cow_read); |
266 | if (!be) { | 263 | if (!be) { |
267 | rdata->pnfs_error = -EIO; | 264 | rdata->pnfs_error = -EIO; |
268 | goto out; | 265 | goto out; |
269 | } | 266 | } |
270 | extent_length = be->be_length - | 267 | extent_length = be->be_length - |
271 | (isect - be->be_f_offset); | 268 | (isect - be->be_f_offset); |
272 | if (cow_read) { | 269 | if (cow_read) { |
273 | sector_t cow_length = cow_read->be_length - | 270 | sector_t cow_length = cow_read->be_length - |
274 | (isect - cow_read->be_f_offset); | 271 | (isect - cow_read->be_f_offset); |
275 | extent_length = min(extent_length, cow_length); | 272 | extent_length = min(extent_length, cow_length); |
276 | } | 273 | } |
277 | } | 274 | } |
278 | hole = is_hole(be, isect); | 275 | hole = is_hole(be, isect); |
279 | if (hole && !cow_read) { | 276 | if (hole && !cow_read) { |
280 | bio = bl_submit_bio(READ, bio); | 277 | bio = bl_submit_bio(READ, bio); |
281 | /* Fill hole w/ zeroes w/o accessing device */ | 278 | /* Fill hole w/ zeroes w/o accessing device */ |
282 | dprintk("%s Zeroing page for hole\n", __func__); | 279 | dprintk("%s Zeroing page for hole\n", __func__); |
283 | zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); | 280 | zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); |
284 | print_page(pages[i]); | 281 | print_page(pages[i]); |
285 | SetPageUptodate(pages[i]); | 282 | SetPageUptodate(pages[i]); |
286 | } else { | 283 | } else { |
287 | struct pnfs_block_extent *be_read; | 284 | struct pnfs_block_extent *be_read; |
288 | 285 | ||
289 | be_read = (hole && cow_read) ? cow_read : be; | 286 | be_read = (hole && cow_read) ? cow_read : be; |
290 | bio = bl_add_page_to_bio(bio, rdata->npages - i, READ, | 287 | bio = bl_add_page_to_bio(bio, rdata->npages - i, READ, |
291 | isect, pages[i], be_read, | 288 | isect, pages[i], be_read, |
292 | bl_end_io_read, par); | 289 | bl_end_io_read, par); |
293 | if (IS_ERR(bio)) { | 290 | if (IS_ERR(bio)) { |
294 | rdata->pnfs_error = PTR_ERR(bio); | 291 | rdata->pnfs_error = PTR_ERR(bio); |
295 | bio = NULL; | 292 | bio = NULL; |
296 | goto out; | 293 | goto out; |
297 | } | 294 | } |
298 | } | 295 | } |
299 | isect += PAGE_CACHE_SECTORS; | 296 | isect += PAGE_CACHE_SECTORS; |
300 | extent_length -= PAGE_CACHE_SECTORS; | 297 | extent_length -= PAGE_CACHE_SECTORS; |
301 | } | 298 | } |
302 | if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) { | 299 | if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) { |
303 | rdata->res.eof = 1; | 300 | rdata->res.eof = 1; |
304 | rdata->res.count = rdata->inode->i_size - f_offset; | 301 | rdata->res.count = rdata->inode->i_size - f_offset; |
305 | } else { | 302 | } else { |
306 | rdata->res.count = (isect << SECTOR_SHIFT) - f_offset; | 303 | rdata->res.count = (isect << SECTOR_SHIFT) - f_offset; |
307 | } | 304 | } |
308 | out: | 305 | out: |
309 | bl_put_extent(be); | 306 | bl_put_extent(be); |
310 | bl_put_extent(cow_read); | 307 | bl_put_extent(cow_read); |
311 | bl_submit_bio(READ, bio); | 308 | bl_submit_bio(READ, bio); |
312 | put_parallel(par); | 309 | put_parallel(par); |
313 | return PNFS_ATTEMPTED; | 310 | return PNFS_ATTEMPTED; |
314 | 311 | ||
315 | use_mds: | 312 | use_mds: |
316 | dprintk("Giving up and using normal NFS\n"); | 313 | dprintk("Giving up and using normal NFS\n"); |
317 | return PNFS_NOT_ATTEMPTED; | 314 | return PNFS_NOT_ATTEMPTED; |
318 | } | 315 | } |
319 | 316 | ||
320 | static void mark_extents_written(struct pnfs_block_layout *bl, | 317 | static void mark_extents_written(struct pnfs_block_layout *bl, |
321 | __u64 offset, __u32 count) | 318 | __u64 offset, __u32 count) |
322 | { | 319 | { |
323 | sector_t isect, end; | 320 | sector_t isect, end; |
324 | struct pnfs_block_extent *be; | 321 | struct pnfs_block_extent *be; |
322 | struct pnfs_block_short_extent *se; | ||
325 | 323 | ||
326 | dprintk("%s(%llu, %u)\n", __func__, offset, count); | 324 | dprintk("%s(%llu, %u)\n", __func__, offset, count); |
327 | if (count == 0) | 325 | if (count == 0) |
328 | return; | 326 | return; |
329 | isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT; | 327 | isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT; |
330 | end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK); | 328 | end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK); |
331 | end >>= SECTOR_SHIFT; | 329 | end >>= SECTOR_SHIFT; |
332 | while (isect < end) { | 330 | while (isect < end) { |
333 | sector_t len; | 331 | sector_t len; |
334 | be = bl_find_get_extent(bl, isect, NULL); | 332 | be = bl_find_get_extent(bl, isect, NULL); |
335 | BUG_ON(!be); /* FIXME */ | 333 | BUG_ON(!be); /* FIXME */ |
336 | len = min(end, be->be_f_offset + be->be_length) - isect; | 334 | len = min(end, be->be_f_offset + be->be_length) - isect; |
337 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) | 335 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) { |
338 | bl_mark_for_commit(be, isect, len); /* What if fails? */ | 336 | se = bl_pop_one_short_extent(be->be_inval); |
337 | BUG_ON(!se); | ||
338 | bl_mark_for_commit(be, isect, len, se); | ||
339 | } | ||
339 | isect += len; | 340 | isect += len; |
340 | bl_put_extent(be); | 341 | bl_put_extent(be); |
341 | } | 342 | } |
342 | } | 343 | } |
343 | 344 | ||
344 | static void bl_end_io_write_zero(struct bio *bio, int err) | 345 | static void bl_end_io_write_zero(struct bio *bio, int err) |
345 | { | 346 | { |
346 | struct parallel_io *par = bio->bi_private; | 347 | struct parallel_io *par = bio->bi_private; |
347 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 348 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
348 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | 349 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; |
349 | struct nfs_write_data *wdata = (struct nfs_write_data *)par->data; | 350 | struct nfs_write_data *wdata = (struct nfs_write_data *)par->data; |
350 | 351 | ||
351 | do { | 352 | do { |
352 | struct page *page = bvec->bv_page; | 353 | struct page *page = bvec->bv_page; |
353 | 354 | ||
354 | if (--bvec >= bio->bi_io_vec) | 355 | if (--bvec >= bio->bi_io_vec) |
355 | prefetchw(&bvec->bv_page->flags); | 356 | prefetchw(&bvec->bv_page->flags); |
356 | /* This is the zeroing page we added */ | 357 | /* This is the zeroing page we added */ |
357 | end_page_writeback(page); | 358 | end_page_writeback(page); |
358 | page_cache_release(page); | 359 | page_cache_release(page); |
359 | } while (bvec >= bio->bi_io_vec); | 360 | } while (bvec >= bio->bi_io_vec); |
360 | if (!uptodate) { | 361 | |
362 | if (unlikely(!uptodate)) { | ||
361 | if (!wdata->pnfs_error) | 363 | if (!wdata->pnfs_error) |
362 | wdata->pnfs_error = -EIO; | 364 | wdata->pnfs_error = -EIO; |
363 | pnfs_set_lo_fail(wdata->lseg); | 365 | pnfs_set_lo_fail(wdata->lseg); |
364 | } | 366 | } |
365 | bio_put(bio); | 367 | bio_put(bio); |
366 | put_parallel(par); | 368 | put_parallel(par); |
367 | } | 369 | } |
368 | 370 | ||
369 | /* This is basically copied from mpage_end_io_read */ | ||
370 | static void bl_end_io_write(struct bio *bio, int err) | 371 | static void bl_end_io_write(struct bio *bio, int err) |
371 | { | 372 | { |
372 | struct parallel_io *par = bio->bi_private; | 373 | struct parallel_io *par = bio->bi_private; |
373 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 374 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
374 | struct nfs_write_data *wdata = (struct nfs_write_data *)par->data; | 375 | struct nfs_write_data *wdata = (struct nfs_write_data *)par->data; |
375 | 376 | ||
376 | if (!uptodate) { | 377 | if (!uptodate) { |
377 | if (!wdata->pnfs_error) | 378 | if (!wdata->pnfs_error) |
378 | wdata->pnfs_error = -EIO; | 379 | wdata->pnfs_error = -EIO; |
379 | pnfs_set_lo_fail(wdata->lseg); | 380 | pnfs_set_lo_fail(wdata->lseg); |
380 | } | 381 | } |
381 | bio_put(bio); | 382 | bio_put(bio); |
382 | put_parallel(par); | 383 | put_parallel(par); |
383 | } | 384 | } |
384 | 385 | ||
385 | /* Function scheduled for call during bl_end_par_io_write, | 386 | /* Function scheduled for call during bl_end_par_io_write, |
386 | * it marks sectors as written and extends the commitlist. | 387 | * it marks sectors as written and extends the commitlist. |
387 | */ | 388 | */ |
388 | static void bl_write_cleanup(struct work_struct *work) | 389 | static void bl_write_cleanup(struct work_struct *work) |
389 | { | 390 | { |
390 | struct rpc_task *task; | 391 | struct rpc_task *task; |
391 | struct nfs_write_data *wdata; | 392 | struct nfs_write_data *wdata; |
392 | dprintk("%s enter\n", __func__); | 393 | dprintk("%s enter\n", __func__); |
393 | task = container_of(work, struct rpc_task, u.tk_work); | 394 | task = container_of(work, struct rpc_task, u.tk_work); |
394 | wdata = container_of(task, struct nfs_write_data, task); | 395 | wdata = container_of(task, struct nfs_write_data, task); |
395 | if (!wdata->pnfs_error) { | 396 | if (likely(!wdata->pnfs_error)) { |
396 | /* Marks for LAYOUTCOMMIT */ | 397 | /* Marks for LAYOUTCOMMIT */ |
397 | mark_extents_written(BLK_LSEG2EXT(wdata->lseg), | 398 | mark_extents_written(BLK_LSEG2EXT(wdata->lseg), |
398 | wdata->args.offset, wdata->args.count); | 399 | wdata->args.offset, wdata->args.count); |
399 | } | 400 | } |
400 | pnfs_ld_write_done(wdata); | 401 | pnfs_ld_write_done(wdata); |
401 | } | 402 | } |
402 | 403 | ||
403 | /* Called when last of bios associated with a bl_write_pagelist call finishes */ | 404 | /* Called when last of bios associated with a bl_write_pagelist call finishes */ |
404 | static void bl_end_par_io_write(void *data) | 405 | static void bl_end_par_io_write(void *data, int num_se) |
405 | { | 406 | { |
406 | struct nfs_write_data *wdata = data; | 407 | struct nfs_write_data *wdata = data; |
407 | 408 | ||
408 | wdata->task.tk_status = 0; | 409 | if (unlikely(wdata->pnfs_error)) { |
410 | bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval, | ||
411 | num_se); | ||
412 | } | ||
413 | |||
414 | wdata->task.tk_status = wdata->pnfs_error; | ||
409 | wdata->verf.committed = NFS_FILE_SYNC; | 415 | wdata->verf.committed = NFS_FILE_SYNC; |
410 | INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup); | 416 | INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup); |
411 | schedule_work(&wdata->task.u.tk_work); | 417 | schedule_work(&wdata->task.u.tk_work); |
412 | } | 418 | } |
413 | 419 | ||
414 | /* FIXME STUB - mark intersection of layout and page as bad, so is not | 420 | /* FIXME STUB - mark intersection of layout and page as bad, so is not |
415 | * used again. | 421 | * used again. |
416 | */ | 422 | */ |
417 | static void mark_bad_read(void) | 423 | static void mark_bad_read(void) |
418 | { | 424 | { |
419 | return; | 425 | return; |
420 | } | 426 | } |
421 | 427 | ||
422 | /* | 428 | /* |
423 | * map_block: map a requested I/0 block (isect) into an offset in the LVM | 429 | * map_block: map a requested I/0 block (isect) into an offset in the LVM |
424 | * block_device | 430 | * block_device |
425 | */ | 431 | */ |
426 | static void | 432 | static void |
427 | map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be) | 433 | map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be) |
428 | { | 434 | { |
429 | dprintk("%s enter be=%p\n", __func__, be); | 435 | dprintk("%s enter be=%p\n", __func__, be); |
430 | 436 | ||
431 | set_buffer_mapped(bh); | 437 | set_buffer_mapped(bh); |
432 | bh->b_bdev = be->be_mdev; | 438 | bh->b_bdev = be->be_mdev; |
433 | bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >> | 439 | bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >> |
434 | (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT); | 440 | (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT); |
435 | 441 | ||
436 | dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n", | 442 | dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n", |
437 | __func__, (unsigned long long)isect, (long)bh->b_blocknr, | 443 | __func__, (unsigned long long)isect, (long)bh->b_blocknr, |
438 | bh->b_size); | 444 | bh->b_size); |
439 | return; | 445 | return; |
440 | } | 446 | } |
441 | 447 | ||
442 | /* Given an unmapped page, zero it or read in page for COW, page is locked | 448 | /* Given an unmapped page, zero it or read in page for COW, page is locked |
443 | * by caller. | 449 | * by caller. |
444 | */ | 450 | */ |
445 | static int | 451 | static int |
446 | init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read) | 452 | init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read) |
447 | { | 453 | { |
448 | struct buffer_head *bh = NULL; | 454 | struct buffer_head *bh = NULL; |
449 | int ret = 0; | 455 | int ret = 0; |
450 | sector_t isect; | 456 | sector_t isect; |
451 | 457 | ||
452 | dprintk("%s enter, %p\n", __func__, page); | 458 | dprintk("%s enter, %p\n", __func__, page); |
453 | BUG_ON(PageUptodate(page)); | 459 | BUG_ON(PageUptodate(page)); |
454 | if (!cow_read) { | 460 | if (!cow_read) { |
455 | zero_user_segment(page, 0, PAGE_SIZE); | 461 | zero_user_segment(page, 0, PAGE_SIZE); |
456 | SetPageUptodate(page); | 462 | SetPageUptodate(page); |
457 | goto cleanup; | 463 | goto cleanup; |
458 | } | 464 | } |
459 | 465 | ||
460 | bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0); | 466 | bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0); |
461 | if (!bh) { | 467 | if (!bh) { |
462 | ret = -ENOMEM; | 468 | ret = -ENOMEM; |
463 | goto cleanup; | 469 | goto cleanup; |
464 | } | 470 | } |
465 | 471 | ||
466 | isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT; | 472 | isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT; |
467 | map_block(bh, isect, cow_read); | 473 | map_block(bh, isect, cow_read); |
468 | if (!bh_uptodate_or_lock(bh)) | 474 | if (!bh_uptodate_or_lock(bh)) |
469 | ret = bh_submit_read(bh); | 475 | ret = bh_submit_read(bh); |
470 | if (ret) | 476 | if (ret) |
471 | goto cleanup; | 477 | goto cleanup; |
472 | SetPageUptodate(page); | 478 | SetPageUptodate(page); |
473 | 479 | ||
474 | cleanup: | 480 | cleanup: |
475 | bl_put_extent(cow_read); | 481 | bl_put_extent(cow_read); |
476 | if (bh) | 482 | if (bh) |
477 | free_buffer_head(bh); | 483 | free_buffer_head(bh); |
478 | if (ret) { | 484 | if (ret) { |
479 | /* Need to mark layout with bad read...should now | 485 | /* Need to mark layout with bad read...should now |
480 | * just use nfs4 for reads and writes. | 486 | * just use nfs4 for reads and writes. |
481 | */ | 487 | */ |
482 | mark_bad_read(); | 488 | mark_bad_read(); |
483 | } | 489 | } |
484 | return ret; | 490 | return ret; |
485 | } | 491 | } |
486 | 492 | ||
493 | /* Find or create a zeroing page marked being writeback. | ||
494 | * Return ERR_PTR on error, NULL to indicate skip this page and page itself | ||
495 | * to indicate write out. | ||
496 | */ | ||
497 | static struct page * | ||
498 | bl_find_get_zeroing_page(struct inode *inode, pgoff_t index, | ||
499 | struct pnfs_block_extent *cow_read) | ||
500 | { | ||
501 | struct page *page; | ||
502 | int locked = 0; | ||
503 | page = find_get_page(inode->i_mapping, index); | ||
504 | if (page) | ||
505 | goto check_page; | ||
506 | |||
507 | page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); | ||
508 | if (unlikely(!page)) { | ||
509 | dprintk("%s oom\n", __func__); | ||
510 | return ERR_PTR(-ENOMEM); | ||
511 | } | ||
512 | locked = 1; | ||
513 | |||
514 | check_page: | ||
515 | /* PageDirty: Other will write this out | ||
516 | * PageWriteback: Other is writing this out | ||
517 | * PageUptodate: It was read before | ||
518 | */ | ||
519 | if (PageDirty(page) || PageWriteback(page)) { | ||
520 | print_page(page); | ||
521 | if (locked) | ||
522 | unlock_page(page); | ||
523 | page_cache_release(page); | ||
524 | return NULL; | ||
525 | } | ||
526 | |||
527 | if (!locked) { | ||
528 | lock_page(page); | ||
529 | locked = 1; | ||
530 | goto check_page; | ||
531 | } | ||
532 | if (!PageUptodate(page)) { | ||
533 | /* New page, readin or zero it */ | ||
534 | init_page_for_write(page, cow_read); | ||
535 | } | ||
536 | set_page_writeback(page); | ||
537 | unlock_page(page); | ||
538 | |||
539 | return page; | ||
540 | } | ||
541 | |||
487 | static enum pnfs_try_status | 542 | static enum pnfs_try_status |
488 | bl_write_pagelist(struct nfs_write_data *wdata, int sync) | 543 | bl_write_pagelist(struct nfs_write_data *wdata, int sync) |
489 | { | 544 | { |
490 | int i, ret, npg_zero, pg_index, last = 0; | 545 | int i, ret, npg_zero, pg_index, last = 0; |
491 | struct bio *bio = NULL; | 546 | struct bio *bio = NULL; |
492 | struct pnfs_block_extent *be = NULL, *cow_read = NULL; | 547 | struct pnfs_block_extent *be = NULL, *cow_read = NULL; |
493 | sector_t isect, last_isect = 0, extent_length = 0; | 548 | sector_t isect, last_isect = 0, extent_length = 0; |
494 | struct parallel_io *par; | 549 | struct parallel_io *par; |
495 | loff_t offset = wdata->args.offset; | 550 | loff_t offset = wdata->args.offset; |
496 | size_t count = wdata->args.count; | 551 | size_t count = wdata->args.count; |
497 | struct page **pages = wdata->args.pages; | 552 | struct page **pages = wdata->args.pages; |
498 | struct page *page; | 553 | struct page *page; |
499 | pgoff_t index; | 554 | pgoff_t index; |
500 | u64 temp; | 555 | u64 temp; |
501 | int npg_per_block = | 556 | int npg_per_block = |
502 | NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT; | 557 | NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT; |
503 | 558 | ||
504 | dprintk("%s enter, %Zu@%lld\n", __func__, count, offset); | 559 | dprintk("%s enter, %Zu@%lld\n", __func__, count, offset); |
505 | /* At this point, wdata->pages is a (sequential) list of nfs_pages. | 560 | /* At this point, wdata->pages is a (sequential) list of nfs_pages. |
506 | * We want to write each, and if there is an error set pnfs_error | 561 | * We want to write each, and if there is an error set pnfs_error |
507 | * to have it redone using nfs. | 562 | * to have it redone using nfs. |
508 | */ | 563 | */ |
509 | par = alloc_parallel(wdata); | 564 | par = alloc_parallel(wdata); |
510 | if (!par) | 565 | if (!par) |
511 | return PNFS_NOT_ATTEMPTED; | 566 | goto out_mds; |
512 | par->call_ops = *wdata->mds_ops; | ||
513 | par->call_ops.rpc_call_done = bl_rpc_do_nothing; | ||
514 | par->pnfs_callback = bl_end_par_io_write; | 567 | par->pnfs_callback = bl_end_par_io_write; |
515 | /* At this point, have to be more careful with error handling */ | 568 | /* At this point, have to be more careful with error handling */ |
516 | 569 | ||
517 | isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); | 570 | isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); |
518 | be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read); | 571 | be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read); |
519 | if (!be || !is_writable(be, isect)) { | 572 | if (!be || !is_writable(be, isect)) { |
520 | dprintk("%s no matching extents!\n", __func__); | 573 | dprintk("%s no matching extents!\n", __func__); |
521 | wdata->pnfs_error = -EINVAL; | 574 | goto out_mds; |
522 | goto out; | ||
523 | } | 575 | } |
524 | 576 | ||
525 | /* First page inside INVALID extent */ | 577 | /* First page inside INVALID extent */ |
526 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) { | 578 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) { |
579 | if (likely(!bl_push_one_short_extent(be->be_inval))) | ||
580 | par->bse_count++; | ||
581 | else | ||
582 | goto out_mds; | ||
527 | temp = offset >> PAGE_CACHE_SHIFT; | 583 | temp = offset >> PAGE_CACHE_SHIFT; |
528 | npg_zero = do_div(temp, npg_per_block); | 584 | npg_zero = do_div(temp, npg_per_block); |
529 | isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) & | 585 | isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) & |
530 | (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); | 586 | (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); |
531 | extent_length = be->be_length - (isect - be->be_f_offset); | 587 | extent_length = be->be_length - (isect - be->be_f_offset); |
532 | 588 | ||
533 | fill_invalid_ext: | 589 | fill_invalid_ext: |
534 | dprintk("%s need to zero %d pages\n", __func__, npg_zero); | 590 | dprintk("%s need to zero %d pages\n", __func__, npg_zero); |
535 | for (;npg_zero > 0; npg_zero--) { | 591 | for (;npg_zero > 0; npg_zero--) { |
536 | if (bl_is_sector_init(be->be_inval, isect)) { | 592 | if (bl_is_sector_init(be->be_inval, isect)) { |
537 | dprintk("isect %llu already init\n", | 593 | dprintk("isect %llu already init\n", |
538 | (unsigned long long)isect); | 594 | (unsigned long long)isect); |
539 | goto next_page; | 595 | goto next_page; |
540 | } | 596 | } |
541 | /* page ref released in bl_end_io_write_zero */ | 597 | /* page ref released in bl_end_io_write_zero */ |
542 | index = isect >> PAGE_CACHE_SECTOR_SHIFT; | 598 | index = isect >> PAGE_CACHE_SECTOR_SHIFT; |
543 | dprintk("%s zero %dth page: index %lu isect %llu\n", | 599 | dprintk("%s zero %dth page: index %lu isect %llu\n", |
544 | __func__, npg_zero, index, | 600 | __func__, npg_zero, index, |
545 | (unsigned long long)isect); | 601 | (unsigned long long)isect); |
546 | page = | 602 | page = bl_find_get_zeroing_page(wdata->inode, index, |
547 | find_or_create_page(wdata->inode->i_mapping, index, | 603 | cow_read); |
548 | GFP_NOFS); | 604 | if (unlikely(IS_ERR(page))) { |
549 | if (!page) { | 605 | wdata->pnfs_error = PTR_ERR(page); |
550 | dprintk("%s oom\n", __func__); | ||
551 | wdata->pnfs_error = -ENOMEM; | ||
552 | goto out; | 606 | goto out; |
553 | } | 607 | } else if (page == NULL) |
554 | |||
555 | /* PageDirty: Other will write this out | ||
556 | * PageWriteback: Other is writing this out | ||
557 | * PageUptodate: It was read before | ||
558 | * sector_initialized: already written out | ||
559 | */ | ||
560 | if (PageDirty(page) || PageWriteback(page)) { | ||
561 | print_page(page); | ||
562 | unlock_page(page); | ||
563 | page_cache_release(page); | ||
564 | goto next_page; | 608 | goto next_page; |
565 | } | ||
566 | if (!PageUptodate(page)) { | ||
567 | /* New page, readin or zero it */ | ||
568 | init_page_for_write(page, cow_read); | ||
569 | } | ||
570 | set_page_writeback(page); | ||
571 | unlock_page(page); | ||
572 | 609 | ||
573 | ret = bl_mark_sectors_init(be->be_inval, isect, | 610 | ret = bl_mark_sectors_init(be->be_inval, isect, |
574 | PAGE_CACHE_SECTORS, | 611 | PAGE_CACHE_SECTORS); |
575 | NULL); | ||
576 | if (unlikely(ret)) { | 612 | if (unlikely(ret)) { |
577 | dprintk("%s bl_mark_sectors_init fail %d\n", | 613 | dprintk("%s bl_mark_sectors_init fail %d\n", |
578 | __func__, ret); | 614 | __func__, ret); |
579 | end_page_writeback(page); | 615 | end_page_writeback(page); |
580 | page_cache_release(page); | 616 | page_cache_release(page); |
581 | wdata->pnfs_error = ret; | 617 | wdata->pnfs_error = ret; |
582 | goto out; | 618 | goto out; |
583 | } | 619 | } |
620 | if (likely(!bl_push_one_short_extent(be->be_inval))) | ||
621 | par->bse_count++; | ||
622 | else { | ||
623 | end_page_writeback(page); | ||
624 | page_cache_release(page); | ||
625 | wdata->pnfs_error = -ENOMEM; | ||
626 | goto out; | ||
627 | } | ||
628 | /* FIXME: This should be done in bi_end_io */ | ||
629 | mark_extents_written(BLK_LSEG2EXT(wdata->lseg), | ||
630 | page->index << PAGE_CACHE_SHIFT, | ||
631 | PAGE_CACHE_SIZE); | ||
632 | |||
584 | bio = bl_add_page_to_bio(bio, npg_zero, WRITE, | 633 | bio = bl_add_page_to_bio(bio, npg_zero, WRITE, |
585 | isect, page, be, | 634 | isect, page, be, |
586 | bl_end_io_write_zero, par); | 635 | bl_end_io_write_zero, par); |
587 | if (IS_ERR(bio)) { | 636 | if (IS_ERR(bio)) { |
588 | wdata->pnfs_error = PTR_ERR(bio); | 637 | wdata->pnfs_error = PTR_ERR(bio); |
589 | bio = NULL; | 638 | bio = NULL; |
590 | goto out; | 639 | goto out; |
591 | } | 640 | } |
592 | /* FIXME: This should be done in bi_end_io */ | ||
593 | mark_extents_written(BLK_LSEG2EXT(wdata->lseg), | ||
594 | page->index << PAGE_CACHE_SHIFT, | ||
595 | PAGE_CACHE_SIZE); | ||
596 | next_page: | 641 | next_page: |
597 | isect += PAGE_CACHE_SECTORS; | 642 | isect += PAGE_CACHE_SECTORS; |
598 | extent_length -= PAGE_CACHE_SECTORS; | 643 | extent_length -= PAGE_CACHE_SECTORS; |
599 | } | 644 | } |
600 | if (last) | 645 | if (last) |
601 | goto write_done; | 646 | goto write_done; |
602 | } | 647 | } |
603 | bio = bl_submit_bio(WRITE, bio); | 648 | bio = bl_submit_bio(WRITE, bio); |
604 | 649 | ||
605 | /* Middle pages */ | 650 | /* Middle pages */ |
606 | pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT; | 651 | pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT; |
607 | for (i = pg_index; i < wdata->npages; i++) { | 652 | for (i = pg_index; i < wdata->npages; i++) { |
608 | if (!extent_length) { | 653 | if (!extent_length) { |
609 | /* We've used up the previous extent */ | 654 | /* We've used up the previous extent */ |
610 | bl_put_extent(be); | 655 | bl_put_extent(be); |
611 | bio = bl_submit_bio(WRITE, bio); | 656 | bio = bl_submit_bio(WRITE, bio); |
612 | /* Get the next one */ | 657 | /* Get the next one */ |
613 | be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), | 658 | be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), |
614 | isect, NULL); | 659 | isect, NULL); |
615 | if (!be || !is_writable(be, isect)) { | 660 | if (!be || !is_writable(be, isect)) { |
616 | wdata->pnfs_error = -EINVAL; | 661 | wdata->pnfs_error = -EINVAL; |
617 | goto out; | 662 | goto out; |
618 | } | 663 | } |
664 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) { | ||
665 | if (likely(!bl_push_one_short_extent( | ||
666 | be->be_inval))) | ||
667 | par->bse_count++; | ||
668 | else { | ||
669 | wdata->pnfs_error = -ENOMEM; | ||
670 | goto out; | ||
671 | } | ||
672 | } | ||
619 | extent_length = be->be_length - | 673 | extent_length = be->be_length - |
620 | (isect - be->be_f_offset); | 674 | (isect - be->be_f_offset); |
621 | } | 675 | } |
622 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) { | 676 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) { |
623 | ret = bl_mark_sectors_init(be->be_inval, isect, | 677 | ret = bl_mark_sectors_init(be->be_inval, isect, |
624 | PAGE_CACHE_SECTORS, | 678 | PAGE_CACHE_SECTORS); |
625 | NULL); | ||
626 | if (unlikely(ret)) { | 679 | if (unlikely(ret)) { |
627 | dprintk("%s bl_mark_sectors_init fail %d\n", | 680 | dprintk("%s bl_mark_sectors_init fail %d\n", |
628 | __func__, ret); | 681 | __func__, ret); |
629 | wdata->pnfs_error = ret; | 682 | wdata->pnfs_error = ret; |
630 | goto out; | 683 | goto out; |
631 | } | 684 | } |
632 | } | 685 | } |
633 | bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE, | 686 | bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE, |
634 | isect, pages[i], be, | 687 | isect, pages[i], be, |
635 | bl_end_io_write, par); | 688 | bl_end_io_write, par); |
636 | if (IS_ERR(bio)) { | 689 | if (IS_ERR(bio)) { |
637 | wdata->pnfs_error = PTR_ERR(bio); | 690 | wdata->pnfs_error = PTR_ERR(bio); |
638 | bio = NULL; | 691 | bio = NULL; |
639 | goto out; | 692 | goto out; |
640 | } | 693 | } |
641 | isect += PAGE_CACHE_SECTORS; | 694 | isect += PAGE_CACHE_SECTORS; |
642 | last_isect = isect; | 695 | last_isect = isect; |
643 | extent_length -= PAGE_CACHE_SECTORS; | 696 | extent_length -= PAGE_CACHE_SECTORS; |
644 | } | 697 | } |
645 | 698 | ||
646 | /* Last page inside INVALID extent */ | 699 | /* Last page inside INVALID extent */ |
647 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) { | 700 | if (be->be_state == PNFS_BLOCK_INVALID_DATA) { |
648 | bio = bl_submit_bio(WRITE, bio); | 701 | bio = bl_submit_bio(WRITE, bio); |
649 | temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT; | 702 | temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT; |
650 | npg_zero = npg_per_block - do_div(temp, npg_per_block); | 703 | npg_zero = npg_per_block - do_div(temp, npg_per_block); |
651 | if (npg_zero < npg_per_block) { | 704 | if (npg_zero < npg_per_block) { |
652 | last = 1; | 705 | last = 1; |
653 | goto fill_invalid_ext; | 706 | goto fill_invalid_ext; |
654 | } | 707 | } |
655 | } | 708 | } |
656 | 709 | ||
657 | write_done: | 710 | write_done: |
658 | wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset); | 711 | wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset); |
659 | if (count < wdata->res.count) { | 712 | if (count < wdata->res.count) { |
660 | wdata->res.count = count; | 713 | wdata->res.count = count; |
661 | } | 714 | } |
662 | out: | 715 | out: |
663 | bl_put_extent(be); | 716 | bl_put_extent(be); |
664 | bl_submit_bio(WRITE, bio); | 717 | bl_submit_bio(WRITE, bio); |
665 | put_parallel(par); | 718 | put_parallel(par); |
666 | return PNFS_ATTEMPTED; | 719 | return PNFS_ATTEMPTED; |
720 | out_mds: | ||
721 | bl_put_extent(be); | ||
722 | kfree(par); | ||
723 | return PNFS_NOT_ATTEMPTED; | ||
667 | } | 724 | } |
668 | 725 | ||
669 | /* FIXME - range ignored */ | 726 | /* FIXME - range ignored */ |
670 | static void | 727 | static void |
671 | release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range) | 728 | release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range) |
672 | { | 729 | { |
673 | int i; | 730 | int i; |
674 | struct pnfs_block_extent *be; | 731 | struct pnfs_block_extent *be; |
675 | 732 | ||
676 | spin_lock(&bl->bl_ext_lock); | 733 | spin_lock(&bl->bl_ext_lock); |
677 | for (i = 0; i < EXTENT_LISTS; i++) { | 734 | for (i = 0; i < EXTENT_LISTS; i++) { |
678 | while (!list_empty(&bl->bl_extents[i])) { | 735 | while (!list_empty(&bl->bl_extents[i])) { |
679 | be = list_first_entry(&bl->bl_extents[i], | 736 | be = list_first_entry(&bl->bl_extents[i], |
680 | struct pnfs_block_extent, | 737 | struct pnfs_block_extent, |
681 | be_node); | 738 | be_node); |
682 | list_del(&be->be_node); | 739 | list_del(&be->be_node); |
683 | bl_put_extent(be); | 740 | bl_put_extent(be); |
684 | } | 741 | } |
685 | } | 742 | } |
686 | spin_unlock(&bl->bl_ext_lock); | 743 | spin_unlock(&bl->bl_ext_lock); |
687 | } | 744 | } |
688 | 745 | ||
689 | static void | 746 | static void |
690 | release_inval_marks(struct pnfs_inval_markings *marks) | 747 | release_inval_marks(struct pnfs_inval_markings *marks) |
691 | { | 748 | { |
692 | struct pnfs_inval_tracking *pos, *temp; | 749 | struct pnfs_inval_tracking *pos, *temp; |
750 | struct pnfs_block_short_extent *se, *stemp; | ||
693 | 751 | ||
694 | list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) { | 752 | list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) { |
695 | list_del(&pos->it_link); | 753 | list_del(&pos->it_link); |
696 | kfree(pos); | 754 | kfree(pos); |
697 | } | 755 | } |
756 | |||
757 | list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) { | ||
758 | list_del(&se->bse_node); | ||
759 | kfree(se); | ||
760 | } | ||
698 | return; | 761 | return; |
699 | } | 762 | } |
700 | 763 | ||
701 | static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo) | 764 | static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo) |
702 | { | 765 | { |
703 | struct pnfs_block_layout *bl = BLK_LO2EXT(lo); | 766 | struct pnfs_block_layout *bl = BLK_LO2EXT(lo); |
704 | 767 | ||
705 | dprintk("%s enter\n", __func__); | 768 | dprintk("%s enter\n", __func__); |
706 | release_extents(bl, NULL); | 769 | release_extents(bl, NULL); |
707 | release_inval_marks(&bl->bl_inval); | 770 | release_inval_marks(&bl->bl_inval); |
708 | kfree(bl); | 771 | kfree(bl); |
709 | } | 772 | } |
710 | 773 | ||
711 | static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode, | 774 | static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode, |
712 | gfp_t gfp_flags) | 775 | gfp_t gfp_flags) |
713 | { | 776 | { |
714 | struct pnfs_block_layout *bl; | 777 | struct pnfs_block_layout *bl; |
715 | 778 | ||
716 | dprintk("%s enter\n", __func__); | 779 | dprintk("%s enter\n", __func__); |
717 | bl = kzalloc(sizeof(*bl), gfp_flags); | 780 | bl = kzalloc(sizeof(*bl), gfp_flags); |
718 | if (!bl) | 781 | if (!bl) |
719 | return NULL; | 782 | return NULL; |
720 | spin_lock_init(&bl->bl_ext_lock); | 783 | spin_lock_init(&bl->bl_ext_lock); |
721 | INIT_LIST_HEAD(&bl->bl_extents[0]); | 784 | INIT_LIST_HEAD(&bl->bl_extents[0]); |
722 | INIT_LIST_HEAD(&bl->bl_extents[1]); | 785 | INIT_LIST_HEAD(&bl->bl_extents[1]); |
723 | INIT_LIST_HEAD(&bl->bl_commit); | 786 | INIT_LIST_HEAD(&bl->bl_commit); |
724 | INIT_LIST_HEAD(&bl->bl_committing); | 787 | INIT_LIST_HEAD(&bl->bl_committing); |
725 | bl->bl_count = 0; | 788 | bl->bl_count = 0; |
726 | bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT; | 789 | bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT; |
727 | BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize); | 790 | BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize); |
728 | return &bl->bl_layout; | 791 | return &bl->bl_layout; |
729 | } | 792 | } |
730 | 793 | ||
731 | static void bl_free_lseg(struct pnfs_layout_segment *lseg) | 794 | static void bl_free_lseg(struct pnfs_layout_segment *lseg) |
732 | { | 795 | { |
733 | dprintk("%s enter\n", __func__); | 796 | dprintk("%s enter\n", __func__); |
734 | kfree(lseg); | 797 | kfree(lseg); |
735 | } | 798 | } |
736 | 799 | ||
737 | /* We pretty much ignore lseg, and store all data layout wide, so we | 800 | /* We pretty much ignore lseg, and store all data layout wide, so we |
738 | * can correctly merge. | 801 | * can correctly merge. |
739 | */ | 802 | */ |
740 | static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo, | 803 | static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo, |
741 | struct nfs4_layoutget_res *lgr, | 804 | struct nfs4_layoutget_res *lgr, |
742 | gfp_t gfp_flags) | 805 | gfp_t gfp_flags) |
743 | { | 806 | { |
744 | struct pnfs_layout_segment *lseg; | 807 | struct pnfs_layout_segment *lseg; |
745 | int status; | 808 | int status; |
746 | 809 | ||
747 | dprintk("%s enter\n", __func__); | 810 | dprintk("%s enter\n", __func__); |
748 | lseg = kzalloc(sizeof(*lseg), gfp_flags); | 811 | lseg = kzalloc(sizeof(*lseg), gfp_flags); |
749 | if (!lseg) | 812 | if (!lseg) |
750 | return ERR_PTR(-ENOMEM); | 813 | return ERR_PTR(-ENOMEM); |
751 | status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags); | 814 | status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags); |
752 | if (status) { | 815 | if (status) { |
753 | /* We don't want to call the full-blown bl_free_lseg, | 816 | /* We don't want to call the full-blown bl_free_lseg, |
754 | * since on error extents were not touched. | 817 | * since on error extents were not touched. |
755 | */ | 818 | */ |
756 | kfree(lseg); | 819 | kfree(lseg); |
757 | return ERR_PTR(status); | 820 | return ERR_PTR(status); |
758 | } | 821 | } |
759 | return lseg; | 822 | return lseg; |
760 | } | 823 | } |
761 | 824 | ||
762 | static void | 825 | static void |
763 | bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr, | 826 | bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr, |
764 | const struct nfs4_layoutcommit_args *arg) | 827 | const struct nfs4_layoutcommit_args *arg) |
765 | { | 828 | { |
766 | dprintk("%s enter\n", __func__); | 829 | dprintk("%s enter\n", __func__); |
767 | encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg); | 830 | encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg); |
768 | } | 831 | } |
769 | 832 | ||
770 | static void | 833 | static void |
771 | bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata) | 834 | bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata) |
772 | { | 835 | { |
773 | struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout; | 836 | struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout; |
774 | 837 | ||
775 | dprintk("%s enter\n", __func__); | 838 | dprintk("%s enter\n", __func__); |
776 | clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status); | 839 | clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status); |
777 | } | 840 | } |
778 | 841 | ||
779 | static void free_blk_mountid(struct block_mount_id *mid) | 842 | static void free_blk_mountid(struct block_mount_id *mid) |
780 | { | 843 | { |
781 | if (mid) { | 844 | if (mid) { |
782 | struct pnfs_block_dev *dev; | 845 | struct pnfs_block_dev *dev, *tmp; |
783 | spin_lock(&mid->bm_lock); | 846 | |
784 | while (!list_empty(&mid->bm_devlist)) { | 847 | /* No need to take bm_lock as we are last user freeing bm_devlist */ |
785 | dev = list_first_entry(&mid->bm_devlist, | 848 | list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) { |
786 | struct pnfs_block_dev, | ||
787 | bm_node); | ||
788 | list_del(&dev->bm_node); | 849 | list_del(&dev->bm_node); |
789 | bl_free_block_dev(dev); | 850 | bl_free_block_dev(dev); |
790 | } | 851 | } |
791 | spin_unlock(&mid->bm_lock); | ||
792 | kfree(mid); | 852 | kfree(mid); |
793 | } | 853 | } |
794 | } | 854 | } |
795 | 855 | ||
796 | /* This is mostly copied from the filelayout's get_device_info function. | 856 | /* This is mostly copied from the filelayout's get_device_info function. |
797 | * It seems much of this should be at the generic pnfs level. | 857 | * It seems much of this should be at the generic pnfs level. |
798 | */ | 858 | */ |
799 | static struct pnfs_block_dev * | 859 | static struct pnfs_block_dev * |
800 | nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh, | 860 | nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh, |
801 | struct nfs4_deviceid *d_id) | 861 | struct nfs4_deviceid *d_id) |
802 | { | 862 | { |
803 | struct pnfs_device *dev; | 863 | struct pnfs_device *dev; |
804 | struct pnfs_block_dev *rv; | 864 | struct pnfs_block_dev *rv; |
805 | u32 max_resp_sz; | 865 | u32 max_resp_sz; |
806 | int max_pages; | 866 | int max_pages; |
807 | struct page **pages = NULL; | 867 | struct page **pages = NULL; |
808 | int i, rc; | 868 | int i, rc; |
809 | 869 | ||
810 | /* | 870 | /* |
811 | * Use the session max response size as the basis for setting | 871 | * Use the session max response size as the basis for setting |
812 | * GETDEVICEINFO's maxcount | 872 | * GETDEVICEINFO's maxcount |
813 | */ | 873 | */ |
814 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; | 874 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; |
815 | max_pages = max_resp_sz >> PAGE_SHIFT; | 875 | max_pages = max_resp_sz >> PAGE_SHIFT; |
816 | dprintk("%s max_resp_sz %u max_pages %d\n", | 876 | dprintk("%s max_resp_sz %u max_pages %d\n", |
817 | __func__, max_resp_sz, max_pages); | 877 | __func__, max_resp_sz, max_pages); |
818 | 878 | ||
819 | dev = kmalloc(sizeof(*dev), GFP_NOFS); | 879 | dev = kmalloc(sizeof(*dev), GFP_NOFS); |
820 | if (!dev) { | 880 | if (!dev) { |
821 | dprintk("%s kmalloc failed\n", __func__); | 881 | dprintk("%s kmalloc failed\n", __func__); |
822 | return ERR_PTR(-ENOMEM); | 882 | return ERR_PTR(-ENOMEM); |
823 | } | 883 | } |
824 | 884 | ||
825 | pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS); | 885 | pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS); |
826 | if (pages == NULL) { | 886 | if (pages == NULL) { |
827 | kfree(dev); | 887 | kfree(dev); |
828 | return ERR_PTR(-ENOMEM); | 888 | return ERR_PTR(-ENOMEM); |
829 | } | 889 | } |
830 | for (i = 0; i < max_pages; i++) { | 890 | for (i = 0; i < max_pages; i++) { |
831 | pages[i] = alloc_page(GFP_NOFS); | 891 | pages[i] = alloc_page(GFP_NOFS); |
832 | if (!pages[i]) { | 892 | if (!pages[i]) { |
833 | rv = ERR_PTR(-ENOMEM); | 893 | rv = ERR_PTR(-ENOMEM); |
834 | goto out_free; | 894 | goto out_free; |
835 | } | 895 | } |
836 | } | 896 | } |
837 | 897 | ||
838 | memcpy(&dev->dev_id, d_id, sizeof(*d_id)); | 898 | memcpy(&dev->dev_id, d_id, sizeof(*d_id)); |
839 | dev->layout_type = LAYOUT_BLOCK_VOLUME; | 899 | dev->layout_type = LAYOUT_BLOCK_VOLUME; |
840 | dev->pages = pages; | 900 | dev->pages = pages; |
841 | dev->pgbase = 0; | 901 | dev->pgbase = 0; |
842 | dev->pglen = PAGE_SIZE * max_pages; | 902 | dev->pglen = PAGE_SIZE * max_pages; |
843 | dev->mincount = 0; | 903 | dev->mincount = 0; |
844 | 904 | ||
845 | dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data); | 905 | dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data); |
846 | rc = nfs4_proc_getdeviceinfo(server, dev); | 906 | rc = nfs4_proc_getdeviceinfo(server, dev); |
847 | dprintk("%s getdevice info returns %d\n", __func__, rc); | 907 | dprintk("%s getdevice info returns %d\n", __func__, rc); |
848 | if (rc) { | 908 | if (rc) { |
849 | rv = ERR_PTR(rc); | 909 | rv = ERR_PTR(rc); |
850 | goto out_free; | 910 | goto out_free; |
851 | } | 911 | } |
852 | 912 | ||
853 | rv = nfs4_blk_decode_device(server, dev); | 913 | rv = nfs4_blk_decode_device(server, dev); |
854 | out_free: | 914 | out_free: |
855 | for (i = 0; i < max_pages; i++) | 915 | for (i = 0; i < max_pages; i++) |
856 | __free_page(pages[i]); | 916 | __free_page(pages[i]); |
857 | kfree(pages); | 917 | kfree(pages); |
858 | kfree(dev); | 918 | kfree(dev); |
859 | return rv; | 919 | return rv; |
860 | } | 920 | } |
861 | 921 | ||
862 | static int | 922 | static int |
863 | bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh) | 923 | bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh) |
864 | { | 924 | { |
865 | struct block_mount_id *b_mt_id = NULL; | 925 | struct block_mount_id *b_mt_id = NULL; |
866 | struct pnfs_devicelist *dlist = NULL; | 926 | struct pnfs_devicelist *dlist = NULL; |
867 | struct pnfs_block_dev *bdev; | 927 | struct pnfs_block_dev *bdev; |
868 | LIST_HEAD(block_disklist); | 928 | LIST_HEAD(block_disklist); |
869 | int status, i; | 929 | int status, i; |
870 | 930 | ||
871 | dprintk("%s enter\n", __func__); | 931 | dprintk("%s enter\n", __func__); |
872 | 932 | ||
873 | if (server->pnfs_blksize == 0) { | 933 | if (server->pnfs_blksize == 0) { |
874 | dprintk("%s Server did not return blksize\n", __func__); | 934 | dprintk("%s Server did not return blksize\n", __func__); |
875 | return -EINVAL; | 935 | return -EINVAL; |
876 | } | 936 | } |
877 | b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS); | 937 | b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS); |
878 | if (!b_mt_id) { | 938 | if (!b_mt_id) { |
879 | status = -ENOMEM; | 939 | status = -ENOMEM; |
880 | goto out_error; | 940 | goto out_error; |
881 | } | 941 | } |
882 | /* Initialize nfs4 block layout mount id */ | 942 | /* Initialize nfs4 block layout mount id */ |
883 | spin_lock_init(&b_mt_id->bm_lock); | 943 | spin_lock_init(&b_mt_id->bm_lock); |
884 | INIT_LIST_HEAD(&b_mt_id->bm_devlist); | 944 | INIT_LIST_HEAD(&b_mt_id->bm_devlist); |
885 | 945 | ||
886 | dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS); | 946 | dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS); |
887 | if (!dlist) { | 947 | if (!dlist) { |
888 | status = -ENOMEM; | 948 | status = -ENOMEM; |
889 | goto out_error; | 949 | goto out_error; |
890 | } | 950 | } |
891 | dlist->eof = 0; | 951 | dlist->eof = 0; |
892 | while (!dlist->eof) { | 952 | while (!dlist->eof) { |
893 | status = nfs4_proc_getdevicelist(server, fh, dlist); | 953 | status = nfs4_proc_getdevicelist(server, fh, dlist); |
894 | if (status) | 954 | if (status) |
895 | goto out_error; | 955 | goto out_error; |
896 | dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n", | 956 | dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n", |
897 | __func__, dlist->num_devs, dlist->eof); | 957 | __func__, dlist->num_devs, dlist->eof); |
898 | for (i = 0; i < dlist->num_devs; i++) { | 958 | for (i = 0; i < dlist->num_devs; i++) { |
899 | bdev = nfs4_blk_get_deviceinfo(server, fh, | 959 | bdev = nfs4_blk_get_deviceinfo(server, fh, |
900 | &dlist->dev_id[i]); | 960 | &dlist->dev_id[i]); |
901 | if (IS_ERR(bdev)) { | 961 | if (IS_ERR(bdev)) { |
902 | status = PTR_ERR(bdev); | 962 | status = PTR_ERR(bdev); |
903 | goto out_error; | 963 | goto out_error; |
904 | } | 964 | } |
905 | spin_lock(&b_mt_id->bm_lock); | 965 | spin_lock(&b_mt_id->bm_lock); |
906 | list_add(&bdev->bm_node, &b_mt_id->bm_devlist); | 966 | list_add(&bdev->bm_node, &b_mt_id->bm_devlist); |
907 | spin_unlock(&b_mt_id->bm_lock); | 967 | spin_unlock(&b_mt_id->bm_lock); |
908 | } | 968 | } |
909 | } | 969 | } |
910 | dprintk("%s SUCCESS\n", __func__); | 970 | dprintk("%s SUCCESS\n", __func__); |
911 | server->pnfs_ld_data = b_mt_id; | 971 | server->pnfs_ld_data = b_mt_id; |
912 | 972 | ||
913 | out_return: | 973 | out_return: |
914 | kfree(dlist); | 974 | kfree(dlist); |
915 | return status; | 975 | return status; |
916 | 976 | ||
917 | out_error: | 977 | out_error: |
918 | free_blk_mountid(b_mt_id); | 978 | free_blk_mountid(b_mt_id); |
919 | goto out_return; | 979 | goto out_return; |
920 | } | 980 | } |
921 | 981 | ||
922 | static int | 982 | static int |
923 | bl_clear_layoutdriver(struct nfs_server *server) | 983 | bl_clear_layoutdriver(struct nfs_server *server) |
924 | { | 984 | { |
925 | struct block_mount_id *b_mt_id = server->pnfs_ld_data; | 985 | struct block_mount_id *b_mt_id = server->pnfs_ld_data; |
926 | 986 | ||
927 | dprintk("%s enter\n", __func__); | 987 | dprintk("%s enter\n", __func__); |
928 | free_blk_mountid(b_mt_id); | 988 | free_blk_mountid(b_mt_id); |
929 | dprintk("%s RETURNS\n", __func__); | 989 | dprintk("%s RETURNS\n", __func__); |
930 | return 0; | 990 | return 0; |
931 | } | 991 | } |
932 | 992 | ||
933 | static const struct nfs_pageio_ops bl_pg_read_ops = { | 993 | static const struct nfs_pageio_ops bl_pg_read_ops = { |
934 | .pg_init = pnfs_generic_pg_init_read, | 994 | .pg_init = pnfs_generic_pg_init_read, |
935 | .pg_test = pnfs_generic_pg_test, | 995 | .pg_test = pnfs_generic_pg_test, |
936 | .pg_doio = pnfs_generic_pg_readpages, | 996 | .pg_doio = pnfs_generic_pg_readpages, |
937 | }; | 997 | }; |
938 | 998 | ||
939 | static const struct nfs_pageio_ops bl_pg_write_ops = { | 999 | static const struct nfs_pageio_ops bl_pg_write_ops = { |
940 | .pg_init = pnfs_generic_pg_init_write, | 1000 | .pg_init = pnfs_generic_pg_init_write, |
941 | .pg_test = pnfs_generic_pg_test, | 1001 | .pg_test = pnfs_generic_pg_test, |
942 | .pg_doio = pnfs_generic_pg_writepages, | 1002 | .pg_doio = pnfs_generic_pg_writepages, |
943 | }; | 1003 | }; |
944 | 1004 | ||
945 | static struct pnfs_layoutdriver_type blocklayout_type = { | 1005 | static struct pnfs_layoutdriver_type blocklayout_type = { |
946 | .id = LAYOUT_BLOCK_VOLUME, | 1006 | .id = LAYOUT_BLOCK_VOLUME, |
947 | .name = "LAYOUT_BLOCK_VOLUME", | 1007 | .name = "LAYOUT_BLOCK_VOLUME", |
948 | .read_pagelist = bl_read_pagelist, | 1008 | .read_pagelist = bl_read_pagelist, |
949 | .write_pagelist = bl_write_pagelist, | 1009 | .write_pagelist = bl_write_pagelist, |
950 | .alloc_layout_hdr = bl_alloc_layout_hdr, | 1010 | .alloc_layout_hdr = bl_alloc_layout_hdr, |
951 | .free_layout_hdr = bl_free_layout_hdr, | 1011 | .free_layout_hdr = bl_free_layout_hdr, |
952 | .alloc_lseg = bl_alloc_lseg, | 1012 | .alloc_lseg = bl_alloc_lseg, |
953 | .free_lseg = bl_free_lseg, | 1013 | .free_lseg = bl_free_lseg, |
954 | .encode_layoutcommit = bl_encode_layoutcommit, | 1014 | .encode_layoutcommit = bl_encode_layoutcommit, |
955 | .cleanup_layoutcommit = bl_cleanup_layoutcommit, | 1015 | .cleanup_layoutcommit = bl_cleanup_layoutcommit, |
956 | .set_layoutdriver = bl_set_layoutdriver, | 1016 | .set_layoutdriver = bl_set_layoutdriver, |
957 | .clear_layoutdriver = bl_clear_layoutdriver, | 1017 | .clear_layoutdriver = bl_clear_layoutdriver, |
958 | .pg_read_ops = &bl_pg_read_ops, | 1018 | .pg_read_ops = &bl_pg_read_ops, |
959 | .pg_write_ops = &bl_pg_write_ops, | 1019 | .pg_write_ops = &bl_pg_write_ops, |
960 | }; | 1020 | }; |
961 | 1021 | ||
962 | static const struct rpc_pipe_ops bl_upcall_ops = { | 1022 | static const struct rpc_pipe_ops bl_upcall_ops = { |
963 | .upcall = rpc_pipe_generic_upcall, | 1023 | .upcall = rpc_pipe_generic_upcall, |
964 | .downcall = bl_pipe_downcall, | 1024 | .downcall = bl_pipe_downcall, |
965 | .destroy_msg = bl_pipe_destroy_msg, | 1025 | .destroy_msg = bl_pipe_destroy_msg, |
966 | }; | 1026 | }; |
967 | 1027 | ||
968 | static int __init nfs4blocklayout_init(void) | 1028 | static int __init nfs4blocklayout_init(void) |
969 | { | 1029 | { |
970 | struct vfsmount *mnt; | 1030 | struct vfsmount *mnt; |
971 | struct path path; | 1031 | struct path path; |
972 | int ret; | 1032 | int ret; |
973 | 1033 | ||
974 | dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__); | 1034 | dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__); |
975 | 1035 | ||
976 | ret = pnfs_register_layoutdriver(&blocklayout_type); | 1036 | ret = pnfs_register_layoutdriver(&blocklayout_type); |
977 | if (ret) | 1037 | if (ret) |
978 | goto out; | 1038 | goto out; |
979 | 1039 | ||
980 | init_waitqueue_head(&bl_wq); | 1040 | init_waitqueue_head(&bl_wq); |
981 | 1041 | ||
982 | mnt = rpc_get_mount(); | 1042 | mnt = rpc_get_mount(); |
fs/nfs/blocklayout/blocklayout.h
1 | /* | 1 | /* |
2 | * linux/fs/nfs/blocklayout/blocklayout.h | 2 | * linux/fs/nfs/blocklayout/blocklayout.h |
3 | * | 3 | * |
4 | * Module for the NFSv4.1 pNFS block layout driver. | 4 | * Module for the NFSv4.1 pNFS block layout driver. |
5 | * | 5 | * |
6 | * Copyright (c) 2006 The Regents of the University of Michigan. | 6 | * Copyright (c) 2006 The Regents of the University of Michigan. |
7 | * All rights reserved. | 7 | * All rights reserved. |
8 | * | 8 | * |
9 | * Andy Adamson <andros@citi.umich.edu> | 9 | * Andy Adamson <andros@citi.umich.edu> |
10 | * Fred Isaman <iisaman@umich.edu> | 10 | * Fred Isaman <iisaman@umich.edu> |
11 | * | 11 | * |
12 | * permission is granted to use, copy, create derivative works and | 12 | * permission is granted to use, copy, create derivative works and |
13 | * redistribute this software and such derivative works for any purpose, | 13 | * redistribute this software and such derivative works for any purpose, |
14 | * so long as the name of the university of michigan is not used in | 14 | * so long as the name of the university of michigan is not used in |
15 | * any advertising or publicity pertaining to the use or distribution | 15 | * any advertising or publicity pertaining to the use or distribution |
16 | * of this software without specific, written prior authorization. if | 16 | * of this software without specific, written prior authorization. if |
17 | * the above copyright notice or any other identification of the | 17 | * the above copyright notice or any other identification of the |
18 | * university of michigan is included in any copy of any portion of | 18 | * university of michigan is included in any copy of any portion of |
19 | * this software, then the disclaimer below must also be included. | 19 | * this software, then the disclaimer below must also be included. |
20 | * | 20 | * |
21 | * this software is provided as is, without representation from the | 21 | * this software is provided as is, without representation from the |
22 | * university of michigan as to its fitness for any purpose, and without | 22 | * university of michigan as to its fitness for any purpose, and without |
23 | * warranty by the university of michigan of any kind, either express | 23 | * warranty by the university of michigan of any kind, either express |
24 | * or implied, including without limitation the implied warranties of | 24 | * or implied, including without limitation the implied warranties of |
25 | * merchantability and fitness for a particular purpose. the regents | 25 | * merchantability and fitness for a particular purpose. the regents |
26 | * of the university of michigan shall not be liable for any damages, | 26 | * of the university of michigan shall not be liable for any damages, |
27 | * including special, indirect, incidental, or consequential damages, | 27 | * including special, indirect, incidental, or consequential damages, |
28 | * with respect to any claim arising out or in connection with the use | 28 | * with respect to any claim arising out or in connection with the use |
29 | * of the software, even if it has been or is hereafter advised of the | 29 | * of the software, even if it has been or is hereafter advised of the |
30 | * possibility of such damages. | 30 | * possibility of such damages. |
31 | */ | 31 | */ |
32 | #ifndef FS_NFS_NFS4BLOCKLAYOUT_H | 32 | #ifndef FS_NFS_NFS4BLOCKLAYOUT_H |
33 | #define FS_NFS_NFS4BLOCKLAYOUT_H | 33 | #define FS_NFS_NFS4BLOCKLAYOUT_H |
34 | 34 | ||
35 | #include <linux/device-mapper.h> | 35 | #include <linux/device-mapper.h> |
36 | #include <linux/nfs_fs.h> | 36 | #include <linux/nfs_fs.h> |
37 | #include <linux/sunrpc/rpc_pipe_fs.h> | 37 | #include <linux/sunrpc/rpc_pipe_fs.h> |
38 | 38 | ||
39 | #include "../pnfs.h" | 39 | #include "../pnfs.h" |
40 | 40 | ||
41 | #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT) | 41 | #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT) |
42 | #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT) | 42 | #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT) |
43 | 43 | ||
44 | struct block_mount_id { | 44 | struct block_mount_id { |
45 | spinlock_t bm_lock; /* protects list */ | 45 | spinlock_t bm_lock; /* protects list */ |
46 | struct list_head bm_devlist; /* holds pnfs_block_dev */ | 46 | struct list_head bm_devlist; /* holds pnfs_block_dev */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | struct pnfs_block_dev { | 49 | struct pnfs_block_dev { |
50 | struct list_head bm_node; | 50 | struct list_head bm_node; |
51 | struct nfs4_deviceid bm_mdevid; /* associated devid */ | 51 | struct nfs4_deviceid bm_mdevid; /* associated devid */ |
52 | struct block_device *bm_mdev; /* meta device itself */ | 52 | struct block_device *bm_mdev; /* meta device itself */ |
53 | }; | 53 | }; |
54 | 54 | ||
55 | enum exstate4 { | 55 | enum exstate4 { |
56 | PNFS_BLOCK_READWRITE_DATA = 0, | 56 | PNFS_BLOCK_READWRITE_DATA = 0, |
57 | PNFS_BLOCK_READ_DATA = 1, | 57 | PNFS_BLOCK_READ_DATA = 1, |
58 | PNFS_BLOCK_INVALID_DATA = 2, /* mapped, but data is invalid */ | 58 | PNFS_BLOCK_INVALID_DATA = 2, /* mapped, but data is invalid */ |
59 | PNFS_BLOCK_NONE_DATA = 3 /* unmapped, it's a hole */ | 59 | PNFS_BLOCK_NONE_DATA = 3 /* unmapped, it's a hole */ |
60 | }; | 60 | }; |
61 | 61 | ||
62 | #define MY_MAX_TAGS (15) /* tag bitnums used must be less than this */ | 62 | #define MY_MAX_TAGS (15) /* tag bitnums used must be less than this */ |
63 | 63 | ||
64 | struct my_tree { | 64 | struct my_tree { |
65 | sector_t mtt_step_size; /* Internal sector alignment */ | 65 | sector_t mtt_step_size; /* Internal sector alignment */ |
66 | struct list_head mtt_stub; /* Should be a radix tree */ | 66 | struct list_head mtt_stub; /* Should be a radix tree */ |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct pnfs_inval_markings { | 69 | struct pnfs_inval_markings { |
70 | spinlock_t im_lock; | 70 | spinlock_t im_lock; |
71 | struct my_tree im_tree; /* Sectors that need LAYOUTCOMMIT */ | 71 | struct my_tree im_tree; /* Sectors that need LAYOUTCOMMIT */ |
72 | sector_t im_block_size; /* Server blocksize in sectors */ | 72 | sector_t im_block_size; /* Server blocksize in sectors */ |
73 | struct list_head im_extents; /* Short extents for INVAL->RW conversion */ | ||
73 | }; | 74 | }; |
74 | 75 | ||
75 | struct pnfs_inval_tracking { | 76 | struct pnfs_inval_tracking { |
76 | struct list_head it_link; | 77 | struct list_head it_link; |
77 | int it_sector; | 78 | int it_sector; |
78 | int it_tags; | 79 | int it_tags; |
79 | }; | 80 | }; |
80 | 81 | ||
81 | /* sector_t fields are all in 512-byte sectors */ | 82 | /* sector_t fields are all in 512-byte sectors */ |
82 | struct pnfs_block_extent { | 83 | struct pnfs_block_extent { |
83 | struct kref be_refcnt; | 84 | struct kref be_refcnt; |
84 | struct list_head be_node; /* link into lseg list */ | 85 | struct list_head be_node; /* link into lseg list */ |
85 | struct nfs4_deviceid be_devid; /* FIXME: could use device cache instead */ | 86 | struct nfs4_deviceid be_devid; /* FIXME: could use device cache instead */ |
86 | struct block_device *be_mdev; | 87 | struct block_device *be_mdev; |
87 | sector_t be_f_offset; /* the starting offset in the file */ | 88 | sector_t be_f_offset; /* the starting offset in the file */ |
88 | sector_t be_length; /* the size of the extent */ | 89 | sector_t be_length; /* the size of the extent */ |
89 | sector_t be_v_offset; /* the starting offset in the volume */ | 90 | sector_t be_v_offset; /* the starting offset in the volume */ |
90 | enum exstate4 be_state; /* the state of this extent */ | 91 | enum exstate4 be_state; /* the state of this extent */ |
91 | struct pnfs_inval_markings *be_inval; /* tracks INVAL->RW transition */ | 92 | struct pnfs_inval_markings *be_inval; /* tracks INVAL->RW transition */ |
92 | }; | 93 | }; |
93 | 94 | ||
94 | /* Shortened extent used by LAYOUTCOMMIT */ | 95 | /* Shortened extent used by LAYOUTCOMMIT */ |
95 | struct pnfs_block_short_extent { | 96 | struct pnfs_block_short_extent { |
96 | struct list_head bse_node; | 97 | struct list_head bse_node; |
97 | struct nfs4_deviceid bse_devid; | 98 | struct nfs4_deviceid bse_devid; |
98 | struct block_device *bse_mdev; | 99 | struct block_device *bse_mdev; |
99 | sector_t bse_f_offset; /* the starting offset in the file */ | 100 | sector_t bse_f_offset; /* the starting offset in the file */ |
100 | sector_t bse_length; /* the size of the extent */ | 101 | sector_t bse_length; /* the size of the extent */ |
101 | }; | 102 | }; |
102 | 103 | ||
103 | static inline void | 104 | static inline void |
104 | BL_INIT_INVAL_MARKS(struct pnfs_inval_markings *marks, sector_t blocksize) | 105 | BL_INIT_INVAL_MARKS(struct pnfs_inval_markings *marks, sector_t blocksize) |
105 | { | 106 | { |
106 | spin_lock_init(&marks->im_lock); | 107 | spin_lock_init(&marks->im_lock); |
107 | INIT_LIST_HEAD(&marks->im_tree.mtt_stub); | 108 | INIT_LIST_HEAD(&marks->im_tree.mtt_stub); |
109 | INIT_LIST_HEAD(&marks->im_extents); | ||
108 | marks->im_block_size = blocksize; | 110 | marks->im_block_size = blocksize; |
109 | marks->im_tree.mtt_step_size = min((sector_t)PAGE_CACHE_SECTORS, | 111 | marks->im_tree.mtt_step_size = min((sector_t)PAGE_CACHE_SECTORS, |
110 | blocksize); | 112 | blocksize); |
111 | } | 113 | } |
112 | 114 | ||
113 | enum extentclass4 { | 115 | enum extentclass4 { |
114 | RW_EXTENT = 0, /* READWRTE and INVAL */ | 116 | RW_EXTENT = 0, /* READWRTE and INVAL */ |
115 | RO_EXTENT = 1, /* READ and NONE */ | 117 | RO_EXTENT = 1, /* READ and NONE */ |
116 | EXTENT_LISTS = 2, | 118 | EXTENT_LISTS = 2, |
117 | }; | 119 | }; |
118 | 120 | ||
119 | static inline int bl_choose_list(enum exstate4 state) | 121 | static inline int bl_choose_list(enum exstate4 state) |
120 | { | 122 | { |
121 | if (state == PNFS_BLOCK_READ_DATA || state == PNFS_BLOCK_NONE_DATA) | 123 | if (state == PNFS_BLOCK_READ_DATA || state == PNFS_BLOCK_NONE_DATA) |
122 | return RO_EXTENT; | 124 | return RO_EXTENT; |
123 | else | 125 | else |
124 | return RW_EXTENT; | 126 | return RW_EXTENT; |
125 | } | 127 | } |
126 | 128 | ||
127 | struct pnfs_block_layout { | 129 | struct pnfs_block_layout { |
128 | struct pnfs_layout_hdr bl_layout; | 130 | struct pnfs_layout_hdr bl_layout; |
129 | struct pnfs_inval_markings bl_inval; /* tracks INVAL->RW transition */ | 131 | struct pnfs_inval_markings bl_inval; /* tracks INVAL->RW transition */ |
130 | spinlock_t bl_ext_lock; /* Protects list manipulation */ | 132 | spinlock_t bl_ext_lock; /* Protects list manipulation */ |
131 | struct list_head bl_extents[EXTENT_LISTS]; /* R and RW extents */ | 133 | struct list_head bl_extents[EXTENT_LISTS]; /* R and RW extents */ |
132 | struct list_head bl_commit; /* Needs layout commit */ | 134 | struct list_head bl_commit; /* Needs layout commit */ |
133 | struct list_head bl_committing; /* Layout committing */ | 135 | struct list_head bl_committing; /* Layout committing */ |
134 | unsigned int bl_count; /* entries in bl_commit */ | 136 | unsigned int bl_count; /* entries in bl_commit */ |
135 | sector_t bl_blocksize; /* Server blocksize in sectors */ | 137 | sector_t bl_blocksize; /* Server blocksize in sectors */ |
136 | }; | 138 | }; |
137 | 139 | ||
138 | #define BLK_ID(lo) ((struct block_mount_id *)(NFS_SERVER(lo->plh_inode)->pnfs_ld_data)) | 140 | #define BLK_ID(lo) ((struct block_mount_id *)(NFS_SERVER(lo->plh_inode)->pnfs_ld_data)) |
139 | 141 | ||
140 | static inline struct pnfs_block_layout * | 142 | static inline struct pnfs_block_layout * |
141 | BLK_LO2EXT(struct pnfs_layout_hdr *lo) | 143 | BLK_LO2EXT(struct pnfs_layout_hdr *lo) |
142 | { | 144 | { |
143 | return container_of(lo, struct pnfs_block_layout, bl_layout); | 145 | return container_of(lo, struct pnfs_block_layout, bl_layout); |
144 | } | 146 | } |
145 | 147 | ||
146 | static inline struct pnfs_block_layout * | 148 | static inline struct pnfs_block_layout * |
147 | BLK_LSEG2EXT(struct pnfs_layout_segment *lseg) | 149 | BLK_LSEG2EXT(struct pnfs_layout_segment *lseg) |
148 | { | 150 | { |
149 | return BLK_LO2EXT(lseg->pls_layout); | 151 | return BLK_LO2EXT(lseg->pls_layout); |
150 | } | 152 | } |
151 | 153 | ||
152 | struct bl_dev_msg { | 154 | struct bl_dev_msg { |
153 | int32_t status; | 155 | int32_t status; |
154 | uint32_t major, minor; | 156 | uint32_t major, minor; |
155 | }; | 157 | }; |
156 | 158 | ||
157 | struct bl_msg_hdr { | 159 | struct bl_msg_hdr { |
158 | u8 type; | 160 | u8 type; |
159 | u16 totallen; /* length of entire message, including hdr itself */ | 161 | u16 totallen; /* length of entire message, including hdr itself */ |
160 | }; | 162 | }; |
161 | 163 | ||
162 | extern struct dentry *bl_device_pipe; | 164 | extern struct dentry *bl_device_pipe; |
163 | extern wait_queue_head_t bl_wq; | 165 | extern wait_queue_head_t bl_wq; |
164 | 166 | ||
165 | #define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */ | 167 | #define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */ |
166 | #define BL_DEVICE_MOUNT 0x1 /* Mount--create devices*/ | 168 | #define BL_DEVICE_MOUNT 0x1 /* Mount--create devices*/ |
167 | #define BL_DEVICE_REQUEST_INIT 0x0 /* Start request */ | 169 | #define BL_DEVICE_REQUEST_INIT 0x0 /* Start request */ |
168 | #define BL_DEVICE_REQUEST_PROC 0x1 /* User level process succeeds */ | 170 | #define BL_DEVICE_REQUEST_PROC 0x1 /* User level process succeeds */ |
169 | #define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */ | 171 | #define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */ |
170 | 172 | ||
171 | /* blocklayoutdev.c */ | 173 | /* blocklayoutdev.c */ |
172 | ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t); | 174 | ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t); |
173 | void bl_pipe_destroy_msg(struct rpc_pipe_msg *); | 175 | void bl_pipe_destroy_msg(struct rpc_pipe_msg *); |
174 | struct block_device *nfs4_blkdev_get(dev_t dev); | 176 | struct block_device *nfs4_blkdev_get(dev_t dev); |
175 | int nfs4_blkdev_put(struct block_device *bdev); | 177 | int nfs4_blkdev_put(struct block_device *bdev); |
176 | struct pnfs_block_dev *nfs4_blk_decode_device(struct nfs_server *server, | 178 | struct pnfs_block_dev *nfs4_blk_decode_device(struct nfs_server *server, |
177 | struct pnfs_device *dev); | 179 | struct pnfs_device *dev); |
178 | int nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo, | 180 | int nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo, |
179 | struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); | 181 | struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); |
180 | 182 | ||
181 | /* blocklayoutdm.c */ | 183 | /* blocklayoutdm.c */ |
182 | void bl_free_block_dev(struct pnfs_block_dev *bdev); | 184 | void bl_free_block_dev(struct pnfs_block_dev *bdev); |
183 | 185 | ||
184 | /* extents.c */ | 186 | /* extents.c */ |
185 | struct pnfs_block_extent * | 187 | struct pnfs_block_extent * |
186 | bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect, | 188 | bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect, |
187 | struct pnfs_block_extent **cow_read); | 189 | struct pnfs_block_extent **cow_read); |
188 | int bl_mark_sectors_init(struct pnfs_inval_markings *marks, | 190 | int bl_mark_sectors_init(struct pnfs_inval_markings *marks, |
189 | sector_t offset, sector_t length, | 191 | sector_t offset, sector_t length); |
190 | sector_t **pages); | ||
191 | void bl_put_extent(struct pnfs_block_extent *be); | 192 | void bl_put_extent(struct pnfs_block_extent *be); |
192 | struct pnfs_block_extent *bl_alloc_extent(void); | 193 | struct pnfs_block_extent *bl_alloc_extent(void); |
193 | int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect); | 194 | int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect); |
194 | int encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, | 195 | int encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, |
195 | struct xdr_stream *xdr, | 196 | struct xdr_stream *xdr, |
196 | const struct nfs4_layoutcommit_args *arg); | 197 | const struct nfs4_layoutcommit_args *arg); |
197 | void clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, | 198 | void clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, |
198 | const struct nfs4_layoutcommit_args *arg, | 199 | const struct nfs4_layoutcommit_args *arg, |
199 | int status); | 200 | int status); |
200 | int bl_add_merge_extent(struct pnfs_block_layout *bl, | 201 | int bl_add_merge_extent(struct pnfs_block_layout *bl, |
201 | struct pnfs_block_extent *new); | 202 | struct pnfs_block_extent *new); |
202 | int bl_mark_for_commit(struct pnfs_block_extent *be, | 203 | int bl_mark_for_commit(struct pnfs_block_extent *be, |
203 | sector_t offset, sector_t length); | 204 | sector_t offset, sector_t length, |
205 | struct pnfs_block_short_extent *new); | ||
206 | int bl_push_one_short_extent(struct pnfs_inval_markings *marks); | ||
207 | struct pnfs_block_short_extent * | ||
208 | bl_pop_one_short_extent(struct pnfs_inval_markings *marks); | ||
209 | void bl_free_short_extents(struct pnfs_inval_markings *marks, int num_to_free); | ||
204 | 210 | ||
205 | #endif /* FS_NFS_NFS4BLOCKLAYOUT_H */ | 211 | #endif /* FS_NFS_NFS4BLOCKLAYOUT_H */ |
fs/nfs/blocklayout/extents.c
1 | /* | 1 | /* |
2 | * linux/fs/nfs/blocklayout/blocklayout.h | 2 | * linux/fs/nfs/blocklayout/blocklayout.h |
3 | * | 3 | * |
4 | * Module for the NFSv4.1 pNFS block layout driver. | 4 | * Module for the NFSv4.1 pNFS block layout driver. |
5 | * | 5 | * |
6 | * Copyright (c) 2006 The Regents of the University of Michigan. | 6 | * Copyright (c) 2006 The Regents of the University of Michigan. |
7 | * All rights reserved. | 7 | * All rights reserved. |
8 | * | 8 | * |
9 | * Andy Adamson <andros@citi.umich.edu> | 9 | * Andy Adamson <andros@citi.umich.edu> |
10 | * Fred Isaman <iisaman@umich.edu> | 10 | * Fred Isaman <iisaman@umich.edu> |
11 | * | 11 | * |
12 | * permission is granted to use, copy, create derivative works and | 12 | * permission is granted to use, copy, create derivative works and |
13 | * redistribute this software and such derivative works for any purpose, | 13 | * redistribute this software and such derivative works for any purpose, |
14 | * so long as the name of the university of michigan is not used in | 14 | * so long as the name of the university of michigan is not used in |
15 | * any advertising or publicity pertaining to the use or distribution | 15 | * any advertising or publicity pertaining to the use or distribution |
16 | * of this software without specific, written prior authorization. if | 16 | * of this software without specific, written prior authorization. if |
17 | * the above copyright notice or any other identification of the | 17 | * the above copyright notice or any other identification of the |
18 | * university of michigan is included in any copy of any portion of | 18 | * university of michigan is included in any copy of any portion of |
19 | * this software, then the disclaimer below must also be included. | 19 | * this software, then the disclaimer below must also be included. |
20 | * | 20 | * |
21 | * this software is provided as is, without representation from the | 21 | * this software is provided as is, without representation from the |
22 | * university of michigan as to its fitness for any purpose, and without | 22 | * university of michigan as to its fitness for any purpose, and without |
23 | * warranty by the university of michigan of any kind, either express | 23 | * warranty by the university of michigan of any kind, either express |
24 | * or implied, including without limitation the implied warranties of | 24 | * or implied, including without limitation the implied warranties of |
25 | * merchantability and fitness for a particular purpose. the regents | 25 | * merchantability and fitness for a particular purpose. the regents |
26 | * of the university of michigan shall not be liable for any damages, | 26 | * of the university of michigan shall not be liable for any damages, |
27 | * including special, indirect, incidental, or consequential damages, | 27 | * including special, indirect, incidental, or consequential damages, |
28 | * with respect to any claim arising out or in connection with the use | 28 | * with respect to any claim arising out or in connection with the use |
29 | * of the software, even if it has been or is hereafter advised of the | 29 | * of the software, even if it has been or is hereafter advised of the |
30 | * possibility of such damages. | 30 | * possibility of such damages. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include "blocklayout.h" | 33 | #include "blocklayout.h" |
34 | #define NFSDBG_FACILITY NFSDBG_PNFS_LD | 34 | #define NFSDBG_FACILITY NFSDBG_PNFS_LD |
35 | 35 | ||
36 | /* Bit numbers */ | 36 | /* Bit numbers */ |
37 | #define EXTENT_INITIALIZED 0 | 37 | #define EXTENT_INITIALIZED 0 |
38 | #define EXTENT_WRITTEN 1 | 38 | #define EXTENT_WRITTEN 1 |
39 | #define EXTENT_IN_COMMIT 2 | 39 | #define EXTENT_IN_COMMIT 2 |
40 | #define INTERNAL_EXISTS MY_MAX_TAGS | 40 | #define INTERNAL_EXISTS MY_MAX_TAGS |
41 | #define INTERNAL_MASK ((1 << INTERNAL_EXISTS) - 1) | 41 | #define INTERNAL_MASK ((1 << INTERNAL_EXISTS) - 1) |
42 | 42 | ||
43 | /* Returns largest t<=s s.t. t%base==0 */ | 43 | /* Returns largest t<=s s.t. t%base==0 */ |
44 | static inline sector_t normalize(sector_t s, int base) | 44 | static inline sector_t normalize(sector_t s, int base) |
45 | { | 45 | { |
46 | sector_t tmp = s; /* Since do_div modifies its argument */ | 46 | sector_t tmp = s; /* Since do_div modifies its argument */ |
47 | return s - do_div(tmp, base); | 47 | return s - do_div(tmp, base); |
48 | } | 48 | } |
49 | 49 | ||
50 | static inline sector_t normalize_up(sector_t s, int base) | 50 | static inline sector_t normalize_up(sector_t s, int base) |
51 | { | 51 | { |
52 | return normalize(s + base - 1, base); | 52 | return normalize(s + base - 1, base); |
53 | } | 53 | } |
54 | 54 | ||
55 | /* Complete stub using list while determine API wanted */ | 55 | /* Complete stub using list while determine API wanted */ |
56 | 56 | ||
57 | /* Returns tags, or negative */ | 57 | /* Returns tags, or negative */ |
58 | static int32_t _find_entry(struct my_tree *tree, u64 s) | 58 | static int32_t _find_entry(struct my_tree *tree, u64 s) |
59 | { | 59 | { |
60 | struct pnfs_inval_tracking *pos; | 60 | struct pnfs_inval_tracking *pos; |
61 | 61 | ||
62 | dprintk("%s(%llu) enter\n", __func__, s); | 62 | dprintk("%s(%llu) enter\n", __func__, s); |
63 | list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { | 63 | list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { |
64 | if (pos->it_sector > s) | 64 | if (pos->it_sector > s) |
65 | continue; | 65 | continue; |
66 | else if (pos->it_sector == s) | 66 | else if (pos->it_sector == s) |
67 | return pos->it_tags & INTERNAL_MASK; | 67 | return pos->it_tags & INTERNAL_MASK; |
68 | else | 68 | else |
69 | break; | 69 | break; |
70 | } | 70 | } |
71 | return -ENOENT; | 71 | return -ENOENT; |
72 | } | 72 | } |
73 | 73 | ||
74 | static inline | 74 | static inline |
75 | int _has_tag(struct my_tree *tree, u64 s, int32_t tag) | 75 | int _has_tag(struct my_tree *tree, u64 s, int32_t tag) |
76 | { | 76 | { |
77 | int32_t tags; | 77 | int32_t tags; |
78 | 78 | ||
79 | dprintk("%s(%llu, %i) enter\n", __func__, s, tag); | 79 | dprintk("%s(%llu, %i) enter\n", __func__, s, tag); |
80 | s = normalize(s, tree->mtt_step_size); | 80 | s = normalize(s, tree->mtt_step_size); |
81 | tags = _find_entry(tree, s); | 81 | tags = _find_entry(tree, s); |
82 | if ((tags < 0) || !(tags & (1 << tag))) | 82 | if ((tags < 0) || !(tags & (1 << tag))) |
83 | return 0; | 83 | return 0; |
84 | else | 84 | else |
85 | return 1; | 85 | return 1; |
86 | } | 86 | } |
87 | 87 | ||
88 | /* Creates entry with tag, or if entry already exists, unions tag to it. | 88 | /* Creates entry with tag, or if entry already exists, unions tag to it. |
89 | * If storage is not NULL, newly created entry will use it. | 89 | * If storage is not NULL, newly created entry will use it. |
90 | * Returns number of entries added, or negative on error. | 90 | * Returns number of entries added, or negative on error. |
91 | */ | 91 | */ |
92 | static int _add_entry(struct my_tree *tree, u64 s, int32_t tag, | 92 | static int _add_entry(struct my_tree *tree, u64 s, int32_t tag, |
93 | struct pnfs_inval_tracking *storage) | 93 | struct pnfs_inval_tracking *storage) |
94 | { | 94 | { |
95 | int found = 0; | 95 | int found = 0; |
96 | struct pnfs_inval_tracking *pos; | 96 | struct pnfs_inval_tracking *pos; |
97 | 97 | ||
98 | dprintk("%s(%llu, %i, %p) enter\n", __func__, s, tag, storage); | 98 | dprintk("%s(%llu, %i, %p) enter\n", __func__, s, tag, storage); |
99 | list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { | 99 | list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { |
100 | if (pos->it_sector > s) | 100 | if (pos->it_sector > s) |
101 | continue; | 101 | continue; |
102 | else if (pos->it_sector == s) { | 102 | else if (pos->it_sector == s) { |
103 | found = 1; | 103 | found = 1; |
104 | break; | 104 | break; |
105 | } else | 105 | } else |
106 | break; | 106 | break; |
107 | } | 107 | } |
108 | if (found) { | 108 | if (found) { |
109 | pos->it_tags |= (1 << tag); | 109 | pos->it_tags |= (1 << tag); |
110 | return 0; | 110 | return 0; |
111 | } else { | 111 | } else { |
112 | struct pnfs_inval_tracking *new; | 112 | struct pnfs_inval_tracking *new; |
113 | if (storage) | 113 | new = storage; |
114 | new = storage; | ||
115 | else { | ||
116 | new = kmalloc(sizeof(*new), GFP_NOFS); | ||
117 | if (!new) | ||
118 | return -ENOMEM; | ||
119 | } | ||
120 | new->it_sector = s; | 114 | new->it_sector = s; |
121 | new->it_tags = (1 << tag); | 115 | new->it_tags = (1 << tag); |
122 | list_add(&new->it_link, &pos->it_link); | 116 | list_add(&new->it_link, &pos->it_link); |
123 | return 1; | 117 | return 1; |
124 | } | 118 | } |
125 | } | 119 | } |
126 | 120 | ||
127 | /* XXXX Really want option to not create */ | 121 | /* XXXX Really want option to not create */ |
128 | /* Over range, unions tag with existing entries, else creates entry with tag */ | 122 | /* Over range, unions tag with existing entries, else creates entry with tag */ |
129 | static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length) | 123 | static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length) |
130 | { | 124 | { |
131 | u64 i; | 125 | u64 i; |
132 | 126 | ||
133 | dprintk("%s(%i, %llu, %llu) enter\n", __func__, tag, s, length); | 127 | dprintk("%s(%i, %llu, %llu) enter\n", __func__, tag, s, length); |
134 | for (i = normalize(s, tree->mtt_step_size); i < s + length; | 128 | for (i = normalize(s, tree->mtt_step_size); i < s + length; |
135 | i += tree->mtt_step_size) | 129 | i += tree->mtt_step_size) |
136 | if (_add_entry(tree, i, tag, NULL)) | 130 | if (_add_entry(tree, i, tag, NULL)) |
137 | return -ENOMEM; | 131 | return -ENOMEM; |
138 | return 0; | 132 | return 0; |
139 | } | 133 | } |
140 | 134 | ||
141 | /* Ensure that future operations on given range of tree will not malloc */ | 135 | /* Ensure that future operations on given range of tree will not malloc */ |
142 | static int _preload_range(struct my_tree *tree, u64 offset, u64 length) | 136 | static int _preload_range(struct pnfs_inval_markings *marks, |
137 | u64 offset, u64 length) | ||
143 | { | 138 | { |
144 | u64 start, end, s; | 139 | u64 start, end, s; |
145 | int count, i, used = 0, status = -ENOMEM; | 140 | int count, i, used = 0, status = -ENOMEM; |
146 | struct pnfs_inval_tracking **storage; | 141 | struct pnfs_inval_tracking **storage; |
142 | struct my_tree *tree = &marks->im_tree; | ||
147 | 143 | ||
148 | dprintk("%s(%llu, %llu) enter\n", __func__, offset, length); | 144 | dprintk("%s(%llu, %llu) enter\n", __func__, offset, length); |
149 | start = normalize(offset, tree->mtt_step_size); | 145 | start = normalize(offset, tree->mtt_step_size); |
150 | end = normalize_up(offset + length, tree->mtt_step_size); | 146 | end = normalize_up(offset + length, tree->mtt_step_size); |
151 | count = (int)(end - start) / (int)tree->mtt_step_size; | 147 | count = (int)(end - start) / (int)tree->mtt_step_size; |
152 | 148 | ||
153 | /* Pre-malloc what memory we might need */ | 149 | /* Pre-malloc what memory we might need */ |
154 | storage = kmalloc(sizeof(*storage) * count, GFP_NOFS); | 150 | storage = kmalloc(sizeof(*storage) * count, GFP_NOFS); |
155 | if (!storage) | 151 | if (!storage) |
156 | return -ENOMEM; | 152 | return -ENOMEM; |
157 | for (i = 0; i < count; i++) { | 153 | for (i = 0; i < count; i++) { |
158 | storage[i] = kmalloc(sizeof(struct pnfs_inval_tracking), | 154 | storage[i] = kmalloc(sizeof(struct pnfs_inval_tracking), |
159 | GFP_NOFS); | 155 | GFP_NOFS); |
160 | if (!storage[i]) | 156 | if (!storage[i]) |
161 | goto out_cleanup; | 157 | goto out_cleanup; |
162 | } | 158 | } |
163 | 159 | ||
164 | /* Now need lock - HOW??? */ | 160 | spin_lock_bh(&marks->im_lock); |
165 | |||
166 | for (s = start; s < end; s += tree->mtt_step_size) | 161 | for (s = start; s < end; s += tree->mtt_step_size) |
167 | used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]); | 162 | used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]); |
163 | spin_unlock_bh(&marks->im_lock); | ||
168 | 164 | ||
169 | /* Unlock - HOW??? */ | ||
170 | status = 0; | 165 | status = 0; |
171 | 166 | ||
172 | out_cleanup: | 167 | out_cleanup: |
173 | for (i = used; i < count; i++) { | 168 | for (i = used; i < count; i++) { |
174 | if (!storage[i]) | 169 | if (!storage[i]) |
175 | break; | 170 | break; |
176 | kfree(storage[i]); | 171 | kfree(storage[i]); |
177 | } | 172 | } |
178 | kfree(storage); | 173 | kfree(storage); |
179 | return status; | 174 | return status; |
180 | } | 175 | } |
181 | 176 | ||
182 | static void set_needs_init(sector_t *array, sector_t offset) | ||
183 | { | ||
184 | sector_t *p = array; | ||
185 | |||
186 | dprintk("%s enter\n", __func__); | ||
187 | if (!p) | ||
188 | return; | ||
189 | while (*p < offset) | ||
190 | p++; | ||
191 | if (*p == offset) | ||
192 | return; | ||
193 | else if (*p == ~0) { | ||
194 | *p++ = offset; | ||
195 | *p = ~0; | ||
196 | return; | ||
197 | } else { | ||
198 | sector_t *save = p; | ||
199 | dprintk("%s Adding %llu\n", __func__, (u64)offset); | ||
200 | while (*p != ~0) | ||
201 | p++; | ||
202 | p++; | ||
203 | memmove(save + 1, save, (char *)p - (char *)save); | ||
204 | *save = offset; | ||
205 | return; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | /* We are relying on page lock to serialize this */ | 177 | /* We are relying on page lock to serialize this */ |
210 | int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect) | 178 | int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect) |
211 | { | 179 | { |
212 | int rv; | 180 | int rv; |
213 | 181 | ||
214 | spin_lock(&marks->im_lock); | 182 | spin_lock_bh(&marks->im_lock); |
215 | rv = _has_tag(&marks->im_tree, isect, EXTENT_INITIALIZED); | 183 | rv = _has_tag(&marks->im_tree, isect, EXTENT_INITIALIZED); |
216 | spin_unlock(&marks->im_lock); | 184 | spin_unlock_bh(&marks->im_lock); |
217 | return rv; | 185 | return rv; |
218 | } | 186 | } |
219 | 187 | ||
220 | /* Assume start, end already sector aligned */ | 188 | /* Assume start, end already sector aligned */ |
221 | static int | 189 | static int |
222 | _range_has_tag(struct my_tree *tree, u64 start, u64 end, int32_t tag) | 190 | _range_has_tag(struct my_tree *tree, u64 start, u64 end, int32_t tag) |
223 | { | 191 | { |
224 | struct pnfs_inval_tracking *pos; | 192 | struct pnfs_inval_tracking *pos; |
225 | u64 expect = 0; | 193 | u64 expect = 0; |
226 | 194 | ||
227 | dprintk("%s(%llu, %llu, %i) enter\n", __func__, start, end, tag); | 195 | dprintk("%s(%llu, %llu, %i) enter\n", __func__, start, end, tag); |
228 | list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { | 196 | list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { |
229 | if (pos->it_sector >= end) | 197 | if (pos->it_sector >= end) |
230 | continue; | 198 | continue; |
231 | if (!expect) { | 199 | if (!expect) { |
232 | if ((pos->it_sector == end - tree->mtt_step_size) && | 200 | if ((pos->it_sector == end - tree->mtt_step_size) && |
233 | (pos->it_tags & (1 << tag))) { | 201 | (pos->it_tags & (1 << tag))) { |
234 | expect = pos->it_sector - tree->mtt_step_size; | 202 | expect = pos->it_sector - tree->mtt_step_size; |
235 | if (pos->it_sector < tree->mtt_step_size || expect < start) | 203 | if (pos->it_sector < tree->mtt_step_size || expect < start) |
236 | return 1; | 204 | return 1; |
237 | continue; | 205 | continue; |
238 | } else { | 206 | } else { |
239 | return 0; | 207 | return 0; |
240 | } | 208 | } |
241 | } | 209 | } |
242 | if (pos->it_sector != expect || !(pos->it_tags & (1 << tag))) | 210 | if (pos->it_sector != expect || !(pos->it_tags & (1 << tag))) |
243 | return 0; | 211 | return 0; |
244 | expect -= tree->mtt_step_size; | 212 | expect -= tree->mtt_step_size; |
245 | if (expect < start) | 213 | if (expect < start) |
246 | return 1; | 214 | return 1; |
247 | } | 215 | } |
248 | return 0; | 216 | return 0; |
249 | } | 217 | } |
250 | 218 | ||
251 | static int is_range_written(struct pnfs_inval_markings *marks, | 219 | static int is_range_written(struct pnfs_inval_markings *marks, |
252 | sector_t start, sector_t end) | 220 | sector_t start, sector_t end) |
253 | { | 221 | { |
254 | int rv; | 222 | int rv; |
255 | 223 | ||
256 | spin_lock(&marks->im_lock); | 224 | spin_lock_bh(&marks->im_lock); |
257 | rv = _range_has_tag(&marks->im_tree, start, end, EXTENT_WRITTEN); | 225 | rv = _range_has_tag(&marks->im_tree, start, end, EXTENT_WRITTEN); |
258 | spin_unlock(&marks->im_lock); | 226 | spin_unlock_bh(&marks->im_lock); |
259 | return rv; | 227 | return rv; |
260 | } | 228 | } |
261 | 229 | ||
262 | /* Marks sectors in [offest, offset_length) as having been initialized. | 230 | /* Marks sectors in [offest, offset_length) as having been initialized. |
263 | * All lengths are step-aligned, where step is min(pagesize, blocksize). | 231 | * All lengths are step-aligned, where step is min(pagesize, blocksize). |
264 | * Notes where partial block is initialized, and helps prepare it for | 232 | * Currently assumes offset is page-aligned |
265 | * complete initialization later. | ||
266 | */ | 233 | */ |
267 | /* Currently assumes offset is page-aligned */ | ||
268 | int bl_mark_sectors_init(struct pnfs_inval_markings *marks, | 234 | int bl_mark_sectors_init(struct pnfs_inval_markings *marks, |
269 | sector_t offset, sector_t length, | 235 | sector_t offset, sector_t length) |
270 | sector_t **pages) | ||
271 | { | 236 | { |
272 | sector_t s, start, end; | 237 | sector_t start, end; |
273 | sector_t *array = NULL; /* Pages to mark */ | ||
274 | 238 | ||
275 | dprintk("%s(offset=%llu,len=%llu) enter\n", | 239 | dprintk("%s(offset=%llu,len=%llu) enter\n", |
276 | __func__, (u64)offset, (u64)length); | 240 | __func__, (u64)offset, (u64)length); |
277 | s = max((sector_t) 3, | ||
278 | 2 * (marks->im_block_size / (PAGE_CACHE_SECTORS))); | ||
279 | dprintk("%s set max=%llu\n", __func__, (u64)s); | ||
280 | if (pages) { | ||
281 | array = kmalloc(s * sizeof(sector_t), GFP_NOFS); | ||
282 | if (!array) | ||
283 | goto outerr; | ||
284 | array[0] = ~0; | ||
285 | } | ||
286 | 241 | ||
287 | start = normalize(offset, marks->im_block_size); | 242 | start = normalize(offset, marks->im_block_size); |
288 | end = normalize_up(offset + length, marks->im_block_size); | 243 | end = normalize_up(offset + length, marks->im_block_size); |
289 | if (_preload_range(&marks->im_tree, start, end - start)) | 244 | if (_preload_range(marks, start, end - start)) |
290 | goto outerr; | 245 | goto outerr; |
291 | 246 | ||
292 | spin_lock(&marks->im_lock); | 247 | spin_lock_bh(&marks->im_lock); |
293 | |||
294 | for (s = normalize_up(start, PAGE_CACHE_SECTORS); | ||
295 | s < offset; s += PAGE_CACHE_SECTORS) { | ||
296 | dprintk("%s pre-area pages\n", __func__); | ||
297 | /* Portion of used block is not initialized */ | ||
298 | if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED)) | ||
299 | set_needs_init(array, s); | ||
300 | } | ||
301 | if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length)) | 248 | if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length)) |
302 | goto out_unlock; | 249 | goto out_unlock; |
303 | for (s = normalize_up(offset + length, PAGE_CACHE_SECTORS); | 250 | spin_unlock_bh(&marks->im_lock); |
304 | s < end; s += PAGE_CACHE_SECTORS) { | ||
305 | dprintk("%s post-area pages\n", __func__); | ||
306 | if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED)) | ||
307 | set_needs_init(array, s); | ||
308 | } | ||
309 | 251 | ||
310 | spin_unlock(&marks->im_lock); | ||
311 | |||
312 | if (pages) { | ||
313 | if (array[0] == ~0) { | ||
314 | kfree(array); | ||
315 | *pages = NULL; | ||
316 | } else | ||
317 | *pages = array; | ||
318 | } | ||
319 | return 0; | 252 | return 0; |
320 | 253 | ||
321 | out_unlock: | 254 | out_unlock: |
322 | spin_unlock(&marks->im_lock); | 255 | spin_unlock_bh(&marks->im_lock); |
323 | outerr: | 256 | outerr: |
324 | if (pages) { | ||
325 | kfree(array); | ||
326 | *pages = NULL; | ||
327 | } | ||
328 | return -ENOMEM; | 257 | return -ENOMEM; |
329 | } | 258 | } |
330 | 259 | ||
331 | /* Marks sectors in [offest, offset+length) as having been written to disk. | 260 | /* Marks sectors in [offest, offset+length) as having been written to disk. |
332 | * All lengths should be block aligned. | 261 | * All lengths should be block aligned. |
333 | */ | 262 | */ |
334 | static int mark_written_sectors(struct pnfs_inval_markings *marks, | 263 | static int mark_written_sectors(struct pnfs_inval_markings *marks, |
335 | sector_t offset, sector_t length) | 264 | sector_t offset, sector_t length) |
336 | { | 265 | { |
337 | int status; | 266 | int status; |
338 | 267 | ||
339 | dprintk("%s(offset=%llu,len=%llu) enter\n", __func__, | 268 | dprintk("%s(offset=%llu,len=%llu) enter\n", __func__, |
340 | (u64)offset, (u64)length); | 269 | (u64)offset, (u64)length); |
341 | spin_lock(&marks->im_lock); | 270 | spin_lock_bh(&marks->im_lock); |
342 | status = _set_range(&marks->im_tree, EXTENT_WRITTEN, offset, length); | 271 | status = _set_range(&marks->im_tree, EXTENT_WRITTEN, offset, length); |
343 | spin_unlock(&marks->im_lock); | 272 | spin_unlock_bh(&marks->im_lock); |
344 | return status; | 273 | return status; |
345 | } | 274 | } |
346 | 275 | ||
347 | static void print_short_extent(struct pnfs_block_short_extent *be) | 276 | static void print_short_extent(struct pnfs_block_short_extent *be) |
348 | { | 277 | { |
349 | dprintk("PRINT SHORT EXTENT extent %p\n", be); | 278 | dprintk("PRINT SHORT EXTENT extent %p\n", be); |
350 | if (be) { | 279 | if (be) { |
351 | dprintk(" be_f_offset %llu\n", (u64)be->bse_f_offset); | 280 | dprintk(" be_f_offset %llu\n", (u64)be->bse_f_offset); |
352 | dprintk(" be_length %llu\n", (u64)be->bse_length); | 281 | dprintk(" be_length %llu\n", (u64)be->bse_length); |
353 | } | 282 | } |
354 | } | 283 | } |
355 | 284 | ||
356 | static void print_clist(struct list_head *list, unsigned int count) | 285 | static void print_clist(struct list_head *list, unsigned int count) |
357 | { | 286 | { |
358 | struct pnfs_block_short_extent *be; | 287 | struct pnfs_block_short_extent *be; |
359 | unsigned int i = 0; | 288 | unsigned int i = 0; |
360 | 289 | ||
361 | ifdebug(FACILITY) { | 290 | ifdebug(FACILITY) { |
362 | printk(KERN_DEBUG "****************\n"); | 291 | printk(KERN_DEBUG "****************\n"); |
363 | printk(KERN_DEBUG "Extent list looks like:\n"); | 292 | printk(KERN_DEBUG "Extent list looks like:\n"); |
364 | list_for_each_entry(be, list, bse_node) { | 293 | list_for_each_entry(be, list, bse_node) { |
365 | i++; | 294 | i++; |
366 | print_short_extent(be); | 295 | print_short_extent(be); |
367 | } | 296 | } |
368 | if (i != count) | 297 | if (i != count) |
369 | printk(KERN_DEBUG "\n\nExpected %u entries\n\n\n", count); | 298 | printk(KERN_DEBUG "\n\nExpected %u entries\n\n\n", count); |
370 | printk(KERN_DEBUG "****************\n"); | 299 | printk(KERN_DEBUG "****************\n"); |
371 | } | 300 | } |
372 | } | 301 | } |
373 | 302 | ||
374 | /* Note: In theory, we should do more checking that devid's match between | 303 | /* Note: In theory, we should do more checking that devid's match between |
375 | * old and new, but if they don't, the lists are too corrupt to salvage anyway. | 304 | * old and new, but if they don't, the lists are too corrupt to salvage anyway. |
376 | */ | 305 | */ |
377 | /* Note this is very similar to bl_add_merge_extent */ | 306 | /* Note this is very similar to bl_add_merge_extent */ |
378 | static void add_to_commitlist(struct pnfs_block_layout *bl, | 307 | static void add_to_commitlist(struct pnfs_block_layout *bl, |
379 | struct pnfs_block_short_extent *new) | 308 | struct pnfs_block_short_extent *new) |
380 | { | 309 | { |
381 | struct list_head *clist = &bl->bl_commit; | 310 | struct list_head *clist = &bl->bl_commit; |
382 | struct pnfs_block_short_extent *old, *save; | 311 | struct pnfs_block_short_extent *old, *save; |
383 | sector_t end = new->bse_f_offset + new->bse_length; | 312 | sector_t end = new->bse_f_offset + new->bse_length; |
384 | 313 | ||
385 | dprintk("%s enter\n", __func__); | 314 | dprintk("%s enter\n", __func__); |
386 | print_short_extent(new); | 315 | print_short_extent(new); |
387 | print_clist(clist, bl->bl_count); | 316 | print_clist(clist, bl->bl_count); |
388 | bl->bl_count++; | 317 | bl->bl_count++; |
389 | /* Scan for proper place to insert, extending new to the left | 318 | /* Scan for proper place to insert, extending new to the left |
390 | * as much as possible. | 319 | * as much as possible. |
391 | */ | 320 | */ |
392 | list_for_each_entry_safe(old, save, clist, bse_node) { | 321 | list_for_each_entry_safe(old, save, clist, bse_node) { |
393 | if (new->bse_f_offset < old->bse_f_offset) | 322 | if (new->bse_f_offset < old->bse_f_offset) |
394 | break; | 323 | break; |
395 | if (end <= old->bse_f_offset + old->bse_length) { | 324 | if (end <= old->bse_f_offset + old->bse_length) { |
396 | /* Range is already in list */ | 325 | /* Range is already in list */ |
397 | bl->bl_count--; | 326 | bl->bl_count--; |
398 | kfree(new); | 327 | kfree(new); |
399 | return; | 328 | return; |
400 | } else if (new->bse_f_offset <= | 329 | } else if (new->bse_f_offset <= |
401 | old->bse_f_offset + old->bse_length) { | 330 | old->bse_f_offset + old->bse_length) { |
402 | /* new overlaps or abuts existing be */ | 331 | /* new overlaps or abuts existing be */ |
403 | if (new->bse_mdev == old->bse_mdev) { | 332 | if (new->bse_mdev == old->bse_mdev) { |
404 | /* extend new to fully replace old */ | 333 | /* extend new to fully replace old */ |
405 | new->bse_length += new->bse_f_offset - | 334 | new->bse_length += new->bse_f_offset - |
406 | old->bse_f_offset; | 335 | old->bse_f_offset; |
407 | new->bse_f_offset = old->bse_f_offset; | 336 | new->bse_f_offset = old->bse_f_offset; |
408 | list_del(&old->bse_node); | 337 | list_del(&old->bse_node); |
409 | bl->bl_count--; | 338 | bl->bl_count--; |
410 | kfree(old); | 339 | kfree(old); |
411 | } | 340 | } |
412 | } | 341 | } |
413 | } | 342 | } |
414 | /* Note that if we never hit the above break, old will not point to a | 343 | /* Note that if we never hit the above break, old will not point to a |
415 | * valid extent. However, in that case &old->bse_node==list. | 344 | * valid extent. However, in that case &old->bse_node==list. |
416 | */ | 345 | */ |
417 | list_add_tail(&new->bse_node, &old->bse_node); | 346 | list_add_tail(&new->bse_node, &old->bse_node); |
418 | /* Scan forward for overlaps. If we find any, extend new and | 347 | /* Scan forward for overlaps. If we find any, extend new and |
419 | * remove the overlapped extent. | 348 | * remove the overlapped extent. |
420 | */ | 349 | */ |
421 | old = list_prepare_entry(new, clist, bse_node); | 350 | old = list_prepare_entry(new, clist, bse_node); |
422 | list_for_each_entry_safe_continue(old, save, clist, bse_node) { | 351 | list_for_each_entry_safe_continue(old, save, clist, bse_node) { |
423 | if (end < old->bse_f_offset) | 352 | if (end < old->bse_f_offset) |
424 | break; | 353 | break; |
425 | /* new overlaps or abuts old */ | 354 | /* new overlaps or abuts old */ |
426 | if (new->bse_mdev == old->bse_mdev) { | 355 | if (new->bse_mdev == old->bse_mdev) { |
427 | if (end < old->bse_f_offset + old->bse_length) { | 356 | if (end < old->bse_f_offset + old->bse_length) { |
428 | /* extend new to fully cover old */ | 357 | /* extend new to fully cover old */ |
429 | end = old->bse_f_offset + old->bse_length; | 358 | end = old->bse_f_offset + old->bse_length; |
430 | new->bse_length = end - new->bse_f_offset; | 359 | new->bse_length = end - new->bse_f_offset; |
431 | } | 360 | } |
432 | list_del(&old->bse_node); | 361 | list_del(&old->bse_node); |
433 | bl->bl_count--; | 362 | bl->bl_count--; |
434 | kfree(old); | 363 | kfree(old); |
435 | } | 364 | } |
436 | } | 365 | } |
437 | dprintk("%s: after merging\n", __func__); | 366 | dprintk("%s: after merging\n", __func__); |
438 | print_clist(clist, bl->bl_count); | 367 | print_clist(clist, bl->bl_count); |
439 | } | 368 | } |
440 | 369 | ||
441 | /* Note the range described by offset, length is guaranteed to be contained | 370 | /* Note the range described by offset, length is guaranteed to be contained |
442 | * within be. | 371 | * within be. |
372 | * new will be freed, either by this function or add_to_commitlist if they | ||
373 | * decide not to use it, or after LAYOUTCOMMIT uses it in the commitlist. | ||
443 | */ | 374 | */ |
444 | int bl_mark_for_commit(struct pnfs_block_extent *be, | 375 | int bl_mark_for_commit(struct pnfs_block_extent *be, |
445 | sector_t offset, sector_t length) | 376 | sector_t offset, sector_t length, |
377 | struct pnfs_block_short_extent *new) | ||
446 | { | 378 | { |
447 | sector_t new_end, end = offset + length; | 379 | sector_t new_end, end = offset + length; |
448 | struct pnfs_block_short_extent *new; | ||
449 | struct pnfs_block_layout *bl = container_of(be->be_inval, | 380 | struct pnfs_block_layout *bl = container_of(be->be_inval, |
450 | struct pnfs_block_layout, | 381 | struct pnfs_block_layout, |
451 | bl_inval); | 382 | bl_inval); |
452 | 383 | ||
453 | new = kmalloc(sizeof(*new), GFP_NOFS); | ||
454 | if (!new) | ||
455 | return -ENOMEM; | ||
456 | |||
457 | mark_written_sectors(be->be_inval, offset, length); | 384 | mark_written_sectors(be->be_inval, offset, length); |
458 | /* We want to add the range to commit list, but it must be | 385 | /* We want to add the range to commit list, but it must be |
459 | * block-normalized, and verified that the normalized range has | 386 | * block-normalized, and verified that the normalized range has |
460 | * been entirely written to disk. | 387 | * been entirely written to disk. |
461 | */ | 388 | */ |
462 | new->bse_f_offset = offset; | 389 | new->bse_f_offset = offset; |
463 | offset = normalize(offset, bl->bl_blocksize); | 390 | offset = normalize(offset, bl->bl_blocksize); |
464 | if (offset < new->bse_f_offset) { | 391 | if (offset < new->bse_f_offset) { |
465 | if (is_range_written(be->be_inval, offset, new->bse_f_offset)) | 392 | if (is_range_written(be->be_inval, offset, new->bse_f_offset)) |
466 | new->bse_f_offset = offset; | 393 | new->bse_f_offset = offset; |
467 | else | 394 | else |
468 | new->bse_f_offset = offset + bl->bl_blocksize; | 395 | new->bse_f_offset = offset + bl->bl_blocksize; |
469 | } | 396 | } |
470 | new_end = normalize_up(end, bl->bl_blocksize); | 397 | new_end = normalize_up(end, bl->bl_blocksize); |
471 | if (end < new_end) { | 398 | if (end < new_end) { |
472 | if (is_range_written(be->be_inval, end, new_end)) | 399 | if (is_range_written(be->be_inval, end, new_end)) |
473 | end = new_end; | 400 | end = new_end; |
474 | else | 401 | else |
475 | end = new_end - bl->bl_blocksize; | 402 | end = new_end - bl->bl_blocksize; |
476 | } | 403 | } |
477 | if (end <= new->bse_f_offset) { | 404 | if (end <= new->bse_f_offset) { |
478 | kfree(new); | 405 | kfree(new); |
479 | return 0; | 406 | return 0; |
480 | } | 407 | } |
481 | new->bse_length = end - new->bse_f_offset; | 408 | new->bse_length = end - new->bse_f_offset; |
482 | new->bse_devid = be->be_devid; | 409 | new->bse_devid = be->be_devid; |
483 | new->bse_mdev = be->be_mdev; | 410 | new->bse_mdev = be->be_mdev; |
484 | 411 | ||
485 | spin_lock(&bl->bl_ext_lock); | 412 | spin_lock(&bl->bl_ext_lock); |
486 | /* new will be freed, either by add_to_commitlist if it decides not | ||
487 | * to use it, or after LAYOUTCOMMIT uses it in the commitlist. | ||
488 | */ | ||
489 | add_to_commitlist(bl, new); | 413 | add_to_commitlist(bl, new); |
490 | spin_unlock(&bl->bl_ext_lock); | 414 | spin_unlock(&bl->bl_ext_lock); |
491 | return 0; | 415 | return 0; |
492 | } | 416 | } |
493 | 417 | ||
494 | static void print_bl_extent(struct pnfs_block_extent *be) | 418 | static void print_bl_extent(struct pnfs_block_extent *be) |
495 | { | 419 | { |
496 | dprintk("PRINT EXTENT extent %p\n", be); | 420 | dprintk("PRINT EXTENT extent %p\n", be); |
497 | if (be) { | 421 | if (be) { |
498 | dprintk(" be_f_offset %llu\n", (u64)be->be_f_offset); | 422 | dprintk(" be_f_offset %llu\n", (u64)be->be_f_offset); |
499 | dprintk(" be_length %llu\n", (u64)be->be_length); | 423 | dprintk(" be_length %llu\n", (u64)be->be_length); |
500 | dprintk(" be_v_offset %llu\n", (u64)be->be_v_offset); | 424 | dprintk(" be_v_offset %llu\n", (u64)be->be_v_offset); |
501 | dprintk(" be_state %d\n", be->be_state); | 425 | dprintk(" be_state %d\n", be->be_state); |
502 | } | 426 | } |
503 | } | 427 | } |
504 | 428 | ||
505 | static void | 429 | static void |
506 | destroy_extent(struct kref *kref) | 430 | destroy_extent(struct kref *kref) |
507 | { | 431 | { |
508 | struct pnfs_block_extent *be; | 432 | struct pnfs_block_extent *be; |
509 | 433 | ||
510 | be = container_of(kref, struct pnfs_block_extent, be_refcnt); | 434 | be = container_of(kref, struct pnfs_block_extent, be_refcnt); |
511 | dprintk("%s be=%p\n", __func__, be); | 435 | dprintk("%s be=%p\n", __func__, be); |
512 | kfree(be); | 436 | kfree(be); |
513 | } | 437 | } |
514 | 438 | ||
515 | void | 439 | void |
516 | bl_put_extent(struct pnfs_block_extent *be) | 440 | bl_put_extent(struct pnfs_block_extent *be) |
517 | { | 441 | { |
518 | if (be) { | 442 | if (be) { |
519 | dprintk("%s enter %p (%i)\n", __func__, be, | 443 | dprintk("%s enter %p (%i)\n", __func__, be, |
520 | atomic_read(&be->be_refcnt.refcount)); | 444 | atomic_read(&be->be_refcnt.refcount)); |
521 | kref_put(&be->be_refcnt, destroy_extent); | 445 | kref_put(&be->be_refcnt, destroy_extent); |
522 | } | 446 | } |
523 | } | 447 | } |
524 | 448 | ||
525 | struct pnfs_block_extent *bl_alloc_extent(void) | 449 | struct pnfs_block_extent *bl_alloc_extent(void) |
526 | { | 450 | { |
527 | struct pnfs_block_extent *be; | 451 | struct pnfs_block_extent *be; |
528 | 452 | ||
529 | be = kmalloc(sizeof(struct pnfs_block_extent), GFP_NOFS); | 453 | be = kmalloc(sizeof(struct pnfs_block_extent), GFP_NOFS); |
530 | if (!be) | 454 | if (!be) |
531 | return NULL; | 455 | return NULL; |
532 | INIT_LIST_HEAD(&be->be_node); | 456 | INIT_LIST_HEAD(&be->be_node); |
533 | kref_init(&be->be_refcnt); | 457 | kref_init(&be->be_refcnt); |
534 | be->be_inval = NULL; | 458 | be->be_inval = NULL; |
535 | return be; | 459 | return be; |
536 | } | 460 | } |
537 | 461 | ||
538 | static void print_elist(struct list_head *list) | 462 | static void print_elist(struct list_head *list) |
539 | { | 463 | { |
540 | struct pnfs_block_extent *be; | 464 | struct pnfs_block_extent *be; |
541 | dprintk("****************\n"); | 465 | dprintk("****************\n"); |
542 | dprintk("Extent list looks like:\n"); | 466 | dprintk("Extent list looks like:\n"); |
543 | list_for_each_entry(be, list, be_node) { | 467 | list_for_each_entry(be, list, be_node) { |
544 | print_bl_extent(be); | 468 | print_bl_extent(be); |
545 | } | 469 | } |
546 | dprintk("****************\n"); | 470 | dprintk("****************\n"); |
547 | } | 471 | } |
548 | 472 | ||
549 | static inline int | 473 | static inline int |
550 | extents_consistent(struct pnfs_block_extent *old, struct pnfs_block_extent *new) | 474 | extents_consistent(struct pnfs_block_extent *old, struct pnfs_block_extent *new) |
551 | { | 475 | { |
552 | /* Note this assumes new->be_f_offset >= old->be_f_offset */ | 476 | /* Note this assumes new->be_f_offset >= old->be_f_offset */ |
553 | return (new->be_state == old->be_state) && | 477 | return (new->be_state == old->be_state) && |
554 | ((new->be_state == PNFS_BLOCK_NONE_DATA) || | 478 | ((new->be_state == PNFS_BLOCK_NONE_DATA) || |
555 | ((new->be_v_offset - old->be_v_offset == | 479 | ((new->be_v_offset - old->be_v_offset == |
556 | new->be_f_offset - old->be_f_offset) && | 480 | new->be_f_offset - old->be_f_offset) && |
557 | new->be_mdev == old->be_mdev)); | 481 | new->be_mdev == old->be_mdev)); |
558 | } | 482 | } |
559 | 483 | ||
560 | /* Adds new to appropriate list in bl, modifying new and removing existing | 484 | /* Adds new to appropriate list in bl, modifying new and removing existing |
561 | * extents as appropriate to deal with overlaps. | 485 | * extents as appropriate to deal with overlaps. |
562 | * | 486 | * |
563 | * See bl_find_get_extent for list constraints. | 487 | * See bl_find_get_extent for list constraints. |
564 | * | 488 | * |
565 | * Refcount on new is already set. If end up not using it, or error out, | 489 | * Refcount on new is already set. If end up not using it, or error out, |
566 | * need to put the reference. | 490 | * need to put the reference. |
567 | * | 491 | * |
568 | * bl->bl_ext_lock is held by caller. | 492 | * bl->bl_ext_lock is held by caller. |
569 | */ | 493 | */ |
570 | int | 494 | int |
571 | bl_add_merge_extent(struct pnfs_block_layout *bl, | 495 | bl_add_merge_extent(struct pnfs_block_layout *bl, |
572 | struct pnfs_block_extent *new) | 496 | struct pnfs_block_extent *new) |
573 | { | 497 | { |
574 | struct pnfs_block_extent *be, *tmp; | 498 | struct pnfs_block_extent *be, *tmp; |
575 | sector_t end = new->be_f_offset + new->be_length; | 499 | sector_t end = new->be_f_offset + new->be_length; |
576 | struct list_head *list; | 500 | struct list_head *list; |
577 | 501 | ||
578 | dprintk("%s enter with be=%p\n", __func__, new); | 502 | dprintk("%s enter with be=%p\n", __func__, new); |
579 | print_bl_extent(new); | 503 | print_bl_extent(new); |
580 | list = &bl->bl_extents[bl_choose_list(new->be_state)]; | 504 | list = &bl->bl_extents[bl_choose_list(new->be_state)]; |
581 | print_elist(list); | 505 | print_elist(list); |
582 | 506 | ||
583 | /* Scan for proper place to insert, extending new to the left | 507 | /* Scan for proper place to insert, extending new to the left |
584 | * as much as possible. | 508 | * as much as possible. |
585 | */ | 509 | */ |
586 | list_for_each_entry_safe_reverse(be, tmp, list, be_node) { | 510 | list_for_each_entry_safe_reverse(be, tmp, list, be_node) { |
587 | if (new->be_f_offset >= be->be_f_offset + be->be_length) | 511 | if (new->be_f_offset >= be->be_f_offset + be->be_length) |
588 | break; | 512 | break; |
589 | if (new->be_f_offset >= be->be_f_offset) { | 513 | if (new->be_f_offset >= be->be_f_offset) { |
590 | if (end <= be->be_f_offset + be->be_length) { | 514 | if (end <= be->be_f_offset + be->be_length) { |
591 | /* new is a subset of existing be*/ | 515 | /* new is a subset of existing be*/ |
592 | if (extents_consistent(be, new)) { | 516 | if (extents_consistent(be, new)) { |
593 | dprintk("%s: new is subset, ignoring\n", | 517 | dprintk("%s: new is subset, ignoring\n", |
594 | __func__); | 518 | __func__); |
595 | bl_put_extent(new); | 519 | bl_put_extent(new); |
596 | return 0; | 520 | return 0; |
597 | } else { | 521 | } else { |
598 | goto out_err; | 522 | goto out_err; |
599 | } | 523 | } |
600 | } else { | 524 | } else { |
601 | /* |<-- be -->| | 525 | /* |<-- be -->| |
602 | * |<-- new -->| */ | 526 | * |<-- new -->| */ |
603 | if (extents_consistent(be, new)) { | 527 | if (extents_consistent(be, new)) { |
604 | /* extend new to fully replace be */ | 528 | /* extend new to fully replace be */ |
605 | new->be_length += new->be_f_offset - | 529 | new->be_length += new->be_f_offset - |
606 | be->be_f_offset; | 530 | be->be_f_offset; |
607 | new->be_f_offset = be->be_f_offset; | 531 | new->be_f_offset = be->be_f_offset; |
608 | new->be_v_offset = be->be_v_offset; | 532 | new->be_v_offset = be->be_v_offset; |
609 | dprintk("%s: removing %p\n", __func__, be); | 533 | dprintk("%s: removing %p\n", __func__, be); |
610 | list_del(&be->be_node); | 534 | list_del(&be->be_node); |
611 | bl_put_extent(be); | 535 | bl_put_extent(be); |
612 | } else { | 536 | } else { |
613 | goto out_err; | 537 | goto out_err; |
614 | } | 538 | } |
615 | } | 539 | } |
616 | } else if (end >= be->be_f_offset + be->be_length) { | 540 | } else if (end >= be->be_f_offset + be->be_length) { |
617 | /* new extent overlap existing be */ | 541 | /* new extent overlap existing be */ |
618 | if (extents_consistent(be, new)) { | 542 | if (extents_consistent(be, new)) { |
619 | /* extend new to fully replace be */ | 543 | /* extend new to fully replace be */ |
620 | dprintk("%s: removing %p\n", __func__, be); | 544 | dprintk("%s: removing %p\n", __func__, be); |
621 | list_del(&be->be_node); | 545 | list_del(&be->be_node); |
622 | bl_put_extent(be); | 546 | bl_put_extent(be); |
623 | } else { | 547 | } else { |
624 | goto out_err; | 548 | goto out_err; |
625 | } | 549 | } |
626 | } else if (end > be->be_f_offset) { | 550 | } else if (end > be->be_f_offset) { |
627 | /* |<-- be -->| | 551 | /* |<-- be -->| |
628 | *|<-- new -->| */ | 552 | *|<-- new -->| */ |
629 | if (extents_consistent(new, be)) { | 553 | if (extents_consistent(new, be)) { |
630 | /* extend new to fully replace be */ | 554 | /* extend new to fully replace be */ |
631 | new->be_length += be->be_f_offset + be->be_length - | 555 | new->be_length += be->be_f_offset + be->be_length - |
632 | new->be_f_offset - new->be_length; | 556 | new->be_f_offset - new->be_length; |
633 | dprintk("%s: removing %p\n", __func__, be); | 557 | dprintk("%s: removing %p\n", __func__, be); |
634 | list_del(&be->be_node); | 558 | list_del(&be->be_node); |
635 | bl_put_extent(be); | 559 | bl_put_extent(be); |
636 | } else { | 560 | } else { |
637 | goto out_err; | 561 | goto out_err; |
638 | } | 562 | } |
639 | } | 563 | } |
640 | } | 564 | } |
641 | /* Note that if we never hit the above break, be will not point to a | 565 | /* Note that if we never hit the above break, be will not point to a |
642 | * valid extent. However, in that case &be->be_node==list. | 566 | * valid extent. However, in that case &be->be_node==list. |
643 | */ | 567 | */ |
644 | list_add(&new->be_node, &be->be_node); | 568 | list_add(&new->be_node, &be->be_node); |
645 | dprintk("%s: inserting new\n", __func__); | 569 | dprintk("%s: inserting new\n", __func__); |
646 | print_elist(list); | 570 | print_elist(list); |
647 | /* FIXME - The per-list consistency checks have all been done, | 571 | /* FIXME - The per-list consistency checks have all been done, |
648 | * should now check cross-list consistency. | 572 | * should now check cross-list consistency. |
649 | */ | 573 | */ |
650 | return 0; | 574 | return 0; |
651 | 575 | ||
652 | out_err: | 576 | out_err: |
653 | bl_put_extent(new); | 577 | bl_put_extent(new); |
654 | return -EIO; | 578 | return -EIO; |
655 | } | 579 | } |
656 | 580 | ||
/* Returns extent, or NULL. If a second READ extent exists, it is returned
 * in cow_read, if given.
 *
 * The extents are kept in two separate ordered lists, one for READ and NONE,
 * one for READWRITE and INVALID. Within each list, we assume:
 * 1. Extents are ordered by file offset.
 * 2. For any given isect, there is at most one extent that matches.
 *
 * The returned extent (and *cow_read, when set) carry a reference taken
 * with kref_get(); the caller must drop it with bl_put_extent().
 */
struct pnfs_block_extent *
bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
	    struct pnfs_block_extent **cow_read)
{
	struct pnfs_block_extent *be, *cow, *ret;
	int i;

	dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
	cow = ret = NULL;
	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		/* Each list is ordered by be_f_offset, so a reverse walk
		 * can stop as soon as isect lies past an extent's end. */
		list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
			if (isect >= be->be_f_offset + be->be_length)
				break;
			if (isect >= be->be_f_offset) {
				/* We have found an extent */
				dprintk("%s Get %p (%i)\n", __func__, be,
					atomic_read(&be->be_refcnt.refcount));
				kref_get(&be->be_refcnt);
				if (!ret)
					ret = be;
				else if (be->be_state != PNFS_BLOCK_READ_DATA)
					bl_put_extent(be);
				else
					cow = be;
				break;
			}
		}
		/* If the first match is INVALID and the caller asked for a
		 * cow_read, keep scanning the other list for a READ extent;
		 * otherwise we are done. */
		if (ret &&
		    (!cow_read || ret->be_state != PNFS_BLOCK_INVALID_DATA))
			break;
	}
	spin_unlock(&bl->bl_ext_lock);
	if (cow_read)
		*cow_read = cow;
	print_bl_extent(ret);
	return ret;
}
703 | 627 | ||
704 | /* Similar to bl_find_get_extent, but called with lock held, and ignores cow */ | 628 | /* Similar to bl_find_get_extent, but called with lock held, and ignores cow */ |
705 | static struct pnfs_block_extent * | 629 | static struct pnfs_block_extent * |
706 | bl_find_get_extent_locked(struct pnfs_block_layout *bl, sector_t isect) | 630 | bl_find_get_extent_locked(struct pnfs_block_layout *bl, sector_t isect) |
707 | { | 631 | { |
708 | struct pnfs_block_extent *be, *ret = NULL; | 632 | struct pnfs_block_extent *be, *ret = NULL; |
709 | int i; | 633 | int i; |
710 | 634 | ||
711 | dprintk("%s enter with isect %llu\n", __func__, (u64)isect); | 635 | dprintk("%s enter with isect %llu\n", __func__, (u64)isect); |
712 | for (i = 0; i < EXTENT_LISTS; i++) { | 636 | for (i = 0; i < EXTENT_LISTS; i++) { |
713 | if (ret) | 637 | if (ret) |
714 | break; | 638 | break; |
715 | list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) { | 639 | list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) { |
716 | if (isect >= be->be_f_offset + be->be_length) | 640 | if (isect >= be->be_f_offset + be->be_length) |
717 | break; | 641 | break; |
718 | if (isect >= be->be_f_offset) { | 642 | if (isect >= be->be_f_offset) { |
719 | /* We have found an extent */ | 643 | /* We have found an extent */ |
720 | dprintk("%s Get %p (%i)\n", __func__, be, | 644 | dprintk("%s Get %p (%i)\n", __func__, be, |
721 | atomic_read(&be->be_refcnt.refcount)); | 645 | atomic_read(&be->be_refcnt.refcount)); |
722 | kref_get(&be->be_refcnt); | 646 | kref_get(&be->be_refcnt); |
723 | ret = be; | 647 | ret = be; |
724 | break; | 648 | break; |
725 | } | 649 | } |
726 | } | 650 | } |
727 | } | 651 | } |
728 | print_bl_extent(ret); | 652 | print_bl_extent(ret); |
729 | return ret; | 653 | return ret; |
730 | } | 654 | } |
731 | 655 | ||
/* XDR-encode the extents queued on bl->bl_commit for a LAYOUTCOMMIT,
 * moving each successfully encoded extent onto bl->bl_committing
 * (later consumed by clean_pnfs_block_layoutupdate()).
 * Always returns 0; a failed xdr_reserve_space simply stops encoding.
 */
int
encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
			       struct xdr_stream *xdr,
			       const struct nfs4_layoutcommit_args *arg)
{
	struct pnfs_block_short_extent *lce, *save;
	unsigned int count = 0;
	__be32 *p, *xdr_start;

	dprintk("%s enter\n", __func__);
	/* BUG - creation of bl_commit is buggy - need to wait for
	 * entire block to be marked WRITTEN before it can be added.
	 */
	spin_lock(&bl->bl_ext_lock);
	/* Want to adjust for possible truncate */
	/* We now want to adjust argument range */

	/* XDR encode the ranges found */
	/* Reserve 8 bytes up front for the opaque length and the extent
	 * count; both are back-filled after the loop. */
	xdr_start = xdr_reserve_space(xdr, 8);
	if (!xdr_start)
		goto out;
	list_for_each_entry_safe(lce, save, &bl->bl_commit, bse_node) {
		/* deviceid + 3 hypers (offset, length, storage offset)
		 * + 1 word of state */
		p = xdr_reserve_space(xdr, 7 * 4 + sizeof(lce->bse_devid.data));
		if (!p)
			break;
		p = xdr_encode_opaque_fixed(p, lce->bse_devid.data, NFS4_DEVICEID4_SIZE);
		/* On-the-wire offsets/lengths are in bytes, not sectors. */
		p = xdr_encode_hyper(p, lce->bse_f_offset << SECTOR_SHIFT);
		p = xdr_encode_hyper(p, lce->bse_length << SECTOR_SHIFT);
		p = xdr_encode_hyper(p, 0LL);
		*p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
		/* Park the extent on bl_committing until the server reply
		 * is processed. */
		list_del(&lce->bse_node);
		list_add_tail(&lce->bse_node, &bl->bl_committing);
		bl->bl_count--;
		count++;
	}
	/* Back-fill the reserved header: byte length of what follows,
	 * then the number of extents encoded. */
	xdr_start[0] = cpu_to_be32((xdr->p - xdr_start - 1) * 4);
	xdr_start[1] = cpu_to_be32(count);
out:
	spin_unlock(&bl->bl_ext_lock);
	dprintk("%s found %i ranges\n", __func__, count);
	return 0;
}
774 | 698 | ||
775 | /* Helper function to set_to_rw that initialize a new extent */ | 699 | /* Helper function to set_to_rw that initialize a new extent */ |
776 | static void | 700 | static void |
777 | _prep_new_extent(struct pnfs_block_extent *new, | 701 | _prep_new_extent(struct pnfs_block_extent *new, |
778 | struct pnfs_block_extent *orig, | 702 | struct pnfs_block_extent *orig, |
779 | sector_t offset, sector_t length, int state) | 703 | sector_t offset, sector_t length, int state) |
780 | { | 704 | { |
781 | kref_init(&new->be_refcnt); | 705 | kref_init(&new->be_refcnt); |
782 | /* don't need to INIT_LIST_HEAD(&new->be_node) */ | 706 | /* don't need to INIT_LIST_HEAD(&new->be_node) */ |
783 | memcpy(&new->be_devid, &orig->be_devid, sizeof(struct nfs4_deviceid)); | 707 | memcpy(&new->be_devid, &orig->be_devid, sizeof(struct nfs4_deviceid)); |
784 | new->be_mdev = orig->be_mdev; | 708 | new->be_mdev = orig->be_mdev; |
785 | new->be_f_offset = offset; | 709 | new->be_f_offset = offset; |
786 | new->be_length = length; | 710 | new->be_length = length; |
787 | new->be_v_offset = orig->be_v_offset - orig->be_f_offset + offset; | 711 | new->be_v_offset = orig->be_v_offset - orig->be_f_offset + offset; |
788 | new->be_state = state; | 712 | new->be_state = state; |
789 | new->be_inval = orig->be_inval; | 713 | new->be_inval = orig->be_inval; |
790 | } | 714 | } |
791 | 715 | ||
/* Tries to merge be with extent in front of it in list.
 * Frees storage if not used.
 *
 * Returns the merged extent (storage) on success, otherwise be unchanged.
 * The merged extent takes prev's place in the list and both old extents
 * have their references dropped.
 */
static struct pnfs_block_extent *
_front_merge(struct pnfs_block_extent *be, struct list_head *head,
	     struct pnfs_block_extent *storage)
{
	struct pnfs_block_extent *prev;

	if (!storage)
		goto no_merge;
	/* be is the list head itself, or has no predecessor */
	if (&be->be_node == head || be->be_node.prev == head)
		goto no_merge;
	prev = list_entry(be->be_node.prev, struct pnfs_block_extent, be_node);
	/* Only merge extents that are contiguous and mutually consistent */
	if ((prev->be_f_offset + prev->be_length != be->be_f_offset) ||
	    !extents_consistent(prev, be))
		goto no_merge;
	/* Build the combined extent in storage, splice it in where prev
	 * was, then drop both superseded extents. */
	_prep_new_extent(storage, prev, prev->be_f_offset,
			 prev->be_length + be->be_length, prev->be_state);
	list_replace(&prev->be_node, &storage->be_node);
	bl_put_extent(prev);
	list_del(&be->be_node);
	bl_put_extent(be);
	return storage;

no_merge:
	kfree(storage);
	return be;
}
821 | 745 | ||
822 | static u64 | 746 | static u64 |
823 | set_to_rw(struct pnfs_block_layout *bl, u64 offset, u64 length) | 747 | set_to_rw(struct pnfs_block_layout *bl, u64 offset, u64 length) |
824 | { | 748 | { |
825 | u64 rv = offset + length; | 749 | u64 rv = offset + length; |
826 | struct pnfs_block_extent *be, *e1, *e2, *e3, *new, *old; | 750 | struct pnfs_block_extent *be, *e1, *e2, *e3, *new, *old; |
827 | struct pnfs_block_extent *children[3]; | 751 | struct pnfs_block_extent *children[3]; |
828 | struct pnfs_block_extent *merge1 = NULL, *merge2 = NULL; | 752 | struct pnfs_block_extent *merge1 = NULL, *merge2 = NULL; |
829 | int i = 0, j; | 753 | int i = 0, j; |
830 | 754 | ||
831 | dprintk("%s(%llu, %llu)\n", __func__, offset, length); | 755 | dprintk("%s(%llu, %llu)\n", __func__, offset, length); |
832 | /* Create storage for up to three new extents e1, e2, e3 */ | 756 | /* Create storage for up to three new extents e1, e2, e3 */ |
833 | e1 = kmalloc(sizeof(*e1), GFP_ATOMIC); | 757 | e1 = kmalloc(sizeof(*e1), GFP_ATOMIC); |
834 | e2 = kmalloc(sizeof(*e2), GFP_ATOMIC); | 758 | e2 = kmalloc(sizeof(*e2), GFP_ATOMIC); |
835 | e3 = kmalloc(sizeof(*e3), GFP_ATOMIC); | 759 | e3 = kmalloc(sizeof(*e3), GFP_ATOMIC); |
836 | /* BUG - we are ignoring any failure */ | 760 | /* BUG - we are ignoring any failure */ |
837 | if (!e1 || !e2 || !e3) | 761 | if (!e1 || !e2 || !e3) |
838 | goto out_nosplit; | 762 | goto out_nosplit; |
839 | 763 | ||
840 | spin_lock(&bl->bl_ext_lock); | 764 | spin_lock(&bl->bl_ext_lock); |
841 | be = bl_find_get_extent_locked(bl, offset); | 765 | be = bl_find_get_extent_locked(bl, offset); |
842 | rv = be->be_f_offset + be->be_length; | 766 | rv = be->be_f_offset + be->be_length; |
843 | if (be->be_state != PNFS_BLOCK_INVALID_DATA) { | 767 | if (be->be_state != PNFS_BLOCK_INVALID_DATA) { |
844 | spin_unlock(&bl->bl_ext_lock); | 768 | spin_unlock(&bl->bl_ext_lock); |
845 | goto out_nosplit; | 769 | goto out_nosplit; |
846 | } | 770 | } |
847 | /* Add e* to children, bumping e*'s krefs */ | 771 | /* Add e* to children, bumping e*'s krefs */ |
848 | if (be->be_f_offset != offset) { | 772 | if (be->be_f_offset != offset) { |
849 | _prep_new_extent(e1, be, be->be_f_offset, | 773 | _prep_new_extent(e1, be, be->be_f_offset, |
850 | offset - be->be_f_offset, | 774 | offset - be->be_f_offset, |
851 | PNFS_BLOCK_INVALID_DATA); | 775 | PNFS_BLOCK_INVALID_DATA); |
852 | children[i++] = e1; | 776 | children[i++] = e1; |
853 | print_bl_extent(e1); | 777 | print_bl_extent(e1); |
854 | } else | 778 | } else |
855 | merge1 = e1; | 779 | merge1 = e1; |
856 | _prep_new_extent(e2, be, offset, | 780 | _prep_new_extent(e2, be, offset, |
857 | min(length, be->be_f_offset + be->be_length - offset), | 781 | min(length, be->be_f_offset + be->be_length - offset), |
858 | PNFS_BLOCK_READWRITE_DATA); | 782 | PNFS_BLOCK_READWRITE_DATA); |
859 | children[i++] = e2; | 783 | children[i++] = e2; |
860 | print_bl_extent(e2); | 784 | print_bl_extent(e2); |
861 | if (offset + length < be->be_f_offset + be->be_length) { | 785 | if (offset + length < be->be_f_offset + be->be_length) { |
862 | _prep_new_extent(e3, be, e2->be_f_offset + e2->be_length, | 786 | _prep_new_extent(e3, be, e2->be_f_offset + e2->be_length, |
863 | be->be_f_offset + be->be_length - | 787 | be->be_f_offset + be->be_length - |
864 | offset - length, | 788 | offset - length, |
865 | PNFS_BLOCK_INVALID_DATA); | 789 | PNFS_BLOCK_INVALID_DATA); |
866 | children[i++] = e3; | 790 | children[i++] = e3; |
867 | print_bl_extent(e3); | 791 | print_bl_extent(e3); |
868 | } else | 792 | } else |
869 | merge2 = e3; | 793 | merge2 = e3; |
870 | 794 | ||
871 | /* Remove be from list, and insert the e* */ | 795 | /* Remove be from list, and insert the e* */ |
872 | /* We don't get refs on e*, since this list is the base reference | 796 | /* We don't get refs on e*, since this list is the base reference |
873 | * set when init'ed. | 797 | * set when init'ed. |
874 | */ | 798 | */ |
875 | if (i < 3) | 799 | if (i < 3) |
876 | children[i] = NULL; | 800 | children[i] = NULL; |
877 | new = children[0]; | 801 | new = children[0]; |
878 | list_replace(&be->be_node, &new->be_node); | 802 | list_replace(&be->be_node, &new->be_node); |
879 | bl_put_extent(be); | 803 | bl_put_extent(be); |
880 | new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge1); | 804 | new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge1); |
881 | for (j = 1; j < i; j++) { | 805 | for (j = 1; j < i; j++) { |
882 | old = new; | 806 | old = new; |
883 | new = children[j]; | 807 | new = children[j]; |
884 | list_add(&new->be_node, &old->be_node); | 808 | list_add(&new->be_node, &old->be_node); |
885 | } | 809 | } |
886 | if (merge2) { | 810 | if (merge2) { |
887 | /* This is a HACK, should just create a _back_merge function */ | 811 | /* This is a HACK, should just create a _back_merge function */ |
888 | new = list_entry(new->be_node.next, | 812 | new = list_entry(new->be_node.next, |
889 | struct pnfs_block_extent, be_node); | 813 | struct pnfs_block_extent, be_node); |
890 | new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge2); | 814 | new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge2); |
891 | } | 815 | } |
892 | spin_unlock(&bl->bl_ext_lock); | 816 | spin_unlock(&bl->bl_ext_lock); |
893 | 817 | ||
894 | /* Since we removed the base reference above, be is now scheduled for | 818 | /* Since we removed the base reference above, be is now scheduled for |
895 | * destruction. | 819 | * destruction. |
896 | */ | 820 | */ |
897 | bl_put_extent(be); | 821 | bl_put_extent(be); |
898 | dprintk("%s returns %llu after split\n", __func__, rv); | 822 | dprintk("%s returns %llu after split\n", __func__, rv); |
899 | return rv; | 823 | return rv; |
900 | 824 | ||
901 | out_nosplit: | 825 | out_nosplit: |
902 | kfree(e1); | 826 | kfree(e1); |
903 | kfree(e2); | 827 | kfree(e2); |
904 | kfree(e3); | 828 | kfree(e3); |
905 | dprintk("%s returns %llu without splitting\n", __func__, rv); | 829 | dprintk("%s returns %llu without splitting\n", __func__, rv); |
906 | return rv; | 830 | return rv; |
907 | } | 831 | } |
908 | 832 | ||
909 | void | 833 | void |
910 | clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, | 834 | clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, |
911 | const struct nfs4_layoutcommit_args *arg, | 835 | const struct nfs4_layoutcommit_args *arg, |
912 | int status) | 836 | int status) |
913 | { | 837 | { |
914 | struct pnfs_block_short_extent *lce, *save; | 838 | struct pnfs_block_short_extent *lce, *save; |
915 | 839 | ||
916 | dprintk("%s status %d\n", __func__, status); | 840 | dprintk("%s status %d\n", __func__, status); |
917 | list_for_each_entry_safe(lce, save, &bl->bl_committing, bse_node) { | 841 | list_for_each_entry_safe(lce, save, &bl->bl_committing, bse_node) { |
918 | if (likely(!status)) { | 842 | if (likely(!status)) { |
919 | u64 offset = lce->bse_f_offset; | 843 | u64 offset = lce->bse_f_offset; |
920 | u64 end = offset + lce->bse_length; | 844 | u64 end = offset + lce->bse_length; |
921 | 845 | ||
922 | do { | 846 | do { |
923 | offset = set_to_rw(bl, offset, end - offset); | 847 | offset = set_to_rw(bl, offset, end - offset); |
924 | } while (offset < end); | 848 | } while (offset < end); |
925 | list_del(&lce->bse_node); | 849 | list_del(&lce->bse_node); |
926 | 850 | ||
927 | kfree(lce); | 851 | kfree(lce); |
928 | } else { | 852 | } else { |
929 | list_del(&lce->bse_node); | 853 | list_del(&lce->bse_node); |
930 | spin_lock(&bl->bl_ext_lock); | 854 | spin_lock(&bl->bl_ext_lock); |
fs/nfs/callback.h
1 | /* | 1 | /* |
2 | * linux/fs/nfs/callback.h | 2 | * linux/fs/nfs/callback.h |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Trond Myklebust | 4 | * Copyright (C) 2004 Trond Myklebust |
5 | * | 5 | * |
6 | * NFSv4 callback definitions | 6 | * NFSv4 callback definitions |
7 | */ | 7 | */ |
8 | #ifndef __LINUX_FS_NFS_CALLBACK_H | 8 | #ifndef __LINUX_FS_NFS_CALLBACK_H |
9 | #define __LINUX_FS_NFS_CALLBACK_H | 9 | #define __LINUX_FS_NFS_CALLBACK_H |
10 | #include <linux/sunrpc/svc.h> | 10 | #include <linux/sunrpc/svc.h> |
11 | 11 | ||
12 | #define NFS4_CALLBACK 0x40000000 | 12 | #define NFS4_CALLBACK 0x40000000 |
13 | #define NFS4_CALLBACK_XDRSIZE 2048 | 13 | #define NFS4_CALLBACK_XDRSIZE 2048 |
14 | #define NFS4_CALLBACK_BUFSIZE (1024 + NFS4_CALLBACK_XDRSIZE) | 14 | #define NFS4_CALLBACK_BUFSIZE (1024 + NFS4_CALLBACK_XDRSIZE) |
15 | 15 | ||
/* RPC procedure numbers for the NFSv4 callback program. */
enum nfs4_callback_procnum {
	CB_NULL = 0,
	CB_COMPOUND = 1,
};
20 | 20 | ||
/* Operation numbers carried inside a CB_COMPOUND request. */
enum nfs4_callback_opnum {
	OP_CB_GETATTR = 3,
	OP_CB_RECALL = 4,
	/* Callback operations new to NFSv4.1 */
	OP_CB_LAYOUTRECALL = 5,
	OP_CB_NOTIFY = 6,
	OP_CB_PUSH_DELEG = 7,
	OP_CB_RECALL_ANY = 8,
	OP_CB_RECALLABLE_OBJ_AVAIL = 9,
	OP_CB_RECALL_SLOT = 10,
	OP_CB_SEQUENCE = 11,
	OP_CB_WANTS_CANCELLED = 12,
	OP_CB_NOTIFY_LOCK = 13,
	OP_CB_NOTIFY_DEVICEID = 14,
	OP_CB_ILLEGAL = 10044,
};
37 | 37 | ||
/* Per-request state threaded through callback processing. */
struct cb_process_state {
	__be32 drc_status;	/* presumably duplicate-reply-cache status -- verify */
	struct nfs_client *clp;	/* client the callback is addressed to */
	int slotid;		/* backchannel slot in use */
};
43 | 43 | ||
/* Decoded CB_COMPOUND request header. */
struct cb_compound_hdr_arg {
	unsigned int taglen;
	const char *tag;
	unsigned int minorversion;
	unsigned int cb_ident; /* v4.0 callback identifier */
	unsigned nops;		/* number of operations in the compound */
};
51 | 51 | ||
/* CB_COMPOUND reply header; status and nops point into the XDR buffer
 * so they can be back-filled after the operations are processed. */
struct cb_compound_hdr_res {
	__be32 *status;
	unsigned int taglen;
	const char *tag;
	__be32 *nops;
};
58 | 58 | ||
/* Decoded arguments of a CB_GETATTR operation. */
struct cb_getattrargs {
	struct sockaddr *addr;	/* address of the calling server */
	struct nfs_fh fh;	/* file the attributes are requested for */
	uint32_t bitmap[2];	/* requested attribute bitmap */
};
64 | 64 | ||
/* Results returned for a CB_GETATTR operation. */
struct cb_getattrres {
	__be32 status;
	uint32_t bitmap[2];	/* attributes actually supplied */
	uint64_t size;
	uint64_t change_attr;
	struct timespec ctime;
	struct timespec mtime;
};
73 | 73 | ||
/* Decoded arguments of a CB_RECALL (delegation recall) operation. */
struct cb_recallargs {
	struct sockaddr *addr;
	struct nfs_fh fh;
	nfs4_stateid stateid;	/* delegation stateid being recalled */
	uint32_t truncate;
};
80 | 80 | ||
81 | #if defined(CONFIG_NFS_V4_1) | 81 | #if defined(CONFIG_NFS_V4_1) |
82 | 82 | ||
/* One referring call: a (sequenceid, slotid) pair naming an earlier
 * forechannel request. */
struct referring_call {
	uint32_t rc_sequenceid;
	uint32_t rc_slotid;
};
87 | 87 | ||
/* A list of referring calls, grouped by the session they were made on. */
struct referring_call_list {
	struct nfs4_sessionid rcl_sessionid;
	uint32_t rcl_nrefcalls;			/* entries in rcl_refcalls */
	struct referring_call *rcl_refcalls;
};
93 | 93 | ||
/* Decoded arguments of a CB_SEQUENCE operation. */
struct cb_sequenceargs {
	struct sockaddr *csa_addr;
	struct nfs4_sessionid csa_sessionid;
	uint32_t csa_sequenceid;
	uint32_t csa_slotid;
	uint32_t csa_highestslotid;
	uint32_t csa_cachethis;
	uint32_t csa_nrclists;			/* entries in csa_rclists */
	struct referring_call_list *csa_rclists;
};
104 | 104 | ||
/* Results returned for a CB_SEQUENCE operation. */
struct cb_sequenceres {
	__be32 csr_status;
	struct nfs4_sessionid csr_sessionid;
	uint32_t csr_sequenceid;
	uint32_t csr_slotid;
	uint32_t csr_highestslotid;
	uint32_t csr_target_highestslotid;
};
113 | 113 | ||
114 | extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, | 114 | extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, |
115 | struct cb_sequenceres *res, | 115 | struct cb_sequenceres *res, |
116 | struct cb_process_state *cps); | 116 | struct cb_process_state *cps); |
117 | 117 | ||
118 | extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, | 118 | extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, |
119 | const nfs4_stateid *stateid); | 119 | const nfs4_stateid *stateid); |
120 | 120 | ||
121 | #define RCA4_TYPE_MASK_RDATA_DLG 0 | 121 | #define RCA4_TYPE_MASK_RDATA_DLG 0 |
122 | #define RCA4_TYPE_MASK_WDATA_DLG 1 | 122 | #define RCA4_TYPE_MASK_WDATA_DLG 1 |
123 | #define RCA4_TYPE_MASK_DIR_DLG 2 | 123 | #define RCA4_TYPE_MASK_DIR_DLG 2 |
124 | #define RCA4_TYPE_MASK_FILE_LAYOUT 3 | 124 | #define RCA4_TYPE_MASK_FILE_LAYOUT 3 |
125 | #define RCA4_TYPE_MASK_BLK_LAYOUT 4 | 125 | #define RCA4_TYPE_MASK_BLK_LAYOUT 4 |
126 | #define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN 8 | 126 | #define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN 8 |
127 | #define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX 9 | 127 | #define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX 9 |
128 | #define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12 | 128 | #define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12 |
129 | #define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15 | 129 | #define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15 |
130 | #define RCA4_TYPE_MASK_ALL 0xf31f | 130 | #define RCA4_TYPE_MASK_ALL 0xf31f |
131 | 131 | ||
/* Decoded arguments of a CB_RECALL_ANY operation. */
struct cb_recallanyargs {
	struct sockaddr *craa_addr;
	uint32_t craa_objs_to_keep;	/* objects the client may retain */
	uint32_t craa_type_mask;	/* RCA4_TYPE_MASK_* bits above */
};
137 | 137 | ||
138 | extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, | 138 | extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, |
139 | void *dummy, | 139 | void *dummy, |
140 | struct cb_process_state *cps); | 140 | struct cb_process_state *cps); |
141 | 141 | ||
/* Decoded arguments of a CB_RECALL_SLOT operation. */
struct cb_recallslotargs {
	struct sockaddr *crsa_addr;
	uint32_t crsa_target_max_slots;	/* new highest slot the server wants */
};
146 | extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, | 146 | extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, |
147 | void *dummy, | 147 | void *dummy, |
148 | struct cb_process_state *cps); | 148 | struct cb_process_state *cps); |
149 | 149 | ||
/* Decoded arguments of a CB_LAYOUTRECALL operation.  Depending on
 * cbl_recall_type the union holds either a per-file (fh/range/stateid)
 * target or a per-fsid target; a bulk recall uses neither. */
struct cb_layoutrecallargs {
	struct sockaddr *cbl_addr;
	uint32_t cbl_recall_type;
	uint32_t cbl_layout_type;
	uint32_t cbl_layoutchanged;
	union {
		struct {
			struct nfs_fh cbl_fh;
			struct pnfs_layout_range cbl_range;
			nfs4_stateid cbl_stateid;
		};
		struct nfs_fsid cbl_fsid;
	};
};
164 | 164 | ||
165 | extern unsigned nfs4_callback_layoutrecall( | 165 | extern __be32 nfs4_callback_layoutrecall( |
166 | struct cb_layoutrecallargs *args, | 166 | struct cb_layoutrecallargs *args, |
167 | void *dummy, struct cb_process_state *cps); | 167 | void *dummy, struct cb_process_state *cps); |
168 | 168 | ||
169 | extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); | 169 | extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); |
170 | 170 | ||
/* One device notification entry within a CB_NOTIFY_DEVICEID. */
struct cb_devicenotifyitem {
	uint32_t cbd_notify_type;
	uint32_t cbd_layout_type;
	struct nfs4_deviceid cbd_dev_id;
	uint32_t cbd_immediate;
};
177 | 177 | ||
/* Decoded arguments of a CB_NOTIFY_DEVICEID operation.
 * ndevs comes from the wire; the decoder must bound-check it before
 * allocating/filling devs. */
struct cb_devicenotifyargs {
	int ndevs;				/* entries in devs */
	struct cb_devicenotifyitem *devs;
};
182 | 182 | ||
183 | extern __be32 nfs4_callback_devicenotify( | 183 | extern __be32 nfs4_callback_devicenotify( |
184 | struct cb_devicenotifyargs *args, | 184 | struct cb_devicenotifyargs *args, |
185 | void *dummy, struct cb_process_state *cps); | 185 | void *dummy, struct cb_process_state *cps); |
186 | 186 | ||
187 | #endif /* CONFIG_NFS_V4_1 */ | 187 | #endif /* CONFIG_NFS_V4_1 */ |
188 | extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *); | 188 | extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *); |
189 | extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, | 189 | extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, |
190 | struct cb_getattrres *res, | 190 | struct cb_getattrres *res, |
191 | struct cb_process_state *cps); | 191 | struct cb_process_state *cps); |
192 | extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, | 192 | extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, |
193 | struct cb_process_state *cps); | 193 | struct cb_process_state *cps); |
194 | #ifdef CONFIG_NFS_V4 | 194 | #ifdef CONFIG_NFS_V4 |
195 | extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); | 195 | extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); |
196 | extern void nfs_callback_down(int minorversion); | 196 | extern void nfs_callback_down(int minorversion); |
197 | extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, | 197 | extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, |
198 | const nfs4_stateid *stateid); | 198 | const nfs4_stateid *stateid); |
199 | extern int nfs4_set_callback_sessionid(struct nfs_client *clp); | 199 | extern int nfs4_set_callback_sessionid(struct nfs_client *clp); |
200 | #endif /* CONFIG_NFS_V4 */ | 200 | #endif /* CONFIG_NFS_V4 */ |
201 | /* | 201 | /* |
202 | * nfs41: Callbacks are expected to not cause substantial latency, | 202 | * nfs41: Callbacks are expected to not cause substantial latency, |
203 | * so we limit their concurrency to 1 by setting up the maximum number | 203 | * so we limit their concurrency to 1 by setting up the maximum number |
204 | * of slots for the backchannel. | 204 | * of slots for the backchannel. |
205 | */ | 205 | */ |
206 | #define NFS41_BC_MIN_CALLBACKS 1 | 206 | #define NFS41_BC_MIN_CALLBACKS 1 |
207 | #define NFS41_BC_MAX_CALLBACKS 1 | 207 | #define NFS41_BC_MAX_CALLBACKS 1 |
208 | 208 | ||
209 | extern unsigned int nfs_callback_set_tcpport; | 209 | extern unsigned int nfs_callback_set_tcpport; |
210 | extern unsigned short nfs_callback_tcpport; | 210 | extern unsigned short nfs_callback_tcpport; |
211 | extern unsigned short nfs_callback_tcpport6; | 211 | extern unsigned short nfs_callback_tcpport6; |
212 | 212 | ||
213 | #endif /* __LINUX_FS_NFS_CALLBACK_H */ | 213 | #endif /* __LINUX_FS_NFS_CALLBACK_H */ |
214 | 214 |
fs/nfs/callback_xdr.c
1 | /* | 1 | /* |
2 | * linux/fs/nfs/callback_xdr.c | 2 | * linux/fs/nfs/callback_xdr.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Trond Myklebust | 4 | * Copyright (C) 2004 Trond Myklebust |
5 | * | 5 | * |
6 | * NFSv4 callback encode/decode procedures | 6 | * NFSv4 callback encode/decode procedures |
7 | */ | 7 | */ |
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/sunrpc/svc.h> | 9 | #include <linux/sunrpc/svc.h> |
10 | #include <linux/nfs4.h> | 10 | #include <linux/nfs4.h> |
11 | #include <linux/nfs_fs.h> | 11 | #include <linux/nfs_fs.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/sunrpc/bc_xprt.h> | 13 | #include <linux/sunrpc/bc_xprt.h> |
14 | #include "nfs4_fs.h" | 14 | #include "nfs4_fs.h" |
15 | #include "callback.h" | 15 | #include "callback.h" |
16 | #include "internal.h" | 16 | #include "internal.h" |
17 | 17 | ||
18 | #define CB_OP_TAGLEN_MAXSZ (512) | 18 | #define CB_OP_TAGLEN_MAXSZ (512) |
19 | #define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ) | 19 | #define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ) |
20 | #define CB_OP_GETATTR_BITMAP_MAXSZ (4) | 20 | #define CB_OP_GETATTR_BITMAP_MAXSZ (4) |
21 | #define CB_OP_GETATTR_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ | 21 | #define CB_OP_GETATTR_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ |
22 | CB_OP_GETATTR_BITMAP_MAXSZ + \ | 22 | CB_OP_GETATTR_BITMAP_MAXSZ + \ |
23 | 2 + 2 + 3 + 3) | 23 | 2 + 2 + 3 + 3) |
24 | #define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) | 24 | #define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) |
25 | 25 | ||
26 | #if defined(CONFIG_NFS_V4_1) | 26 | #if defined(CONFIG_NFS_V4_1) |
27 | #define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) | 27 | #define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) |
28 | #define CB_OP_DEVICENOTIFY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) | 28 | #define CB_OP_DEVICENOTIFY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) |
29 | #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ | 29 | #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ |
30 | 4 + 1 + 3) | 30 | 4 + 1 + 3) |
31 | #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) | 31 | #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) |
32 | #define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) | 32 | #define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) |
33 | #endif /* CONFIG_NFS_V4_1 */ | 33 | #endif /* CONFIG_NFS_V4_1 */ |
34 | 34 | ||
35 | #define NFSDBG_FACILITY NFSDBG_CALLBACK | 35 | #define NFSDBG_FACILITY NFSDBG_CALLBACK |
36 | 36 | ||
37 | /* Internal error code */ | 37 | /* Internal error code */ |
38 | #define NFS4ERR_RESOURCE_HDR 11050 | 38 | #define NFS4ERR_RESOURCE_HDR 11050 |
39 | 39 | ||
40 | typedef __be32 (*callback_process_op_t)(void *, void *, | 40 | typedef __be32 (*callback_process_op_t)(void *, void *, |
41 | struct cb_process_state *); | 41 | struct cb_process_state *); |
42 | typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); | 42 | typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); |
43 | typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *); | 43 | typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *); |
44 | 44 | ||
45 | 45 | ||
/*
 * Dispatch-table entry for one NFSv4 callback operation: how to decode
 * its arguments, process it, and encode its results, plus the
 * worst-case size of the encoded reply (used for reply-space checks).
 */
struct callback_op {
	callback_process_op_t process_op;	/* perform the operation */
	callback_decode_arg_t decode_args;	/* decode XDR arguments */
	callback_encode_res_t encode_res;	/* encode XDR results */
	long res_maxsize;			/* max encoded result size */
};

/* Table is defined at the bottom of this file. */
static struct callback_op callback_ops[];
54 | 54 | ||
/* CB_NULL procedure: a ping from the server; always succeeds. */
static __be32 nfs4_callback_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
	return htonl(NFS4_OK);
}
59 | 59 | ||
/* Decode a void argument: only verify the request size is consistent. */
static int nfs4_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_argsize_check(rqstp, p);
}
64 | 64 | ||
/* Encode a void result: only verify the reply size is consistent. */
static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_ressize_check(rqstp, p);
}
69 | 69 | ||
70 | static __be32 *read_buf(struct xdr_stream *xdr, int nbytes) | 70 | static __be32 *read_buf(struct xdr_stream *xdr, int nbytes) |
71 | { | 71 | { |
72 | __be32 *p; | 72 | __be32 *p; |
73 | 73 | ||
74 | p = xdr_inline_decode(xdr, nbytes); | 74 | p = xdr_inline_decode(xdr, nbytes); |
75 | if (unlikely(p == NULL)) | 75 | if (unlikely(p == NULL)) |
76 | printk(KERN_WARNING "NFSv4 callback reply buffer overflowed!\n"); | 76 | printk(KERN_WARNING "NFSv4 callback reply buffer overflowed!\n"); |
77 | return p; | 77 | return p; |
78 | } | 78 | } |
79 | 79 | ||
80 | static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str) | 80 | static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str) |
81 | { | 81 | { |
82 | __be32 *p; | 82 | __be32 *p; |
83 | 83 | ||
84 | p = read_buf(xdr, 4); | 84 | p = read_buf(xdr, 4); |
85 | if (unlikely(p == NULL)) | 85 | if (unlikely(p == NULL)) |
86 | return htonl(NFS4ERR_RESOURCE); | 86 | return htonl(NFS4ERR_RESOURCE); |
87 | *len = ntohl(*p); | 87 | *len = ntohl(*p); |
88 | 88 | ||
89 | if (*len != 0) { | 89 | if (*len != 0) { |
90 | p = read_buf(xdr, *len); | 90 | p = read_buf(xdr, *len); |
91 | if (unlikely(p == NULL)) | 91 | if (unlikely(p == NULL)) |
92 | return htonl(NFS4ERR_RESOURCE); | 92 | return htonl(NFS4ERR_RESOURCE); |
93 | *str = (const char *)p; | 93 | *str = (const char *)p; |
94 | } else | 94 | } else |
95 | *str = NULL; | 95 | *str = NULL; |
96 | 96 | ||
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | static __be32 decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh) | 100 | static __be32 decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh) |
101 | { | 101 | { |
102 | __be32 *p; | 102 | __be32 *p; |
103 | 103 | ||
104 | p = read_buf(xdr, 4); | 104 | p = read_buf(xdr, 4); |
105 | if (unlikely(p == NULL)) | 105 | if (unlikely(p == NULL)) |
106 | return htonl(NFS4ERR_RESOURCE); | 106 | return htonl(NFS4ERR_RESOURCE); |
107 | fh->size = ntohl(*p); | 107 | fh->size = ntohl(*p); |
108 | if (fh->size > NFS4_FHSIZE) | 108 | if (fh->size > NFS4_FHSIZE) |
109 | return htonl(NFS4ERR_BADHANDLE); | 109 | return htonl(NFS4ERR_BADHANDLE); |
110 | p = read_buf(xdr, fh->size); | 110 | p = read_buf(xdr, fh->size); |
111 | if (unlikely(p == NULL)) | 111 | if (unlikely(p == NULL)) |
112 | return htonl(NFS4ERR_RESOURCE); | 112 | return htonl(NFS4ERR_RESOURCE); |
113 | memcpy(&fh->data[0], p, fh->size); | 113 | memcpy(&fh->data[0], p, fh->size); |
114 | memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size); | 114 | memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size); |
115 | return 0; | 115 | return 0; |
116 | } | 116 | } |
117 | 117 | ||
118 | static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap) | 118 | static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap) |
119 | { | 119 | { |
120 | __be32 *p; | 120 | __be32 *p; |
121 | unsigned int attrlen; | 121 | unsigned int attrlen; |
122 | 122 | ||
123 | p = read_buf(xdr, 4); | 123 | p = read_buf(xdr, 4); |
124 | if (unlikely(p == NULL)) | 124 | if (unlikely(p == NULL)) |
125 | return htonl(NFS4ERR_RESOURCE); | 125 | return htonl(NFS4ERR_RESOURCE); |
126 | attrlen = ntohl(*p); | 126 | attrlen = ntohl(*p); |
127 | p = read_buf(xdr, attrlen << 2); | 127 | p = read_buf(xdr, attrlen << 2); |
128 | if (unlikely(p == NULL)) | 128 | if (unlikely(p == NULL)) |
129 | return htonl(NFS4ERR_RESOURCE); | 129 | return htonl(NFS4ERR_RESOURCE); |
130 | if (likely(attrlen > 0)) | 130 | if (likely(attrlen > 0)) |
131 | bitmap[0] = ntohl(*p++); | 131 | bitmap[0] = ntohl(*p++); |
132 | if (attrlen > 1) | 132 | if (attrlen > 1) |
133 | bitmap[1] = ntohl(*p); | 133 | bitmap[1] = ntohl(*p); |
134 | return 0; | 134 | return 0; |
135 | } | 135 | } |
136 | 136 | ||
137 | static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) | 137 | static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) |
138 | { | 138 | { |
139 | __be32 *p; | 139 | __be32 *p; |
140 | 140 | ||
141 | p = read_buf(xdr, 16); | 141 | p = read_buf(xdr, 16); |
142 | if (unlikely(p == NULL)) | 142 | if (unlikely(p == NULL)) |
143 | return htonl(NFS4ERR_RESOURCE); | 143 | return htonl(NFS4ERR_RESOURCE); |
144 | memcpy(stateid->data, p, 16); | 144 | memcpy(stateid->data, p, 16); |
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
147 | 147 | ||
148 | static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr) | 148 | static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr) |
149 | { | 149 | { |
150 | __be32 *p; | 150 | __be32 *p; |
151 | __be32 status; | 151 | __be32 status; |
152 | 152 | ||
153 | status = decode_string(xdr, &hdr->taglen, &hdr->tag); | 153 | status = decode_string(xdr, &hdr->taglen, &hdr->tag); |
154 | if (unlikely(status != 0)) | 154 | if (unlikely(status != 0)) |
155 | return status; | 155 | return status; |
156 | /* We do not like overly long tags! */ | 156 | /* We do not like overly long tags! */ |
157 | if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) { | 157 | if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) { |
158 | printk("NFSv4 CALLBACK %s: client sent tag of length %u\n", | 158 | printk("NFSv4 CALLBACK %s: client sent tag of length %u\n", |
159 | __func__, hdr->taglen); | 159 | __func__, hdr->taglen); |
160 | return htonl(NFS4ERR_RESOURCE); | 160 | return htonl(NFS4ERR_RESOURCE); |
161 | } | 161 | } |
162 | p = read_buf(xdr, 12); | 162 | p = read_buf(xdr, 12); |
163 | if (unlikely(p == NULL)) | 163 | if (unlikely(p == NULL)) |
164 | return htonl(NFS4ERR_RESOURCE); | 164 | return htonl(NFS4ERR_RESOURCE); |
165 | hdr->minorversion = ntohl(*p++); | 165 | hdr->minorversion = ntohl(*p++); |
166 | /* Check minor version is zero or one. */ | 166 | /* Check minor version is zero or one. */ |
167 | if (hdr->minorversion <= 1) { | 167 | if (hdr->minorversion <= 1) { |
168 | hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */ | 168 | hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */ |
169 | } else { | 169 | } else { |
170 | printk(KERN_WARNING "%s: NFSv4 server callback with " | 170 | printk(KERN_WARNING "%s: NFSv4 server callback with " |
171 | "illegal minor version %u!\n", | 171 | "illegal minor version %u!\n", |
172 | __func__, hdr->minorversion); | 172 | __func__, hdr->minorversion); |
173 | return htonl(NFS4ERR_MINOR_VERS_MISMATCH); | 173 | return htonl(NFS4ERR_MINOR_VERS_MISMATCH); |
174 | } | 174 | } |
175 | hdr->nops = ntohl(*p); | 175 | hdr->nops = ntohl(*p); |
176 | dprintk("%s: minorversion %d nops %d\n", __func__, | 176 | dprintk("%s: minorversion %d nops %d\n", __func__, |
177 | hdr->minorversion, hdr->nops); | 177 | hdr->minorversion, hdr->nops); |
178 | return 0; | 178 | return 0; |
179 | } | 179 | } |
180 | 180 | ||
181 | static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op) | 181 | static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op) |
182 | { | 182 | { |
183 | __be32 *p; | 183 | __be32 *p; |
184 | p = read_buf(xdr, 4); | 184 | p = read_buf(xdr, 4); |
185 | if (unlikely(p == NULL)) | 185 | if (unlikely(p == NULL)) |
186 | return htonl(NFS4ERR_RESOURCE_HDR); | 186 | return htonl(NFS4ERR_RESOURCE_HDR); |
187 | *op = ntohl(*p); | 187 | *op = ntohl(*p); |
188 | return 0; | 188 | return 0; |
189 | } | 189 | } |
190 | 190 | ||
191 | static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args) | 191 | static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args) |
192 | { | 192 | { |
193 | __be32 status; | 193 | __be32 status; |
194 | 194 | ||
195 | status = decode_fh(xdr, &args->fh); | 195 | status = decode_fh(xdr, &args->fh); |
196 | if (unlikely(status != 0)) | 196 | if (unlikely(status != 0)) |
197 | goto out; | 197 | goto out; |
198 | args->addr = svc_addr(rqstp); | 198 | args->addr = svc_addr(rqstp); |
199 | status = decode_bitmap(xdr, args->bitmap); | 199 | status = decode_bitmap(xdr, args->bitmap); |
200 | out: | 200 | out: |
201 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | 201 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); |
202 | return status; | 202 | return status; |
203 | } | 203 | } |
204 | 204 | ||
205 | static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args) | 205 | static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args) |
206 | { | 206 | { |
207 | __be32 *p; | 207 | __be32 *p; |
208 | __be32 status; | 208 | __be32 status; |
209 | 209 | ||
210 | args->addr = svc_addr(rqstp); | 210 | args->addr = svc_addr(rqstp); |
211 | status = decode_stateid(xdr, &args->stateid); | 211 | status = decode_stateid(xdr, &args->stateid); |
212 | if (unlikely(status != 0)) | 212 | if (unlikely(status != 0)) |
213 | goto out; | 213 | goto out; |
214 | p = read_buf(xdr, 4); | 214 | p = read_buf(xdr, 4); |
215 | if (unlikely(p == NULL)) { | 215 | if (unlikely(p == NULL)) { |
216 | status = htonl(NFS4ERR_RESOURCE); | 216 | status = htonl(NFS4ERR_RESOURCE); |
217 | goto out; | 217 | goto out; |
218 | } | 218 | } |
219 | args->truncate = ntohl(*p); | 219 | args->truncate = ntohl(*p); |
220 | status = decode_fh(xdr, &args->fh); | 220 | status = decode_fh(xdr, &args->fh); |
221 | out: | 221 | out: |
222 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | 222 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); |
223 | return status; | 223 | return status; |
224 | } | 224 | } |
225 | 225 | ||
226 | #if defined(CONFIG_NFS_V4_1) | 226 | #if defined(CONFIG_NFS_V4_1) |
227 | 227 | ||
/*
 * Decode CB_LAYOUTRECALL arguments. The common header (layout type,
 * iomode, layoutchanged flag, recall type) is followed by a
 * recall-type-specific body: a filehandle + range + stateid for
 * RETURN_FILE, an fsid for RETURN_FSID, and nothing for RETURN_ALL.
 * Any other recall type is rejected as NFS4ERR_BADXDR.
 */
static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
				       struct xdr_stream *xdr,
				       struct cb_layoutrecallargs *args)
{
	__be32 *p;
	__be32 status = 0;
	uint32_t iomode;

	args->cbl_addr = svc_addr(rqstp);
	/* Common header: layout type, iomode, layoutchanged, recall type */
	p = read_buf(xdr, 4 * sizeof(uint32_t));
	if (unlikely(p == NULL)) {
		status = htonl(NFS4ERR_BADXDR);
		goto out;
	}

	args->cbl_layout_type = ntohl(*p++);
	/* Despite the spec's xdr, iomode really belongs in the FILE switch,
	 * as it is unusable and ignored with the other types.
	 */
	iomode = ntohl(*p++);
	args->cbl_layoutchanged = ntohl(*p++);
	args->cbl_recall_type = ntohl(*p++);

	if (args->cbl_recall_type == RETURN_FILE) {
		/* Per-file recall: fh, offset/length range, stateid */
		args->cbl_range.iomode = iomode;
		status = decode_fh(xdr, &args->cbl_fh);
		if (unlikely(status != 0))
			goto out;

		p = read_buf(xdr, 2 * sizeof(uint64_t));
		if (unlikely(p == NULL)) {
			status = htonl(NFS4ERR_BADXDR);
			goto out;
		}
		p = xdr_decode_hyper(p, &args->cbl_range.offset);
		p = xdr_decode_hyper(p, &args->cbl_range.length);
		status = decode_stateid(xdr, &args->cbl_stateid);
		if (unlikely(status != 0))
			goto out;
	} else if (args->cbl_recall_type == RETURN_FSID) {
		/* Per-filesystem recall: just the fsid */
		p = read_buf(xdr, 2 * sizeof(uint64_t));
		if (unlikely(p == NULL)) {
			status = htonl(NFS4ERR_BADXDR);
			goto out;
		}
		p = xdr_decode_hyper(p, &args->cbl_fsid.major);
		p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
	} else if (args->cbl_recall_type != RETURN_ALL) {
		/* RETURN_ALL carries no body; anything else is invalid */
		status = htonl(NFS4ERR_BADXDR);
		goto out;
	}
	dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n",
		__func__,
		args->cbl_layout_type, iomode,
		args->cbl_layoutchanged, args->cbl_recall_type);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
287 | 287 | ||
288 | static | 288 | static |
289 | __be32 decode_devicenotify_args(struct svc_rqst *rqstp, | 289 | __be32 decode_devicenotify_args(struct svc_rqst *rqstp, |
290 | struct xdr_stream *xdr, | 290 | struct xdr_stream *xdr, |
291 | struct cb_devicenotifyargs *args) | 291 | struct cb_devicenotifyargs *args) |
292 | { | 292 | { |
293 | __be32 *p; | 293 | __be32 *p; |
294 | __be32 status = 0; | 294 | __be32 status = 0; |
295 | u32 tmp; | 295 | u32 tmp; |
296 | int n, i; | 296 | int n, i; |
297 | args->ndevs = 0; | 297 | args->ndevs = 0; |
298 | 298 | ||
299 | /* Num of device notifications */ | 299 | /* Num of device notifications */ |
300 | p = read_buf(xdr, sizeof(uint32_t)); | 300 | p = read_buf(xdr, sizeof(uint32_t)); |
301 | if (unlikely(p == NULL)) { | 301 | if (unlikely(p == NULL)) { |
302 | status = htonl(NFS4ERR_BADXDR); | 302 | status = htonl(NFS4ERR_BADXDR); |
303 | goto out; | 303 | goto out; |
304 | } | 304 | } |
305 | n = ntohl(*p++); | 305 | n = ntohl(*p++); |
306 | if (n <= 0) | 306 | if (n <= 0) |
307 | goto out; | 307 | goto out; |
308 | if (n > ULONG_MAX / sizeof(*args->devs)) { | ||
309 | status = htonl(NFS4ERR_BADXDR); | ||
310 | goto out; | ||
311 | } | ||
308 | 312 | ||
309 | args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL); | 313 | args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL); |
310 | if (!args->devs) { | 314 | if (!args->devs) { |
311 | status = htonl(NFS4ERR_DELAY); | 315 | status = htonl(NFS4ERR_DELAY); |
312 | goto out; | 316 | goto out; |
313 | } | 317 | } |
314 | 318 | ||
315 | /* Decode each dev notification */ | 319 | /* Decode each dev notification */ |
316 | for (i = 0; i < n; i++) { | 320 | for (i = 0; i < n; i++) { |
317 | struct cb_devicenotifyitem *dev = &args->devs[i]; | 321 | struct cb_devicenotifyitem *dev = &args->devs[i]; |
318 | 322 | ||
319 | p = read_buf(xdr, (4 * sizeof(uint32_t)) + NFS4_DEVICEID4_SIZE); | 323 | p = read_buf(xdr, (4 * sizeof(uint32_t)) + NFS4_DEVICEID4_SIZE); |
320 | if (unlikely(p == NULL)) { | 324 | if (unlikely(p == NULL)) { |
321 | status = htonl(NFS4ERR_BADXDR); | 325 | status = htonl(NFS4ERR_BADXDR); |
322 | goto err; | 326 | goto err; |
323 | } | 327 | } |
324 | 328 | ||
325 | tmp = ntohl(*p++); /* bitmap size */ | 329 | tmp = ntohl(*p++); /* bitmap size */ |
326 | if (tmp != 1) { | 330 | if (tmp != 1) { |
327 | status = htonl(NFS4ERR_INVAL); | 331 | status = htonl(NFS4ERR_INVAL); |
328 | goto err; | 332 | goto err; |
329 | } | 333 | } |
330 | dev->cbd_notify_type = ntohl(*p++); | 334 | dev->cbd_notify_type = ntohl(*p++); |
331 | if (dev->cbd_notify_type != NOTIFY_DEVICEID4_CHANGE && | 335 | if (dev->cbd_notify_type != NOTIFY_DEVICEID4_CHANGE && |
332 | dev->cbd_notify_type != NOTIFY_DEVICEID4_DELETE) { | 336 | dev->cbd_notify_type != NOTIFY_DEVICEID4_DELETE) { |
333 | status = htonl(NFS4ERR_INVAL); | 337 | status = htonl(NFS4ERR_INVAL); |
334 | goto err; | 338 | goto err; |
335 | } | 339 | } |
336 | 340 | ||
337 | tmp = ntohl(*p++); /* opaque size */ | 341 | tmp = ntohl(*p++); /* opaque size */ |
338 | if (((dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) && | 342 | if (((dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) && |
339 | (tmp != NFS4_DEVICEID4_SIZE + 8)) || | 343 | (tmp != NFS4_DEVICEID4_SIZE + 8)) || |
340 | ((dev->cbd_notify_type == NOTIFY_DEVICEID4_DELETE) && | 344 | ((dev->cbd_notify_type == NOTIFY_DEVICEID4_DELETE) && |
341 | (tmp != NFS4_DEVICEID4_SIZE + 4))) { | 345 | (tmp != NFS4_DEVICEID4_SIZE + 4))) { |
342 | status = htonl(NFS4ERR_INVAL); | 346 | status = htonl(NFS4ERR_INVAL); |
343 | goto err; | 347 | goto err; |
344 | } | 348 | } |
345 | dev->cbd_layout_type = ntohl(*p++); | 349 | dev->cbd_layout_type = ntohl(*p++); |
346 | memcpy(dev->cbd_dev_id.data, p, NFS4_DEVICEID4_SIZE); | 350 | memcpy(dev->cbd_dev_id.data, p, NFS4_DEVICEID4_SIZE); |
347 | p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); | 351 | p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); |
348 | 352 | ||
349 | if (dev->cbd_layout_type == NOTIFY_DEVICEID4_CHANGE) { | 353 | if (dev->cbd_layout_type == NOTIFY_DEVICEID4_CHANGE) { |
350 | p = read_buf(xdr, sizeof(uint32_t)); | 354 | p = read_buf(xdr, sizeof(uint32_t)); |
351 | if (unlikely(p == NULL)) { | 355 | if (unlikely(p == NULL)) { |
352 | status = htonl(NFS4ERR_BADXDR); | 356 | status = htonl(NFS4ERR_BADXDR); |
353 | goto err; | 357 | goto err; |
354 | } | 358 | } |
355 | dev->cbd_immediate = ntohl(*p++); | 359 | dev->cbd_immediate = ntohl(*p++); |
356 | } else { | 360 | } else { |
357 | dev->cbd_immediate = 0; | 361 | dev->cbd_immediate = 0; |
358 | } | 362 | } |
359 | 363 | ||
360 | args->ndevs++; | 364 | args->ndevs++; |
361 | 365 | ||
362 | dprintk("%s: type %d layout 0x%x immediate %d\n", | 366 | dprintk("%s: type %d layout 0x%x immediate %d\n", |
363 | __func__, dev->cbd_notify_type, dev->cbd_layout_type, | 367 | __func__, dev->cbd_notify_type, dev->cbd_layout_type, |
364 | dev->cbd_immediate); | 368 | dev->cbd_immediate); |
365 | } | 369 | } |
366 | out: | 370 | out: |
367 | dprintk("%s: status %d ndevs %d\n", | 371 | dprintk("%s: status %d ndevs %d\n", |
368 | __func__, ntohl(status), args->ndevs); | 372 | __func__, ntohl(status), args->ndevs); |
369 | return status; | 373 | return status; |
370 | err: | 374 | err: |
371 | kfree(args->devs); | 375 | kfree(args->devs); |
372 | goto out; | 376 | goto out; |
373 | } | 377 | } |
374 | 378 | ||
375 | static __be32 decode_sessionid(struct xdr_stream *xdr, | 379 | static __be32 decode_sessionid(struct xdr_stream *xdr, |
376 | struct nfs4_sessionid *sid) | 380 | struct nfs4_sessionid *sid) |
377 | { | 381 | { |
378 | __be32 *p; | 382 | __be32 *p; |
379 | int len = NFS4_MAX_SESSIONID_LEN; | 383 | int len = NFS4_MAX_SESSIONID_LEN; |
380 | 384 | ||
381 | p = read_buf(xdr, len); | 385 | p = read_buf(xdr, len); |
382 | if (unlikely(p == NULL)) | 386 | if (unlikely(p == NULL)) |
383 | return htonl(NFS4ERR_RESOURCE); | 387 | return htonl(NFS4ERR_RESOURCE); |
384 | 388 | ||
385 | memcpy(sid->data, p, len); | 389 | memcpy(sid->data, p, len); |
386 | return 0; | 390 | return 0; |
387 | } | 391 | } |
388 | 392 | ||
389 | static __be32 decode_rc_list(struct xdr_stream *xdr, | 393 | static __be32 decode_rc_list(struct xdr_stream *xdr, |
390 | struct referring_call_list *rc_list) | 394 | struct referring_call_list *rc_list) |
391 | { | 395 | { |
392 | __be32 *p; | 396 | __be32 *p; |
393 | int i; | 397 | int i; |
394 | __be32 status; | 398 | __be32 status; |
395 | 399 | ||
396 | status = decode_sessionid(xdr, &rc_list->rcl_sessionid); | 400 | status = decode_sessionid(xdr, &rc_list->rcl_sessionid); |
397 | if (status) | 401 | if (status) |
398 | goto out; | 402 | goto out; |
399 | 403 | ||
400 | status = htonl(NFS4ERR_RESOURCE); | 404 | status = htonl(NFS4ERR_RESOURCE); |
401 | p = read_buf(xdr, sizeof(uint32_t)); | 405 | p = read_buf(xdr, sizeof(uint32_t)); |
402 | if (unlikely(p == NULL)) | 406 | if (unlikely(p == NULL)) |
403 | goto out; | 407 | goto out; |
404 | 408 | ||
405 | rc_list->rcl_nrefcalls = ntohl(*p++); | 409 | rc_list->rcl_nrefcalls = ntohl(*p++); |
406 | if (rc_list->rcl_nrefcalls) { | 410 | if (rc_list->rcl_nrefcalls) { |
407 | p = read_buf(xdr, | 411 | p = read_buf(xdr, |
408 | rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t)); | 412 | rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t)); |
409 | if (unlikely(p == NULL)) | 413 | if (unlikely(p == NULL)) |
410 | goto out; | 414 | goto out; |
411 | rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls * | 415 | rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls * |
412 | sizeof(*rc_list->rcl_refcalls), | 416 | sizeof(*rc_list->rcl_refcalls), |
413 | GFP_KERNEL); | 417 | GFP_KERNEL); |
414 | if (unlikely(rc_list->rcl_refcalls == NULL)) | 418 | if (unlikely(rc_list->rcl_refcalls == NULL)) |
415 | goto out; | 419 | goto out; |
416 | for (i = 0; i < rc_list->rcl_nrefcalls; i++) { | 420 | for (i = 0; i < rc_list->rcl_nrefcalls; i++) { |
417 | rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++); | 421 | rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++); |
418 | rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++); | 422 | rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++); |
419 | } | 423 | } |
420 | } | 424 | } |
421 | status = 0; | 425 | status = 0; |
422 | 426 | ||
423 | out: | 427 | out: |
424 | return status; | 428 | return status; |
425 | } | 429 | } |
426 | 430 | ||
427 | static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, | 431 | static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, |
428 | struct xdr_stream *xdr, | 432 | struct xdr_stream *xdr, |
429 | struct cb_sequenceargs *args) | 433 | struct cb_sequenceargs *args) |
430 | { | 434 | { |
431 | __be32 *p; | 435 | __be32 *p; |
432 | int i; | 436 | int i; |
433 | __be32 status; | 437 | __be32 status; |
434 | 438 | ||
435 | status = decode_sessionid(xdr, &args->csa_sessionid); | 439 | status = decode_sessionid(xdr, &args->csa_sessionid); |
436 | if (status) | 440 | if (status) |
437 | goto out; | 441 | goto out; |
438 | 442 | ||
439 | status = htonl(NFS4ERR_RESOURCE); | 443 | status = htonl(NFS4ERR_RESOURCE); |
440 | p = read_buf(xdr, 5 * sizeof(uint32_t)); | 444 | p = read_buf(xdr, 5 * sizeof(uint32_t)); |
441 | if (unlikely(p == NULL)) | 445 | if (unlikely(p == NULL)) |
442 | goto out; | 446 | goto out; |
443 | 447 | ||
444 | args->csa_addr = svc_addr(rqstp); | 448 | args->csa_addr = svc_addr(rqstp); |
445 | args->csa_sequenceid = ntohl(*p++); | 449 | args->csa_sequenceid = ntohl(*p++); |
446 | args->csa_slotid = ntohl(*p++); | 450 | args->csa_slotid = ntohl(*p++); |
447 | args->csa_highestslotid = ntohl(*p++); | 451 | args->csa_highestslotid = ntohl(*p++); |
448 | args->csa_cachethis = ntohl(*p++); | 452 | args->csa_cachethis = ntohl(*p++); |
449 | args->csa_nrclists = ntohl(*p++); | 453 | args->csa_nrclists = ntohl(*p++); |
450 | args->csa_rclists = NULL; | 454 | args->csa_rclists = NULL; |
451 | if (args->csa_nrclists) { | 455 | if (args->csa_nrclists) { |
452 | args->csa_rclists = kmalloc(args->csa_nrclists * | 456 | args->csa_rclists = kmalloc(args->csa_nrclists * |
453 | sizeof(*args->csa_rclists), | 457 | sizeof(*args->csa_rclists), |
454 | GFP_KERNEL); | 458 | GFP_KERNEL); |
455 | if (unlikely(args->csa_rclists == NULL)) | 459 | if (unlikely(args->csa_rclists == NULL)) |
456 | goto out; | 460 | goto out; |
457 | 461 | ||
458 | for (i = 0; i < args->csa_nrclists; i++) { | 462 | for (i = 0; i < args->csa_nrclists; i++) { |
459 | status = decode_rc_list(xdr, &args->csa_rclists[i]); | 463 | status = decode_rc_list(xdr, &args->csa_rclists[i]); |
460 | if (status) | 464 | if (status) |
461 | goto out_free; | 465 | goto out_free; |
462 | } | 466 | } |
463 | } | 467 | } |
464 | status = 0; | 468 | status = 0; |
465 | 469 | ||
466 | dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u " | 470 | dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u " |
467 | "highestslotid %u cachethis %d nrclists %u\n", | 471 | "highestslotid %u cachethis %d nrclists %u\n", |
468 | __func__, | 472 | __func__, |
469 | ((u32 *)&args->csa_sessionid)[0], | 473 | ((u32 *)&args->csa_sessionid)[0], |
470 | ((u32 *)&args->csa_sessionid)[1], | 474 | ((u32 *)&args->csa_sessionid)[1], |
471 | ((u32 *)&args->csa_sessionid)[2], | 475 | ((u32 *)&args->csa_sessionid)[2], |
472 | ((u32 *)&args->csa_sessionid)[3], | 476 | ((u32 *)&args->csa_sessionid)[3], |
473 | args->csa_sequenceid, args->csa_slotid, | 477 | args->csa_sequenceid, args->csa_slotid, |
474 | args->csa_highestslotid, args->csa_cachethis, | 478 | args->csa_highestslotid, args->csa_cachethis, |
475 | args->csa_nrclists); | 479 | args->csa_nrclists); |
476 | out: | 480 | out: |
477 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | 481 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); |
478 | return status; | 482 | return status; |
479 | 483 | ||
480 | out_free: | 484 | out_free: |
481 | for (i = 0; i < args->csa_nrclists; i++) | 485 | for (i = 0; i < args->csa_nrclists; i++) |
482 | kfree(args->csa_rclists[i].rcl_refcalls); | 486 | kfree(args->csa_rclists[i].rcl_refcalls); |
483 | kfree(args->csa_rclists); | 487 | kfree(args->csa_rclists); |
484 | goto out; | 488 | goto out; |
485 | } | 489 | } |
486 | 490 | ||
487 | static __be32 decode_recallany_args(struct svc_rqst *rqstp, | 491 | static __be32 decode_recallany_args(struct svc_rqst *rqstp, |
488 | struct xdr_stream *xdr, | 492 | struct xdr_stream *xdr, |
489 | struct cb_recallanyargs *args) | 493 | struct cb_recallanyargs *args) |
490 | { | 494 | { |
491 | uint32_t bitmap[2]; | 495 | uint32_t bitmap[2]; |
492 | __be32 *p, status; | 496 | __be32 *p, status; |
493 | 497 | ||
494 | args->craa_addr = svc_addr(rqstp); | 498 | args->craa_addr = svc_addr(rqstp); |
495 | p = read_buf(xdr, 4); | 499 | p = read_buf(xdr, 4); |
496 | if (unlikely(p == NULL)) | 500 | if (unlikely(p == NULL)) |
497 | return htonl(NFS4ERR_BADXDR); | 501 | return htonl(NFS4ERR_BADXDR); |
498 | args->craa_objs_to_keep = ntohl(*p++); | 502 | args->craa_objs_to_keep = ntohl(*p++); |
499 | status = decode_bitmap(xdr, bitmap); | 503 | status = decode_bitmap(xdr, bitmap); |
500 | if (unlikely(status)) | 504 | if (unlikely(status)) |
501 | return status; | 505 | return status; |
502 | args->craa_type_mask = bitmap[0]; | 506 | args->craa_type_mask = bitmap[0]; |
503 | 507 | ||
504 | return 0; | 508 | return 0; |
505 | } | 509 | } |
506 | 510 | ||
507 | static __be32 decode_recallslot_args(struct svc_rqst *rqstp, | 511 | static __be32 decode_recallslot_args(struct svc_rqst *rqstp, |
508 | struct xdr_stream *xdr, | 512 | struct xdr_stream *xdr, |
509 | struct cb_recallslotargs *args) | 513 | struct cb_recallslotargs *args) |
510 | { | 514 | { |
511 | __be32 *p; | 515 | __be32 *p; |
512 | 516 | ||
513 | args->crsa_addr = svc_addr(rqstp); | 517 | args->crsa_addr = svc_addr(rqstp); |
514 | p = read_buf(xdr, 4); | 518 | p = read_buf(xdr, 4); |
515 | if (unlikely(p == NULL)) | 519 | if (unlikely(p == NULL)) |
516 | return htonl(NFS4ERR_BADXDR); | 520 | return htonl(NFS4ERR_BADXDR); |
517 | args->crsa_target_max_slots = ntohl(*p++); | 521 | args->crsa_target_max_slots = ntohl(*p++); |
518 | return 0; | 522 | return 0; |
519 | } | 523 | } |
520 | 524 | ||
521 | #endif /* CONFIG_NFS_V4_1 */ | 525 | #endif /* CONFIG_NFS_V4_1 */ |
522 | 526 | ||
523 | static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) | 527 | static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) |
524 | { | 528 | { |
525 | __be32 *p; | 529 | __be32 *p; |
526 | 530 | ||
527 | p = xdr_reserve_space(xdr, 4 + len); | 531 | p = xdr_reserve_space(xdr, 4 + len); |
528 | if (unlikely(p == NULL)) | 532 | if (unlikely(p == NULL)) |
529 | return htonl(NFS4ERR_RESOURCE); | 533 | return htonl(NFS4ERR_RESOURCE); |
530 | xdr_encode_opaque(p, str, len); | 534 | xdr_encode_opaque(p, str, len); |
531 | return 0; | 535 | return 0; |
532 | } | 536 | } |
533 | 537 | ||
534 | #define CB_SUPPORTED_ATTR0 (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) | 538 | #define CB_SUPPORTED_ATTR0 (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) |
535 | #define CB_SUPPORTED_ATTR1 (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) | 539 | #define CB_SUPPORTED_ATTR1 (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) |
536 | static __be32 encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, __be32 **savep) | 540 | static __be32 encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, __be32 **savep) |
537 | { | 541 | { |
538 | __be32 bm[2]; | 542 | __be32 bm[2]; |
539 | __be32 *p; | 543 | __be32 *p; |
540 | 544 | ||
541 | bm[0] = htonl(bitmap[0] & CB_SUPPORTED_ATTR0); | 545 | bm[0] = htonl(bitmap[0] & CB_SUPPORTED_ATTR0); |
542 | bm[1] = htonl(bitmap[1] & CB_SUPPORTED_ATTR1); | 546 | bm[1] = htonl(bitmap[1] & CB_SUPPORTED_ATTR1); |
543 | if (bm[1] != 0) { | 547 | if (bm[1] != 0) { |
544 | p = xdr_reserve_space(xdr, 16); | 548 | p = xdr_reserve_space(xdr, 16); |
545 | if (unlikely(p == NULL)) | 549 | if (unlikely(p == NULL)) |
546 | return htonl(NFS4ERR_RESOURCE); | 550 | return htonl(NFS4ERR_RESOURCE); |
547 | *p++ = htonl(2); | 551 | *p++ = htonl(2); |
548 | *p++ = bm[0]; | 552 | *p++ = bm[0]; |
549 | *p++ = bm[1]; | 553 | *p++ = bm[1]; |
550 | } else if (bm[0] != 0) { | 554 | } else if (bm[0] != 0) { |
551 | p = xdr_reserve_space(xdr, 12); | 555 | p = xdr_reserve_space(xdr, 12); |
552 | if (unlikely(p == NULL)) | 556 | if (unlikely(p == NULL)) |
553 | return htonl(NFS4ERR_RESOURCE); | 557 | return htonl(NFS4ERR_RESOURCE); |
554 | *p++ = htonl(1); | 558 | *p++ = htonl(1); |
555 | *p++ = bm[0]; | 559 | *p++ = bm[0]; |
556 | } else { | 560 | } else { |
557 | p = xdr_reserve_space(xdr, 8); | 561 | p = xdr_reserve_space(xdr, 8); |
558 | if (unlikely(p == NULL)) | 562 | if (unlikely(p == NULL)) |
559 | return htonl(NFS4ERR_RESOURCE); | 563 | return htonl(NFS4ERR_RESOURCE); |
560 | *p++ = htonl(0); | 564 | *p++ = htonl(0); |
561 | } | 565 | } |
562 | *savep = p; | 566 | *savep = p; |
563 | return 0; | 567 | return 0; |
564 | } | 568 | } |
565 | 569 | ||
566 | static __be32 encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change) | 570 | static __be32 encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change) |
567 | { | 571 | { |
568 | __be32 *p; | 572 | __be32 *p; |
569 | 573 | ||
570 | if (!(bitmap[0] & FATTR4_WORD0_CHANGE)) | 574 | if (!(bitmap[0] & FATTR4_WORD0_CHANGE)) |
571 | return 0; | 575 | return 0; |
572 | p = xdr_reserve_space(xdr, 8); | 576 | p = xdr_reserve_space(xdr, 8); |
573 | if (unlikely(!p)) | 577 | if (unlikely(!p)) |
574 | return htonl(NFS4ERR_RESOURCE); | 578 | return htonl(NFS4ERR_RESOURCE); |
575 | p = xdr_encode_hyper(p, change); | 579 | p = xdr_encode_hyper(p, change); |
576 | return 0; | 580 | return 0; |
577 | } | 581 | } |
578 | 582 | ||
579 | static __be32 encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t size) | 583 | static __be32 encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t size) |
580 | { | 584 | { |
581 | __be32 *p; | 585 | __be32 *p; |
582 | 586 | ||
583 | if (!(bitmap[0] & FATTR4_WORD0_SIZE)) | 587 | if (!(bitmap[0] & FATTR4_WORD0_SIZE)) |
584 | return 0; | 588 | return 0; |
585 | p = xdr_reserve_space(xdr, 8); | 589 | p = xdr_reserve_space(xdr, 8); |
586 | if (unlikely(!p)) | 590 | if (unlikely(!p)) |
587 | return htonl(NFS4ERR_RESOURCE); | 591 | return htonl(NFS4ERR_RESOURCE); |
588 | p = xdr_encode_hyper(p, size); | 592 | p = xdr_encode_hyper(p, size); |
589 | return 0; | 593 | return 0; |
590 | } | 594 | } |
591 | 595 | ||
592 | static __be32 encode_attr_time(struct xdr_stream *xdr, const struct timespec *time) | 596 | static __be32 encode_attr_time(struct xdr_stream *xdr, const struct timespec *time) |
593 | { | 597 | { |
594 | __be32 *p; | 598 | __be32 *p; |
595 | 599 | ||
596 | p = xdr_reserve_space(xdr, 12); | 600 | p = xdr_reserve_space(xdr, 12); |
597 | if (unlikely(!p)) | 601 | if (unlikely(!p)) |
598 | return htonl(NFS4ERR_RESOURCE); | 602 | return htonl(NFS4ERR_RESOURCE); |
599 | p = xdr_encode_hyper(p, time->tv_sec); | 603 | p = xdr_encode_hyper(p, time->tv_sec); |
600 | *p = htonl(time->tv_nsec); | 604 | *p = htonl(time->tv_nsec); |
601 | return 0; | 605 | return 0; |
602 | } | 606 | } |
603 | 607 | ||
604 | static __be32 encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time) | 608 | static __be32 encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time) |
605 | { | 609 | { |
606 | if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) | 610 | if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) |
607 | return 0; | 611 | return 0; |
608 | return encode_attr_time(xdr,time); | 612 | return encode_attr_time(xdr,time); |
609 | } | 613 | } |
610 | 614 | ||
611 | static __be32 encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time) | 615 | static __be32 encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time) |
612 | { | 616 | { |
613 | if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) | 617 | if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) |
614 | return 0; | 618 | return 0; |
615 | return encode_attr_time(xdr,time); | 619 | return encode_attr_time(xdr,time); |
616 | } | 620 | } |
617 | 621 | ||
618 | static __be32 encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr) | 622 | static __be32 encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr) |
619 | { | 623 | { |
620 | __be32 status; | 624 | __be32 status; |
621 | 625 | ||
622 | hdr->status = xdr_reserve_space(xdr, 4); | 626 | hdr->status = xdr_reserve_space(xdr, 4); |
623 | if (unlikely(hdr->status == NULL)) | 627 | if (unlikely(hdr->status == NULL)) |
624 | return htonl(NFS4ERR_RESOURCE); | 628 | return htonl(NFS4ERR_RESOURCE); |
625 | status = encode_string(xdr, hdr->taglen, hdr->tag); | 629 | status = encode_string(xdr, hdr->taglen, hdr->tag); |
626 | if (unlikely(status != 0)) | 630 | if (unlikely(status != 0)) |
627 | return status; | 631 | return status; |
628 | hdr->nops = xdr_reserve_space(xdr, 4); | 632 | hdr->nops = xdr_reserve_space(xdr, 4); |
629 | if (unlikely(hdr->nops == NULL)) | 633 | if (unlikely(hdr->nops == NULL)) |
630 | return htonl(NFS4ERR_RESOURCE); | 634 | return htonl(NFS4ERR_RESOURCE); |
631 | return 0; | 635 | return 0; |
632 | } | 636 | } |
633 | 637 | ||
634 | static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res) | 638 | static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res) |
635 | { | 639 | { |
636 | __be32 *p; | 640 | __be32 *p; |
637 | 641 | ||
638 | p = xdr_reserve_space(xdr, 8); | 642 | p = xdr_reserve_space(xdr, 8); |
639 | if (unlikely(p == NULL)) | 643 | if (unlikely(p == NULL)) |
640 | return htonl(NFS4ERR_RESOURCE_HDR); | 644 | return htonl(NFS4ERR_RESOURCE_HDR); |
641 | *p++ = htonl(op); | 645 | *p++ = htonl(op); |
642 | *p = res; | 646 | *p = res; |
643 | return 0; | 647 | return 0; |
644 | } | 648 | } |
645 | 649 | ||
646 | static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res) | 650 | static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res) |
647 | { | 651 | { |
648 | __be32 *savep = NULL; | 652 | __be32 *savep = NULL; |
649 | __be32 status = res->status; | 653 | __be32 status = res->status; |
650 | 654 | ||
651 | if (unlikely(status != 0)) | 655 | if (unlikely(status != 0)) |
652 | goto out; | 656 | goto out; |
653 | status = encode_attr_bitmap(xdr, res->bitmap, &savep); | 657 | status = encode_attr_bitmap(xdr, res->bitmap, &savep); |
654 | if (unlikely(status != 0)) | 658 | if (unlikely(status != 0)) |
655 | goto out; | 659 | goto out; |
656 | status = encode_attr_change(xdr, res->bitmap, res->change_attr); | 660 | status = encode_attr_change(xdr, res->bitmap, res->change_attr); |
657 | if (unlikely(status != 0)) | 661 | if (unlikely(status != 0)) |
658 | goto out; | 662 | goto out; |
659 | status = encode_attr_size(xdr, res->bitmap, res->size); | 663 | status = encode_attr_size(xdr, res->bitmap, res->size); |
660 | if (unlikely(status != 0)) | 664 | if (unlikely(status != 0)) |
661 | goto out; | 665 | goto out; |
662 | status = encode_attr_ctime(xdr, res->bitmap, &res->ctime); | 666 | status = encode_attr_ctime(xdr, res->bitmap, &res->ctime); |
663 | if (unlikely(status != 0)) | 667 | if (unlikely(status != 0)) |
664 | goto out; | 668 | goto out; |
665 | status = encode_attr_mtime(xdr, res->bitmap, &res->mtime); | 669 | status = encode_attr_mtime(xdr, res->bitmap, &res->mtime); |
666 | *savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1))); | 670 | *savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1))); |
667 | out: | 671 | out: |
668 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | 672 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); |
669 | return status; | 673 | return status; |
670 | } | 674 | } |
671 | 675 | ||
672 | #if defined(CONFIG_NFS_V4_1) | 676 | #if defined(CONFIG_NFS_V4_1) |
673 | 677 | ||
674 | static __be32 encode_sessionid(struct xdr_stream *xdr, | 678 | static __be32 encode_sessionid(struct xdr_stream *xdr, |
675 | const struct nfs4_sessionid *sid) | 679 | const struct nfs4_sessionid *sid) |
676 | { | 680 | { |
677 | __be32 *p; | 681 | __be32 *p; |
678 | int len = NFS4_MAX_SESSIONID_LEN; | 682 | int len = NFS4_MAX_SESSIONID_LEN; |
679 | 683 | ||
680 | p = xdr_reserve_space(xdr, len); | 684 | p = xdr_reserve_space(xdr, len); |
681 | if (unlikely(p == NULL)) | 685 | if (unlikely(p == NULL)) |
682 | return htonl(NFS4ERR_RESOURCE); | 686 | return htonl(NFS4ERR_RESOURCE); |
683 | 687 | ||
684 | memcpy(p, sid, len); | 688 | memcpy(p, sid, len); |
685 | return 0; | 689 | return 0; |
686 | } | 690 | } |
687 | 691 | ||
688 | static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, | 692 | static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, |
689 | struct xdr_stream *xdr, | 693 | struct xdr_stream *xdr, |
690 | const struct cb_sequenceres *res) | 694 | const struct cb_sequenceres *res) |
691 | { | 695 | { |
692 | __be32 *p; | 696 | __be32 *p; |
693 | unsigned status = res->csr_status; | 697 | unsigned status = res->csr_status; |
694 | 698 | ||
695 | if (unlikely(status != 0)) | 699 | if (unlikely(status != 0)) |
696 | goto out; | 700 | goto out; |
697 | 701 | ||
698 | encode_sessionid(xdr, &res->csr_sessionid); | 702 | encode_sessionid(xdr, &res->csr_sessionid); |
699 | 703 | ||
700 | p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t)); | 704 | p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t)); |
701 | if (unlikely(p == NULL)) | 705 | if (unlikely(p == NULL)) |
702 | return htonl(NFS4ERR_RESOURCE); | 706 | return htonl(NFS4ERR_RESOURCE); |
703 | 707 | ||
704 | *p++ = htonl(res->csr_sequenceid); | 708 | *p++ = htonl(res->csr_sequenceid); |
705 | *p++ = htonl(res->csr_slotid); | 709 | *p++ = htonl(res->csr_slotid); |
706 | *p++ = htonl(res->csr_highestslotid); | 710 | *p++ = htonl(res->csr_highestslotid); |
707 | *p++ = htonl(res->csr_target_highestslotid); | 711 | *p++ = htonl(res->csr_target_highestslotid); |
708 | out: | 712 | out: |
709 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | 713 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); |
710 | return status; | 714 | return status; |
711 | } | 715 | } |
712 | 716 | ||
713 | static __be32 | 717 | static __be32 |
714 | preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) | 718 | preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) |
715 | { | 719 | { |
716 | if (op_nr == OP_CB_SEQUENCE) { | 720 | if (op_nr == OP_CB_SEQUENCE) { |
717 | if (nop != 0) | 721 | if (nop != 0) |
718 | return htonl(NFS4ERR_SEQUENCE_POS); | 722 | return htonl(NFS4ERR_SEQUENCE_POS); |
719 | } else { | 723 | } else { |
720 | if (nop == 0) | 724 | if (nop == 0) |
721 | return htonl(NFS4ERR_OP_NOT_IN_SESSION); | 725 | return htonl(NFS4ERR_OP_NOT_IN_SESSION); |
722 | } | 726 | } |
723 | 727 | ||
724 | switch (op_nr) { | 728 | switch (op_nr) { |
725 | case OP_CB_GETATTR: | 729 | case OP_CB_GETATTR: |
726 | case OP_CB_RECALL: | 730 | case OP_CB_RECALL: |
727 | case OP_CB_SEQUENCE: | 731 | case OP_CB_SEQUENCE: |
728 | case OP_CB_RECALL_ANY: | 732 | case OP_CB_RECALL_ANY: |
729 | case OP_CB_RECALL_SLOT: | 733 | case OP_CB_RECALL_SLOT: |
730 | case OP_CB_LAYOUTRECALL: | 734 | case OP_CB_LAYOUTRECALL: |
731 | case OP_CB_NOTIFY_DEVICEID: | 735 | case OP_CB_NOTIFY_DEVICEID: |
732 | *op = &callback_ops[op_nr]; | 736 | *op = &callback_ops[op_nr]; |
733 | break; | 737 | break; |
734 | 738 | ||
735 | case OP_CB_NOTIFY: | 739 | case OP_CB_NOTIFY: |
736 | case OP_CB_PUSH_DELEG: | 740 | case OP_CB_PUSH_DELEG: |
737 | case OP_CB_RECALLABLE_OBJ_AVAIL: | 741 | case OP_CB_RECALLABLE_OBJ_AVAIL: |
738 | case OP_CB_WANTS_CANCELLED: | 742 | case OP_CB_WANTS_CANCELLED: |
739 | case OP_CB_NOTIFY_LOCK: | 743 | case OP_CB_NOTIFY_LOCK: |
740 | return htonl(NFS4ERR_NOTSUPP); | 744 | return htonl(NFS4ERR_NOTSUPP); |
741 | 745 | ||
742 | default: | 746 | default: |
743 | return htonl(NFS4ERR_OP_ILLEGAL); | 747 | return htonl(NFS4ERR_OP_ILLEGAL); |
744 | } | 748 | } |
745 | 749 | ||
746 | return htonl(NFS_OK); | 750 | return htonl(NFS_OK); |
747 | } | 751 | } |
748 | 752 | ||
749 | static void nfs4_callback_free_slot(struct nfs4_session *session) | 753 | static void nfs4_callback_free_slot(struct nfs4_session *session) |
750 | { | 754 | { |
751 | struct nfs4_slot_table *tbl = &session->bc_slot_table; | 755 | struct nfs4_slot_table *tbl = &session->bc_slot_table; |
752 | 756 | ||
753 | spin_lock(&tbl->slot_tbl_lock); | 757 | spin_lock(&tbl->slot_tbl_lock); |
754 | /* | 758 | /* |
755 | * Let the state manager know callback processing done. | 759 | * Let the state manager know callback processing done. |
756 | * A single slot, so highest used slotid is either 0 or -1 | 760 | * A single slot, so highest used slotid is either 0 or -1 |
757 | */ | 761 | */ |
758 | tbl->highest_used_slotid = -1; | 762 | tbl->highest_used_slotid = -1; |
759 | nfs4_check_drain_bc_complete(session); | 763 | nfs4_check_drain_bc_complete(session); |
760 | spin_unlock(&tbl->slot_tbl_lock); | 764 | spin_unlock(&tbl->slot_tbl_lock); |
761 | } | 765 | } |
762 | 766 | ||
763 | static void nfs4_cb_free_slot(struct cb_process_state *cps) | 767 | static void nfs4_cb_free_slot(struct cb_process_state *cps) |
764 | { | 768 | { |
765 | if (cps->slotid != -1) | 769 | if (cps->slotid != -1) |
766 | nfs4_callback_free_slot(cps->clp->cl_session); | 770 | nfs4_callback_free_slot(cps->clp->cl_session); |
767 | } | 771 | } |
768 | 772 | ||
769 | #else /* CONFIG_NFS_V4_1 */ | 773 | #else /* CONFIG_NFS_V4_1 */ |
770 | 774 | ||
771 | static __be32 | 775 | static __be32 |
772 | preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) | 776 | preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) |
773 | { | 777 | { |
774 | return htonl(NFS4ERR_MINOR_VERS_MISMATCH); | 778 | return htonl(NFS4ERR_MINOR_VERS_MISMATCH); |
775 | } | 779 | } |
776 | 780 | ||
777 | static void nfs4_cb_free_slot(struct cb_process_state *cps) | 781 | static void nfs4_cb_free_slot(struct cb_process_state *cps) |
778 | { | 782 | { |
779 | } | 783 | } |
780 | #endif /* CONFIG_NFS_V4_1 */ | 784 | #endif /* CONFIG_NFS_V4_1 */ |
781 | 785 | ||
782 | static __be32 | 786 | static __be32 |
783 | preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op) | 787 | preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op) |
784 | { | 788 | { |
785 | switch (op_nr) { | 789 | switch (op_nr) { |
786 | case OP_CB_GETATTR: | 790 | case OP_CB_GETATTR: |
787 | case OP_CB_RECALL: | 791 | case OP_CB_RECALL: |
788 | *op = &callback_ops[op_nr]; | 792 | *op = &callback_ops[op_nr]; |
789 | break; | 793 | break; |
790 | default: | 794 | default: |
791 | return htonl(NFS4ERR_OP_ILLEGAL); | 795 | return htonl(NFS4ERR_OP_ILLEGAL); |
792 | } | 796 | } |
793 | 797 | ||
794 | return htonl(NFS_OK); | 798 | return htonl(NFS_OK); |
795 | } | 799 | } |
796 | 800 | ||
797 | static __be32 process_op(uint32_t minorversion, int nop, | 801 | static __be32 process_op(uint32_t minorversion, int nop, |
798 | struct svc_rqst *rqstp, | 802 | struct svc_rqst *rqstp, |
799 | struct xdr_stream *xdr_in, void *argp, | 803 | struct xdr_stream *xdr_in, void *argp, |
800 | struct xdr_stream *xdr_out, void *resp, | 804 | struct xdr_stream *xdr_out, void *resp, |
801 | struct cb_process_state *cps) | 805 | struct cb_process_state *cps) |
802 | { | 806 | { |
803 | struct callback_op *op = &callback_ops[0]; | 807 | struct callback_op *op = &callback_ops[0]; |
804 | unsigned int op_nr; | 808 | unsigned int op_nr; |
805 | __be32 status; | 809 | __be32 status; |
806 | long maxlen; | 810 | long maxlen; |
807 | __be32 res; | 811 | __be32 res; |
808 | 812 | ||
809 | dprintk("%s: start\n", __func__); | 813 | dprintk("%s: start\n", __func__); |
810 | status = decode_op_hdr(xdr_in, &op_nr); | 814 | status = decode_op_hdr(xdr_in, &op_nr); |
811 | if (unlikely(status)) | 815 | if (unlikely(status)) |
812 | return status; | 816 | return status; |
813 | 817 | ||
814 | dprintk("%s: minorversion=%d nop=%d op_nr=%u\n", | 818 | dprintk("%s: minorversion=%d nop=%d op_nr=%u\n", |
815 | __func__, minorversion, nop, op_nr); | 819 | __func__, minorversion, nop, op_nr); |
816 | 820 | ||
817 | status = minorversion ? preprocess_nfs41_op(nop, op_nr, &op) : | 821 | status = minorversion ? preprocess_nfs41_op(nop, op_nr, &op) : |
818 | preprocess_nfs4_op(op_nr, &op); | 822 | preprocess_nfs4_op(op_nr, &op); |
819 | if (status == htonl(NFS4ERR_OP_ILLEGAL)) | 823 | if (status == htonl(NFS4ERR_OP_ILLEGAL)) |
820 | op_nr = OP_CB_ILLEGAL; | 824 | op_nr = OP_CB_ILLEGAL; |
821 | if (status) | 825 | if (status) |
822 | goto encode_hdr; | 826 | goto encode_hdr; |
823 | 827 | ||
824 | if (cps->drc_status) { | 828 | if (cps->drc_status) { |
825 | status = cps->drc_status; | 829 | status = cps->drc_status; |
826 | goto encode_hdr; | 830 | goto encode_hdr; |
827 | } | 831 | } |
828 | 832 | ||
829 | maxlen = xdr_out->end - xdr_out->p; | 833 | maxlen = xdr_out->end - xdr_out->p; |
830 | if (maxlen > 0 && maxlen < PAGE_SIZE) { | 834 | if (maxlen > 0 && maxlen < PAGE_SIZE) { |
831 | status = op->decode_args(rqstp, xdr_in, argp); | 835 | status = op->decode_args(rqstp, xdr_in, argp); |
832 | if (likely(status == 0)) | 836 | if (likely(status == 0)) |
833 | status = op->process_op(argp, resp, cps); | 837 | status = op->process_op(argp, resp, cps); |
834 | } else | 838 | } else |
835 | status = htonl(NFS4ERR_RESOURCE); | 839 | status = htonl(NFS4ERR_RESOURCE); |
836 | 840 | ||
837 | encode_hdr: | 841 | encode_hdr: |
838 | res = encode_op_hdr(xdr_out, op_nr, status); | 842 | res = encode_op_hdr(xdr_out, op_nr, status); |
839 | if (unlikely(res)) | 843 | if (unlikely(res)) |
840 | return res; | 844 | return res; |
841 | if (op->encode_res != NULL && status == 0) | 845 | if (op->encode_res != NULL && status == 0) |
842 | status = op->encode_res(rqstp, xdr_out, resp); | 846 | status = op->encode_res(rqstp, xdr_out, resp); |
843 | dprintk("%s: done, status = %d\n", __func__, ntohl(status)); | 847 | dprintk("%s: done, status = %d\n", __func__, ntohl(status)); |
844 | return status; | 848 | return status; |
845 | } | 849 | } |
846 | 850 | ||
847 | /* | 851 | /* |
848 | * Decode, process and encode a COMPOUND | 852 | * Decode, process and encode a COMPOUND |
849 | */ | 853 | */ |
850 | static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp) | 854 | static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp) |
851 | { | 855 | { |
852 | struct cb_compound_hdr_arg hdr_arg = { 0 }; | 856 | struct cb_compound_hdr_arg hdr_arg = { 0 }; |
853 | struct cb_compound_hdr_res hdr_res = { NULL }; | 857 | struct cb_compound_hdr_res hdr_res = { NULL }; |
854 | struct xdr_stream xdr_in, xdr_out; | 858 | struct xdr_stream xdr_in, xdr_out; |
855 | __be32 *p, status; | 859 | __be32 *p, status; |
856 | struct cb_process_state cps = { | 860 | struct cb_process_state cps = { |
857 | .drc_status = 0, | 861 | .drc_status = 0, |
858 | .clp = NULL, | 862 | .clp = NULL, |
859 | .slotid = -1, | 863 | .slotid = -1, |
860 | }; | 864 | }; |
861 | unsigned int nops = 0; | 865 | unsigned int nops = 0; |
862 | 866 | ||
863 | dprintk("%s: start\n", __func__); | 867 | dprintk("%s: start\n", __func__); |
864 | 868 | ||
865 | xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); | 869 | xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); |
866 | 870 | ||
867 | p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); | 871 | p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); |
868 | xdr_init_encode(&xdr_out, &rqstp->rq_res, p); | 872 | xdr_init_encode(&xdr_out, &rqstp->rq_res, p); |
869 | 873 | ||
870 | status = decode_compound_hdr_arg(&xdr_in, &hdr_arg); | 874 | status = decode_compound_hdr_arg(&xdr_in, &hdr_arg); |
871 | if (status == __constant_htonl(NFS4ERR_RESOURCE)) | 875 | if (status == __constant_htonl(NFS4ERR_RESOURCE)) |
872 | return rpc_garbage_args; | 876 | return rpc_garbage_args; |
873 | 877 | ||
874 | if (hdr_arg.minorversion == 0) { | 878 | if (hdr_arg.minorversion == 0) { |
875 | cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident); | 879 | cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident); |
876 | if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) | 880 | if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) |
877 | return rpc_drop_reply; | 881 | return rpc_drop_reply; |
878 | } | 882 | } |
879 | 883 | ||
880 | hdr_res.taglen = hdr_arg.taglen; | 884 | hdr_res.taglen = hdr_arg.taglen; |
881 | hdr_res.tag = hdr_arg.tag; | 885 | hdr_res.tag = hdr_arg.tag; |
882 | if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) | 886 | if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) |
883 | return rpc_system_err; | 887 | return rpc_system_err; |
884 | 888 | ||
885 | while (status == 0 && nops != hdr_arg.nops) { | 889 | while (status == 0 && nops != hdr_arg.nops) { |
886 | status = process_op(hdr_arg.minorversion, nops, rqstp, | 890 | status = process_op(hdr_arg.minorversion, nops, rqstp, |
887 | &xdr_in, argp, &xdr_out, resp, &cps); | 891 | &xdr_in, argp, &xdr_out, resp, &cps); |
888 | nops++; | 892 | nops++; |
889 | } | 893 | } |
890 | 894 | ||
891 | /* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return | 895 | /* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return |
892 | * resource error in cb_compound status without returning op */ | 896 | * resource error in cb_compound status without returning op */ |
893 | if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) { | 897 | if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) { |
894 | status = htonl(NFS4ERR_RESOURCE); | 898 | status = htonl(NFS4ERR_RESOURCE); |
895 | nops--; | 899 | nops--; |
896 | } | 900 | } |
897 | 901 | ||
898 | *hdr_res.status = status; | 902 | *hdr_res.status = status; |
899 | *hdr_res.nops = htonl(nops); | 903 | *hdr_res.nops = htonl(nops); |
900 | nfs4_cb_free_slot(&cps); | 904 | nfs4_cb_free_slot(&cps); |
901 | nfs_put_client(cps.clp); | 905 | nfs_put_client(cps.clp); |
902 | dprintk("%s: done, status = %u\n", __func__, ntohl(status)); | 906 | dprintk("%s: done, status = %u\n", __func__, ntohl(status)); |
903 | return rpc_success; | 907 | return rpc_success; |
904 | } | 908 | } |
905 | 909 | ||
906 | /* | 910 | /* |
907 | * Define NFS4 callback COMPOUND ops. | 911 | * Define NFS4 callback COMPOUND ops. |
908 | */ | 912 | */ |
909 | static struct callback_op callback_ops[] = { | 913 | static struct callback_op callback_ops[] = { |
910 | [0] = { | 914 | [0] = { |
911 | .res_maxsize = CB_OP_HDR_RES_MAXSZ, | 915 | .res_maxsize = CB_OP_HDR_RES_MAXSZ, |
912 | }, | 916 | }, |
913 | [OP_CB_GETATTR] = { | 917 | [OP_CB_GETATTR] = { |
914 | .process_op = (callback_process_op_t)nfs4_callback_getattr, | 918 | .process_op = (callback_process_op_t)nfs4_callback_getattr, |
915 | .decode_args = (callback_decode_arg_t)decode_getattr_args, | 919 | .decode_args = (callback_decode_arg_t)decode_getattr_args, |
916 | .encode_res = (callback_encode_res_t)encode_getattr_res, | 920 | .encode_res = (callback_encode_res_t)encode_getattr_res, |
917 | .res_maxsize = CB_OP_GETATTR_RES_MAXSZ, | 921 | .res_maxsize = CB_OP_GETATTR_RES_MAXSZ, |
918 | }, | 922 | }, |
919 | [OP_CB_RECALL] = { | 923 | [OP_CB_RECALL] = { |
920 | .process_op = (callback_process_op_t)nfs4_callback_recall, | 924 | .process_op = (callback_process_op_t)nfs4_callback_recall, |
921 | .decode_args = (callback_decode_arg_t)decode_recall_args, | 925 | .decode_args = (callback_decode_arg_t)decode_recall_args, |
922 | .res_maxsize = CB_OP_RECALL_RES_MAXSZ, | 926 | .res_maxsize = CB_OP_RECALL_RES_MAXSZ, |
923 | }, | 927 | }, |
924 | #if defined(CONFIG_NFS_V4_1) | 928 | #if defined(CONFIG_NFS_V4_1) |
925 | [OP_CB_LAYOUTRECALL] = { | 929 | [OP_CB_LAYOUTRECALL] = { |
926 | .process_op = (callback_process_op_t)nfs4_callback_layoutrecall, | 930 | .process_op = (callback_process_op_t)nfs4_callback_layoutrecall, |
927 | .decode_args = | 931 | .decode_args = |
928 | (callback_decode_arg_t)decode_layoutrecall_args, | 932 | (callback_decode_arg_t)decode_layoutrecall_args, |
929 | .res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ, | 933 | .res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ, |
930 | }, | 934 | }, |
931 | [OP_CB_NOTIFY_DEVICEID] = { | 935 | [OP_CB_NOTIFY_DEVICEID] = { |
932 | .process_op = (callback_process_op_t)nfs4_callback_devicenotify, | 936 | .process_op = (callback_process_op_t)nfs4_callback_devicenotify, |
933 | .decode_args = | 937 | .decode_args = |
934 | (callback_decode_arg_t)decode_devicenotify_args, | 938 | (callback_decode_arg_t)decode_devicenotify_args, |
935 | .res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ, | 939 | .res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ, |
936 | }, | 940 | }, |
937 | [OP_CB_SEQUENCE] = { | 941 | [OP_CB_SEQUENCE] = { |
938 | .process_op = (callback_process_op_t)nfs4_callback_sequence, | 942 | .process_op = (callback_process_op_t)nfs4_callback_sequence, |
939 | .decode_args = (callback_decode_arg_t)decode_cb_sequence_args, | 943 | .decode_args = (callback_decode_arg_t)decode_cb_sequence_args, |
940 | .encode_res = (callback_encode_res_t)encode_cb_sequence_res, | 944 | .encode_res = (callback_encode_res_t)encode_cb_sequence_res, |
941 | .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ, | 945 | .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ, |
942 | }, | 946 | }, |
943 | [OP_CB_RECALL_ANY] = { | 947 | [OP_CB_RECALL_ANY] = { |
944 | .process_op = (callback_process_op_t)nfs4_callback_recallany, | 948 | .process_op = (callback_process_op_t)nfs4_callback_recallany, |
945 | .decode_args = (callback_decode_arg_t)decode_recallany_args, | 949 | .decode_args = (callback_decode_arg_t)decode_recallany_args, |
946 | .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ, | 950 | .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ, |
947 | }, | 951 | }, |
948 | [OP_CB_RECALL_SLOT] = { | 952 | [OP_CB_RECALL_SLOT] = { |
949 | .process_op = (callback_process_op_t)nfs4_callback_recallslot, | 953 | .process_op = (callback_process_op_t)nfs4_callback_recallslot, |
950 | .decode_args = (callback_decode_arg_t)decode_recallslot_args, | 954 | .decode_args = (callback_decode_arg_t)decode_recallslot_args, |
951 | .res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ, | 955 | .res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ, |
952 | }, | 956 | }, |
953 | #endif /* CONFIG_NFS_V4_1 */ | 957 | #endif /* CONFIG_NFS_V4_1 */ |
954 | }; | 958 | }; |
955 | 959 | ||
956 | /* | 960 | /* |
957 | * Define NFS4 callback procedures | 961 | * Define NFS4 callback procedures |
958 | */ | 962 | */ |
959 | static struct svc_procedure nfs4_callback_procedures1[] = { | 963 | static struct svc_procedure nfs4_callback_procedures1[] = { |
960 | [CB_NULL] = { | 964 | [CB_NULL] = { |
961 | .pc_func = nfs4_callback_null, | 965 | .pc_func = nfs4_callback_null, |
962 | .pc_decode = (kxdrproc_t)nfs4_decode_void, | 966 | .pc_decode = (kxdrproc_t)nfs4_decode_void, |
963 | .pc_encode = (kxdrproc_t)nfs4_encode_void, | 967 | .pc_encode = (kxdrproc_t)nfs4_encode_void, |
964 | .pc_xdrressize = 1, | 968 | .pc_xdrressize = 1, |
965 | }, | 969 | }, |
966 | [CB_COMPOUND] = { | 970 | [CB_COMPOUND] = { |
967 | .pc_func = nfs4_callback_compound, | 971 | .pc_func = nfs4_callback_compound, |
968 | .pc_encode = (kxdrproc_t)nfs4_encode_void, | 972 | .pc_encode = (kxdrproc_t)nfs4_encode_void, |
969 | .pc_argsize = 256, | 973 | .pc_argsize = 256, |
970 | .pc_ressize = 256, | 974 | .pc_ressize = 256, |
971 | .pc_xdrressize = NFS4_CALLBACK_BUFSIZE, | 975 | .pc_xdrressize = NFS4_CALLBACK_BUFSIZE, |
972 | } | 976 | } |
973 | }; | 977 | }; |
974 | 978 | ||
975 | struct svc_version nfs4_callback_version1 = { | 979 | struct svc_version nfs4_callback_version1 = { |
976 | .vs_vers = 1, | 980 | .vs_vers = 1, |
977 | .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1), | 981 | .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1), |
978 | .vs_proc = nfs4_callback_procedures1, | 982 | .vs_proc = nfs4_callback_procedures1, |
979 | .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, | 983 | .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, |
980 | .vs_dispatch = NULL, | 984 | .vs_dispatch = NULL, |
981 | .vs_hidden = 1, | 985 | .vs_hidden = 1, |
982 | }; | 986 | }; |
983 | 987 | ||
/* Version 4 of the callback service; reuses the version-1 procedure table. */
struct svc_version nfs4_callback_version4 = {
	.vs_vers = 4,
	.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
	.vs_proc = nfs4_callback_procedures1,
	.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
	.vs_dispatch = NULL,
	/* presumably keeps the service unregistered with rpcbind — confirm */
	.vs_hidden = 1,
};
992 | 996 |
fs/nfs/nfs4filelayoutdev.c
1 | /* | 1 | /* |
2 | * Device operations for the pnfs nfs4 file layout driver. | 2 | * Device operations for the pnfs nfs4 file layout driver. |
3 | * | 3 | * |
4 | * Copyright (c) 2002 | 4 | * Copyright (c) 2002 |
5 | * The Regents of the University of Michigan | 5 | * The Regents of the University of Michigan |
6 | * All Rights Reserved | 6 | * All Rights Reserved |
7 | * | 7 | * |
8 | * Dean Hildebrand <dhildebz@umich.edu> | 8 | * Dean Hildebrand <dhildebz@umich.edu> |
9 | * Garth Goodson <Garth.Goodson@netapp.com> | 9 | * Garth Goodson <Garth.Goodson@netapp.com> |
10 | * | 10 | * |
11 | * Permission is granted to use, copy, create derivative works, and | 11 | * Permission is granted to use, copy, create derivative works, and |
12 | * redistribute this software and such derivative works for any purpose, | 12 | * redistribute this software and such derivative works for any purpose, |
13 | * so long as the name of the University of Michigan is not used in | 13 | * so long as the name of the University of Michigan is not used in |
14 | * any advertising or publicity pertaining to the use or distribution | 14 | * any advertising or publicity pertaining to the use or distribution |
15 | * of this software without specific, written prior authorization. If | 15 | * of this software without specific, written prior authorization. If |
16 | * the above copyright notice or any other identification of the | 16 | * the above copyright notice or any other identification of the |
17 | * University of Michigan is included in any copy of any portion of | 17 | * University of Michigan is included in any copy of any portion of |
18 | * this software, then the disclaimer below must also be included. | 18 | * this software, then the disclaimer below must also be included. |
19 | * | 19 | * |
20 | * This software is provided as is, without representation or warranty | 20 | * This software is provided as is, without representation or warranty |
21 | * of any kind either express or implied, including without limitation | 21 | * of any kind either express or implied, including without limitation |
22 | * the implied warranties of merchantability, fitness for a particular | 22 | * the implied warranties of merchantability, fitness for a particular |
23 | * purpose, or noninfringement. The Regents of the University of | 23 | * purpose, or noninfringement. The Regents of the University of |
24 | * Michigan shall not be liable for any damages, including special, | 24 | * Michigan shall not be liable for any damages, including special, |
25 | * indirect, incidental, or consequential damages, with respect to any | 25 | * indirect, incidental, or consequential damages, with respect to any |
26 | * claim arising out of or in connection with the use of the software, | 26 | * claim arising out of or in connection with the use of the software, |
27 | * even if it has been or is hereafter advised of the possibility of | 27 | * even if it has been or is hereafter advised of the possibility of |
28 | * such damages. | 28 | * such damages. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/nfs_fs.h> | 31 | #include <linux/nfs_fs.h> |
32 | #include <linux/vmalloc.h> | 32 | #include <linux/vmalloc.h> |
33 | 33 | ||
34 | #include "internal.h" | 34 | #include "internal.h" |
35 | #include "nfs4filelayout.h" | 35 | #include "nfs4filelayout.h" |
36 | 36 | ||
37 | #define NFSDBG_FACILITY NFSDBG_PNFS_LD | 37 | #define NFSDBG_FACILITY NFSDBG_PNFS_LD |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * Data server cache | 40 | * Data server cache |
41 | * | 41 | * |
42 | * Data servers can be mapped to different device ids. | 42 | * Data servers can be mapped to different device ids. |
43 | * nfs4_pnfs_ds reference counting | 43 | * nfs4_pnfs_ds reference counting |
44 | * - set to 1 on allocation | 44 | * - set to 1 on allocation |
45 | * - incremented when a device id maps a data server already in the cache. | 45 | * - incremented when a device id maps a data server already in the cache. |
46 | * - decremented when deviceid is removed from the cache. | 46 | * - decremented when deviceid is removed from the cache. |
47 | */ | 47 | */ |
48 | DEFINE_SPINLOCK(nfs4_ds_cache_lock); | 48 | DEFINE_SPINLOCK(nfs4_ds_cache_lock); |
49 | static LIST_HEAD(nfs4_data_server_cache); | 49 | static LIST_HEAD(nfs4_data_server_cache); |
50 | 50 | ||
51 | /* Debug routines */ | 51 | /* Debug routines */ |
52 | void | 52 | void |
53 | print_ds(struct nfs4_pnfs_ds *ds) | 53 | print_ds(struct nfs4_pnfs_ds *ds) |
54 | { | 54 | { |
55 | if (ds == NULL) { | 55 | if (ds == NULL) { |
56 | printk("%s NULL device\n", __func__); | 56 | printk("%s NULL device\n", __func__); |
57 | return; | 57 | return; |
58 | } | 58 | } |
59 | printk(" ds %s\n" | 59 | printk(" ds %s\n" |
60 | " ref count %d\n" | 60 | " ref count %d\n" |
61 | " client %p\n" | 61 | " client %p\n" |
62 | " cl_exchange_flags %x\n", | 62 | " cl_exchange_flags %x\n", |
63 | ds->ds_remotestr, | 63 | ds->ds_remotestr, |
64 | atomic_read(&ds->ds_count), ds->ds_clp, | 64 | atomic_read(&ds->ds_count), ds->ds_clp, |
65 | ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0); | 65 | ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0); |
66 | } | 66 | } |
67 | 67 | ||
68 | static bool | 68 | static bool |
69 | same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) | 69 | same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) |
70 | { | 70 | { |
71 | struct sockaddr_in *a, *b; | 71 | struct sockaddr_in *a, *b; |
72 | struct sockaddr_in6 *a6, *b6; | 72 | struct sockaddr_in6 *a6, *b6; |
73 | 73 | ||
74 | if (addr1->sa_family != addr2->sa_family) | 74 | if (addr1->sa_family != addr2->sa_family) |
75 | return false; | 75 | return false; |
76 | 76 | ||
77 | switch (addr1->sa_family) { | 77 | switch (addr1->sa_family) { |
78 | case AF_INET: | 78 | case AF_INET: |
79 | a = (struct sockaddr_in *)addr1; | 79 | a = (struct sockaddr_in *)addr1; |
80 | b = (struct sockaddr_in *)addr2; | 80 | b = (struct sockaddr_in *)addr2; |
81 | 81 | ||
82 | if (a->sin_addr.s_addr == b->sin_addr.s_addr && | 82 | if (a->sin_addr.s_addr == b->sin_addr.s_addr && |
83 | a->sin_port == b->sin_port) | 83 | a->sin_port == b->sin_port) |
84 | return true; | 84 | return true; |
85 | break; | 85 | break; |
86 | 86 | ||
87 | case AF_INET6: | 87 | case AF_INET6: |
88 | a6 = (struct sockaddr_in6 *)addr1; | 88 | a6 = (struct sockaddr_in6 *)addr1; |
89 | b6 = (struct sockaddr_in6 *)addr2; | 89 | b6 = (struct sockaddr_in6 *)addr2; |
90 | 90 | ||
91 | /* LINKLOCAL addresses must have matching scope_id */ | 91 | /* LINKLOCAL addresses must have matching scope_id */ |
92 | if (ipv6_addr_scope(&a6->sin6_addr) == | 92 | if (ipv6_addr_scope(&a6->sin6_addr) == |
93 | IPV6_ADDR_SCOPE_LINKLOCAL && | 93 | IPV6_ADDR_SCOPE_LINKLOCAL && |
94 | a6->sin6_scope_id != b6->sin6_scope_id) | 94 | a6->sin6_scope_id != b6->sin6_scope_id) |
95 | return false; | 95 | return false; |
96 | 96 | ||
97 | if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) && | 97 | if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) && |
98 | a6->sin6_port == b6->sin6_port) | 98 | a6->sin6_port == b6->sin6_port) |
99 | return true; | 99 | return true; |
100 | break; | 100 | break; |
101 | 101 | ||
102 | default: | 102 | default: |
103 | dprintk("%s: unhandled address family: %u\n", | 103 | dprintk("%s: unhandled address family: %u\n", |
104 | __func__, addr1->sa_family); | 104 | __func__, addr1->sa_family); |
105 | return false; | 105 | return false; |
106 | } | 106 | } |
107 | 107 | ||
108 | return false; | 108 | return false; |
109 | } | 109 | } |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Lookup DS by addresses. The first matching address returns true. | 112 | * Lookup DS by addresses. The first matching address returns true. |
113 | * nfs4_ds_cache_lock is held | 113 | * nfs4_ds_cache_lock is held |
114 | */ | 114 | */ |
115 | static struct nfs4_pnfs_ds * | 115 | static struct nfs4_pnfs_ds * |
116 | _data_server_lookup_locked(struct list_head *dsaddrs) | 116 | _data_server_lookup_locked(struct list_head *dsaddrs) |
117 | { | 117 | { |
118 | struct nfs4_pnfs_ds *ds; | 118 | struct nfs4_pnfs_ds *ds; |
119 | struct nfs4_pnfs_ds_addr *da1, *da2; | 119 | struct nfs4_pnfs_ds_addr *da1, *da2; |
120 | 120 | ||
121 | list_for_each_entry(da1, dsaddrs, da_node) { | 121 | list_for_each_entry(da1, dsaddrs, da_node) { |
122 | list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) { | 122 | list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) { |
123 | list_for_each_entry(da2, &ds->ds_addrs, da_node) { | 123 | list_for_each_entry(da2, &ds->ds_addrs, da_node) { |
124 | if (same_sockaddr( | 124 | if (same_sockaddr( |
125 | (struct sockaddr *)&da1->da_addr, | 125 | (struct sockaddr *)&da1->da_addr, |
126 | (struct sockaddr *)&da2->da_addr)) | 126 | (struct sockaddr *)&da2->da_addr)) |
127 | return ds; | 127 | return ds; |
128 | } | 128 | } |
129 | } | 129 | } |
130 | } | 130 | } |
131 | return NULL; | 131 | return NULL; |
132 | } | 132 | } |
133 | 133 | ||
134 | /* | 134 | /* |
135 | * Compare two lists of addresses. | 135 | * Compare two lists of addresses. |
136 | */ | 136 | */ |
137 | static bool | 137 | static bool |
138 | _data_server_match_all_addrs_locked(struct list_head *dsaddrs1, | 138 | _data_server_match_all_addrs_locked(struct list_head *dsaddrs1, |
139 | struct list_head *dsaddrs2) | 139 | struct list_head *dsaddrs2) |
140 | { | 140 | { |
141 | struct nfs4_pnfs_ds_addr *da1, *da2; | 141 | struct nfs4_pnfs_ds_addr *da1, *da2; |
142 | size_t count1 = 0, | 142 | size_t count1 = 0, |
143 | count2 = 0; | 143 | count2 = 0; |
144 | 144 | ||
145 | list_for_each_entry(da1, dsaddrs1, da_node) | 145 | list_for_each_entry(da1, dsaddrs1, da_node) |
146 | count1++; | 146 | count1++; |
147 | 147 | ||
148 | list_for_each_entry(da2, dsaddrs2, da_node) { | 148 | list_for_each_entry(da2, dsaddrs2, da_node) { |
149 | bool found = false; | 149 | bool found = false; |
150 | count2++; | 150 | count2++; |
151 | list_for_each_entry(da1, dsaddrs1, da_node) { | 151 | list_for_each_entry(da1, dsaddrs1, da_node) { |
152 | if (same_sockaddr((struct sockaddr *)&da1->da_addr, | 152 | if (same_sockaddr((struct sockaddr *)&da1->da_addr, |
153 | (struct sockaddr *)&da2->da_addr)) { | 153 | (struct sockaddr *)&da2->da_addr)) { |
154 | found = true; | 154 | found = true; |
155 | break; | 155 | break; |
156 | } | 156 | } |
157 | } | 157 | } |
158 | if (!found) | 158 | if (!found) |
159 | return false; | 159 | return false; |
160 | } | 160 | } |
161 | 161 | ||
162 | return (count1 == count2); | 162 | return (count1 == count2); |
163 | } | 163 | } |
164 | 164 | ||
/*
 * Create an rpc connection to the nfs4_pnfs_ds data server
 * Currently only supports IPv4 and IPv6 addresses
 *
 * Tries each address in ds->ds_addrs in order until nfs4_set_ds_client()
 * returns a usable nfs_client.  On success ds->ds_clp is set and 0 is
 * returned; on failure a negative errno is returned and the client
 * reference (if any) is dropped.
 */
static int
nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
		mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);

	/* A DS with no addresses should never have been created. */
	BUG_ON(list_empty(&ds->ds_addrs));

	/* First address that yields a client wins. */
	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		clp = nfs4_set_ds_client(mds_srv->nfs_client,
					 (struct sockaddr *)&da->da_addr,
					 da->da_addrlen, IPPROTO_TCP);
		if (!IS_ERR(clp))
			break;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
		/* Non-zero pNFS exchange flags: presumably this nfs_client has
		 * already completed EXCHANGE_ID (confirm) — reuse it, but only
		 * if the server actually advertised the DS role. */
		if (!is_ds_client(clp)) {
			status = -ENODEV;
			goto out_put;
		}
		ds->ds_clp = clp;
		dprintk("%s [existing] server=%s\n", __func__,
			ds->ds_remotestr);
		goto out;
	}

	/*
	 * Do not set NFS_CS_CHECK_LEASE_TIME instead set the DS lease to
	 * be equal to the MDS lease. Renewal is scheduled in create_session.
	 */
	spin_lock(&mds_srv->nfs_client->cl_lock);
	clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
	spin_unlock(&mds_srv->nfs_client->cl_lock);
	clp->cl_last_renewal = jiffies;

	/* New nfs_client */
	status = nfs4_init_ds_session(clp);
	if (status)
		goto out_put;

	ds->ds_clp = clp;
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
out_put:
	nfs_put_client(clp);
	goto out;
}
230 | 230 | ||
231 | static void | 231 | static void |
232 | destroy_ds(struct nfs4_pnfs_ds *ds) | 232 | destroy_ds(struct nfs4_pnfs_ds *ds) |
233 | { | 233 | { |
234 | struct nfs4_pnfs_ds_addr *da; | 234 | struct nfs4_pnfs_ds_addr *da; |
235 | 235 | ||
236 | dprintk("--> %s\n", __func__); | 236 | dprintk("--> %s\n", __func__); |
237 | ifdebug(FACILITY) | 237 | ifdebug(FACILITY) |
238 | print_ds(ds); | 238 | print_ds(ds); |
239 | 239 | ||
240 | if (ds->ds_clp) | 240 | if (ds->ds_clp) |
241 | nfs_put_client(ds->ds_clp); | 241 | nfs_put_client(ds->ds_clp); |
242 | 242 | ||
243 | while (!list_empty(&ds->ds_addrs)) { | 243 | while (!list_empty(&ds->ds_addrs)) { |
244 | da = list_first_entry(&ds->ds_addrs, | 244 | da = list_first_entry(&ds->ds_addrs, |
245 | struct nfs4_pnfs_ds_addr, | 245 | struct nfs4_pnfs_ds_addr, |
246 | da_node); | 246 | da_node); |
247 | list_del_init(&da->da_node); | 247 | list_del_init(&da->da_node); |
248 | kfree(da->da_remotestr); | 248 | kfree(da->da_remotestr); |
249 | kfree(da); | 249 | kfree(da); |
250 | } | 250 | } |
251 | 251 | ||
252 | kfree(ds->ds_remotestr); | 252 | kfree(ds->ds_remotestr); |
253 | kfree(ds); | 253 | kfree(ds); |
254 | } | 254 | } |
255 | 255 | ||
/*
 * Drop the deviceid's reference on every data server in its list, then
 * free the dsaddr.  A DS whose refcount hits zero is unhashed from the
 * global cache and destroyed.
 */
void
nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
	struct nfs4_pnfs_ds *ds;
	int i;

	nfs4_print_deviceid(&dsaddr->id_node.deviceid);

	for (i = 0; i < dsaddr->ds_num; i++) {
		ds = dsaddr->ds_list[i];
		if (ds != NULL) {
			/* atomic_dec_and_lock() only takes nfs4_ds_cache_lock
			 * when the count drops to zero, so unhash + destroy
			 * happen atomically w.r.t. cache lookups. */
			if (atomic_dec_and_lock(&ds->ds_count,
						&nfs4_ds_cache_lock)) {
				list_del_init(&ds->ds_node);
				spin_unlock(&nfs4_ds_cache_lock);
				destroy_ds(ds);
			}
		}
	}
	kfree(dsaddr->stripe_indices);
	kfree(dsaddr);
}
278 | 278 | ||
279 | /* | 279 | /* |
280 | * Create a string with a human readable address and port to avoid | 280 | * Create a string with a human readable address and port to avoid |
281 | * complicated setup around many dprinks. | 281 | * complicated setup around many dprinks. |
282 | */ | 282 | */ |
283 | static char * | 283 | static char * |
284 | nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags) | 284 | nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags) |
285 | { | 285 | { |
286 | struct nfs4_pnfs_ds_addr *da; | 286 | struct nfs4_pnfs_ds_addr *da; |
287 | char *remotestr; | 287 | char *remotestr; |
288 | size_t len; | 288 | size_t len; |
289 | char *p; | 289 | char *p; |
290 | 290 | ||
291 | len = 3; /* '{', '}' and eol */ | 291 | len = 3; /* '{', '}' and eol */ |
292 | list_for_each_entry(da, dsaddrs, da_node) { | 292 | list_for_each_entry(da, dsaddrs, da_node) { |
293 | len += strlen(da->da_remotestr) + 1; /* string plus comma */ | 293 | len += strlen(da->da_remotestr) + 1; /* string plus comma */ |
294 | } | 294 | } |
295 | 295 | ||
296 | remotestr = kzalloc(len, gfp_flags); | 296 | remotestr = kzalloc(len, gfp_flags); |
297 | if (!remotestr) | 297 | if (!remotestr) |
298 | return NULL; | 298 | return NULL; |
299 | 299 | ||
300 | p = remotestr; | 300 | p = remotestr; |
301 | *(p++) = '{'; | 301 | *(p++) = '{'; |
302 | len--; | 302 | len--; |
303 | list_for_each_entry(da, dsaddrs, da_node) { | 303 | list_for_each_entry(da, dsaddrs, da_node) { |
304 | size_t ll = strlen(da->da_remotestr); | 304 | size_t ll = strlen(da->da_remotestr); |
305 | 305 | ||
306 | if (ll > len) | 306 | if (ll > len) |
307 | goto out_err; | 307 | goto out_err; |
308 | 308 | ||
309 | memcpy(p, da->da_remotestr, ll); | 309 | memcpy(p, da->da_remotestr, ll); |
310 | p += ll; | 310 | p += ll; |
311 | len -= ll; | 311 | len -= ll; |
312 | 312 | ||
313 | if (len < 1) | 313 | if (len < 1) |
314 | goto out_err; | 314 | goto out_err; |
315 | (*p++) = ','; | 315 | (*p++) = ','; |
316 | len--; | 316 | len--; |
317 | } | 317 | } |
318 | if (len < 2) | 318 | if (len < 2) |
319 | goto out_err; | 319 | goto out_err; |
320 | *(p++) = '}'; | 320 | *(p++) = '}'; |
321 | *p = '\0'; | 321 | *p = '\0'; |
322 | return remotestr; | 322 | return remotestr; |
323 | out_err: | 323 | out_err: |
324 | kfree(remotestr); | 324 | kfree(remotestr); |
325 | return NULL; | 325 | return NULL; |
326 | } | 326 | } |
327 | 327 | ||
/*
 * Find-or-create a data server for the given address list.
 *
 * Allocates a candidate ds up front (outside the lock), then under
 * nfs4_ds_cache_lock either inserts it into the global cache or, if an
 * equivalent DS is already cached, discards the candidate and returns
 * the cached one with its refcount bumped.  On success the addresses
 * are spliced off *dsaddrs into the returned ds.  Returns NULL if
 * dsaddrs is empty or allocation fails.
 */
static struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
	char *remotestr;

	if (list_empty(dsaddrs)) {
		dprintk("%s: no addresses defined\n", __func__);
		goto out;
	}

	ds = kzalloc(sizeof(*ds), gfp_flags);
	if (!ds)
		goto out;

	/* this is only used for debugging, so it's ok if its NULL */
	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

	spin_lock(&nfs4_ds_cache_lock);
	tmp_ds = _data_server_lookup_locked(dsaddrs);
	if (tmp_ds == NULL) {
		/* Not cached: take ownership of the address list and hash
		 * the new ds with an initial refcount of 1. */
		INIT_LIST_HEAD(&ds->ds_addrs);
		list_splice_init(dsaddrs, &ds->ds_addrs);
		ds->ds_remotestr = remotestr;
		atomic_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nfs4_data_server_cache);
		dprintk("%s add new data server %s\n", __func__,
			ds->ds_remotestr);
	} else {
		/* Cached hit: warn (debug only) if the address sets differ,
		 * then drop the candidate and reuse the cached DS. */
		if (!_data_server_match_all_addrs_locked(&tmp_ds->ds_addrs,
							 dsaddrs)) {
			dprintk("%s: multipath address mismatch: %s != %s",
				__func__, tmp_ds->ds_remotestr, remotestr);
		}
		kfree(remotestr);
		kfree(ds);
		atomic_inc(&tmp_ds->ds_count);
		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_remotestr,
			atomic_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nfs4_ds_cache_lock);
out:
	return ds;
}
376 | 376 | ||
377 | /* | 377 | /* |
378 | * Currently only supports ipv4, ipv6 and one multi-path address. | 378 | * Currently only supports ipv4, ipv6 and one multi-path address. |
379 | */ | 379 | */ |
380 | static struct nfs4_pnfs_ds_addr * | 380 | static struct nfs4_pnfs_ds_addr * |
381 | decode_ds_addr(struct xdr_stream *streamp, gfp_t gfp_flags) | 381 | decode_ds_addr(struct xdr_stream *streamp, gfp_t gfp_flags) |
382 | { | 382 | { |
383 | struct nfs4_pnfs_ds_addr *da = NULL; | 383 | struct nfs4_pnfs_ds_addr *da = NULL; |
384 | char *buf, *portstr; | 384 | char *buf, *portstr; |
385 | u32 port; | 385 | __be16 port; |
386 | int nlen, rlen; | 386 | int nlen, rlen; |
387 | int tmp[2]; | 387 | int tmp[2]; |
388 | __be32 *p; | 388 | __be32 *p; |
389 | char *netid, *match_netid; | 389 | char *netid, *match_netid; |
390 | size_t len, match_netid_len; | 390 | size_t len, match_netid_len; |
391 | char *startsep = ""; | 391 | char *startsep = ""; |
392 | char *endsep = ""; | 392 | char *endsep = ""; |
393 | 393 | ||
394 | 394 | ||
395 | /* r_netid */ | 395 | /* r_netid */ |
396 | p = xdr_inline_decode(streamp, 4); | 396 | p = xdr_inline_decode(streamp, 4); |
397 | if (unlikely(!p)) | 397 | if (unlikely(!p)) |
398 | goto out_err; | 398 | goto out_err; |
399 | nlen = be32_to_cpup(p++); | 399 | nlen = be32_to_cpup(p++); |
400 | 400 | ||
401 | p = xdr_inline_decode(streamp, nlen); | 401 | p = xdr_inline_decode(streamp, nlen); |
402 | if (unlikely(!p)) | 402 | if (unlikely(!p)) |
403 | goto out_err; | 403 | goto out_err; |
404 | 404 | ||
405 | netid = kmalloc(nlen+1, gfp_flags); | 405 | netid = kmalloc(nlen+1, gfp_flags); |
406 | if (unlikely(!netid)) | 406 | if (unlikely(!netid)) |
407 | goto out_err; | 407 | goto out_err; |
408 | 408 | ||
409 | netid[nlen] = '\0'; | 409 | netid[nlen] = '\0'; |
410 | memcpy(netid, p, nlen); | 410 | memcpy(netid, p, nlen); |
411 | 411 | ||
412 | /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */ | 412 | /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */ |
413 | p = xdr_inline_decode(streamp, 4); | 413 | p = xdr_inline_decode(streamp, 4); |
414 | if (unlikely(!p)) | 414 | if (unlikely(!p)) |
415 | goto out_free_netid; | 415 | goto out_free_netid; |
416 | rlen = be32_to_cpup(p); | 416 | rlen = be32_to_cpup(p); |
417 | 417 | ||
418 | p = xdr_inline_decode(streamp, rlen); | 418 | p = xdr_inline_decode(streamp, rlen); |
419 | if (unlikely(!p)) | 419 | if (unlikely(!p)) |
420 | goto out_free_netid; | 420 | goto out_free_netid; |
421 | 421 | ||
422 | /* port is ".ABC.DEF", 8 chars max */ | 422 | /* port is ".ABC.DEF", 8 chars max */ |
423 | if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) { | 423 | if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) { |
424 | dprintk("%s: Invalid address, length %d\n", __func__, | 424 | dprintk("%s: Invalid address, length %d\n", __func__, |
425 | rlen); | 425 | rlen); |
426 | goto out_free_netid; | 426 | goto out_free_netid; |
427 | } | 427 | } |
428 | buf = kmalloc(rlen + 1, gfp_flags); | 428 | buf = kmalloc(rlen + 1, gfp_flags); |
429 | if (!buf) { | 429 | if (!buf) { |
430 | dprintk("%s: Not enough memory\n", __func__); | 430 | dprintk("%s: Not enough memory\n", __func__); |
431 | goto out_free_netid; | 431 | goto out_free_netid; |
432 | } | 432 | } |
433 | buf[rlen] = '\0'; | 433 | buf[rlen] = '\0'; |
434 | memcpy(buf, p, rlen); | 434 | memcpy(buf, p, rlen); |
435 | 435 | ||
436 | /* replace port '.' with '-' */ | 436 | /* replace port '.' with '-' */ |
437 | portstr = strrchr(buf, '.'); | 437 | portstr = strrchr(buf, '.'); |
438 | if (!portstr) { | 438 | if (!portstr) { |
439 | dprintk("%s: Failed finding expected dot in port\n", | 439 | dprintk("%s: Failed finding expected dot in port\n", |
440 | __func__); | 440 | __func__); |
441 | goto out_free_buf; | 441 | goto out_free_buf; |
442 | } | 442 | } |
443 | *portstr = '-'; | 443 | *portstr = '-'; |
444 | 444 | ||
445 | /* find '.' between address and port */ | 445 | /* find '.' between address and port */ |
446 | portstr = strrchr(buf, '.'); | 446 | portstr = strrchr(buf, '.'); |
447 | if (!portstr) { | 447 | if (!portstr) { |
448 | dprintk("%s: Failed finding expected dot between address and " | 448 | dprintk("%s: Failed finding expected dot between address and " |
449 | "port\n", __func__); | 449 | "port\n", __func__); |
450 | goto out_free_buf; | 450 | goto out_free_buf; |
451 | } | 451 | } |
452 | *portstr = '\0'; | 452 | *portstr = '\0'; |
453 | 453 | ||
454 | da = kzalloc(sizeof(*da), gfp_flags); | 454 | da = kzalloc(sizeof(*da), gfp_flags); |
455 | if (unlikely(!da)) | 455 | if (unlikely(!da)) |
456 | goto out_free_buf; | 456 | goto out_free_buf; |
457 | 457 | ||
458 | INIT_LIST_HEAD(&da->da_node); | 458 | INIT_LIST_HEAD(&da->da_node); |
459 | 459 | ||
460 | if (!rpc_pton(buf, portstr-buf, (struct sockaddr *)&da->da_addr, | 460 | if (!rpc_pton(buf, portstr-buf, (struct sockaddr *)&da->da_addr, |
461 | sizeof(da->da_addr))) { | 461 | sizeof(da->da_addr))) { |
462 | dprintk("%s: error parsing address %s\n", __func__, buf); | 462 | dprintk("%s: error parsing address %s\n", __func__, buf); |
463 | goto out_free_da; | 463 | goto out_free_da; |
464 | } | 464 | } |
465 | 465 | ||
466 | portstr++; | 466 | portstr++; |
467 | sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]); | 467 | sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]); |
468 | port = htons((tmp[0] << 8) | (tmp[1])); | 468 | port = htons((tmp[0] << 8) | (tmp[1])); |
469 | 469 | ||
470 | switch (da->da_addr.ss_family) { | 470 | switch (da->da_addr.ss_family) { |
471 | case AF_INET: | 471 | case AF_INET: |
472 | ((struct sockaddr_in *)&da->da_addr)->sin_port = port; | 472 | ((struct sockaddr_in *)&da->da_addr)->sin_port = port; |
473 | da->da_addrlen = sizeof(struct sockaddr_in); | 473 | da->da_addrlen = sizeof(struct sockaddr_in); |
474 | match_netid = "tcp"; | 474 | match_netid = "tcp"; |
475 | match_netid_len = 3; | 475 | match_netid_len = 3; |
476 | break; | 476 | break; |
477 | 477 | ||
478 | case AF_INET6: | 478 | case AF_INET6: |
479 | ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port; | 479 | ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port; |
480 | da->da_addrlen = sizeof(struct sockaddr_in6); | 480 | da->da_addrlen = sizeof(struct sockaddr_in6); |
481 | match_netid = "tcp6"; | 481 | match_netid = "tcp6"; |
482 | match_netid_len = 4; | 482 | match_netid_len = 4; |
483 | startsep = "["; | 483 | startsep = "["; |
484 | endsep = "]"; | 484 | endsep = "]"; |
485 | break; | 485 | break; |
486 | 486 | ||
487 | default: | 487 | default: |
488 | dprintk("%s: unsupported address family: %u\n", | 488 | dprintk("%s: unsupported address family: %u\n", |
489 | __func__, da->da_addr.ss_family); | 489 | __func__, da->da_addr.ss_family); |
490 | goto out_free_da; | 490 | goto out_free_da; |
491 | } | 491 | } |
492 | 492 | ||
493 | if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) { | 493 | if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) { |
494 | dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n", | 494 | dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n", |
495 | __func__, netid, match_netid); | 495 | __func__, netid, match_netid); |
496 | goto out_free_da; | 496 | goto out_free_da; |
497 | } | 497 | } |
498 | 498 | ||
499 | /* save human readable address */ | 499 | /* save human readable address */ |
500 | len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7; | 500 | len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7; |
501 | da->da_remotestr = kzalloc(len, gfp_flags); | 501 | da->da_remotestr = kzalloc(len, gfp_flags); |
502 | 502 | ||
503 | /* NULL is ok, only used for dprintk */ | 503 | /* NULL is ok, only used for dprintk */ |
504 | if (da->da_remotestr) | 504 | if (da->da_remotestr) |
505 | snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep, | 505 | snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep, |
506 | buf, endsep, ntohs(port)); | 506 | buf, endsep, ntohs(port)); |
507 | 507 | ||
508 | dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr); | 508 | dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr); |
509 | kfree(buf); | 509 | kfree(buf); |
510 | kfree(netid); | 510 | kfree(netid); |
511 | return da; | 511 | return da; |
512 | 512 | ||
513 | out_free_da: | 513 | out_free_da: |
514 | kfree(da); | 514 | kfree(da); |
515 | out_free_buf: | 515 | out_free_buf: |
516 | dprintk("%s: Error parsing DS addr: %s\n", __func__, buf); | 516 | dprintk("%s: Error parsing DS addr: %s\n", __func__, buf); |
517 | kfree(buf); | 517 | kfree(buf); |
518 | out_free_netid: | 518 | out_free_netid: |
519 | kfree(netid); | 519 | kfree(netid); |
520 | out_err: | 520 | out_err: |
521 | return NULL; | 521 | return NULL; |
522 | } | 522 | } |
523 | 523 | ||
/*
 * Decode opaque device data and return the result.
 *
 * Parses a GETDEVICEINFO reply (in pdev->pages) into a freshly allocated
 * struct nfs4_file_layout_dsaddr: the stripe-index array, then the
 * multipath data-server list for each of the ds_num entries.
 *
 * Returns the new dsaddr on success, NULL on any parse/allocation error.
 * The caller owns the returned structure (freed via nfs4_fl_free_deviceid).
 *
 * Error-path ordering matters: once stripe_indices has been handed to
 * dsaddr (and cleared to NULL), cleanup must go through
 * out_err_free_deviceid rather than out_err_free_stripe_indices.
 */
static struct nfs4_file_layout_dsaddr*
decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
{
	int i;
	u32 cnt, num;
	u8 *indexp;
	__be32 *p;
	u8 *stripe_indices;
	u8 max_stripe_index;
	struct nfs4_file_layout_dsaddr *dsaddr = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	struct list_head dsaddrs;
	struct nfs4_pnfs_ds_addr *da;

	/* set up xdr stream; scratch page lets decodes span page boundaries */
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		goto out_err;

	xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* Get the stripe count (number of stripe index) */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_free_scratch;

	cnt = be32_to_cpup(p);
	dprintk("%s stripe count %d\n", __func__, cnt);
	if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) {
		printk(KERN_WARNING "%s: stripe count %d greater than "
		       "supported maximum %d\n", __func__,
			cnt, NFS4_PNFS_MAX_STRIPE_CNT);
		goto out_err_free_scratch;
	}

	/* read stripe indices */
	stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags);
	if (!stripe_indices)
		goto out_err_free_scratch;

	/* cnt << 2: each index is a 4-byte XDR word on the wire */
	p = xdr_inline_decode(&stream, cnt << 2);
	if (unlikely(!p))
		goto out_err_free_stripe_indices;

	indexp = &stripe_indices[0];
	max_stripe_index = 0;
	for (i = 0; i < cnt; i++) {
		/* wire value is u32 but truncated to u8 storage here */
		*indexp = be32_to_cpup(p++);
		max_stripe_index = max(max_stripe_index, *indexp);
		indexp++;
	}

	/* Check the multipath list count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_free_stripe_indices;

	num = be32_to_cpup(p);
	dprintk("%s ds_num %u\n", __func__, num);
	if (num > NFS4_PNFS_MAX_MULTI_CNT) {
		printk(KERN_WARNING "%s: multipath count %d greater than "
			"supported maximum %d\n", __func__,
			num, NFS4_PNFS_MAX_MULTI_CNT);
		goto out_err_free_stripe_indices;
	}

	/* validate stripe indices are all < num */
	if (max_stripe_index >= num) {
		printk(KERN_WARNING "%s: stripe index %u >= num ds %u\n",
			__func__, max_stripe_index, num);
		goto out_err_free_stripe_indices;
	}

	/* dsaddr has one ds_list slot built in, hence num - 1 extra */
	dsaddr = kzalloc(sizeof(*dsaddr) +
			(sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
			gfp_flags);
	if (!dsaddr)
		goto out_err_free_stripe_indices;

	dsaddr->stripe_count = cnt;
	dsaddr->stripe_indices = stripe_indices;
	/* ownership transferred to dsaddr; NULL so later error paths
	 * don't double-free via out_err_free_stripe_indices */
	stripe_indices = NULL;
	dsaddr->ds_num = num;
	nfs4_init_deviceid_node(&dsaddr->id_node,
				NFS_SERVER(ino)->pnfs_curr_ld,
				NFS_SERVER(ino)->nfs_client,
				&pdev->dev_id);

	INIT_LIST_HEAD(&dsaddrs);

	for (i = 0; i < dsaddr->ds_num; i++) {
		int j;
		u32 mp_count;

		p = xdr_inline_decode(&stream, 4);
		if (unlikely(!p))
			goto out_err_free_deviceid;

		mp_count = be32_to_cpup(p); /* multipath count */
		for (j = 0; j < mp_count; j++) {
			/* unparsable addresses are skipped, not fatal */
			da = decode_ds_addr(&stream, gfp_flags);
			if (da)
				list_add_tail(&da->da_node, &dsaddrs);
		}
		if (list_empty(&dsaddrs)) {
			dprintk("%s: no suitable DS addresses found\n",
				__func__);
			goto out_err_free_deviceid;
		}

		dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
		if (!dsaddr->ds_list[i])
			goto out_err_drain_dsaddrs;

		/* If DS was already in cache, free ds addrs */
		while (!list_empty(&dsaddrs)) {
			da = list_first_entry(&dsaddrs,
					      struct nfs4_pnfs_ds_addr,
					      da_node);
			list_del_init(&da->da_node);
			kfree(da->da_remotestr);
			kfree(da);
		}
	}

	__free_page(scratch);
	return dsaddr;

out_err_drain_dsaddrs:
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}
out_err_free_deviceid:
	nfs4_fl_free_deviceid(dsaddr);
	/* stripe_indicies was part of dsaddr */
	goto out_err_free_scratch;
out_err_free_stripe_indices:
	kfree(stripe_indices);
out_err_free_scratch:
	__free_page(scratch);
out_err:
	dprintk("%s ERROR: returning NULL\n", __func__);
	return NULL;
}
676 | 676 | ||
677 | /* | 677 | /* |
678 | * Decode the opaque device specified in 'dev' and add it to the cache of | 678 | * Decode the opaque device specified in 'dev' and add it to the cache of |
679 | * available devices. | 679 | * available devices. |
680 | */ | 680 | */ |
681 | static struct nfs4_file_layout_dsaddr * | 681 | static struct nfs4_file_layout_dsaddr * |
682 | decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) | 682 | decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) |
683 | { | 683 | { |
684 | struct nfs4_deviceid_node *d; | 684 | struct nfs4_deviceid_node *d; |
685 | struct nfs4_file_layout_dsaddr *n, *new; | 685 | struct nfs4_file_layout_dsaddr *n, *new; |
686 | 686 | ||
687 | new = decode_device(inode, dev, gfp_flags); | 687 | new = decode_device(inode, dev, gfp_flags); |
688 | if (!new) { | 688 | if (!new) { |
689 | printk(KERN_WARNING "%s: Could not decode or add device\n", | 689 | printk(KERN_WARNING "%s: Could not decode or add device\n", |
690 | __func__); | 690 | __func__); |
691 | return NULL; | 691 | return NULL; |
692 | } | 692 | } |
693 | 693 | ||
694 | d = nfs4_insert_deviceid_node(&new->id_node); | 694 | d = nfs4_insert_deviceid_node(&new->id_node); |
695 | n = container_of(d, struct nfs4_file_layout_dsaddr, id_node); | 695 | n = container_of(d, struct nfs4_file_layout_dsaddr, id_node); |
696 | if (n != new) { | 696 | if (n != new) { |
697 | nfs4_fl_free_deviceid(new); | 697 | nfs4_fl_free_deviceid(new); |
698 | return n; | 698 | return n; |
699 | } | 699 | } |
700 | 700 | ||
701 | return new; | 701 | return new; |
702 | } | 702 | } |
703 | 703 | ||
704 | /* | 704 | /* |
705 | * Retrieve the information for dev_id, add it to the list | 705 | * Retrieve the information for dev_id, add it to the list |
706 | * of available devices, and return it. | 706 | * of available devices, and return it. |
707 | */ | 707 | */ |
708 | struct nfs4_file_layout_dsaddr * | 708 | struct nfs4_file_layout_dsaddr * |
709 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags) | 709 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags) |
710 | { | 710 | { |
711 | struct pnfs_device *pdev = NULL; | 711 | struct pnfs_device *pdev = NULL; |
712 | u32 max_resp_sz; | 712 | u32 max_resp_sz; |
713 | int max_pages; | 713 | int max_pages; |
714 | struct page **pages = NULL; | 714 | struct page **pages = NULL; |
715 | struct nfs4_file_layout_dsaddr *dsaddr = NULL; | 715 | struct nfs4_file_layout_dsaddr *dsaddr = NULL; |
716 | int rc, i; | 716 | int rc, i; |
717 | struct nfs_server *server = NFS_SERVER(inode); | 717 | struct nfs_server *server = NFS_SERVER(inode); |
718 | 718 | ||
719 | /* | 719 | /* |
720 | * Use the session max response size as the basis for setting | 720 | * Use the session max response size as the basis for setting |
721 | * GETDEVICEINFO's maxcount | 721 | * GETDEVICEINFO's maxcount |
722 | */ | 722 | */ |
723 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; | 723 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; |
724 | max_pages = max_resp_sz >> PAGE_SHIFT; | 724 | max_pages = max_resp_sz >> PAGE_SHIFT; |
725 | dprintk("%s inode %p max_resp_sz %u max_pages %d\n", | 725 | dprintk("%s inode %p max_resp_sz %u max_pages %d\n", |
726 | __func__, inode, max_resp_sz, max_pages); | 726 | __func__, inode, max_resp_sz, max_pages); |
727 | 727 | ||
728 | pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); | 728 | pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); |
729 | if (pdev == NULL) | 729 | if (pdev == NULL) |
730 | return NULL; | 730 | return NULL; |
731 | 731 | ||
732 | pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); | 732 | pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); |
733 | if (pages == NULL) { | 733 | if (pages == NULL) { |
734 | kfree(pdev); | 734 | kfree(pdev); |
735 | return NULL; | 735 | return NULL; |
736 | } | 736 | } |
737 | for (i = 0; i < max_pages; i++) { | 737 | for (i = 0; i < max_pages; i++) { |
738 | pages[i] = alloc_page(gfp_flags); | 738 | pages[i] = alloc_page(gfp_flags); |
739 | if (!pages[i]) | 739 | if (!pages[i]) |
740 | goto out_free; | 740 | goto out_free; |
741 | } | 741 | } |
742 | 742 | ||
743 | memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id)); | 743 | memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id)); |
744 | pdev->layout_type = LAYOUT_NFSV4_1_FILES; | 744 | pdev->layout_type = LAYOUT_NFSV4_1_FILES; |
745 | pdev->pages = pages; | 745 | pdev->pages = pages; |
746 | pdev->pgbase = 0; | 746 | pdev->pgbase = 0; |
747 | pdev->pglen = PAGE_SIZE * max_pages; | 747 | pdev->pglen = PAGE_SIZE * max_pages; |
748 | pdev->mincount = 0; | 748 | pdev->mincount = 0; |
749 | 749 | ||
750 | rc = nfs4_proc_getdeviceinfo(server, pdev); | 750 | rc = nfs4_proc_getdeviceinfo(server, pdev); |
751 | dprintk("%s getdevice info returns %d\n", __func__, rc); | 751 | dprintk("%s getdevice info returns %d\n", __func__, rc); |
752 | if (rc) | 752 | if (rc) |
753 | goto out_free; | 753 | goto out_free; |
754 | 754 | ||
755 | /* | 755 | /* |
756 | * Found new device, need to decode it and then add it to the | 756 | * Found new device, need to decode it and then add it to the |
757 | * list of known devices for this mountpoint. | 757 | * list of known devices for this mountpoint. |
758 | */ | 758 | */ |
759 | dsaddr = decode_and_add_device(inode, pdev, gfp_flags); | 759 | dsaddr = decode_and_add_device(inode, pdev, gfp_flags); |
760 | out_free: | 760 | out_free: |
761 | for (i = 0; i < max_pages; i++) | 761 | for (i = 0; i < max_pages; i++) |
762 | __free_page(pages[i]); | 762 | __free_page(pages[i]); |
763 | kfree(pages); | 763 | kfree(pages); |
764 | kfree(pdev); | 764 | kfree(pdev); |
765 | dprintk("<-- %s dsaddr %p\n", __func__, dsaddr); | 765 | dprintk("<-- %s dsaddr %p\n", __func__, dsaddr); |
766 | return dsaddr; | 766 | return dsaddr; |
767 | } | 767 | } |
768 | 768 | ||
/* Drop one reference on the deviceid node embedded in @dsaddr. */
void
nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
	nfs4_put_deviceid_node(&dsaddr->id_node);
}
774 | 774 | ||
/*
 * Want res = (offset - layout->pattern_offset)/ layout->stripe_unit
 * Then: ((res + fsi) % dsaddr->stripe_count)
 *
 * Maps a file offset to its stripe index "j" within the layout's
 * striping pattern.
 */
u32
nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset)
{
	struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
	u64 tmp;

	tmp = offset - flseg->pattern_offset;
	/* do_div() divides tmp in place and returns the remainder */
	do_div(tmp, flseg->stripe_unit);
	tmp += flseg->first_stripe_index;
	return do_div(tmp, flseg->dsaddr->stripe_count);
}
790 | 790 | ||
/* Translate stripe index @j to its data-server index via the device's
 * stripe_indices table. */
u32
nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j)
{
	return FILELAYOUT_LSEG(lseg)->dsaddr->stripe_indices[j];
}
796 | 796 | ||
797 | struct nfs_fh * | 797 | struct nfs_fh * |
798 | nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j) | 798 | nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j) |
799 | { | 799 | { |
800 | struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg); | 800 | struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg); |
801 | u32 i; | 801 | u32 i; |
802 | 802 | ||
803 | if (flseg->stripe_type == STRIPE_SPARSE) { | 803 | if (flseg->stripe_type == STRIPE_SPARSE) { |
804 | if (flseg->num_fh == 1) | 804 | if (flseg->num_fh == 1) |
805 | i = 0; | 805 | i = 0; |
806 | else if (flseg->num_fh == 0) | 806 | else if (flseg->num_fh == 0) |
807 | /* Use the MDS OPEN fh set in nfs_read_rpcsetup */ | 807 | /* Use the MDS OPEN fh set in nfs_read_rpcsetup */ |
808 | return NULL; | 808 | return NULL; |
809 | else | 809 | else |
810 | i = nfs4_fl_calc_ds_index(lseg, j); | 810 | i = nfs4_fl_calc_ds_index(lseg, j); |
811 | } else | 811 | } else |
812 | i = j; | 812 | i = j; |
813 | return flseg->fh_array[i]; | 813 | return flseg->fh_array[i]; |
814 | } | 814 | } |
815 | 815 | ||
816 | static void | 816 | static void |
817 | filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr, | 817 | filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr, |
818 | int err, const char *ds_remotestr) | 818 | int err, const char *ds_remotestr) |
819 | { | 819 | { |
820 | u32 *p = (u32 *)&dsaddr->id_node.deviceid; | 820 | u32 *p = (u32 *)&dsaddr->id_node.deviceid; |
821 | 821 | ||
822 | printk(KERN_ERR "NFS: data server %s connection error %d." | 822 | printk(KERN_ERR "NFS: data server %s connection error %d." |
823 | " Deviceid [%x%x%x%x] marked out of use.\n", | 823 | " Deviceid [%x%x%x%x] marked out of use.\n", |
824 | ds_remotestr, err, p[0], p[1], p[2], p[3]); | 824 | ds_remotestr, err, p[0], p[1], p[2], p[3]); |
825 | 825 | ||
826 | spin_lock(&nfs4_ds_cache_lock); | 826 | spin_lock(&nfs4_ds_cache_lock); |
827 | dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY; | 827 | dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY; |
828 | spin_unlock(&nfs4_ds_cache_lock); | 828 | spin_unlock(&nfs4_ds_cache_lock); |
829 | } | 829 | } |
830 | 830 | ||
/*
 * Return a usable (connected) data server for @ds_idx, or NULL.
 *
 * Connects lazily on first use.  A deviceid previously marked
 * NFS4_DEVICE_ID_NEG_ENTRY is never retried; a fresh connect failure
 * marks it so.  Callers falling back to NULL do MDS I/O instead.
 */
struct nfs4_pnfs_ds *
nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
	struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
	struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];

	if (ds == NULL) {
		printk(KERN_ERR "%s: No data server for offset index %d\n",
			__func__, ds_idx);
		return NULL;
	}

	/* ds_clp set means this DS already has a client connection */
	if (!ds->ds_clp) {
		struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
		int err;

		if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
			/* Already tried to connect, don't try again */
			dprintk("%s Deviceid marked out of use\n", __func__);
			return NULL;
		}
		err = nfs4_ds_connect(s, ds);
		if (err) {
			filelayout_mark_devid_negative(dsaddr, err,
						       ds->ds_remotestr);
			return NULL;
		}
	}
	return ds;
}
861 | 861 |
fs/nfs/nfs4proc.c
1 | /* | 1 | /* |
2 | * fs/nfs/nfs4proc.c | 2 | * fs/nfs/nfs4proc.c |
3 | * | 3 | * |
4 | * Client-side procedure declarations for NFSv4. | 4 | * Client-side procedure declarations for NFSv4. |
5 | * | 5 | * |
6 | * Copyright (c) 2002 The Regents of the University of Michigan. | 6 | * Copyright (c) 2002 The Regents of the University of Michigan. |
7 | * All rights reserved. | 7 | * All rights reserved. |
8 | * | 8 | * |
9 | * Kendrick Smith <kmsmith@umich.edu> | 9 | * Kendrick Smith <kmsmith@umich.edu> |
10 | * Andy Adamson <andros@umich.edu> | 10 | * Andy Adamson <andros@umich.edu> |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
13 | * modification, are permitted provided that the following conditions | 13 | * modification, are permitted provided that the following conditions |
14 | * are met: | 14 | * are met: |
15 | * | 15 | * |
16 | * 1. Redistributions of source code must retain the above copyright | 16 | * 1. Redistributions of source code must retain the above copyright |
17 | * notice, this list of conditions and the following disclaimer. | 17 | * notice, this list of conditions and the following disclaimer. |
18 | * 2. Redistributions in binary form must reproduce the above copyright | 18 | * 2. Redistributions in binary form must reproduce the above copyright |
19 | * notice, this list of conditions and the following disclaimer in the | 19 | * notice, this list of conditions and the following disclaimer in the |
20 | * documentation and/or other materials provided with the distribution. | 20 | * documentation and/or other materials provided with the distribution. |
21 | * 3. Neither the name of the University nor the names of its | 21 | * 3. Neither the name of the University nor the names of its |
22 | * contributors may be used to endorse or promote products derived | 22 | * contributors may be used to endorse or promote products derived |
23 | * from this software without specific prior written permission. | 23 | * from this software without specific prior written permission. |
24 | * | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | 25 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED |
26 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | 26 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
27 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | 27 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
28 | * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | 28 | * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
29 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 29 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR |
32 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | 32 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
33 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | 33 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
34 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 34 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
35 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 35 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include <linux/mm.h> | 38 | #include <linux/mm.h> |
39 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
40 | #include <linux/errno.h> | 40 | #include <linux/errno.h> |
41 | #include <linux/string.h> | 41 | #include <linux/string.h> |
42 | #include <linux/ratelimit.h> | 42 | #include <linux/ratelimit.h> |
43 | #include <linux/printk.h> | 43 | #include <linux/printk.h> |
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #include <linux/sunrpc/clnt.h> | 45 | #include <linux/sunrpc/clnt.h> |
46 | #include <linux/sunrpc/gss_api.h> | 46 | #include <linux/sunrpc/gss_api.h> |
47 | #include <linux/nfs.h> | 47 | #include <linux/nfs.h> |
48 | #include <linux/nfs4.h> | 48 | #include <linux/nfs4.h> |
49 | #include <linux/nfs_fs.h> | 49 | #include <linux/nfs_fs.h> |
50 | #include <linux/nfs_page.h> | 50 | #include <linux/nfs_page.h> |
51 | #include <linux/nfs_mount.h> | 51 | #include <linux/nfs_mount.h> |
52 | #include <linux/namei.h> | 52 | #include <linux/namei.h> |
53 | #include <linux/mount.h> | 53 | #include <linux/mount.h> |
54 | #include <linux/module.h> | 54 | #include <linux/module.h> |
55 | #include <linux/nfs_idmap.h> | 55 | #include <linux/nfs_idmap.h> |
56 | #include <linux/sunrpc/bc_xprt.h> | 56 | #include <linux/sunrpc/bc_xprt.h> |
57 | #include <linux/xattr.h> | 57 | #include <linux/xattr.h> |
58 | #include <linux/utsname.h> | 58 | #include <linux/utsname.h> |
59 | #include <linux/freezer.h> | 59 | #include <linux/freezer.h> |
60 | 60 | ||
61 | #include "nfs4_fs.h" | 61 | #include "nfs4_fs.h" |
62 | #include "delegation.h" | 62 | #include "delegation.h" |
63 | #include "internal.h" | 63 | #include "internal.h" |
64 | #include "iostat.h" | 64 | #include "iostat.h" |
65 | #include "callback.h" | 65 | #include "callback.h" |
66 | #include "pnfs.h" | 66 | #include "pnfs.h" |
67 | 67 | ||
#define NFSDBG_FACILITY		NFSDBG_PROC

/* Bounds for the exponential backoff used when polling delayed operations */
#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* Cap on state-recovery retry loops before giving up with an error */
#define NFS4_MAX_LOOP_ON_RECOVER (10)

/* Forward declarations for helpers defined later in this file */
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, struct nfs4_state *);
static int nfs41_free_stateid(struct nfs_server *, struct nfs4_state *);
#endif
88 | /* Prevent leaks of NFSv4 errors into userland */ | 88 | /* Prevent leaks of NFSv4 errors into userland */ |
89 | static int nfs4_map_errors(int err) | 89 | static int nfs4_map_errors(int err) |
90 | { | 90 | { |
91 | if (err >= -1000) | 91 | if (err >= -1000) |
92 | return err; | 92 | return err; |
93 | switch (err) { | 93 | switch (err) { |
94 | case -NFS4ERR_RESOURCE: | 94 | case -NFS4ERR_RESOURCE: |
95 | return -EREMOTEIO; | 95 | return -EREMOTEIO; |
96 | case -NFS4ERR_WRONGSEC: | 96 | case -NFS4ERR_WRONGSEC: |
97 | return -EPERM; | 97 | return -EPERM; |
98 | case -NFS4ERR_BADOWNER: | 98 | case -NFS4ERR_BADOWNER: |
99 | case -NFS4ERR_BADNAME: | 99 | case -NFS4ERR_BADNAME: |
100 | return -EINVAL; | 100 | return -EINVAL; |
101 | default: | 101 | default: |
102 | dprintk("%s could not handle NFSv4 error %d\n", | 102 | dprintk("%s could not handle NFSv4 error %d\n", |
103 | __func__, -err); | 103 | __func__, -err); |
104 | break; | 104 | break; |
105 | } | 105 | } |
106 | return -EIO; | 106 | return -EIO; |
107 | } | 107 | } |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * This is our standard bitmap for GETATTR requests. | 110 | * This is our standard bitmap for GETATTR requests. |
111 | */ | 111 | */ |
112 | const u32 nfs4_fattr_bitmap[2] = { | 112 | const u32 nfs4_fattr_bitmap[2] = { |
113 | FATTR4_WORD0_TYPE | 113 | FATTR4_WORD0_TYPE |
114 | | FATTR4_WORD0_CHANGE | 114 | | FATTR4_WORD0_CHANGE |
115 | | FATTR4_WORD0_SIZE | 115 | | FATTR4_WORD0_SIZE |
116 | | FATTR4_WORD0_FSID | 116 | | FATTR4_WORD0_FSID |
117 | | FATTR4_WORD0_FILEID, | 117 | | FATTR4_WORD0_FILEID, |
118 | FATTR4_WORD1_MODE | 118 | FATTR4_WORD1_MODE |
119 | | FATTR4_WORD1_NUMLINKS | 119 | | FATTR4_WORD1_NUMLINKS |
120 | | FATTR4_WORD1_OWNER | 120 | | FATTR4_WORD1_OWNER |
121 | | FATTR4_WORD1_OWNER_GROUP | 121 | | FATTR4_WORD1_OWNER_GROUP |
122 | | FATTR4_WORD1_RAWDEV | 122 | | FATTR4_WORD1_RAWDEV |
123 | | FATTR4_WORD1_SPACE_USED | 123 | | FATTR4_WORD1_SPACE_USED |
124 | | FATTR4_WORD1_TIME_ACCESS | 124 | | FATTR4_WORD1_TIME_ACCESS |
125 | | FATTR4_WORD1_TIME_METADATA | 125 | | FATTR4_WORD1_TIME_METADATA |
126 | | FATTR4_WORD1_TIME_MODIFY | 126 | | FATTR4_WORD1_TIME_MODIFY |
127 | }; | 127 | }; |
128 | 128 | ||
/* Attribute bitmap for statfs-style requests: file and space counts only. */
const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};
137 | 137 | ||
/* Attribute bitmap for pathconf requests: just the link and name limits. */
const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0	/* no word1 attributes are needed */
};
143 | 143 | ||
/*
 * Attribute bitmap for fsinfo requests: transfer-size limits, lease
 * time, time granularity, plus the pNFS layout types (word 1) and
 * layout block size (word 2).
 */
const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};
152 | 152 | ||
/*
 * The standard GETATTR attribute set, extended with FS_LOCATIONS and
 * MOUNTED_ON_FILEID (presumably for referral/migration lookups —
 * confirm against the callers).
 */
const u32 nfs4_fs_locations_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};
171 | 171 | ||
/*
 * nfs4_setup_readdir - prepare READDIR arguments, faking "." and ".."
 * @cookie: directory cookie from the previous READDIR reply
 * @verifier: cookie verifier matching @cookie
 * @dentry: dentry of the directory being listed
 * @readdir: request arguments to fill in
 *
 * Cookies 0, 1 and 2 are treated as reserved ("." , ".." and "first
 * real entry" respectively): for those we send cookie 0 with a zeroed
 * verifier and pre-encode the fake "." / ".." entries into the first
 * reply page, adjusting pgbase/count so the decoder sees them as if
 * the server had returned them.  Larger cookies pass through unchanged.
 */
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	/* Must have room for the two hand-encoded entries below. */
	BUG_ON(readdir->count < 80);
	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages, KM_USER0);

	if (cookie == 0) {
		*p++ = xdr_one;                                  /* next */
		*p++ = xdr_zero;                   /* cookie, first word */
		*p++ = xdr_one;                   /* cookie, second word */
		*p++ = xdr_one;                             /* entry len */
		memcpy(p, ".\0\0\0", 4);                        /* entry */
		p++;
		*p++ = xdr_one;                         /* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
		*p++ = htonl(8);              /* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;                                  /* next */
	*p++ = xdr_zero;                   /* cookie, first word */
	*p++ = xdr_two;                   /* cookie, second word */
	*p++ = xdr_two;                             /* entry len */
	memcpy(p, "..\0\0", 4);                         /* entry */
	p++;
	*p++ = xdr_one;                         /* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
	*p++ = htonl(8);              /* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	/* Skip the pre-encoded fake entries when decoding the reply. */
	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start, KM_USER0);
}
226 | 226 | ||
227 | static int nfs4_wait_clnt_recover(struct nfs_client *clp) | 227 | static int nfs4_wait_clnt_recover(struct nfs_client *clp) |
228 | { | 228 | { |
229 | int res; | 229 | int res; |
230 | 230 | ||
231 | might_sleep(); | 231 | might_sleep(); |
232 | 232 | ||
233 | res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING, | 233 | res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING, |
234 | nfs_wait_bit_killable, TASK_KILLABLE); | 234 | nfs_wait_bit_killable, TASK_KILLABLE); |
235 | return res; | 235 | return res; |
236 | } | 236 | } |
237 | 237 | ||
238 | static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) | 238 | static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) |
239 | { | 239 | { |
240 | int res = 0; | 240 | int res = 0; |
241 | 241 | ||
242 | might_sleep(); | 242 | might_sleep(); |
243 | 243 | ||
244 | if (*timeout <= 0) | 244 | if (*timeout <= 0) |
245 | *timeout = NFS4_POLL_RETRY_MIN; | 245 | *timeout = NFS4_POLL_RETRY_MIN; |
246 | if (*timeout > NFS4_POLL_RETRY_MAX) | 246 | if (*timeout > NFS4_POLL_RETRY_MAX) |
247 | *timeout = NFS4_POLL_RETRY_MAX; | 247 | *timeout = NFS4_POLL_RETRY_MAX; |
248 | freezable_schedule_timeout_killable(*timeout); | 248 | freezable_schedule_timeout_killable(*timeout); |
249 | if (fatal_signal_pending(current)) | 249 | if (fatal_signal_pending(current)) |
250 | res = -ERESTARTSYS; | 250 | res = -ERESTARTSYS; |
251 | *timeout <<= 1; | 251 | *timeout <<= 1; |
252 | return res; | 252 | return res; |
253 | } | 253 | } |
254 | 254 | ||
/* This is the error handling routine for processes that are allowed
 * to sleep.
 *
 * Maps an NFSv4 error onto the required action: schedule stateid,
 * lease or session recovery (and wait for it), delay-and-retry, or
 * give up and return a mapped errno.  Sets exception->retry when the
 * caller should reissue the RPC.  Note the intentional case
 * fallthroughs below.
 */
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	int ret = errorcode;

	exception->retry = 0;
	switch(errorcode) {
		case 0:
			return 0;
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OPENMODE:
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL)
				nfs4_schedule_stateid_recovery(server, state);
			/* fall through: an expired lease needs recovery too */
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR: %d Reset session\n", __func__,
				errorcode);
			nfs4_schedule_session_recovery(clp->cl_session);
			exception->retry = 1;
			break;
#endif /* defined(CONFIG_NFS_V4_1) */
		case -NFS4ERR_FILE_OPEN:
			if (exception->timeout > HZ) {
				/* We have retried a decent amount, time to
				 * fail
				 */
				ret = -EBUSY;
				break;
			}
			/* fall through: still worth delaying and retrying */
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
		case -EKEYEXPIRED:
			ret = nfs4_delay(server->client, &exception->timeout);
			if (ret != 0)
				break;
			/* fall through: the delay completed, so retry */
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			exception->retry = 1;
			break;
		case -NFS4ERR_BADOWNER:
			/* The following works around a Linux server bug! */
		case -NFS4ERR_BADNAME:
			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
				exception->retry = 1;
				printk(KERN_WARNING "NFS: v4 server %s "
						"does not accept raw "
						"uid/gids. "
						"Reenabling the idmapper.\n",
						server->nfs_client->cl_hostname);
			}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}
335 | 335 | ||
336 | 336 | ||
/* Record a successful lease renewal on @clp, under cl_lock. */
static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	/* Only move the renewal time forward, never backward. */
	if (time_before(clp->cl_last_renewal,timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}
344 | 344 | ||
/* Per-server convenience wrapper around do_renew_lease(). */
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}
349 | 349 | ||
350 | #if defined(CONFIG_NFS_V4_1) | 350 | #if defined(CONFIG_NFS_V4_1) |
351 | 351 | ||
352 | /* | 352 | /* |
353 | * nfs4_free_slot - free a slot and efficiently update slot table. | 353 | * nfs4_free_slot - free a slot and efficiently update slot table. |
354 | * | 354 | * |
355 | * freeing a slot is trivially done by clearing its respective bit | 355 | * freeing a slot is trivially done by clearing its respective bit |
356 | * in the bitmap. | 356 | * in the bitmap. |
357 | * If the freed slotid equals highest_used_slotid we want to update it | 357 | * If the freed slotid equals highest_used_slotid we want to update it |
358 | * so that the server would be able to size down the slot table if needed, | 358 | * so that the server would be able to size down the slot table if needed, |
359 | * otherwise we know that the highest_used_slotid is still in use. | 359 | * otherwise we know that the highest_used_slotid is still in use. |
360 | * When updating highest_used_slotid there may be "holes" in the bitmap | 360 | * When updating highest_used_slotid there may be "holes" in the bitmap |
361 | * so we need to scan down from highest_used_slotid to 0 looking for the now | 361 | * so we need to scan down from highest_used_slotid to 0 looking for the now |
362 | * highest slotid in use. | 362 | * highest slotid in use. |
363 | * If none found, highest_used_slotid is set to -1. | 363 | * If none found, highest_used_slotid is set to -1. |
364 | * | 364 | * |
365 | * Must be called while holding tbl->slot_tbl_lock | 365 | * Must be called while holding tbl->slot_tbl_lock |
366 | */ | 366 | */ |
367 | static void | 367 | static void |
368 | nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid) | 368 | nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid) |
369 | { | 369 | { |
370 | int slotid = free_slotid; | 370 | int slotid = free_slotid; |
371 | 371 | ||
372 | BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE); | 372 | BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE); |
373 | /* clear used bit in bitmap */ | 373 | /* clear used bit in bitmap */ |
374 | __clear_bit(slotid, tbl->used_slots); | 374 | __clear_bit(slotid, tbl->used_slots); |
375 | 375 | ||
376 | /* update highest_used_slotid when it is freed */ | 376 | /* update highest_used_slotid when it is freed */ |
377 | if (slotid == tbl->highest_used_slotid) { | 377 | if (slotid == tbl->highest_used_slotid) { |
378 | slotid = find_last_bit(tbl->used_slots, tbl->max_slots); | 378 | slotid = find_last_bit(tbl->used_slots, tbl->max_slots); |
379 | if (slotid < tbl->max_slots) | 379 | if (slotid < tbl->max_slots) |
380 | tbl->highest_used_slotid = slotid; | 380 | tbl->highest_used_slotid = slotid; |
381 | else | 381 | else |
382 | tbl->highest_used_slotid = -1; | 382 | tbl->highest_used_slotid = -1; |
383 | } | 383 | } |
384 | dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__, | 384 | dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__, |
385 | free_slotid, tbl->highest_used_slotid); | 385 | free_slotid, tbl->highest_used_slotid); |
386 | } | 386 | } |
387 | 387 | ||
388 | /* | 388 | /* |
389 | * Signal state manager thread if session fore channel is drained | 389 | * Signal state manager thread if session fore channel is drained |
390 | */ | 390 | */ |
391 | static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) | 391 | static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) |
392 | { | 392 | { |
393 | struct rpc_task *task; | 393 | struct rpc_task *task; |
394 | 394 | ||
395 | if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { | 395 | if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { |
396 | task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq); | 396 | task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq); |
397 | if (task) | 397 | if (task) |
398 | rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); | 398 | rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); |
399 | return; | 399 | return; |
400 | } | 400 | } |
401 | 401 | ||
402 | if (ses->fc_slot_table.highest_used_slotid != -1) | 402 | if (ses->fc_slot_table.highest_used_slotid != -1) |
403 | return; | 403 | return; |
404 | 404 | ||
405 | dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__); | 405 | dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__); |
406 | complete(&ses->fc_slot_table.complete); | 406 | complete(&ses->fc_slot_table.complete); |
407 | } | 407 | } |
408 | 408 | ||
409 | /* | 409 | /* |
410 | * Signal state manager thread if session back channel is drained | 410 | * Signal state manager thread if session back channel is drained |
411 | */ | 411 | */ |
412 | void nfs4_check_drain_bc_complete(struct nfs4_session *ses) | 412 | void nfs4_check_drain_bc_complete(struct nfs4_session *ses) |
413 | { | 413 | { |
414 | if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) || | 414 | if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) || |
415 | ses->bc_slot_table.highest_used_slotid != -1) | 415 | ses->bc_slot_table.highest_used_slotid != -1) |
416 | return; | 416 | return; |
417 | dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__); | 417 | dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__); |
418 | complete(&ses->bc_slot_table.complete); | 418 | complete(&ses->bc_slot_table.complete); |
419 | } | 419 | } |
420 | 420 | ||
421 | static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) | 421 | static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) |
422 | { | 422 | { |
423 | struct nfs4_slot_table *tbl; | 423 | struct nfs4_slot_table *tbl; |
424 | 424 | ||
425 | tbl = &res->sr_session->fc_slot_table; | 425 | tbl = &res->sr_session->fc_slot_table; |
426 | if (!res->sr_slot) { | 426 | if (!res->sr_slot) { |
427 | /* just wake up the next guy waiting since | 427 | /* just wake up the next guy waiting since |
428 | * we may have not consumed a slot after all */ | 428 | * we may have not consumed a slot after all */ |
429 | dprintk("%s: No slot\n", __func__); | 429 | dprintk("%s: No slot\n", __func__); |
430 | return; | 430 | return; |
431 | } | 431 | } |
432 | 432 | ||
433 | spin_lock(&tbl->slot_tbl_lock); | 433 | spin_lock(&tbl->slot_tbl_lock); |
434 | nfs4_free_slot(tbl, res->sr_slot - tbl->slots); | 434 | nfs4_free_slot(tbl, res->sr_slot - tbl->slots); |
435 | nfs4_check_drain_fc_complete(res->sr_session); | 435 | nfs4_check_drain_fc_complete(res->sr_session); |
436 | spin_unlock(&tbl->slot_tbl_lock); | 436 | spin_unlock(&tbl->slot_tbl_lock); |
437 | res->sr_slot = NULL; | 437 | res->sr_slot = NULL; |
438 | } | 438 | } |
439 | 439 | ||
/*
 * nfs41_sequence_done - bookkeeping after a SEQUENCE-bearing reply
 *
 * Updates the slot sequence number and the clientid lease timer, then
 * either frees the slot (returning 1: the task is done with it) or
 * restarts the call while keeping the slot (returning 0), as required
 * for an NFS4ERR_DELAY reply to a resent request.
 */
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation..
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	/* Keep the slot; back off before the restarted call goes out. */
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}
495 | 495 | ||
496 | static int nfs4_sequence_done(struct rpc_task *task, | 496 | static int nfs4_sequence_done(struct rpc_task *task, |
497 | struct nfs4_sequence_res *res) | 497 | struct nfs4_sequence_res *res) |
498 | { | 498 | { |
499 | if (res->sr_session == NULL) | 499 | if (res->sr_session == NULL) |
500 | return 1; | 500 | return 1; |
501 | return nfs41_sequence_done(task, res); | 501 | return nfs41_sequence_done(task, res); |
502 | } | 502 | } |
503 | 503 | ||
504 | /* | 504 | /* |
505 | * nfs4_find_slot - efficiently look for a free slot | 505 | * nfs4_find_slot - efficiently look for a free slot |
506 | * | 506 | * |
507 | * nfs4_find_slot looks for an unset bit in the used_slots bitmap. | 507 | * nfs4_find_slot looks for an unset bit in the used_slots bitmap. |
508 | * If found, we mark the slot as used, update the highest_used_slotid, | 508 | * If found, we mark the slot as used, update the highest_used_slotid, |
509 | * and respectively set up the sequence operation args. | 509 | * and respectively set up the sequence operation args. |
510 | * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise. | 510 | * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise. |
511 | * | 511 | * |
512 | * Note: must be called with under the slot_tbl_lock. | 512 | * Note: must be called with under the slot_tbl_lock. |
513 | */ | 513 | */ |
514 | static u8 | 514 | static u8 |
515 | nfs4_find_slot(struct nfs4_slot_table *tbl) | 515 | nfs4_find_slot(struct nfs4_slot_table *tbl) |
516 | { | 516 | { |
517 | int slotid; | 517 | int slotid; |
518 | u8 ret_id = NFS4_MAX_SLOT_TABLE; | 518 | u8 ret_id = NFS4_MAX_SLOT_TABLE; |
519 | BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE); | 519 | BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE); |
520 | 520 | ||
521 | dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n", | 521 | dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n", |
522 | __func__, tbl->used_slots[0], tbl->highest_used_slotid, | 522 | __func__, tbl->used_slots[0], tbl->highest_used_slotid, |
523 | tbl->max_slots); | 523 | tbl->max_slots); |
524 | slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots); | 524 | slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots); |
525 | if (slotid >= tbl->max_slots) | 525 | if (slotid >= tbl->max_slots) |
526 | goto out; | 526 | goto out; |
527 | __set_bit(slotid, tbl->used_slots); | 527 | __set_bit(slotid, tbl->used_slots); |
528 | if (slotid > tbl->highest_used_slotid) | 528 | if (slotid > tbl->highest_used_slotid) |
529 | tbl->highest_used_slotid = slotid; | 529 | tbl->highest_used_slotid = slotid; |
530 | ret_id = slotid; | 530 | ret_id = slotid; |
531 | out: | 531 | out: |
532 | dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n", | 532 | dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n", |
533 | __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id); | 533 | __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id); |
534 | return ret_id; | 534 | return ret_id; |
535 | } | 535 | } |
536 | 536 | ||
/*
 * nfs41_setup_sequence - reserve a session slot and fill in SEQUENCE args
 * @session: session to take a fore-channel slot from
 * @args: sequence arguments embedded in the compound
 * @res: sequence results embedded in the compound
 * @cache_reply: nonzero if the server should cache the reply
 * @task: the rpc_task issuing the compound
 *
 * Returns 0 with a slot reserved in @res->sr_slot, or -EAGAIN after
 * queueing @task on the slot table waitqueue in any of three cases:
 * the session is draining, FIFO ordering must be preserved, or there
 * is no free slot.
 */
int nfs41_setup_sequence(struct nfs4_session *session,
		struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res,
		int cache_reply,
		struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u8 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		return 0;

	tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/* The state manager will wait until the slot table is empty */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s session is draining\n", __func__);
		return -EAGAIN;
	}

	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/* Don't let this task overtake tasks already queued. */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s enforce FIFO order\n", __func__);
		return -EAGAIN;
	}

	slotid = nfs4_find_slot(tbl);
	if (slotid == NFS4_MAX_SLOT_TABLE) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("<-- %s: no free slots\n", __func__);
		return -EAGAIN;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
	slot = tbl->slots + slotid;
	args->sa_session = session;
	args->sa_slotid = slotid;
	args->sa_cache_this = cache_reply;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);

	res->sr_session = session;
	res->sr_slot = slot;
	res->sr_renewal_time = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
601 | 601 | ||
602 | int nfs4_setup_sequence(const struct nfs_server *server, | 602 | int nfs4_setup_sequence(const struct nfs_server *server, |
603 | struct nfs4_sequence_args *args, | 603 | struct nfs4_sequence_args *args, |
604 | struct nfs4_sequence_res *res, | 604 | struct nfs4_sequence_res *res, |
605 | int cache_reply, | 605 | int cache_reply, |
606 | struct rpc_task *task) | 606 | struct rpc_task *task) |
607 | { | 607 | { |
608 | struct nfs4_session *session = nfs4_get_session(server); | 608 | struct nfs4_session *session = nfs4_get_session(server); |
609 | int ret = 0; | 609 | int ret = 0; |
610 | 610 | ||
611 | if (session == NULL) { | 611 | if (session == NULL) { |
612 | args->sa_session = NULL; | 612 | args->sa_session = NULL; |
613 | res->sr_session = NULL; | 613 | res->sr_session = NULL; |
614 | goto out; | 614 | goto out; |
615 | } | 615 | } |
616 | 616 | ||
617 | dprintk("--> %s clp %p session %p sr_slot %td\n", | 617 | dprintk("--> %s clp %p session %p sr_slot %td\n", |
618 | __func__, session->clp, session, res->sr_slot ? | 618 | __func__, session->clp, session, res->sr_slot ? |
619 | res->sr_slot - session->fc_slot_table.slots : -1); | 619 | res->sr_slot - session->fc_slot_table.slots : -1); |
620 | 620 | ||
621 | ret = nfs41_setup_sequence(session, args, res, cache_reply, | 621 | ret = nfs41_setup_sequence(session, args, res, cache_reply, |
622 | task); | 622 | task); |
623 | out: | 623 | out: |
624 | dprintk("<-- %s status=%d\n", __func__, ret); | 624 | dprintk("<-- %s status=%d\n", __func__, ret); |
625 | return ret; | 625 | return ret; |
626 | } | 626 | } |
627 | 627 | ||
/*
 * Callback data shared between the submitting thread and the
 * nfs41_call_sync rpc_call_ops, so the prepare callback can set up
 * session sequencing for the request.
 */
struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;	/* server the call targets */
	struct nfs4_sequence_args *seq_args;	/* SEQUENCE op arguments */
	struct nfs4_sequence_res *seq_res;	/* SEQUENCE op results */
	int cache_reply;	/* forwarded to nfs41_setup_sequence as sa_cache_this */
};
634 | 634 | ||
635 | static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) | 635 | static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) |
636 | { | 636 | { |
637 | struct nfs41_call_sync_data *data = calldata; | 637 | struct nfs41_call_sync_data *data = calldata; |
638 | 638 | ||
639 | dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); | 639 | dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); |
640 | 640 | ||
641 | if (nfs4_setup_sequence(data->seq_server, data->seq_args, | 641 | if (nfs4_setup_sequence(data->seq_server, data->seq_args, |
642 | data->seq_res, data->cache_reply, task)) | 642 | data->seq_res, data->cache_reply, task)) |
643 | return; | 643 | return; |
644 | rpc_call_start(task); | 644 | rpc_call_start(task); |
645 | } | 645 | } |
646 | 646 | ||
/*
 * As nfs41_call_sync_prepare(), but first raise the task to privileged
 * priority so it can make progress during state recovery.
 */
static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs41_call_sync_prepare(task, calldata);
}
652 | 652 | ||
/* rpc_call_done callback: process the SEQUENCE result for this call. */
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}
659 | 659 | ||
/* Callback ops for ordinary synchronous NFSv4.1 compounds. */
struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};
664 | 664 | ||
/* Callback ops for privileged (state-recovery) synchronous compounds. */
struct rpc_call_ops nfs41_call_priv_sync_ops = {
	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};
669 | 669 | ||
670 | static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, | 670 | static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, |
671 | struct nfs_server *server, | 671 | struct nfs_server *server, |
672 | struct rpc_message *msg, | 672 | struct rpc_message *msg, |
673 | struct nfs4_sequence_args *args, | 673 | struct nfs4_sequence_args *args, |
674 | struct nfs4_sequence_res *res, | 674 | struct nfs4_sequence_res *res, |
675 | int cache_reply, | 675 | int cache_reply, |
676 | int privileged) | 676 | int privileged) |
677 | { | 677 | { |
678 | int ret; | 678 | int ret; |
679 | struct rpc_task *task; | 679 | struct rpc_task *task; |
680 | struct nfs41_call_sync_data data = { | 680 | struct nfs41_call_sync_data data = { |
681 | .seq_server = server, | 681 | .seq_server = server, |
682 | .seq_args = args, | 682 | .seq_args = args, |
683 | .seq_res = res, | 683 | .seq_res = res, |
684 | .cache_reply = cache_reply, | 684 | .cache_reply = cache_reply, |
685 | }; | 685 | }; |
686 | struct rpc_task_setup task_setup = { | 686 | struct rpc_task_setup task_setup = { |
687 | .rpc_client = clnt, | 687 | .rpc_client = clnt, |
688 | .rpc_message = msg, | 688 | .rpc_message = msg, |
689 | .callback_ops = &nfs41_call_sync_ops, | 689 | .callback_ops = &nfs41_call_sync_ops, |
690 | .callback_data = &data | 690 | .callback_data = &data |
691 | }; | 691 | }; |
692 | 692 | ||
693 | res->sr_slot = NULL; | 693 | res->sr_slot = NULL; |
694 | if (privileged) | 694 | if (privileged) |
695 | task_setup.callback_ops = &nfs41_call_priv_sync_ops; | 695 | task_setup.callback_ops = &nfs41_call_priv_sync_ops; |
696 | task = rpc_run_task(&task_setup); | 696 | task = rpc_run_task(&task_setup); |
697 | if (IS_ERR(task)) | 697 | if (IS_ERR(task)) |
698 | ret = PTR_ERR(task); | 698 | ret = PTR_ERR(task); |
699 | else { | 699 | else { |
700 | ret = task->tk_status; | 700 | ret = task->tk_status; |
701 | rpc_put_task(task); | 701 | rpc_put_task(task); |
702 | } | 702 | } |
703 | return ret; | 703 | return ret; |
704 | } | 704 | } |
705 | 705 | ||
/*
 * NFSv4.1 variant of the synchronous call entry point: run the
 * compound with session sequencing, unprivileged.
 */
int _nfs4_call_sync_session(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res,
			    int cache_reply)
{
	return nfs4_call_sync_sequence(clnt, server, msg, args, res, cache_reply, 0);
}
715 | 715 | ||
#else
/*
 * !CONFIG_NFS_V4_1: there are no sessions, so sequence completion is a
 * no-op that always reports "done".
 */
static int nfs4_sequence_done(struct rpc_task *task,
			      struct nfs4_sequence_res *res)
{
	return 1;
}
#endif /* CONFIG_NFS_V4_1 */
723 | 723 | ||
/*
 * NFSv4.0 synchronous call: no session exists, so clear the session
 * pointers and issue the RPC directly.
 */
int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res,
		    int cache_reply)
{
	args->sa_session = res->sr_session = NULL;
	return rpc_call_sync(clnt, msg, 0);
}
734 | 734 | ||
/*
 * Dispatch a synchronous call through the minor-version ops, so v4.0
 * and v4.1 each get their appropriate call_sync implementation.
 */
static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						       args, res, cache_reply);
}
746 | 746 | ||
/*
 * Apply the change_info4 returned by a directory-modifying operation
 * to the parent directory inode: invalidate cached attributes and
 * data, and force a lookup revalidation unless the server performed
 * the change atomically and our cached i_version matches the
 * pre-operation change attribute.  Runs under i_lock to serialise the
 * cache_validity and i_version updates.
 */
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	spin_unlock(&dir->i_lock);
}
758 | 758 | ||
/*
 * State carried across a single NFSv4 OPEN (and, where required,
 * OPEN_CONFIRM) RPC.  Reference counted via @kref so the submitting
 * thread and the asynchronous RPC callbacks can share it; freed by
 * nfs4_opendata_free().
 */
struct nfs4_opendata {
	struct kref kref;		/* lifetime of this structure */
	struct nfs_openargs o_arg;	/* OPEN arguments */
	struct nfs_openres o_res;	/* OPEN results */
	struct nfs_open_confirmargs c_arg;	/* OPEN_CONFIRM arguments */
	struct nfs_open_confirmres c_res;	/* OPEN_CONFIRM results */
	struct nfs4_string owner_name;	/* name storage backing f_attr */
	struct nfs4_string group_name;	/* name storage backing f_attr */
	struct nfs_fattr f_attr;	/* attributes of the opened file */
	struct nfs_fattr dir_attr;	/* post-op parent directory attributes */
	struct dentry *dir;		/* parent dentry (holds a reference) */
	struct dentry *dentry;		/* dentry being opened (holds a reference) */
	struct nfs4_state_owner *owner;	/* open-owner (holds a reference) */
	struct nfs4_state *state;	/* resulting open state, if established */
	struct iattr attrs;		/* attributes to set on O_CREAT */
	unsigned long timestamp;
	unsigned int rpc_done : 1;	/* OPEN rpc completed */
	int rpc_status;			/* status of the OPEN rpc */
	int cancelled;			/* presumably set when the submitter abandons
					 * the call — confirm at call sites */
};
779 | 779 | ||
780 | 780 | ||
/*
 * (Re)initialise the result side of an nfs4_opendata: point the result
 * structures at their embedded storage and reset the fattrs, so the
 * opendata can be (re)used for an OPEN call.
 */
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.dir_attr = &p->dir_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init(&p->dir_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
792 | 792 | ||
/*
 * Allocate and initialise the nfs4_opendata for an OPEN of @dentry.
 * Takes references on the parent dentry, the dentry itself, the
 * superblock and the state owner @sp; all are released by
 * nfs4_opendata_free() when the last kref is dropped.
 * Returns NULL on allocation failure.
 */
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
		goto err_free;
	/* Pin the superblock and dentries for the lifetime of the opendata. */
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.fh = NFS_FH(dir);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id = sp->so_owner_id.id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
	if (flags & O_CREAT) {
		u32 *s;

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));
		/* Create verifier: low 32 bits of jiffies plus the pid. */
		s = (u32 *) p->o_arg.u.verifier.data;
		s[0] = jiffies;
		s[1] = current->pid;
	}
	/* OPEN_CONFIRM (if needed) reuses the OPEN results and seqid. */
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;
err_free:
	kfree(p);
err:
	dput(parent);
	return NULL;
}
845 | 845 | ||
/*
 * kref release function: drop every reference taken by
 * nfs4_opendata_alloc() and free the structure.  The superblock
 * pointer is captured before dput(p->dentry) so that
 * nfs_sb_deactive() can still be called after the dentry reference
 * is gone.
 */
static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p);
}
862 | 862 | ||
863 | static void nfs4_opendata_put(struct nfs4_opendata *p) | 863 | static void nfs4_opendata_put(struct nfs4_opendata *p) |
864 | { | 864 | { |
865 | if (p != NULL) | 865 | if (p != NULL) |
866 | kref_put(&p->kref, nfs4_opendata_free); | 866 | kref_put(&p->kref, nfs4_opendata_free); |
867 | } | 867 | } |
868 | 868 | ||
/*
 * Wait for @task to complete.  Returns the status of
 * rpc_wait_for_completion_task() (non-zero if interrupted).
 */
static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	return rpc_wait_for_completion_task(task);
}
876 | 876 | ||
/*
 * Can an open with share mode @mode and open flags @open_mode be
 * satisfied from the cached open @state without an OPEN rpc?  Requires
 * that the state already holds the matching open-mode bit and that the
 * corresponding share count is non-zero.  O_EXCL opens must always go
 * to the server.
 */
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & O_EXCL)
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		case FMODE_READ:
			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
				&& state->n_rdonly != 0;
			break;
		case FMODE_WRITE:
			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
				&& state->n_wronly != 0;
			break;
		case FMODE_READ|FMODE_WRITE:
			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
				&& state->n_rdwr != 0;
	}
out:
	return ret;
}
899 | 899 | ||
900 | static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) | 900 | static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) |
901 | { | 901 | { |
902 | if (delegation == NULL) | 902 | if (delegation == NULL) |
903 | return 0; | 903 | return 0; |
904 | if ((delegation->type & fmode) != fmode) | 904 | if ((delegation->type & fmode) != fmode) |
905 | return 0; | 905 | return 0; |
906 | if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) | 906 | if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) |
907 | return 0; | 907 | return 0; |
908 | nfs_mark_delegation_referenced(delegation); | 908 | nfs_mark_delegation_referenced(delegation); |
909 | return 1; | 909 | return 1; |
910 | } | 910 | } |
911 | 911 | ||
/*
 * Account one more opener of @state for share mode @fmode and merge
 * the mode into the state's mode bits.  Caller holds the owner's
 * so_lock (see callers using "_locked" convention).
 */
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_WRITE:
			state->n_wronly++;
			break;
		case FMODE_READ:
			state->n_rdonly++;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}
926 | 926 | ||
/*
 * Copy a newly-received open stateid into @state and record which
 * share mode(s) it covers.  The working stateid is only overwritten
 * when the state is not delegated (a delegation stateid takes
 * precedence).  Caller holds state->seqlock for writing.
 */
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
	memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}
943 | 943 | ||
/* Locked wrapper: update the open stateid under state->seqlock. */
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
}
950 | 950 | ||
/*
 * Install new delegation and/or open stateids on @state and bump the
 * share counts for @fmode.  The seqlock serialises the stateid copies;
 * the owner's so_lock protects the share-count/mode update.
 */
static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data));
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}
969 | 969 | ||
/*
 * Update @state with the stateids resulting from an OPEN.  If the
 * inode currently holds a delegation matching @fmode (and, when
 * @delegation is supplied, matching that stateid), the delegation
 * stateid is installed alongside the open stateid; otherwise only the
 * open stateid is used.  Returns 1 if the state was updated, 0 if
 * nothing could be installed (no delegation and no open stateid).
 */
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	/* Re-check under the delegation lock that it is still current
	 * and covers the requested mode. */
	spin_lock(&deleg_cur->lock);
	if (nfsi->delegation != deleg_cur ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	/* Fall back to the plain open stateid when the delegation path
	 * did not apply. */
	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}

	return ret;
}
1008 | 1008 | ||
1009 | 1009 | ||
1010 | static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) | 1010 | static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) |
1011 | { | 1011 | { |
1012 | struct nfs_delegation *delegation; | 1012 | struct nfs_delegation *delegation; |
1013 | 1013 | ||
1014 | rcu_read_lock(); | 1014 | rcu_read_lock(); |
1015 | delegation = rcu_dereference(NFS_I(inode)->delegation); | 1015 | delegation = rcu_dereference(NFS_I(inode)->delegation); |
1016 | if (delegation == NULL || (delegation->type & fmode) == fmode) { | 1016 | if (delegation == NULL || (delegation->type & fmode) == fmode) { |
1017 | rcu_read_unlock(); | 1017 | rcu_read_unlock(); |
1018 | return; | 1018 | return; |
1019 | } | 1019 | } |
1020 | rcu_read_unlock(); | 1020 | rcu_read_unlock(); |
1021 | nfs_inode_return_delegation(inode); | 1021 | nfs_inode_return_delegation(inode); |
1022 | } | 1022 | } |
1023 | 1023 | ||
/*
 * Try to satisfy an OPEN without an rpc, either from already-held open
 * state or from a delegation on the inode.  Loops because the
 * delegation-based stateid update can race with state changes and may
 * need to retry.  On success returns @state with an extra reference;
 * otherwise returns ERR_PTR(-EAGAIN) (caller falls back to a real
 * OPEN) or ERR_PTR of a permission-check error.
 */
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags & O_EXCL;
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		/* Double-checked: test cheaply, then re-test under so_lock. */
		if (can_open_cached(state, fmode, open_mode)) {
			spin_lock(&state->owner->so_lock);
			if (can_open_cached(state, fmode, open_mode)) {
				update_open_stateflags(state, fmode);
				spin_unlock(&state->owner->so_lock);
				goto out_return_state;
			}
			spin_unlock(&state->owner->so_lock);
		}
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
		rcu_read_unlock();
		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
		if (ret != 0)
			goto out;
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}
1068 | 1068 | ||
/*
 * Translate the results of an OPEN call into a referenced nfs4_state.
 *
 * If the RPC never completed (data->rpc_done == 0), fall back to a
 * cached open.  Otherwise instantiate the inode from the returned file
 * handle and attributes, look up (or create) the open state, record any
 * delegation the server handed out, and update the open stateid.
 *
 * Returns the nfs4_state on success, or an ERR_PTR on failure.
 */
static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	struct nfs_delegation *delegation;
	int ret;

	if (!data->rpc_done) {
		/* No OPEN reply to process: try to satisfy from the cache */
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0) {
		int delegation_flags = 0;

		/* Snapshot the current delegation flags under RCU */
		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(inode)->delegation);
		if (delegation)
			delegation_flags = delegation->flags;
		rcu_read_unlock();
		if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
			/* A server must not grant a new delegation in reply
			 * to CLAIM_DELEGATE_CUR: complain, but carry on. */
			pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
					"returning a delegation for "
					"OPEN(CLAIM_DELEGATE_CUR)\n",
					NFS_CLIENT(inode)->cl_server);
		} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
			nfs_inode_set_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
		else
			nfs_inode_reclaim_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
	}

	update_open_stateid(state, &data->o_res.stateid, NULL,
			data->o_arg.fmode);
	iput(inode);	/* the nfs4_state holds its own inode reference */
out:
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
1125 | 1125 | ||
1126 | static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) | 1126 | static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) |
1127 | { | 1127 | { |
1128 | struct nfs_inode *nfsi = NFS_I(state->inode); | 1128 | struct nfs_inode *nfsi = NFS_I(state->inode); |
1129 | struct nfs_open_context *ctx; | 1129 | struct nfs_open_context *ctx; |
1130 | 1130 | ||
1131 | spin_lock(&state->inode->i_lock); | 1131 | spin_lock(&state->inode->i_lock); |
1132 | list_for_each_entry(ctx, &nfsi->open_files, list) { | 1132 | list_for_each_entry(ctx, &nfsi->open_files, list) { |
1133 | if (ctx->state != state) | 1133 | if (ctx->state != state) |
1134 | continue; | 1134 | continue; |
1135 | get_nfs_open_context(ctx); | 1135 | get_nfs_open_context(ctx); |
1136 | spin_unlock(&state->inode->i_lock); | 1136 | spin_unlock(&state->inode->i_lock); |
1137 | return ctx; | 1137 | return ctx; |
1138 | } | 1138 | } |
1139 | spin_unlock(&state->inode->i_lock); | 1139 | spin_unlock(&state->inode->i_lock); |
1140 | return ERR_PTR(-ENOENT); | 1140 | return ERR_PTR(-ENOENT); |
1141 | } | 1141 | } |
1142 | 1142 | ||
1143 | static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state) | 1143 | static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state) |
1144 | { | 1144 | { |
1145 | struct nfs4_opendata *opendata; | 1145 | struct nfs4_opendata *opendata; |
1146 | 1146 | ||
1147 | opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); | 1147 | opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); |
1148 | if (opendata == NULL) | 1148 | if (opendata == NULL) |
1149 | return ERR_PTR(-ENOMEM); | 1149 | return ERR_PTR(-ENOMEM); |
1150 | opendata->state = state; | 1150 | opendata->state = state; |
1151 | atomic_inc(&state->count); | 1151 | atomic_inc(&state->count); |
1152 | return opendata; | 1152 | return opendata; |
1153 | } | 1153 | } |
1154 | 1154 | ||
1155 | static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) | 1155 | static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) |
1156 | { | 1156 | { |
1157 | struct nfs4_state *newstate; | 1157 | struct nfs4_state *newstate; |
1158 | int ret; | 1158 | int ret; |
1159 | 1159 | ||
1160 | opendata->o_arg.open_flags = 0; | 1160 | opendata->o_arg.open_flags = 0; |
1161 | opendata->o_arg.fmode = fmode; | 1161 | opendata->o_arg.fmode = fmode; |
1162 | memset(&opendata->o_res, 0, sizeof(opendata->o_res)); | 1162 | memset(&opendata->o_res, 0, sizeof(opendata->o_res)); |
1163 | memset(&opendata->c_res, 0, sizeof(opendata->c_res)); | 1163 | memset(&opendata->c_res, 0, sizeof(opendata->c_res)); |
1164 | nfs4_init_opendata_res(opendata); | 1164 | nfs4_init_opendata_res(opendata); |
1165 | ret = _nfs4_recover_proc_open(opendata); | 1165 | ret = _nfs4_recover_proc_open(opendata); |
1166 | if (ret != 0) | 1166 | if (ret != 0) |
1167 | return ret; | 1167 | return ret; |
1168 | newstate = nfs4_opendata_to_nfs4_state(opendata); | 1168 | newstate = nfs4_opendata_to_nfs4_state(opendata); |
1169 | if (IS_ERR(newstate)) | 1169 | if (IS_ERR(newstate)) |
1170 | return PTR_ERR(newstate); | 1170 | return PTR_ERR(newstate); |
1171 | nfs4_close_state(newstate, fmode); | 1171 | nfs4_close_state(newstate, fmode); |
1172 | *res = newstate; | 1172 | *res = newstate; |
1173 | return 0; | 1173 | return 0; |
1174 | } | 1174 | } |
1175 | 1175 | ||
/*
 * Recover every open mode (rdwr, wronly, rdonly) currently held on
 * @state by replaying OPEN calls via nfs4_open_recover_helper().
 * Returns -ESTALE if the server handed back a different nfs4_state
 * than the one being recovered, 0 on success, or a negative errno.
 */
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
		/* Re-check the delegation flag under the seqlock before
		 * overwriting the stateid */
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
		write_sequnlock(&state->seqlock);
	}
	return 0;
}
1221 | 1221 | ||
/*
 * OPEN_RECLAIM:
 * 	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	/* CLAIM_PREVIOUS reclaims by file handle, not by name */
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
	opendata->o_arg.fh = NFS_FH(state->inode);
	/* If a delegation needs reclaiming, request it in the same OPEN */
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}
1248 | 1248 | ||
1249 | static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) | 1249 | static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) |
1250 | { | 1250 | { |
1251 | struct nfs_server *server = NFS_SERVER(state->inode); | 1251 | struct nfs_server *server = NFS_SERVER(state->inode); |
1252 | struct nfs4_exception exception = { }; | 1252 | struct nfs4_exception exception = { }; |
1253 | int err; | 1253 | int err; |
1254 | do { | 1254 | do { |
1255 | err = _nfs4_do_open_reclaim(ctx, state); | 1255 | err = _nfs4_do_open_reclaim(ctx, state); |
1256 | if (err != -NFS4ERR_DELAY) | 1256 | if (err != -NFS4ERR_DELAY) |
1257 | break; | 1257 | break; |
1258 | nfs4_handle_exception(server, err, &exception); | 1258 | nfs4_handle_exception(server, err, &exception); |
1259 | } while (exception.retry); | 1259 | } while (exception.retry); |
1260 | return err; | 1260 | return err; |
1261 | } | 1261 | } |
1262 | 1262 | ||
/*
 * Reclaim the open state after a server reboot, using any open context
 * that still refers to @state.
 */
static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx = nfs4_state_find_open_context(state);
	int status;

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	status = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return status;
}
1275 | 1275 | ||
1276 | static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) | 1276 | static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) |
1277 | { | 1277 | { |
1278 | struct nfs4_opendata *opendata; | 1278 | struct nfs4_opendata *opendata; |
1279 | int ret; | 1279 | int ret; |
1280 | 1280 | ||
1281 | opendata = nfs4_open_recoverdata_alloc(ctx, state); | 1281 | opendata = nfs4_open_recoverdata_alloc(ctx, state); |
1282 | if (IS_ERR(opendata)) | 1282 | if (IS_ERR(opendata)) |
1283 | return PTR_ERR(opendata); | 1283 | return PTR_ERR(opendata); |
1284 | opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; | 1284 | opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; |
1285 | memcpy(opendata->o_arg.u.delegation.data, stateid->data, | 1285 | memcpy(opendata->o_arg.u.delegation.data, stateid->data, |
1286 | sizeof(opendata->o_arg.u.delegation.data)); | 1286 | sizeof(opendata->o_arg.u.delegation.data)); |
1287 | ret = nfs4_open_recover(opendata, state); | 1287 | ret = nfs4_open_recover(opendata, state); |
1288 | nfs4_opendata_put(opendata); | 1288 | nfs4_opendata_put(opendata); |
1289 | return ret; | 1289 | return ret; |
1290 | } | 1290 | } |
1291 | 1291 | ||
/*
 * Handle the recall of a delegation: replay the opens it covered,
 * retrying through the standard exception machinery.  Errors that
 * cannot be recovered here are converted into the appropriate
 * state-manager recovery request and reported as success (err = 0)
 * so the delegation return can proceed.
 */
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	struct nfs_server *server = NFS_SERVER(state->inode);
	int err;
	do {
		err = _nfs4_open_delegation_recall(ctx, state, stateid);
		switch (err) {
			case 0:
			case -ENOENT:
			case -ESTALE:
				goto out;
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
			case -NFS4ERR_DEADSESSION:
				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
				goto out;
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_EXPIRED:
				/* Don't recall a delegation if it was lost */
				nfs4_schedule_lease_recovery(server->nfs_client);
				goto out;
			case -ERESTARTSYS:
				/*
				 * The show must go on: exit, but mark the
				 * stateid as needing recovery.
				 */
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_BAD_STATEID:
				nfs4_schedule_stateid_recovery(server, state);
				/* Fall through: report success below */
			case -EKEYEXPIRED:
				/*
				 * User RPCSEC_GSS context has expired.
				 * We cannot recover this stateid now, so
				 * skip it and allow recovery thread to
				 * proceed.
				 */
				/* Fall through */
			case -ENOMEM:
				err = 0;
				goto out;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}
1341 | 1341 | ||
1342 | static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) | 1342 | static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) |
1343 | { | 1343 | { |
1344 | struct nfs4_opendata *data = calldata; | 1344 | struct nfs4_opendata *data = calldata; |
1345 | 1345 | ||
1346 | data->rpc_status = task->tk_status; | 1346 | data->rpc_status = task->tk_status; |
1347 | if (data->rpc_status == 0) { | 1347 | if (data->rpc_status == 0) { |
1348 | memcpy(data->o_res.stateid.data, data->c_res.stateid.data, | 1348 | memcpy(data->o_res.stateid.data, data->c_res.stateid.data, |
1349 | sizeof(data->o_res.stateid.data)); | 1349 | sizeof(data->o_res.stateid.data)); |
1350 | nfs_confirm_seqid(&data->owner->so_seqid, 0); | 1350 | nfs_confirm_seqid(&data->owner->so_seqid, 0); |
1351 | renew_lease(data->o_res.server, data->timestamp); | 1351 | renew_lease(data->o_res.server, data->timestamp); |
1352 | data->rpc_done = 1; | 1352 | data->rpc_done = 1; |
1353 | } | 1353 | } |
1354 | } | 1354 | } |
1355 | 1355 | ||
1356 | static void nfs4_open_confirm_release(void *calldata) | 1356 | static void nfs4_open_confirm_release(void *calldata) |
1357 | { | 1357 | { |
1358 | struct nfs4_opendata *data = calldata; | 1358 | struct nfs4_opendata *data = calldata; |
1359 | struct nfs4_state *state = NULL; | 1359 | struct nfs4_state *state = NULL; |
1360 | 1360 | ||
1361 | /* If this request hasn't been cancelled, do nothing */ | 1361 | /* If this request hasn't been cancelled, do nothing */ |
1362 | if (data->cancelled == 0) | 1362 | if (data->cancelled == 0) |
1363 | goto out_free; | 1363 | goto out_free; |
1364 | /* In case of error, no cleanup! */ | 1364 | /* In case of error, no cleanup! */ |
1365 | if (!data->rpc_done) | 1365 | if (!data->rpc_done) |
1366 | goto out_free; | 1366 | goto out_free; |
1367 | state = nfs4_opendata_to_nfs4_state(data); | 1367 | state = nfs4_opendata_to_nfs4_state(data); |
1368 | if (!IS_ERR(state)) | 1368 | if (!IS_ERR(state)) |
1369 | nfs4_close_state(state, data->o_arg.fmode); | 1369 | nfs4_close_state(state, data->o_arg.fmode); |
1370 | out_free: | 1370 | out_free: |
1371 | nfs4_opendata_put(data); | 1371 | nfs4_opendata_put(data); |
1372 | } | 1372 | } |
1373 | 1373 | ||
/* Callbacks for the asynchronous OPEN_CONFIRM RPC */
static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
};
1378 | 1378 | ||
/*
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
 */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	/* Extra reference dropped by nfs4_open_confirm_release() */
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		/* Interrupted: flag the request so the release callback
		 * cleans up any state the RPC still produces */
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);
	return status;
}
1418 | 1418 | ||
/*
 * RPC prepare callback for OPEN: decide whether an on-the-wire OPEN is
 * actually needed, fill in the sequence/owner fields, and start the
 * call.  Setting task->tk_action to NULL terminates the task without
 * sending anything.
 */
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;

	/* Serialise against other users of the open-owner seqid */
	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
		return;
	/*
	 * Check if we still need to send an OPEN call, or if we can use
	 * a delegation instead.
	 */
	if (data->state != NULL) {
		struct nfs_delegation *delegation;

		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
			goto out_no_action;
		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
		    can_open_delegated(delegation, data->o_arg.fmode))
			goto unlock_no_action;
		rcu_read_unlock();
	}
	/* Update sequence id. */
	data->o_arg.id = sp->so_owner_id.id;
	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
		/* Reboot reclaim uses OPEN_NOATTR and the stored file handle */
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
	}
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->o_arg.server,
				&data->o_arg.seq_args,
				&data->o_res.seq_res, 1, task))
		return;
	rpc_call_start(task);
	return;
unlock_no_action:
	rcu_read_unlock();
out_no_action:
	/* No RPC needed: clearing tk_action ends the task */
	task->tk_action = NULL;

}
1462 | 1462 | ||
/*
 * As nfs4_open_prepare(), but runs the task at privileged RPC priority
 * (used for opens issued on the state-recovery path).
 */
static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_open_prepare(task, calldata);
}
1468 | 1468 | ||
1469 | static void nfs4_open_done(struct rpc_task *task, void *calldata) | 1469 | static void nfs4_open_done(struct rpc_task *task, void *calldata) |
1470 | { | 1470 | { |
1471 | struct nfs4_opendata *data = calldata; | 1471 | struct nfs4_opendata *data = calldata; |
1472 | 1472 | ||
1473 | data->rpc_status = task->tk_status; | 1473 | data->rpc_status = task->tk_status; |
1474 | 1474 | ||
1475 | if (!nfs4_sequence_done(task, &data->o_res.seq_res)) | 1475 | if (!nfs4_sequence_done(task, &data->o_res.seq_res)) |
1476 | return; | 1476 | return; |
1477 | 1477 | ||
1478 | if (task->tk_status == 0) { | 1478 | if (task->tk_status == 0) { |
1479 | switch (data->o_res.f_attr->mode & S_IFMT) { | 1479 | switch (data->o_res.f_attr->mode & S_IFMT) { |
1480 | case S_IFREG: | 1480 | case S_IFREG: |
1481 | break; | 1481 | break; |
1482 | case S_IFLNK: | 1482 | case S_IFLNK: |
1483 | data->rpc_status = -ELOOP; | 1483 | data->rpc_status = -ELOOP; |
1484 | break; | 1484 | break; |
1485 | case S_IFDIR: | 1485 | case S_IFDIR: |
1486 | data->rpc_status = -EISDIR; | 1486 | data->rpc_status = -EISDIR; |
1487 | break; | 1487 | break; |
1488 | default: | 1488 | default: |
1489 | data->rpc_status = -ENOTDIR; | 1489 | data->rpc_status = -ENOTDIR; |
1490 | } | 1490 | } |
1491 | renew_lease(data->o_res.server, data->timestamp); | 1491 | renew_lease(data->o_res.server, data->timestamp); |
1492 | if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) | 1492 | if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) |
1493 | nfs_confirm_seqid(&data->owner->so_seqid, 0); | 1493 | nfs_confirm_seqid(&data->owner->so_seqid, 0); |
1494 | } | 1494 | } |
1495 | data->rpc_done = 1; | 1495 | data->rpc_done = 1; |
1496 | } | 1496 | } |
1497 | 1497 | ||
1498 | static void nfs4_open_release(void *calldata) | 1498 | static void nfs4_open_release(void *calldata) |
1499 | { | 1499 | { |
1500 | struct nfs4_opendata *data = calldata; | 1500 | struct nfs4_opendata *data = calldata; |
1501 | struct nfs4_state *state = NULL; | 1501 | struct nfs4_state *state = NULL; |
1502 | 1502 | ||
1503 | /* If this request hasn't been cancelled, do nothing */ | 1503 | /* If this request hasn't been cancelled, do nothing */ |
1504 | if (data->cancelled == 0) | 1504 | if (data->cancelled == 0) |
1505 | goto out_free; | 1505 | goto out_free; |
1506 | /* In case of error, no cleanup! */ | 1506 | /* In case of error, no cleanup! */ |
1507 | if (data->rpc_status != 0 || !data->rpc_done) | 1507 | if (data->rpc_status != 0 || !data->rpc_done) |
1508 | goto out_free; | 1508 | goto out_free; |
1509 | /* In case we need an open_confirm, no cleanup! */ | 1509 | /* In case we need an open_confirm, no cleanup! */ |
1510 | if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) | 1510 | if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) |
1511 | goto out_free; | 1511 | goto out_free; |
1512 | state = nfs4_opendata_to_nfs4_state(data); | 1512 | state = nfs4_opendata_to_nfs4_state(data); |
1513 | if (!IS_ERR(state)) | 1513 | if (!IS_ERR(state)) |
1514 | nfs4_close_state(state, data->o_arg.fmode); | 1514 | nfs4_close_state(state, data->o_arg.fmode); |
1515 | out_free: | 1515 | out_free: |
1516 | nfs4_opendata_put(data); | 1516 | nfs4_opendata_put(data); |
1517 | } | 1517 | } |
1518 | 1518 | ||
/* Callbacks for an ordinary asynchronous OPEN RPC */
static const struct rpc_call_ops nfs4_open_ops = {
	.rpc_call_prepare = nfs4_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

/* Same as nfs4_open_ops, but prepares the task at privileged priority
 * for the state-recovery path */
static const struct rpc_call_ops nfs4_recover_open_ops = {
	.rpc_call_prepare = nfs4_recover_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};
1530 | 1530 | ||
/*
 * Launch an asynchronous OPEN RPC and wait for it to complete.
 *
 * @data:      opendata describing the OPEN; an extra kref is taken here and
 *             dropped by the rpc_release callback (nfs4_open_release).
 * @isrecover: non-zero selects the recovery callback table.
 *
 * Returns the RPC status on normal completion, or the error from task
 * setup / the interrupted wait.  If the wait is interrupted, ->cancelled
 * is set so that the release callback can clean up the half-completed open.
 */
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
		.rpc_argp = o_arg,
		.rpc_resp = o_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	/* Reference for the RPC task; released in nfs4_open_release() */
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->cancelled = 0;
	if (isrecover)
		task_setup_data.callback_ops = &nfs4_recover_open_ops;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		/* Wait interrupted: mark cancelled so the release callback
		 * knows it must clean up the open state itself. */
		data->cancelled = 1;
		smp_wmb();	/* make 'cancelled' visible before the task is released */
	} else
		status = data->rpc_status;
	rpc_put_task(task);

	return status;
}
1573 | 1573 | ||
/*
 * Recovery variant of _nfs4_proc_open(): replay an OPEN during state
 * recovery.  Unlike the normal path it does not update the directory
 * change attribute, and only refreshes the cached directory attributes
 * via nfs_refresh_inode().
 *
 * Returns 0 on success, or a negative status from the OPEN or the
 * subsequent OPEN_CONFIRM.
 */
static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 1);
	if (status != 0 || !data->rpc_done)
		return status;

	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);

	nfs_refresh_inode(dir, o_res->dir_attr);

	/* The server may require the open to be confirmed */
	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}

	return status;
}
1596 | 1596 | ||
1597 | /* | 1597 | /* |
1598 | * Note: On error, nfs4_proc_open will free the struct nfs4_opendata | 1598 | * Note: On error, nfs4_proc_open will free the struct nfs4_opendata |
1599 | */ | 1599 | */ |
1600 | static int _nfs4_proc_open(struct nfs4_opendata *data) | 1600 | static int _nfs4_proc_open(struct nfs4_opendata *data) |
1601 | { | 1601 | { |
1602 | struct inode *dir = data->dir->d_inode; | 1602 | struct inode *dir = data->dir->d_inode; |
1603 | struct nfs_server *server = NFS_SERVER(dir); | 1603 | struct nfs_server *server = NFS_SERVER(dir); |
1604 | struct nfs_openargs *o_arg = &data->o_arg; | 1604 | struct nfs_openargs *o_arg = &data->o_arg; |
1605 | struct nfs_openres *o_res = &data->o_res; | 1605 | struct nfs_openres *o_res = &data->o_res; |
1606 | int status; | 1606 | int status; |
1607 | 1607 | ||
1608 | status = nfs4_run_open_task(data, 0); | 1608 | status = nfs4_run_open_task(data, 0); |
1609 | if (!data->rpc_done) | 1609 | if (!data->rpc_done) |
1610 | return status; | 1610 | return status; |
1611 | if (status != 0) { | 1611 | if (status != 0) { |
1612 | if (status == -NFS4ERR_BADNAME && | 1612 | if (status == -NFS4ERR_BADNAME && |
1613 | !(o_arg->open_flags & O_CREAT)) | 1613 | !(o_arg->open_flags & O_CREAT)) |
1614 | return -ENOENT; | 1614 | return -ENOENT; |
1615 | return status; | 1615 | return status; |
1616 | } | 1616 | } |
1617 | 1617 | ||
1618 | nfs_fattr_map_and_free_names(server, &data->f_attr); | 1618 | nfs_fattr_map_and_free_names(server, &data->f_attr); |
1619 | 1619 | ||
1620 | if (o_arg->open_flags & O_CREAT) { | 1620 | if (o_arg->open_flags & O_CREAT) { |
1621 | update_changeattr(dir, &o_res->cinfo); | 1621 | update_changeattr(dir, &o_res->cinfo); |
1622 | nfs_post_op_update_inode(dir, o_res->dir_attr); | 1622 | nfs_post_op_update_inode(dir, o_res->dir_attr); |
1623 | } else | 1623 | } else |
1624 | nfs_refresh_inode(dir, o_res->dir_attr); | 1624 | nfs_refresh_inode(dir, o_res->dir_attr); |
1625 | if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) | 1625 | if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) |
1626 | server->caps &= ~NFS_CAP_POSIX_LOCK; | 1626 | server->caps &= ~NFS_CAP_POSIX_LOCK; |
1627 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { | 1627 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { |
1628 | status = _nfs4_proc_open_confirm(data); | 1628 | status = _nfs4_proc_open_confirm(data); |
1629 | if (status != 0) | 1629 | if (status != 0) |
1630 | return status; | 1630 | return status; |
1631 | } | 1631 | } |
1632 | if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) | 1632 | if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) |
1633 | _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr); | 1633 | _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr); |
1634 | return 0; | 1634 | return 0; |
1635 | } | 1635 | } |
1636 | 1636 | ||
/*
 * Wait for any in-progress state recovery on @clp to finish, kicking the
 * state manager whenever the lease is still expired or needs checking.
 * Gives up after NFS4_MAX_LOOP_ON_RECOVER attempts, in which case the
 * last-set error (-EIO) is returned.
 */
static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
	unsigned int loop;
	int ret;

	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = nfs4_wait_clnt_recover(clp);
		if (ret != 0)
			break;
		/* Done once neither recovery condition is pending */
		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
		    !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
			break;
		nfs4_schedule_state_manager(clp);
		ret = -EIO;
	}
	return ret;
}
1654 | 1654 | ||
/* Per-server convenience wrapper around nfs4_client_recover_expired_lease(). */
static int nfs4_recover_expired_lease(struct nfs_server *server)
{
	return nfs4_client_recover_expired_lease(server->nfs_client);
}
1659 | 1659 | ||
/*
 * OPEN_EXPIRED:
 * reclaim state on the server after a network partition.
 * Assumes caller holds the appropriate lock
 *
 * Allocates recovery opendata for @ctx/@state and replays the open.
 * On -ESTALE the dentry is dropped since the file evidently no longer
 * exists on the server.
 */
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;
	int ret;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	ret = nfs4_open_recover(opendata, state);
	if (ret == -ESTALE)
		d_drop(ctx->dentry);
	nfs4_opendata_put(opendata);
	return ret;
}
1679 | 1679 | ||
/*
 * Retry loop around _nfs4_open_expired(): only NFS4ERR_GRACE and
 * NFS4ERR_DELAY are retried (after the standard exception delay);
 * every other result is returned to the caller immediately.
 */
static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_open_expired(ctx, state);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			/* Wait, then let exception.retry drive another pass */
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}
1700 | 1700 | ||
/*
 * Recover an expired open state: find an open context for @state and
 * replay the OPEN through nfs4_do_open_expired().
 */
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = nfs4_do_open_expired(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}
1713 | 1713 | ||
#if defined(CONFIG_NFS_V4_1)
/*
 * NFSv4.1 variant: first ask the server whether the stateid is still
 * valid (TEST_STATEID).  If it is, no recovery is needed; otherwise
 * free the stale stateid and fall back to the v4.0 replay path.
 */
static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	int status;
	struct nfs_server *server = NFS_SERVER(state->inode);

	status = nfs41_test_stateid(server, state);
	if (status == NFS_OK)
		return 0;
	nfs41_free_stateid(server, state);
	return nfs4_open_expired(sp, state);
}
#endif
1727 | 1727 | ||
1728 | /* | 1728 | /* |
1729 | * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* | 1729 | * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* |
1730 | * fields corresponding to attributes that were used to store the verifier. | 1730 | * fields corresponding to attributes that were used to store the verifier. |
1731 | * Make sure we clobber those fields in the later setattr call | 1731 | * Make sure we clobber those fields in the later setattr call |
1732 | */ | 1732 | */ |
1733 | static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) | 1733 | static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) |
1734 | { | 1734 | { |
1735 | if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && | 1735 | if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && |
1736 | !(sattr->ia_valid & ATTR_ATIME_SET)) | 1736 | !(sattr->ia_valid & ATTR_ATIME_SET)) |
1737 | sattr->ia_valid |= ATTR_ATIME; | 1737 | sattr->ia_valid |= ATTR_ATIME; |
1738 | 1738 | ||
1739 | if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && | 1739 | if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && |
1740 | !(sattr->ia_valid & ATTR_MTIME_SET)) | 1740 | !(sattr->ia_valid & ATTR_MTIME_SET)) |
1741 | sattr->ia_valid |= ATTR_MTIME; | 1741 | sattr->ia_valid |= ATTR_MTIME; |
1742 | } | 1742 | } |
1743 | 1743 | ||
/*
 * Returns a referenced nfs4_state
 *
 * Single attempt at the full OPEN sequence: acquire a state owner,
 * recover any expired lease, return incompatible delegations, run the
 * OPEN RPC, and for O_EXCL creates issue the follow-up SETATTR that
 * clobbers the verifier attributes.  On success *res holds a referenced
 * nfs4_state; on failure *res is NULL and a negative status is returned.
 */
static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state = NULL;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *opendata;
	int status;

	/* Protect against reboot recovery conflicts */
	status = -ENOMEM;
	if (!(sp = nfs4_get_state_owner(server, cred))) {
		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
		goto out_err;
	}
	status = nfs4_recover_expired_lease(server);
	if (status != 0)
		goto err_put_state_owner;
	if (dentry->d_inode != NULL)
		nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
	status = -ENOMEM;
	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
	if (opendata == NULL)
		goto err_put_state_owner;

	if (dentry->d_inode != NULL)
		opendata->state = nfs4_get_open_state(dentry->d_inode, sp);

	status = _nfs4_proc_open(opendata);
	if (status != 0)
		goto err_opendata_put;

	state = nfs4_opendata_to_nfs4_state(opendata);
	status = PTR_ERR(state);
	if (IS_ERR(state))
		goto err_opendata_put;
	if (server->caps & NFS_CAP_POSIX_LOCK)
		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);

	if (opendata->o_arg.open_flags & O_EXCL) {
		/* Reset the attributes the server used to store the
		 * create verifier (see nfs4_exclusive_attrset) */
		nfs4_exclusive_attrset(opendata, sattr);

		nfs_fattr_init(opendata->o_res.f_attr);
		status = nfs4_do_setattr(state->inode, cred,
				opendata->o_res.f_attr, sattr,
				state);
		if (status == 0)
			nfs_setattr_update_inode(state->inode, sattr);
		nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
	}
	nfs4_opendata_put(opendata);
	nfs4_put_state_owner(sp);
	*res = state;
	return 0;
err_opendata_put:
	nfs4_opendata_put(opendata);
err_put_state_owner:
	nfs4_put_state_owner(sp);
out_err:
	/* Error labels fall through: every failure path clears *res */
	*res = NULL;
	return status;
}
1808 | 1808 | ||
1809 | 1809 | ||
/*
 * Retry wrapper around _nfs4_do_open().  BAD_SEQID, BAD_STATEID and
 * EAGAIN each force an immediate retry (see the inline comments for
 * why); all other errors go through the standard exception handler,
 * which decides whether to retry or to return an ERR_PTR result.
 */
static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
{
	struct nfs4_exception exception = { };
	struct nfs4_state *res;
	int status;

	do {
		status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
		if (status == 0)
			break;
		/* NOTE: BAD_SEQID means the server and client disagree about the
		 * book-keeping w.r.t. state-changing operations
		 * (OPEN/CLOSE/LOCK/LOCKU...)
		 * It is actually a sign of a bug on the client or on the server.
		 *
		 * If we receive a BAD_SEQID error in the particular case of
		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
		 * have unhashed the old state_owner for us, and that we can
		 * therefore safely retry using a new one. We should still warn
		 * the user though...
		 */
		if (status == -NFS4ERR_BAD_SEQID) {
			printk(KERN_WARNING "NFS: v4 server %s "
					" returned a bad sequence-id error!\n",
					NFS_SERVER(dir)->nfs_client->cl_hostname);
			exception.retry = 1;
			continue;
		}
		/*
		 * BAD_STATEID on OPEN means that the server cancelled our
		 * state before it received the OPEN_CONFIRM.
		 * Recover by retrying the request as per the discussion
		 * on Page 181 of RFC3530.
		 */
		if (status == -NFS4ERR_BAD_STATEID) {
			exception.retry = 1;
			continue;
		}
		if (status == -EAGAIN) {
			/* We must have found a delegation */
			exception.retry = 1;
			continue;
		}
		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
					status, &exception));
	} while (exception.retry);
	return res;
}
1858 | 1858 | ||
/*
 * Issue a single synchronous SETATTR.
 *
 * The stateid is chosen in priority order: a write delegation stateid
 * if one is held, else the open/lock stateid from @state, else the
 * zero stateid (no state held).  On success with a valid @state the
 * lease is renewed using the pre-call timestamp.
 */
static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_setattrargs arg = {
		.fh = NFS_FH(inode),
		.iap = sattr,
		.server = server,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_setattrres res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	unsigned long timestamp = jiffies;	/* sampled before the call for renew_lease() */
	int status;

	nfs_fattr_init(fattr);

	if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
		/* Use that stateid */
	} else if (state != NULL) {
		nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
	} else
		memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (status == 0 && state != NULL)
		renew_lease(server, timestamp);
	return status;
}
1897 | 1897 | ||
1898 | static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, | 1898 | static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, |
1899 | struct nfs_fattr *fattr, struct iattr *sattr, | 1899 | struct nfs_fattr *fattr, struct iattr *sattr, |
1900 | struct nfs4_state *state) | 1900 | struct nfs4_state *state) |
1901 | { | 1901 | { |
1902 | struct nfs_server *server = NFS_SERVER(inode); | 1902 | struct nfs_server *server = NFS_SERVER(inode); |
1903 | struct nfs4_exception exception = { }; | 1903 | struct nfs4_exception exception = { }; |
1904 | int err; | 1904 | int err; |
1905 | do { | 1905 | do { |
1906 | err = nfs4_handle_exception(server, | 1906 | err = nfs4_handle_exception(server, |
1907 | _nfs4_do_setattr(inode, cred, fattr, sattr, state), | 1907 | _nfs4_do_setattr(inode, cred, fattr, sattr, state), |
1908 | &exception); | 1908 | &exception); |
1909 | } while (exception.retry); | 1909 | } while (exception.retry); |
1910 | return err; | 1910 | return err; |
1911 | } | 1911 | } |
1912 | 1912 | ||
/*
 * Per-RPC state for an asynchronous CLOSE/OPEN_DOWNGRADE; torn down by
 * nfs4_free_closedata() when the task is released.
 */
struct nfs4_closedata {
	struct inode *inode;
	struct nfs4_state *state;
	struct nfs_closeargs arg;
	struct nfs_closeres res;
	struct nfs_fattr fattr;
	unsigned long timestamp;	/* passed to renew_lease() on success */
	bool roc;			/* pNFS return-on-close handling is in effect */
	u32 roc_barrier;		/* barrier for pnfs_roc_set_barrier() */
};
1923 | 1923 | ||
/*
 * rpc_release callback for CLOSE: drop all references held by the
 * closedata and free it.  The owner and superblock pointers are grabbed
 * before the state reference is dropped, since releasing the state may
 * invalidate them.
 */
static void nfs4_free_closedata(void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state_owner *sp = calldata->state->owner;
	struct super_block *sb = calldata->state->inode->i_sb;

	if (calldata->roc)
		pnfs_roc_release(calldata->state->inode);
	nfs4_put_open_state(calldata->state);
	nfs_free_seqid(calldata->arg.seqid);
	nfs4_put_state_owner(sp);
	nfs_sb_deactive(sb);
	kfree(calldata);
}
1938 | 1938 | ||
/*
 * After a successful CLOSE/OPEN_DOWNGRADE, clear the open-mode state
 * bits that the remaining fmode no longer covers.  The RDWR bit is
 * always cleared, since any surviving access is now tracked by the
 * single-mode bits.
 */
static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
		fmode_t fmode)
{
	spin_lock(&state->owner->so_lock);
	if (!(fmode & FMODE_READ))
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (!(fmode & FMODE_WRITE))
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	spin_unlock(&state->owner->so_lock);
}
1950 | 1950 | ||
/*
 * rpc_call_done callback for CLOSE/OPEN_DOWNGRADE.  On success, record
 * the new open stateid, renew the lease and clear the no-longer-held
 * mode bits.  Stale/old/bad/expired stateid errors are ignored for a
 * full close (fmode == 0) — the state is gone anyway — but otherwise
 * fall through to the generic async error handler, which may restart
 * the call.
 */
static void nfs4_close_done(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	struct nfs_server *server = NFS_SERVER(calldata->inode);

	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
        /* hmm. we are done with the inode, and in the process of freeing
	 * the state_owner. we keep this around to process errors
	 */
	switch (task->tk_status) {
		case 0:
			if (calldata->roc)
				pnfs_roc_set_barrier(state->inode,
						     calldata->roc_barrier);
			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
			renew_lease(server, calldata->timestamp);
			nfs4_close_clear_stateid_flags(state,
					calldata->arg.fmode);
			break;
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_OLD_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_EXPIRED:
			if (calldata->arg.fmode == 0)
				break;
			/* Deliberate fallthrough for a partial downgrade */
		default:
			if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
				rpc_restart_call_prepare(task);
	}
	nfs_release_seqid(calldata->arg.seqid);
	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
1985 | 1985 | ||
/*
 * rpc_call_prepare callback for the asynchronous close task.
 *
 * Decides, from the remaining open counters on the nfs4_state, whether
 * this task should be sent as an OPEN_DOWNGRADE or a full CLOSE, fills
 * in the RPC arguments accordingly, and starts the call.
 */
static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	int call_close = 0;

	/* Serialize on the open seqid before touching the state */
	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;

	/* Default to a downgrade keeping both modes; modes are stripped
	 * below as the counters allow. */
	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
	spin_lock(&state->owner->so_lock);
	/* Calculate the change in open mode */
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			/* No read-only users left: a call is needed if the
			 * server still holds a READ or RDWR stateid. */
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
			calldata->arg.fmode &= ~FMODE_READ;
		}
		if (state->n_wronly == 0) {
			/* Likewise for write-only users. */
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
			calldata->arg.fmode &= ~FMODE_WRITE;
		}
	}
	spin_unlock(&state->owner->so_lock);

	if (!call_close) {
		/* Note: exit _without_ calling nfs4_close_done */
		task->tk_action = NULL;
		return;
	}

	if (calldata->arg.fmode == 0) {
		/* Both modes dropped: send a full CLOSE instead of a
		 * downgrade. */
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
		if (calldata->roc &&
		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
			/* pNFS return-on-close still draining: park the task
			 * on the server's roc waitqueue and retry later. */
			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
				     task, NULL);
			return;
		}
	}

	nfs_fattr_init(calldata->res.fattr);
	/* Timestamp used by nfs4_close_done() to renew the lease */
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
				&calldata->arg.seq_args, &calldata->res.seq_res,
				1, task))
		return;
	rpc_call_start(task);
}
2037 | 2037 | ||
/* Callback table for the asynchronous CLOSE/OPEN_DOWNGRADE RPC task */
static const struct rpc_call_ops nfs4_close_ops = {
	.rpc_call_prepare = nfs4_close_prepare,
	.rpc_call_done = nfs4_close_done,
	.rpc_release = nfs4_free_closedata,
};
2043 | 2043 | ||
2044 | /* | 2044 | /* |
2045 | * It is possible for data to be read/written from a mem-mapped file | 2045 | * It is possible for data to be read/written from a mem-mapped file |
2046 | * after the sys_close call (which hits the vfs layer as a flush). | 2046 | * after the sys_close call (which hits the vfs layer as a flush). |
2047 | * This means that we can't safely call nfsv4 close on a file until | 2047 | * This means that we can't safely call nfsv4 close on a file until |
2048 | * the inode is cleared. This in turn means that we are not good | 2048 | * the inode is cleared. This in turn means that we are not good |
2049 | * NFSv4 citizens - we do not indicate to the server to update the file's | 2049 | * NFSv4 citizens - we do not indicate to the server to update the file's |
2050 | * share state even when we are done with one of the three share | 2050 | * share state even when we are done with one of the three share |
2051 | * stateid's in the inode. | 2051 | * stateid's in the inode. |
2052 | * | 2052 | * |
2053 | * NOTE: Caller must be holding the sp->so_owner semaphore! | 2053 | * NOTE: Caller must be holding the sp->so_owner semaphore! |
2054 | */ | 2054 | */ |
/*
 * Launch an asynchronous CLOSE (or OPEN_DOWNGRADE, decided later in
 * nfs4_close_prepare()) for @state.
 *
 * @state:    open state to close; this function consumes the caller's
 *            references on the state and its state owner
 * @gfp_mask: allocation flags for the call data and seqid
 * @wait:     if non-zero, block until the RPC task completes
 * @roc:      true if a pNFS return-on-close is pending for this inode
 *
 * On success the calldata is owned by the RPC task and freed by its
 * rpc_release callback (nfs4_free_closedata).  Returns 0 (or the wait
 * status), -ENOMEM on allocation failure, or the rpc_run_task() error.
 */
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_closedata *calldata;
	struct nfs4_state_owner *sp = state->owner;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_close_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	calldata = kzalloc(sizeof(*calldata), gfp_mask);
	if (calldata == NULL)
		goto out;
	calldata->inode = state->inode;
	calldata->state = state;
	calldata->arg.fh = NFS_FH(state->inode);
	calldata->arg.stateid = &state->open_stateid;
	/* Serialization for the sequence id */
	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
	if (calldata->arg.seqid == NULL)
		goto out_free_calldata;
	/* fmode 0 is a placeholder; the real mode is computed in
	 * nfs4_close_prepare() under the owner lock. */
	calldata->arg.fmode = 0;
	calldata->arg.bitmask = server->cache_consistency_bitmask;
	calldata->res.fattr = &calldata->fattr;
	calldata->res.seqid = calldata->arg.seqid;
	calldata->res.server = server;
	calldata->roc = roc;
	/* Pin the superblock until the task releases the calldata */
	nfs_sb_active(calldata->inode->i_sb);

	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		/* NOTE(review): presumably rpc_run_task() invokes the
		 * rpc_release callback on failure so the calldata and the
		 * state/owner references are dropped there — confirm
		 * against the SUNRPC task setup code. */
		return PTR_ERR(task);
	status = 0;
	if (wait)
		status = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return status;
out_free_calldata:
	kfree(calldata);
out:
	/* Allocation failed before a task existed: drop the references
	 * the task's rpc_release callback would otherwise have dropped. */
	if (roc)
		pnfs_roc_release(state->inode);
	nfs4_put_open_state(state);
	nfs4_put_state_owner(sp);
	return status;
}
2113 | 2113 | ||
2114 | static struct inode * | 2114 | static struct inode * |
2115 | nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) | 2115 | nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) |
2116 | { | 2116 | { |
2117 | struct nfs4_state *state; | 2117 | struct nfs4_state *state; |
2118 | 2118 | ||
2119 | /* Protect against concurrent sillydeletes */ | 2119 | /* Protect against concurrent sillydeletes */ |
2120 | state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred); | 2120 | state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred); |
2121 | if (IS_ERR(state)) | 2121 | if (IS_ERR(state)) |
2122 | return ERR_CAST(state); | 2122 | return ERR_CAST(state); |
2123 | ctx->state = state; | 2123 | ctx->state = state; |
2124 | return igrab(state->inode); | 2124 | return igrab(state->inode); |
2125 | } | 2125 | } |
2126 | 2126 | ||
2127 | static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) | 2127 | static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) |
2128 | { | 2128 | { |
2129 | if (ctx->state == NULL) | 2129 | if (ctx->state == NULL) |
2130 | return; | 2130 | return; |
2131 | if (is_sync) | 2131 | if (is_sync) |
2132 | nfs4_close_sync(ctx->state, ctx->mode); | 2132 | nfs4_close_sync(ctx->state, ctx->mode); |
2133 | else | 2133 | else |
2134 | nfs4_close_state(ctx->state, ctx->mode); | 2134 | nfs4_close_state(ctx->state, ctx->mode); |
2135 | } | 2135 | } |
2136 | 2136 | ||
/*
 * Issue a SERVER_CAPS compound to probe which attributes and features
 * the server supports, and cache the results in @server.
 */
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	struct nfs4_server_caps_arg args = {
		.fhandle = fhandle,
	};
	struct nfs4_server_caps_res res = {};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (status == 0) {
		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
		/* Clear every capability bit derived below, then re-set
		 * each one the server's attribute bitmask advertises. */
		server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
				NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
				NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
				NFS_CAP_CTIME|NFS_CAP_MTIME);
		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
			server->caps |= NFS_CAP_ACLS;
		if (res.has_links != 0)
			server->caps |= NFS_CAP_HARDLINKS;
		if (res.has_symlinks != 0)
			server->caps |= NFS_CAP_SYMLINKS;
		if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
			server->caps |= NFS_CAP_FILEID;
		if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
			server->caps |= NFS_CAP_MODE;
		if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
			server->caps |= NFS_CAP_NLINK;
		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
			server->caps |= NFS_CAP_OWNER;
		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
			server->caps |= NFS_CAP_OWNER_GROUP;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
			server->caps |= NFS_CAP_ATIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
			server->caps |= NFS_CAP_CTIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
			server->caps |= NFS_CAP_MTIME;

		/* The cache-consistency bitmask is the subset of supported
		 * attributes used to validate cached data: change, size,
		 * ctime and mtime only. */
		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
		server->acl_bitmask = res.acl_bitmask;
	}

	return status;
}
2189 | 2189 | ||
2190 | int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) | 2190 | int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) |
2191 | { | 2191 | { |
2192 | struct nfs4_exception exception = { }; | 2192 | struct nfs4_exception exception = { }; |
2193 | int err; | 2193 | int err; |
2194 | do { | 2194 | do { |
2195 | err = nfs4_handle_exception(server, | 2195 | err = nfs4_handle_exception(server, |
2196 | _nfs4_server_capabilities(server, fhandle), | 2196 | _nfs4_server_capabilities(server, fhandle), |
2197 | &exception); | 2197 | &exception); |
2198 | } while (exception.retry); | 2198 | } while (exception.retry); |
2199 | return err; | 2199 | return err; |
2200 | } | 2200 | } |
2201 | 2201 | ||
/*
 * Send a LOOKUP_ROOT compound: fetch the filehandle and attributes of
 * the server's pseudo-root ("/") into @fhandle and @info->fattr.
 */
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_lookup_root_arg args = {
		.bitmask = nfs4_fattr_bitmap,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = info->fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(info->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
2222 | 2222 | ||
2223 | static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, | 2223 | static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, |
2224 | struct nfs_fsinfo *info) | 2224 | struct nfs_fsinfo *info) |
2225 | { | 2225 | { |
2226 | struct nfs4_exception exception = { }; | 2226 | struct nfs4_exception exception = { }; |
2227 | int err; | 2227 | int err; |
2228 | do { | 2228 | do { |
2229 | err = _nfs4_lookup_root(server, fhandle, info); | 2229 | err = _nfs4_lookup_root(server, fhandle, info); |
2230 | switch (err) { | 2230 | switch (err) { |
2231 | case 0: | 2231 | case 0: |
2232 | case -NFS4ERR_WRONGSEC: | 2232 | case -NFS4ERR_WRONGSEC: |
2233 | break; | 2233 | break; |
2234 | default: | 2234 | default: |
2235 | err = nfs4_handle_exception(server, err, &exception); | 2235 | err = nfs4_handle_exception(server, err, &exception); |
2236 | } | 2236 | } |
2237 | } while (exception.retry); | 2237 | } while (exception.retry); |
2238 | return err; | 2238 | return err; |
2239 | } | 2239 | } |
2240 | 2240 | ||
2241 | static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, | 2241 | static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, |
2242 | struct nfs_fsinfo *info, rpc_authflavor_t flavor) | 2242 | struct nfs_fsinfo *info, rpc_authflavor_t flavor) |
2243 | { | 2243 | { |
2244 | struct rpc_auth *auth; | 2244 | struct rpc_auth *auth; |
2245 | int ret; | 2245 | int ret; |
2246 | 2246 | ||
2247 | auth = rpcauth_create(flavor, server->client); | 2247 | auth = rpcauth_create(flavor, server->client); |
2248 | if (!auth) { | 2248 | if (!auth) { |
2249 | ret = -EIO; | 2249 | ret = -EIO; |
2250 | goto out; | 2250 | goto out; |
2251 | } | 2251 | } |
2252 | ret = nfs4_lookup_root(server, fhandle, info); | 2252 | ret = nfs4_lookup_root(server, fhandle, info); |
2253 | out: | 2253 | out: |
2254 | return ret; | 2254 | return ret; |
2255 | } | 2255 | } |
2256 | 2256 | ||
/*
 * Probe the server root with each GSS pseudoflavor the kernel supports,
 * falling back to AUTH_NULL last, until one succeeds or a hard error
 * occurs.
 */
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fsinfo *info)
{
	int i, len, status = 0;
	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];

	len = gss_mech_list_pseudoflavors(&flav_array[0]);
	/* NOTE(review): assumes gss_mech_list_pseudoflavors() fills at
	 * most NFS_MAX_SECFLAVORS - 1 entries so the AUTH_NULL append
	 * below stays in bounds -- confirm against its definition. */
	flav_array[len] = RPC_AUTH_NULL;
	len += 1;

	for (i = 0; i < len; i++) {
		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
		/* Wrong flavor or denied: try the next candidate */
		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
			continue;
		break;
	}
	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount.  It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}
2284 | 2284 | ||
2285 | /* | 2285 | /* |
2286 | * get the file handle for the "/" directory on the server | 2286 | * get the file handle for the "/" directory on the server |
2287 | */ | 2287 | */ |
2288 | static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, | 2288 | static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, |
2289 | struct nfs_fsinfo *info) | 2289 | struct nfs_fsinfo *info) |
2290 | { | 2290 | { |
2291 | int minor_version = server->nfs_client->cl_minorversion; | 2291 | int minor_version = server->nfs_client->cl_minorversion; |
2292 | int status = nfs4_lookup_root(server, fhandle, info); | 2292 | int status = nfs4_lookup_root(server, fhandle, info); |
2293 | if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR)) | 2293 | if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR)) |
2294 | /* | 2294 | /* |
2295 | * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM | 2295 | * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM |
2296 | * by nfs4_map_errors() as this function exits. | 2296 | * by nfs4_map_errors() as this function exits. |
2297 | */ | 2297 | */ |
2298 | status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info); | 2298 | status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info); |
2299 | if (status == 0) | 2299 | if (status == 0) |
2300 | status = nfs4_server_capabilities(server, fhandle); | 2300 | status = nfs4_server_capabilities(server, fhandle); |
2301 | if (status == 0) | 2301 | if (status == 0) |
2302 | status = nfs4_do_fsinfo(server, fhandle, info); | 2302 | status = nfs4_do_fsinfo(server, fhandle, info); |
2303 | return nfs4_map_errors(status); | 2303 | return nfs4_map_errors(status); |
2304 | } | 2304 | } |
2305 | 2305 | ||
2306 | static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); | 2306 | static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); |
2307 | /* | 2307 | /* |
2308 | * Get locations and (maybe) other attributes of a referral. | 2308 | * Get locations and (maybe) other attributes of a referral. |
2309 | * Note that we'll actually follow the referral later when | 2309 | * Note that we'll actually follow the referral later when |
2310 | * we detect fsid mismatch in inode revalidation | 2310 | * we detect fsid mismatch in inode revalidation |
2311 | */ | 2311 | */ |
/*
 * Fetch the fs_locations (and attributes) for a referral under @dir.
 *
 * On success the lookup's @fattr is replaced with the referral's
 * attributes and @fhandle is zeroed, so the subsequent nfs_fhget()
 * sees an fsid mismatch and triggers submount handling.  Returns 0,
 * -ENOMEM, -EIO if the server returned the parent's own fsid, or the
 * error from nfs4_proc_fs_locations().
 */
static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
		struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_fs_locations(dir, name, locations, page);
	if (status != 0)
		goto out;
	/* Make sure server returned a different fsid for the referral */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -EIO;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}
2348 | 2348 | ||
/*
 * Send a single GETATTR compound for @fhandle, filling @fattr with the
 * attributes selected by the server's cached attribute bitmask.
 */
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
2368 | 2368 | ||
2369 | static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) | 2369 | static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) |
2370 | { | 2370 | { |
2371 | struct nfs4_exception exception = { }; | 2371 | struct nfs4_exception exception = { }; |
2372 | int err; | 2372 | int err; |
2373 | do { | 2373 | do { |
2374 | err = nfs4_handle_exception(server, | 2374 | err = nfs4_handle_exception(server, |
2375 | _nfs4_proc_getattr(server, fhandle, fattr), | 2375 | _nfs4_proc_getattr(server, fhandle, fattr), |
2376 | &exception); | 2376 | &exception); |
2377 | } while (exception.retry); | 2377 | } while (exception.retry); |
2378 | return err; | 2378 | return err; |
2379 | } | 2379 | } |
2380 | 2380 | ||
2381 | /* | 2381 | /* |
 * The file is not closed if it is opened due to a request to change
2383 | * the size of the file. The open call will not be needed once the | 2383 | * the size of the file. The open call will not be needed once the |
2384 | * VFS layer lookup-intents are implemented. | 2384 | * VFS layer lookup-intents are implemented. |
2385 | * | 2385 | * |
2386 | * Close is called when the inode is destroyed. | 2386 | * Close is called when the inode is destroyed. |
2387 | * If we haven't opened the file for O_WRONLY, we | 2387 | * If we haven't opened the file for O_WRONLY, we |
2388 | * need to in the size_change case to obtain a stateid. | 2388 | * need to in the size_change case to obtain a stateid. |
2389 | * | 2389 | * |
2390 | * Got race? | 2390 | * Got race? |
2391 | * Because OPEN is always done by name in nfsv4, it is | 2391 | * Because OPEN is always done by name in nfsv4, it is |
2392 | * possible that we opened a different file by the same | 2392 | * possible that we opened a different file by the same |
2393 | * name. We can recognize this race condition, but we | 2393 | * name. We can recognize this race condition, but we |
2394 | * can't do anything about it besides returning an error. | 2394 | * can't do anything about it besides returning an error. |
2395 | * | 2395 | * |
2396 | * This will be fixed with VFS changes (lookup-intent). | 2396 | * This will be fixed with VFS changes (lookup-intent). |
2397 | */ | 2397 | */ |
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		  struct iattr *sattr)
{
	struct inode *inode = dentry->d_inode;
	struct rpc_cred *cred = NULL;
	struct nfs4_state *state = NULL;
	int status;

	/* Return the pNFS layout first if the layout driver requires a
	 * layoutreturn on setattr. */
	if (pnfs_ld_layoutret_on_setattr(inode))
		pnfs_return_layout(inode);

	nfs_fattr_init(fattr);

	/* Search for an existing open(O_WRITE) file */
	if (sattr->ia_valid & ATTR_FILE) {
		struct nfs_open_context *ctx;

		/* Reuse the caller's open credential and stateid so a
		 * size-changing SETATTR does not need a fresh OPEN. */
		ctx = nfs_file_open_context(sattr->ia_file);
		if (ctx) {
			cred = ctx->cred;
			state = ctx->state;
		}
	}

	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
	if (status == 0)
		/* Propagate the accepted attribute changes to the inode */
		nfs_setattr_update_inode(inode, sattr);
	return status;
}
2428 | 2428 | ||
/*
 * One LOOKUP round trip: ask the server for the file handle and
 * attributes of @name inside directory @dir.  Error recovery and
 * retries are the caller's (nfs4_proc_lookup's) job.
 */
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
		const struct qstr *name, struct nfs_fh *fhandle,
		struct nfs_fattr *fattr)
{
	struct nfs_server *server = NFS_SERVER(dir);
	int status;
	struct nfs4_lookup_arg args = {
		.bitmask = server->attr_bitmask,
		.dir_fh = NFS_FH(dir),
		.name = name,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = fattr,		/* filled in from the reply */
		.fh = fhandle,		/* filled in from the reply */
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	/* Clear stale attribute state before the server fills it in */
	nfs_fattr_init(fattr);

	dprintk("NFS call lookup %s\n", name->name);
	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("NFS reply lookup: %d\n", status);
	return status;
}
2458 | 2458 | ||
2459 | void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh) | 2459 | void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh) |
2460 | { | 2460 | { |
2461 | memset(fh, 0, sizeof(struct nfs_fh)); | 2461 | memset(fh, 0, sizeof(struct nfs_fh)); |
2462 | fattr->fsid.major = 1; | 2462 | fattr->fsid.major = 1; |
2463 | fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | | 2463 | fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | |
2464 | NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT; | 2464 | NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT; |
2465 | fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; | 2465 | fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; |
2466 | fattr->nlink = 2; | 2466 | fattr->nlink = 2; |
2467 | } | 2467 | } |
2468 | 2468 | ||
2469 | static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, | 2469 | static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, |
2470 | struct nfs_fh *fhandle, struct nfs_fattr *fattr) | 2470 | struct nfs_fh *fhandle, struct nfs_fattr *fattr) |
2471 | { | 2471 | { |
2472 | struct nfs4_exception exception = { }; | 2472 | struct nfs4_exception exception = { }; |
2473 | int err; | 2473 | int err; |
2474 | do { | 2474 | do { |
2475 | int status; | 2475 | int status; |
2476 | 2476 | ||
2477 | status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr); | 2477 | status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr); |
2478 | switch (status) { | 2478 | switch (status) { |
2479 | case -NFS4ERR_BADNAME: | 2479 | case -NFS4ERR_BADNAME: |
2480 | return -ENOENT; | 2480 | return -ENOENT; |
2481 | case -NFS4ERR_MOVED: | 2481 | case -NFS4ERR_MOVED: |
2482 | return nfs4_get_referral(dir, name, fattr, fhandle); | 2482 | return nfs4_get_referral(dir, name, fattr, fhandle); |
2483 | case -NFS4ERR_WRONGSEC: | 2483 | case -NFS4ERR_WRONGSEC: |
2484 | nfs_fixup_secinfo_attributes(fattr, fhandle); | 2484 | nfs_fixup_secinfo_attributes(fattr, fhandle); |
2485 | } | 2485 | } |
2486 | err = nfs4_handle_exception(NFS_SERVER(dir), | 2486 | err = nfs4_handle_exception(NFS_SERVER(dir), |
2487 | status, &exception); | 2487 | status, &exception); |
2488 | } while (exception.retry); | 2488 | } while (exception.retry); |
2489 | return err; | 2489 | return err; |
2490 | } | 2490 | } |
2491 | 2491 | ||
/*
 * One ACCESS round trip: translate the generic MAY_* bits in
 * entry->mask into NFS4_ACCESS_* request bits, ask the server which
 * are granted, and write the granted set back into entry->mask.
 */
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_accessargs args = {
		.fh = NFS_FH(inode),
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_accessres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = entry->cred,
	};
	int mode = entry->mask;
	int status;

	/*
	 * Determine which access bits we want to ask for...
	 */
	if (mode & MAY_READ)
		args.access |= NFS4_ACCESS_READ;
	if (S_ISDIR(inode->i_mode)) {
		/* Directories: "write" covers modifying, extending and
		 * deleting entries; "exec" means lookup. */
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_LOOKUP;
	} else {
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_EXECUTE;
	}

	/* Buffer for the post-op attributes carried in the reply */
	res.fattr = nfs_alloc_fattr();
	if (res.fattr == NULL)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (!status) {
		/* Map the granted NFS4_ACCESS_* bits back to MAY_* */
		entry->mask = 0;
		if (res.access & NFS4_ACCESS_READ)
			entry->mask |= MAY_READ;
		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
			entry->mask |= MAY_WRITE;
		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
			entry->mask |= MAY_EXEC;
		nfs_refresh_inode(inode, res.fattr);
	}
	nfs_free_fattr(res.fattr);
	return status;
}
2546 | 2546 | ||
2547 | static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) | 2547 | static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) |
2548 | { | 2548 | { |
2549 | struct nfs4_exception exception = { }; | 2549 | struct nfs4_exception exception = { }; |
2550 | int err; | 2550 | int err; |
2551 | do { | 2551 | do { |
2552 | err = nfs4_handle_exception(NFS_SERVER(inode), | 2552 | err = nfs4_handle_exception(NFS_SERVER(inode), |
2553 | _nfs4_proc_access(inode, entry), | 2553 | _nfs4_proc_access(inode, entry), |
2554 | &exception); | 2554 | &exception); |
2555 | } while (exception.retry); | 2555 | } while (exception.retry); |
2556 | return err; | 2556 | return err; |
2557 | } | 2557 | } |
2558 | 2558 | ||
2559 | /* | 2559 | /* |
2560 | * TODO: For the time being, we don't try to get any attributes | 2560 | * TODO: For the time being, we don't try to get any attributes |
2561 | * along with any of the zero-copy operations READ, READDIR, | 2561 | * along with any of the zero-copy operations READ, READDIR, |
2562 | * READLINK, WRITE. | 2562 | * READLINK, WRITE. |
2563 | * | 2563 | * |
2564 | * In the case of the first three, we want to put the GETATTR | 2564 | * In the case of the first three, we want to put the GETATTR |
2565 | * after the read-type operation -- this is because it is hard | 2565 | * after the read-type operation -- this is because it is hard |
2566 | * to predict the length of a GETATTR response in v4, and thus | 2566 | * to predict the length of a GETATTR response in v4, and thus |
2567 | * align the READ data correctly. This means that the GETATTR | 2567 | * align the READ data correctly. This means that the GETATTR |
2568 | * may end up partially falling into the page cache, and we should | 2568 | * may end up partially falling into the page cache, and we should |
2569 | * shift it into the 'tail' of the xdr_buf before processing. | 2569 | * shift it into the 'tail' of the xdr_buf before processing. |
2570 | * To do this efficiently, we need to know the total length | 2570 | * To do this efficiently, we need to know the total length |
2571 | * of data received, which doesn't seem to be available outside | 2571 | * of data received, which doesn't seem to be available outside |
2572 | * of the RPC layer. | 2572 | * of the RPC layer. |
2573 | * | 2573 | * |
2574 | * In the case of WRITE, we also want to put the GETATTR after | 2574 | * In the case of WRITE, we also want to put the GETATTR after |
2575 | * the operation -- in this case because we want to make sure | 2575 | * the operation -- in this case because we want to make sure |
2576 | * we get the post-operation mtime and size. This means that | 2576 | * we get the post-operation mtime and size. This means that |
2577 | * we can't use xdr_encode_pages() as written: we need a variant | 2577 | * we can't use xdr_encode_pages() as written: we need a variant |
2578 | * of it which would leave room in the 'tail' iovec. | 2578 | * of it which would leave room in the 'tail' iovec. |
2579 | * | 2579 | * |
2580 | * Both of these changes to the XDR layer would in fact be quite | 2580 | * Both of these changes to the XDR layer would in fact be quite |
2581 | * minor, but I decided to leave them for a subsequent patch. | 2581 | * minor, but I decided to leave them for a subsequent patch. |
2582 | */ | 2582 | */ |
/*
 * One READLINK round trip: read up to @pglen bytes of the symlink
 * target into @page starting at offset @pgbase.
 */
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs4_readlink args = {
		.fh = NFS_FH(inode),
		.pgbase = pgbase,
		.pglen = pglen,
		.pages = &page,		/* single-page receive buffer */
	};
	struct nfs4_readlink_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
2601 | 2601 | ||
2602 | static int nfs4_proc_readlink(struct inode *inode, struct page *page, | 2602 | static int nfs4_proc_readlink(struct inode *inode, struct page *page, |
2603 | unsigned int pgbase, unsigned int pglen) | 2603 | unsigned int pgbase, unsigned int pglen) |
2604 | { | 2604 | { |
2605 | struct nfs4_exception exception = { }; | 2605 | struct nfs4_exception exception = { }; |
2606 | int err; | 2606 | int err; |
2607 | do { | 2607 | do { |
2608 | err = nfs4_handle_exception(NFS_SERVER(inode), | 2608 | err = nfs4_handle_exception(NFS_SERVER(inode), |
2609 | _nfs4_proc_readlink(inode, page, pgbase, pglen), | 2609 | _nfs4_proc_readlink(inode, page, pgbase, pglen), |
2610 | &exception); | 2610 | &exception); |
2611 | } while (exception.retry); | 2611 | } while (exception.retry); |
2612 | return err; | 2612 | return err; |
2613 | } | 2613 | } |
2614 | 2614 | ||
2615 | /* | 2615 | /* |
2616 | * Got race? | 2616 | * Got race? |
2617 | * We will need to arrange for the VFS layer to provide an atomic open. | 2617 | * We will need to arrange for the VFS layer to provide an atomic open. |
2618 | * Until then, this create/open method is prone to inefficiency and race | 2618 | * Until then, this create/open method is prone to inefficiency and race |
2619 | * conditions due to the lookup, create, and open VFS calls from sys_open() | 2619 | * conditions due to the lookup, create, and open VFS calls from sys_open() |
2620 | * placed on the wire. | 2620 | * placed on the wire. |
2621 | * | 2621 | * |
2622 | * Given the above sorry state of affairs, I'm simply sending an OPEN. | 2622 | * Given the above sorry state of affairs, I'm simply sending an OPEN. |
2623 | * The file will be opened again in the subsequent VFS open call | 2623 | * The file will be opened again in the subsequent VFS open call |
2624 | * (nfs4_proc_file_open). | 2624 | * (nfs4_proc_file_open). |
2625 | * | 2625 | * |
2626 | * The open for read will just hang around to be used by any process that | 2626 | * The open for read will just hang around to be used by any process that |
2627 | * opens the file O_RDONLY. This will all be resolved with the VFS changes. | 2627 | * opens the file O_RDONLY. This will all be resolved with the VFS changes. |
2628 | */ | 2628 | */ |
2629 | 2629 | ||
/*
 * Create a regular file by sending an OPEN (see the race discussion in
 * the comment above).  On success the dentry is (re)instantiated with
 * the inode the OPEN returned, and the resulting open state is either
 * handed to @ctx or closed again immediately.
 */
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
		 int flags, struct nfs_open_context *ctx)
{
	struct dentry *de = dentry;
	struct nfs4_state *state;
	struct rpc_cred *cred = NULL;
	fmode_t fmode = 0;
	int status = 0;

	if (ctx != NULL) {
		/* Prefer the open context's credential, dentry and mode */
		cred = ctx->cred;
		de = ctx->dentry;
		fmode = ctx->mode;
	}
	sattr->ia_mode &= ~current_umask();
	state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
	/* Unhash first: the OPEN may have created a different inode
	 * than whatever the dentry currently points at. */
	d_drop(dentry);
	if (IS_ERR(state)) {
		status = PTR_ERR(state);
		goto out;
	}
	/* Rehash with the inode the server actually opened */
	d_add(dentry, igrab(state->inode));
	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	if (ctx != NULL)
		/* Hand ownership of the open state to the context */
		ctx->state = state;
	else
		/* Nobody to keep the state: close it synchronously */
		nfs4_close_sync(state, fmode);
out:
	return status;
}
2661 | 2661 | ||
/*
 * One REMOVE round trip: unlink @name from directory @dir, then update
 * the cached directory change attribute and post-op attributes on
 * success.
 */
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs args = {
		.fh = NFS_FH(dir),
		.name.len = name->len,
		.name.name = name->name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_removeres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	/* Buffer for the directory's post-op attributes */
	res.dir_attr = nfs_alloc_fattr();
	if (res.dir_attr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(dir, res.dir_attr);
	}
	nfs_free_fattr(res.dir_attr);
out:
	return status;
}
2694 | 2694 | ||
2695 | static int nfs4_proc_remove(struct inode *dir, struct qstr *name) | 2695 | static int nfs4_proc_remove(struct inode *dir, struct qstr *name) |
2696 | { | 2696 | { |
2697 | struct nfs4_exception exception = { }; | 2697 | struct nfs4_exception exception = { }; |
2698 | int err; | 2698 | int err; |
2699 | do { | 2699 | do { |
2700 | err = nfs4_handle_exception(NFS_SERVER(dir), | 2700 | err = nfs4_handle_exception(NFS_SERVER(dir), |
2701 | _nfs4_proc_remove(dir, name), | 2701 | _nfs4_proc_remove(dir, name), |
2702 | &exception); | 2702 | &exception); |
2703 | } while (exception.retry); | 2703 | } while (exception.retry); |
2704 | return err; | 2704 | return err; |
2705 | } | 2705 | } |
2706 | 2706 | ||
/*
 * Fill in the NFSv4-specific parts of an asynchronous unlink message
 * before it is transmitted: attribute bitmask, result server and the
 * v4 REMOVE procedure.
 */
static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs *args = msg->rpc_argp;
	struct nfs_removeres *res = msg->rpc_resp;

	args->bitmask = server->cache_consistency_bitmask;
	res->server = server;
	/* NOTE(review): seq_res is reset so no stale session slot is
	 * referenced when the call is (re)sent — confirm against the
	 * session slot allocation path. */
	res->seq_res.sr_slot = NULL;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
}
2718 | 2718 | ||
/*
 * Completion callback for an asynchronous unlink.  Returns 0 when the
 * call must be retried (sequencing incomplete or a recoverable error),
 * 1 when the result is final; directory caches are updated on the
 * final path.
 */
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
	struct nfs_removeres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;
	update_changeattr(dir, &res->cinfo);
	nfs_post_op_update_inode(dir, res->dir_attr);
	return 1;
}
2731 | 2731 | ||
/*
 * Fill in the NFSv4-specific parts of an asynchronous rename message
 * before it is transmitted: the v4 RENAME procedure, attribute bitmask
 * and result server.
 */
static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_renameargs *arg = msg->rpc_argp;
	struct nfs_renameres *res = msg->rpc_resp;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
	/* NOTE(review): unlink_setup uses cache_consistency_bitmask here
	 * while this uses attr_bitmask — verify the difference is
	 * intentional. */
	arg->bitmask = server->attr_bitmask;
	res->server = server;
}
2742 | 2742 | ||
/*
 * Completion callback for an asynchronous rename.  Returns 0 when the
 * call must be retried, 1 when the result is final; both directories'
 * cached change attributes and post-op attributes are updated on the
 * final path.
 */
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
				 struct inode *new_dir)
{
	struct nfs_renameres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;

	update_changeattr(old_dir, &res->old_cinfo);
	nfs_post_op_update_inode(old_dir, res->old_fattr);
	update_changeattr(new_dir, &res->new_cinfo);
	nfs_post_op_update_inode(new_dir, res->new_fattr);
	return 1;
}
2759 | 2759 | ||
/*
 * One RENAME round trip: move @old_name in @old_dir to @new_name in
 * @new_dir, updating both directories' cached attributes on success.
 * Uses goto-based cleanup so the two fattr buffers are always freed.
 */
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_server *server = NFS_SERVER(old_dir);
	struct nfs_renameargs arg = {
		.old_dir = NFS_FH(old_dir),
		.new_dir = NFS_FH(new_dir),
		.old_name = old_name,
		.new_name = new_name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_renameres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	/* Post-op attribute buffers for both directories */
	res.old_fattr = nfs_alloc_fattr();
	res.new_fattr = nfs_alloc_fattr();
	if (res.old_fattr == NULL || res.new_fattr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(old_dir, &res.old_cinfo);
		nfs_post_op_update_inode(old_dir, res.old_fattr);
		update_changeattr(new_dir, &res.new_cinfo);
		nfs_post_op_update_inode(new_dir, res.new_fattr);
	}
out:
	/* nfs_free_fattr(NULL) is safe, so partial allocation is fine */
	nfs_free_fattr(res.new_fattr);
	nfs_free_fattr(res.old_fattr);
	return status;
}
2798 | 2798 | ||
2799 | static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, | 2799 | static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, |
2800 | struct inode *new_dir, struct qstr *new_name) | 2800 | struct inode *new_dir, struct qstr *new_name) |
2801 | { | 2801 | { |
2802 | struct nfs4_exception exception = { }; | 2802 | struct nfs4_exception exception = { }; |
2803 | int err; | 2803 | int err; |
2804 | do { | 2804 | do { |
2805 | err = nfs4_handle_exception(NFS_SERVER(old_dir), | 2805 | err = nfs4_handle_exception(NFS_SERVER(old_dir), |
2806 | _nfs4_proc_rename(old_dir, old_name, | 2806 | _nfs4_proc_rename(old_dir, old_name, |
2807 | new_dir, new_name), | 2807 | new_dir, new_name), |
2808 | &exception); | 2808 | &exception); |
2809 | } while (exception.retry); | 2809 | } while (exception.retry); |
2810 | return err; | 2810 | return err; |
2811 | } | 2811 | } |
2812 | 2812 | ||
/*
 * One LINK round trip: create @name in directory @dir as a hard link
 * to @inode, updating the cached attributes of both the directory and
 * the linked inode on success.
 */
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_link_arg arg = {
		.fh = NFS_FH(inode),
		.dir_fh = NFS_FH(dir),
		.name = name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_link_res res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	/* Post-op attribute buffers for the inode and the directory */
	res.fattr = nfs_alloc_fattr();
	res.dir_attr = nfs_alloc_fattr();
	if (res.fattr == NULL || res.dir_attr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(dir, res.dir_attr);
		nfs_post_op_update_inode(inode, res.fattr);
	}
out:
	/* nfs_free_fattr(NULL) is safe, so partial allocation is fine */
	nfs_free_fattr(res.dir_attr);
	nfs_free_fattr(res.fattr);
	return status;
}
2848 | 2848 | ||
2849 | static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) | 2849 | static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) |
2850 | { | 2850 | { |
2851 | struct nfs4_exception exception = { }; | 2851 | struct nfs4_exception exception = { }; |
2852 | int err; | 2852 | int err; |
2853 | do { | 2853 | do { |
2854 | err = nfs4_handle_exception(NFS_SERVER(inode), | 2854 | err = nfs4_handle_exception(NFS_SERVER(inode), |
2855 | _nfs4_proc_link(inode, dir, name), | 2855 | _nfs4_proc_link(inode, dir, name), |
2856 | &exception); | 2856 | &exception); |
2857 | } while (exception.retry); | 2857 | } while (exception.retry); |
2858 | return err; | 2858 | return err; |
2859 | } | 2859 | } |
2860 | 2860 | ||
/*
 * Single allocation bundling everything a CREATE call needs: the RPC
 * message, its argument/result structures, and the storage the result
 * pointers (res.fh, res.fattr, res.dir_fattr) point into.
 */
struct nfs4_createdata {
	struct rpc_message msg;
	struct nfs4_create_arg arg;
	struct nfs4_create_res res;
	struct nfs_fh fh;		/* backing store for res.fh */
	struct nfs_fattr fattr;		/* backing store for res.fattr */
	struct nfs_fattr dir_fattr;	/* backing store for res.dir_fattr */
};
2869 | 2869 | ||
/*
 * Allocate and pre-populate a nfs4_createdata for a CREATE compound in
 * directory @dir.  @ftype is the NFSv4 object type (NF4DIR, NF4LNK, ...).
 * Returns NULL on allocation failure; free with nfs4_free_createdata().
 */
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
		struct qstr *name, struct iattr *sattr, u32 ftype)
{
	struct nfs4_createdata *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data != NULL) {
		struct nfs_server *server = NFS_SERVER(dir);

		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
		data->msg.rpc_argp = &data->arg;
		data->msg.rpc_resp = &data->res;
		data->arg.dir_fh = NFS_FH(dir);
		data->arg.server = server;
		data->arg.name = name;
		data->arg.attrs = sattr;
		data->arg.ftype = ftype;
		data->arg.bitmask = server->attr_bitmask;
		data->res.server = server;
		/* Point the result buffers at the embedded storage. */
		data->res.fh = &data->fh;
		data->res.fattr = &data->fattr;
		data->res.dir_fattr = &data->dir_fattr;
		nfs_fattr_init(data->res.fattr);
		nfs_fattr_init(data->res.dir_fattr);
	}
	return data;
}
2897 | 2897 | ||
/*
 * Issue the prepared CREATE compound synchronously.  On success, record
 * the parent directory's change attribute, refresh its cached
 * attributes, and instantiate @dentry with the new object's filehandle
 * and attributes.
 */
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
			&data->arg.seq_args, &data->res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &data->res.dir_cinfo);
		nfs_post_op_update_inode(dir, data->res.dir_fattr);
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	}
	return status;
}
2909 | 2909 | ||
/* Release a nfs4_createdata obtained from nfs4_alloc_createdata(). */
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
	kfree(data);
}
2914 | 2914 | ||
/*
 * One attempt at creating symlink @dentry in @dir; the target path is in
 * @page (@len bytes).  Returns -ENAMETOOLONG when the target exceeds the
 * protocol limit; NFS4ERR_* retries are handled by nfs4_proc_symlink().
 */
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct page *page, unsigned int len, struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENAMETOOLONG;

	if (len > NFS4_MAXPATHLEN)
		goto out;

	status = -ENOMEM;
	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
	if (data == NULL)
		goto out;

	/* A symlink CREATE carries the link target as page data. */
	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
	data->arg.u.symlink.pages = &page;
	data->arg.u.symlink.len = len;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
2939 | 2939 | ||
2940 | static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, | 2940 | static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, |
2941 | struct page *page, unsigned int len, struct iattr *sattr) | 2941 | struct page *page, unsigned int len, struct iattr *sattr) |
2942 | { | 2942 | { |
2943 | struct nfs4_exception exception = { }; | 2943 | struct nfs4_exception exception = { }; |
2944 | int err; | 2944 | int err; |
2945 | do { | 2945 | do { |
2946 | err = nfs4_handle_exception(NFS_SERVER(dir), | 2946 | err = nfs4_handle_exception(NFS_SERVER(dir), |
2947 | _nfs4_proc_symlink(dir, dentry, page, | 2947 | _nfs4_proc_symlink(dir, dentry, page, |
2948 | len, sattr), | 2948 | len, sattr), |
2949 | &exception); | 2949 | &exception); |
2950 | } while (exception.retry); | 2950 | } while (exception.retry); |
2951 | return err; | 2951 | return err; |
2952 | } | 2952 | } |
2953 | 2953 | ||
2954 | static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, | 2954 | static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, |
2955 | struct iattr *sattr) | 2955 | struct iattr *sattr) |
2956 | { | 2956 | { |
2957 | struct nfs4_createdata *data; | 2957 | struct nfs4_createdata *data; |
2958 | int status = -ENOMEM; | 2958 | int status = -ENOMEM; |
2959 | 2959 | ||
2960 | data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); | 2960 | data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); |
2961 | if (data == NULL) | 2961 | if (data == NULL) |
2962 | goto out; | 2962 | goto out; |
2963 | 2963 | ||
2964 | status = nfs4_do_create(dir, dentry, data); | 2964 | status = nfs4_do_create(dir, dentry, data); |
2965 | 2965 | ||
2966 | nfs4_free_createdata(data); | 2966 | nfs4_free_createdata(data); |
2967 | out: | 2967 | out: |
2968 | return status; | 2968 | return status; |
2969 | } | 2969 | } |
2970 | 2970 | ||
2971 | static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, | 2971 | static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, |
2972 | struct iattr *sattr) | 2972 | struct iattr *sattr) |
2973 | { | 2973 | { |
2974 | struct nfs4_exception exception = { }; | 2974 | struct nfs4_exception exception = { }; |
2975 | int err; | 2975 | int err; |
2976 | 2976 | ||
2977 | sattr->ia_mode &= ~current_umask(); | 2977 | sattr->ia_mode &= ~current_umask(); |
2978 | do { | 2978 | do { |
2979 | err = nfs4_handle_exception(NFS_SERVER(dir), | 2979 | err = nfs4_handle_exception(NFS_SERVER(dir), |
2980 | _nfs4_proc_mkdir(dir, dentry, sattr), | 2980 | _nfs4_proc_mkdir(dir, dentry, sattr), |
2981 | &exception); | 2981 | &exception); |
2982 | } while (exception.retry); | 2982 | } while (exception.retry); |
2983 | return err; | 2983 | return err; |
2984 | } | 2984 | } |
2985 | 2985 | ||
/*
 * One READDIR attempt against the directory behind @dentry.  @pages
 * receives the XDR-encoded entries; @cookie plus the cached cookie
 * verifier resume a previous listing.  On success the directory's
 * verifier is refreshed and the return value is the byte count of entry
 * data (biased by the page base offset).
 */
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct inode *dir = dentry->d_inode;
	struct nfs4_readdir_arg args = {
		.fh = NFS_FH(dir),
		.pages = pages,
		.pgbase = 0,
		.count = count,
		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
		.plus = plus,
	};
	struct nfs4_readdir_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
			dentry->d_parent->d_name.name,
			dentry->d_name.name,
			(unsigned long long)cookie);
	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
	res.pgbase = args.pgbase;
	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
	if (status >= 0) {
		/* Remember the verifier so the next call can resume. */
		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
		status += args.pgbase;
	}

	/* Reading the directory touched atime on the server. */
	nfs_invalidate_atime(dir);

	dprintk("%s: returns %d\n", __func__, status);
	return status;
}
3024 | 3024 | ||
3025 | static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, | 3025 | static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, |
3026 | u64 cookie, struct page **pages, unsigned int count, int plus) | 3026 | u64 cookie, struct page **pages, unsigned int count, int plus) |
3027 | { | 3027 | { |
3028 | struct nfs4_exception exception = { }; | 3028 | struct nfs4_exception exception = { }; |
3029 | int err; | 3029 | int err; |
3030 | do { | 3030 | do { |
3031 | err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), | 3031 | err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), |
3032 | _nfs4_proc_readdir(dentry, cred, cookie, | 3032 | _nfs4_proc_readdir(dentry, cred, cookie, |
3033 | pages, count, plus), | 3033 | pages, count, plus), |
3034 | &exception); | 3034 | &exception); |
3035 | } while (exception.retry); | 3035 | } while (exception.retry); |
3036 | return err; | 3036 | return err; |
3037 | } | 3037 | } |
3038 | 3038 | ||
3039 | static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, | 3039 | static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, |
3040 | struct iattr *sattr, dev_t rdev) | 3040 | struct iattr *sattr, dev_t rdev) |
3041 | { | 3041 | { |
3042 | struct nfs4_createdata *data; | 3042 | struct nfs4_createdata *data; |
3043 | int mode = sattr->ia_mode; | 3043 | int mode = sattr->ia_mode; |
3044 | int status = -ENOMEM; | 3044 | int status = -ENOMEM; |
3045 | 3045 | ||
3046 | BUG_ON(!(sattr->ia_valid & ATTR_MODE)); | 3046 | BUG_ON(!(sattr->ia_valid & ATTR_MODE)); |
3047 | BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode)); | 3047 | BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode)); |
3048 | 3048 | ||
3049 | data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); | 3049 | data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); |
3050 | if (data == NULL) | 3050 | if (data == NULL) |
3051 | goto out; | 3051 | goto out; |
3052 | 3052 | ||
3053 | if (S_ISFIFO(mode)) | 3053 | if (S_ISFIFO(mode)) |
3054 | data->arg.ftype = NF4FIFO; | 3054 | data->arg.ftype = NF4FIFO; |
3055 | else if (S_ISBLK(mode)) { | 3055 | else if (S_ISBLK(mode)) { |
3056 | data->arg.ftype = NF4BLK; | 3056 | data->arg.ftype = NF4BLK; |
3057 | data->arg.u.device.specdata1 = MAJOR(rdev); | 3057 | data->arg.u.device.specdata1 = MAJOR(rdev); |
3058 | data->arg.u.device.specdata2 = MINOR(rdev); | 3058 | data->arg.u.device.specdata2 = MINOR(rdev); |
3059 | } | 3059 | } |
3060 | else if (S_ISCHR(mode)) { | 3060 | else if (S_ISCHR(mode)) { |
3061 | data->arg.ftype = NF4CHR; | 3061 | data->arg.ftype = NF4CHR; |
3062 | data->arg.u.device.specdata1 = MAJOR(rdev); | 3062 | data->arg.u.device.specdata1 = MAJOR(rdev); |
3063 | data->arg.u.device.specdata2 = MINOR(rdev); | 3063 | data->arg.u.device.specdata2 = MINOR(rdev); |
3064 | } | 3064 | } |
3065 | 3065 | ||
3066 | status = nfs4_do_create(dir, dentry, data); | 3066 | status = nfs4_do_create(dir, dentry, data); |
3067 | 3067 | ||
3068 | nfs4_free_createdata(data); | 3068 | nfs4_free_createdata(data); |
3069 | out: | 3069 | out: |
3070 | return status; | 3070 | return status; |
3071 | } | 3071 | } |
3072 | 3072 | ||
3073 | static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, | 3073 | static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, |
3074 | struct iattr *sattr, dev_t rdev) | 3074 | struct iattr *sattr, dev_t rdev) |
3075 | { | 3075 | { |
3076 | struct nfs4_exception exception = { }; | 3076 | struct nfs4_exception exception = { }; |
3077 | int err; | 3077 | int err; |
3078 | 3078 | ||
3079 | sattr->ia_mode &= ~current_umask(); | 3079 | sattr->ia_mode &= ~current_umask(); |
3080 | do { | 3080 | do { |
3081 | err = nfs4_handle_exception(NFS_SERVER(dir), | 3081 | err = nfs4_handle_exception(NFS_SERVER(dir), |
3082 | _nfs4_proc_mknod(dir, dentry, sattr, rdev), | 3082 | _nfs4_proc_mknod(dir, dentry, sattr, rdev), |
3083 | &exception); | 3083 | &exception); |
3084 | } while (exception.retry); | 3084 | } while (exception.retry); |
3085 | return err; | 3085 | return err; |
3086 | } | 3086 | } |
3087 | 3087 | ||
/*
 * One STATFS attempt: fetch filesystem usage attributes for the volume
 * behind @fhandle into @fsstat.  Retries are handled by the wrapper.
 */
static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsstat *fsstat)
{
	struct nfs4_statfs_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_statfs_res res = {
		.fsstat = fsstat,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fsstat->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
3107 | 3107 | ||
3108 | static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) | 3108 | static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) |
3109 | { | 3109 | { |
3110 | struct nfs4_exception exception = { }; | 3110 | struct nfs4_exception exception = { }; |
3111 | int err; | 3111 | int err; |
3112 | do { | 3112 | do { |
3113 | err = nfs4_handle_exception(server, | 3113 | err = nfs4_handle_exception(server, |
3114 | _nfs4_proc_statfs(server, fhandle, fsstat), | 3114 | _nfs4_proc_statfs(server, fhandle, fsstat), |
3115 | &exception); | 3115 | &exception); |
3116 | } while (exception.retry); | 3116 | } while (exception.retry); |
3117 | return err; | 3117 | return err; |
3118 | } | 3118 | } |
3119 | 3119 | ||
/*
 * One FSINFO attempt: fetch static filesystem information (transfer
 * sizes, lease time, ...) for the volume behind @fhandle into @fsinfo.
 */
static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *fsinfo)
{
	struct nfs4_fsinfo_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_fsinfo_res res = {
		.fsinfo = fsinfo,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
3138 | 3138 | ||
3139 | static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) | 3139 | static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) |
3140 | { | 3140 | { |
3141 | struct nfs4_exception exception = { }; | 3141 | struct nfs4_exception exception = { }; |
3142 | int err; | 3142 | int err; |
3143 | 3143 | ||
3144 | do { | 3144 | do { |
3145 | err = nfs4_handle_exception(server, | 3145 | err = nfs4_handle_exception(server, |
3146 | _nfs4_do_fsinfo(server, fhandle, fsinfo), | 3146 | _nfs4_do_fsinfo(server, fhandle, fsinfo), |
3147 | &exception); | 3147 | &exception); |
3148 | } while (exception.retry); | 3148 | } while (exception.retry); |
3149 | return err; | 3149 | return err; |
3150 | } | 3150 | } |
3151 | 3151 | ||
/* rpc_ops entry point: initialize the fattr, then fetch fsinfo. */
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	nfs_fattr_init(fsinfo->fattr);
	return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
3157 | 3157 | ||
/*
 * One PATHCONF attempt: fetch name/link limits for the object behind
 * @fhandle.  If the server advertises none of the pathconf attributes,
 * report zeroed (i.e. "no limit known") values without going on the wire.
 */
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_pathconf_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_pathconf_res res = {
		.pathconf = pathconf,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	/* None of the pathconf attributes are mandatory to implement */
	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
		memset(pathconf, 0, sizeof(*pathconf));
		return 0;
	}

	nfs_fattr_init(pathconf->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
3183 | 3183 | ||
3184 | static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, | 3184 | static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, |
3185 | struct nfs_pathconf *pathconf) | 3185 | struct nfs_pathconf *pathconf) |
3186 | { | 3186 | { |
3187 | struct nfs4_exception exception = { }; | 3187 | struct nfs4_exception exception = { }; |
3188 | int err; | 3188 | int err; |
3189 | 3189 | ||
3190 | do { | 3190 | do { |
3191 | err = nfs4_handle_exception(server, | 3191 | err = nfs4_handle_exception(server, |
3192 | _nfs4_proc_pathconf(server, fhandle, pathconf), | 3192 | _nfs4_proc_pathconf(server, fhandle, pathconf), |
3193 | &exception); | 3193 | &exception); |
3194 | } while (exception.retry); | 3194 | } while (exception.retry); |
3195 | return err; | 3195 | return err; |
3196 | } | 3196 | } |
3197 | 3197 | ||
/* Common read completion: the server-side read changed atime, so drop
 * our cached copy. */
void __nfs4_read_done_cb(struct nfs_read_data *data)
{
	nfs_invalidate_atime(data->inode);
}
3202 | 3202 | ||
/*
 * Completion callback for a READ sent to the MDS.  Schedules a restart
 * and returns -EAGAIN when the async error handler asks for a retry;
 * otherwise invalidates cached atime and, on a successful transfer,
 * renews the client lease.
 */
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	__nfs4_read_done_cb(data);
	/* A positive tk_status is the byte count: proof the server replied. */
	if (task->tk_status > 0)
		renew_lease(server, data->timestamp);
	return 0;
}
3217 | 3217 | ||
3218 | static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) | 3218 | static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) |
3219 | { | 3219 | { |
3220 | 3220 | ||
3221 | dprintk("--> %s\n", __func__); | 3221 | dprintk("--> %s\n", __func__); |
3222 | 3222 | ||
3223 | if (!nfs4_sequence_done(task, &data->res.seq_res)) | 3223 | if (!nfs4_sequence_done(task, &data->res.seq_res)) |
3224 | return -EAGAIN; | 3224 | return -EAGAIN; |
3225 | 3225 | ||
3226 | return data->read_done_cb ? data->read_done_cb(task, data) : | 3226 | return data->read_done_cb ? data->read_done_cb(task, data) : |
3227 | nfs4_read_done_cb(task, data); | 3227 | nfs4_read_done_cb(task, data); |
3228 | } | 3228 | } |
3229 | 3229 | ||
/* Prepare an outgoing READ: stamp the lease-renewal time, install the
 * default completion callback, and select the READ procedure. */
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
	data->timestamp = jiffies;
	data->read_done_cb = nfs4_read_done_cb;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
}
3236 | 3236 | ||
/* Reset the nfs_read_data to send the read to the MDS. */
void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	/* Drop the pNFS layout segment: this I/O now bypasses the DS. */
	put_lseg(data->lseg);
	data->lseg = NULL;
	/* offsets will differ in the dense stripe case */
	data->args.offset = data->mds_offset;
	data->ds_clp = NULL;
	data->args.fh = NFS_FH(data->inode);
	data->read_done_cb = nfs4_read_done_cb;
	/* Re-aim the task at the MDS ops and RPC client. */
	task->tk_ops = data->mds_ops;
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_read);
3252 | 3252 | ||
/*
 * Completion callback for a WRITE sent to the MDS.  Schedules a restart
 * and returns -EAGAIN when the async error handler asks for a retry;
 * on success renews the lease and updates cached inode attributes
 * (forcing weak cache consistency).
 */
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	if (task->tk_status >= 0) {
		renew_lease(NFS_SERVER(inode), data->timestamp);
		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
	}
	return 0;
}
3267 | 3267 | ||
3268 | static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) | 3268 | static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) |
3269 | { | 3269 | { |
3270 | if (!nfs4_sequence_done(task, &data->res.seq_res)) | 3270 | if (!nfs4_sequence_done(task, &data->res.seq_res)) |
3271 | return -EAGAIN; | 3271 | return -EAGAIN; |
3272 | return data->write_done_cb ? data->write_done_cb(task, data) : | 3272 | return data->write_done_cb ? data->write_done_cb(task, data) : |
3273 | nfs4_write_done_cb(task, data); | 3273 | nfs4_write_done_cb(task, data); |
3274 | } | 3274 | } |
3275 | 3275 | ||
/* Reset the nfs_write_data to send the write to the MDS. */
void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	/* Drop the pNFS layout segment: this I/O now bypasses the DS. */
	put_lseg(data->lseg);
	data->lseg = NULL;
	data->ds_clp = NULL;
	data->write_done_cb = nfs4_write_done_cb;
	data->args.fh = NFS_FH(data->inode);
	/* Restore the attribute bitmask/fattr that pNFS setup cleared. */
	data->args.bitmask = data->res.server->cache_consistency_bitmask;
	data->args.offset = data->mds_offset;
	data->res.fattr = &data->fattr;
	/* Re-aim the task at the MDS ops and RPC client. */
	task->tk_ops = data->mds_ops;
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_write);
3292 | 3292 | ||
/*
 * Prepare an outgoing WRITE.  pNFS writes (data->lseg set) skip the
 * post-op attribute fetch; MDS writes request the cache-consistency
 * bitmask.  Installs the default completion callback if the layout
 * driver did not provide one.
 */
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;
	if (!data->write_done_cb)
		data->write_done_cb = nfs4_write_done_cb;
	data->res.server = server;
	data->timestamp = jiffies;	/* for lease renewal on completion */

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
}
3309 | 3309 | ||
/*
 * Completion callback for a COMMIT sent to the MDS.  Schedules a restart
 * and returns -EAGAIN when the async error handler asks for a retry;
 * otherwise refreshes the cached inode attributes.
 */
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	nfs_refresh_inode(inode, data->res.fattr);
	return 0;
}
3321 | 3321 | ||
/*
 * Top-level COMMIT completion: finish session sequencing, then invoke
 * the installed callback (set by nfs4_proc_commit_setup() or a layout
 * driver).
 */
static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb(task, data);
}
3328 | 3328 | ||
/*
 * Prepare an outgoing COMMIT.  pNFS commits (data->lseg set) skip the
 * post-op attribute fetch; MDS commits request the cache-consistency
 * bitmask.  Installs the default completion callback if the layout
 * driver did not provide one.
 */
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;
	if (!data->write_done_cb)
		data->write_done_cb = nfs4_commit_done_cb;
	data->res.server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
}
3343 | 3343 | ||
/* Call data carried by an asynchronous RENEW request. */
struct nfs4_renewdata {
	struct nfs_client *client;	/* client whose lease is being renewed */
	unsigned long timestamp;	/* jiffies when the RENEW was queued */
};
3348 | 3348 | ||
3349 | /* | 3349 | /* |
3350 | * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special | 3350 | * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special |
3351 | * standalone procedure for queueing an asynchronous RENEW. | 3351 | * standalone procedure for queueing an asynchronous RENEW. |
3352 | */ | 3352 | */ |
/*
 * rpc_release for an async RENEW: re-arm the renewal timer (unless we
 * hold the final reference, i.e. the client is going away), then drop
 * the reference taken by nfs4_proc_async_renew() and free the call data.
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}
3363 | 3363 | ||
3364 | static void nfs4_renew_done(struct rpc_task *task, void *calldata) | 3364 | static void nfs4_renew_done(struct rpc_task *task, void *calldata) |
3365 | { | 3365 | { |
3366 | struct nfs4_renewdata *data = calldata; | 3366 | struct nfs4_renewdata *data = calldata; |
3367 | struct nfs_client *clp = data->client; | 3367 | struct nfs_client *clp = data->client; |
3368 | unsigned long timestamp = data->timestamp; | 3368 | unsigned long timestamp = data->timestamp; |
3369 | 3369 | ||
3370 | if (task->tk_status < 0) { | 3370 | if (task->tk_status < 0) { |
3371 | /* Unless we're shutting down, schedule state recovery! */ | 3371 | /* Unless we're shutting down, schedule state recovery! */ |
3372 | if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) | 3372 | if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) |
3373 | return; | 3373 | return; |
3374 | if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { | 3374 | if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { |
3375 | nfs4_schedule_lease_recovery(clp); | 3375 | nfs4_schedule_lease_recovery(clp); |
3376 | return; | 3376 | return; |
3377 | } | 3377 | } |
3378 | nfs4_schedule_path_down_recovery(clp); | 3378 | nfs4_schedule_path_down_recovery(clp); |
3379 | } | 3379 | } |
3380 | do_renew_lease(clp, timestamp); | 3380 | do_renew_lease(clp, timestamp); |
3381 | } | 3381 | } |
3382 | 3382 | ||
/* RPC callbacks for the standalone asynchronous RENEW request. */
static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};
3387 | 3387 | ||
3388 | static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) | 3388 | static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) |
3389 | { | 3389 | { |
3390 | struct rpc_message msg = { | 3390 | struct rpc_message msg = { |
3391 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], | 3391 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], |
3392 | .rpc_argp = clp, | 3392 | .rpc_argp = clp, |
3393 | .rpc_cred = cred, | 3393 | .rpc_cred = cred, |
3394 | }; | 3394 | }; |
3395 | struct nfs4_renewdata *data; | 3395 | struct nfs4_renewdata *data; |
3396 | 3396 | ||
3397 | if (renew_flags == 0) | 3397 | if (renew_flags == 0) |
3398 | return 0; | 3398 | return 0; |
3399 | if (!atomic_inc_not_zero(&clp->cl_count)) | 3399 | if (!atomic_inc_not_zero(&clp->cl_count)) |
3400 | return -EIO; | 3400 | return -EIO; |
3401 | data = kmalloc(sizeof(*data), GFP_NOFS); | 3401 | data = kmalloc(sizeof(*data), GFP_NOFS); |
3402 | if (data == NULL) | 3402 | if (data == NULL) |
3403 | return -ENOMEM; | 3403 | return -ENOMEM; |
3404 | data->client = clp; | 3404 | data->client = clp; |
3405 | data->timestamp = jiffies; | 3405 | data->timestamp = jiffies; |
3406 | return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, | 3406 | return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, |
3407 | &nfs4_renew_ops, data); | 3407 | &nfs4_renew_ops, data); |
3408 | } | 3408 | } |
3409 | 3409 | ||
3410 | static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) | 3410 | static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) |
3411 | { | 3411 | { |
3412 | struct rpc_message msg = { | 3412 | struct rpc_message msg = { |
3413 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], | 3413 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], |
3414 | .rpc_argp = clp, | 3414 | .rpc_argp = clp, |
3415 | .rpc_cred = cred, | 3415 | .rpc_cred = cred, |
3416 | }; | 3416 | }; |
3417 | unsigned long now = jiffies; | 3417 | unsigned long now = jiffies; |
3418 | int status; | 3418 | int status; |
3419 | 3419 | ||
3420 | status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); | 3420 | status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); |
3421 | if (status < 0) | 3421 | if (status < 0) |
3422 | return status; | 3422 | return status; |
3423 | do_renew_lease(clp, now); | 3423 | do_renew_lease(clp, now); |
3424 | return 0; | 3424 | return 0; |
3425 | } | 3425 | } |
3426 | 3426 | ||
3427 | static inline int nfs4_server_supports_acls(struct nfs_server *server) | 3427 | static inline int nfs4_server_supports_acls(struct nfs_server *server) |
3428 | { | 3428 | { |
3429 | return (server->caps & NFS_CAP_ACLS) | 3429 | return (server->caps & NFS_CAP_ACLS) |
3430 | && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) | 3430 | && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) |
3431 | && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); | 3431 | && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); |
3432 | } | 3432 | } |
3433 | 3433 | ||
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
 * the stack.
 */
3438 | #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT) | 3438 | #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT) |
3439 | 3439 | ||
/*
 * Copy @buflen bytes of @buf into freshly allocated pages so the data
 * can be handed to the RPC layer as page data (the source buffer may be
 * slab or stack memory, which must not be given to the network stack).
 *
 * On success returns the number of pages filled into @pages; on
 * allocation failure, frees everything allocated so far and returns
 * -ENOMEM.  @pgbase is not written here.
 *
 * NOTE(review): a buflen of 0 still allocates one page and returns 1 —
 * presumably callers never pass an empty buffer; confirm before relying
 * on this path.
 */
static int buf_to_pages_noslab(const void *buf, size_t buflen,
		struct page **pages, unsigned int *pgbase)
{
	struct page *newpage, **spages;
	int rc = 0;
	size_t len;
	spages = pages;	/* remember the start for the unwind path */

	do {
		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
		newpage = alloc_page(GFP_KERNEL);

		if (newpage == NULL)
			goto unwind;
		memcpy(page_address(newpage), buf, len);
		buf += len;
		buflen -= len;
		*pages++ = newpage;
		rc++;
	} while (buflen != 0);

	return rc;

unwind:
	/* Free the rc pages already allocated, newest first */
	for(; rc > 0; rc--)
		__free_page(spages[rc-1]);
	return -ENOMEM;
}
3468 | 3468 | ||
/*
 * Per-inode cached NFSv4 ACL.  When the ACL was too large to cache,
 * only its length is recorded (cached == 0) and data[] holds nothing.
 */
struct nfs4_cached_acl {
	int cached;	/* non-zero when data[] actually holds the ACL bytes */
	size_t len;	/* ACL length in bytes (valid in both cases) */
	char data[0];	/* trailing ACL data, present only when cached != 0 */
};
3474 | 3474 | ||
/*
 * Install @acl as the inode's cached ACL, freeing any previous one.
 * Passing NULL simply drops the cache.  i_lock serialises against
 * nfs4_read_cached_acl().
 */
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	kfree(nfsi->nfs4_acl);
	nfsi->nfs4_acl = acl;
	spin_unlock(&inode->i_lock);
}
3484 | 3484 | ||
/* Discard the inode's cached ACL (if any). */
static void nfs4_zap_acl_attr(struct inode *inode)
{
	nfs4_set_cached_acl(inode, NULL);
}
3489 | 3489 | ||
/*
 * Try to satisfy a getxattr-style ACL read from the inode's cache.
 *
 * Returns the ACL length on success (data copied into @buf unless @buf
 * is NULL, in which case only the length is reported), -ENOENT when
 * there is no cached entry or no cached data (caller must go to the
 * server), or -ERANGE when @buflen is too small for the cached ACL.
 */
static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_cached_acl *acl;
	int ret = -ENOENT;

	spin_lock(&inode->i_lock);
	acl = nfsi->nfs4_acl;
	if (acl == NULL)
		goto out;
	if (buf == NULL) /* user is just asking for length */
		goto out_len;
	if (acl->cached == 0)
		goto out;	/* length known, but data not cached */
	ret = -ERANGE; /* see getxattr(2) man page */
	if (acl->len > buflen)
		goto out;
	memcpy(buf, acl->data, acl->len);
out_len:
	ret = acl->len;
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
3514 | 3514 | ||
3515 | static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len) | 3515 | static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len) |
3516 | { | 3516 | { |
3517 | struct nfs4_cached_acl *acl; | 3517 | struct nfs4_cached_acl *acl; |
3518 | 3518 | ||
3519 | if (buf && acl_len <= PAGE_SIZE) { | 3519 | if (buf && acl_len <= PAGE_SIZE) { |
3520 | acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); | 3520 | acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); |
3521 | if (acl == NULL) | 3521 | if (acl == NULL) |
3522 | goto out; | 3522 | goto out; |
3523 | acl->cached = 1; | 3523 | acl->cached = 1; |
3524 | memcpy(acl->data, buf, acl_len); | 3524 | memcpy(acl->data, buf, acl_len); |
3525 | } else { | 3525 | } else { |
3526 | acl = kmalloc(sizeof(*acl), GFP_KERNEL); | 3526 | acl = kmalloc(sizeof(*acl), GFP_KERNEL); |
3527 | if (acl == NULL) | 3527 | if (acl == NULL) |
3528 | goto out; | 3528 | goto out; |
3529 | acl->cached = 0; | 3529 | acl->cached = 0; |
3530 | } | 3530 | } |
3531 | acl->len = acl_len; | 3531 | acl->len = acl_len; |
3532 | out: | 3532 | out: |
3533 | nfs4_set_cached_acl(inode, acl); | 3533 | nfs4_set_cached_acl(inode, acl); |
3534 | } | 3534 | } |
3535 | 3535 | ||
3536 | /* | 3536 | /* |
3537 | * The getxattr API returns the required buffer length when called with a | 3537 | * The getxattr API returns the required buffer length when called with a |
3538 | * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating | 3538 | * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating |
3539 | * the required buf. On a NULL buf, we send a page of data to the server | 3539 | * the required buf. On a NULL buf, we send a page of data to the server |
3540 | * guessing that the ACL request can be serviced by a page. If so, we cache | 3540 | * guessing that the ACL request can be serviced by a page. If so, we cache |
3541 | * up to the page of ACL data, and the 2nd call to getxattr is serviced by | 3541 | * up to the page of ACL data, and the 2nd call to getxattr is serviced by |
3542 | * the cache. If not so, we throw away the page, and cache the required | 3542 | * the cache. If not so, we throw away the page, and cache the required |
3543 | * length. The next getxattr call will then produce another round trip to | 3543 | * length. The next getxattr call will then produce another round trip to |
3544 | * the server, this time with the input buf of the required size. | 3544 | * the server, this time with the input buf of the required size. |
3545 | */ | 3545 | */ |
/*
 * Fetch the ACL from the server (GETACL), cache what we can, and copy
 * it into @buf when one was supplied.
 *
 * Returns the ACL length on success, -ERANGE when @buf is too small
 * for the returned ACL, or a negative errno / NFSv4 error.
 */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
	struct nfs_getaclargs args = {
		.fh = NFS_FH(inode),
		.acl_pages = pages,
		.acl_len = buflen,
	};
	struct nfs_getaclres res = {
		.acl_len = buflen,
	};
	void *resp_buf;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret = -ENOMEM, npages, i, acl_len = 0;

	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* As long as we're doing a round trip to the server anyway,
	 * let's be prepared for a page of acl data. */
	if (npages == 0)
		npages = 1;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}
	if (npages > 1) {
		/* for decoding across pages */
		args.acl_scratch = alloc_page(GFP_KERNEL);
		if (!args.acl_scratch)
			goto out_free;
	}
	args.acl_len = npages * PAGE_SIZE;
	args.acl_pgbase = 0;
	/* Let decode_getfacl know not to fail if the ACL data is larger than
	 * the page we send as a guess */
	if (buf == NULL)
		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
	resp_buf = page_address(pages[0]);

	dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
		__func__, buf, buflen, npages, args.acl_len);
	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
			&msg, &args.seq_args, &res.seq_res, 0);
	if (ret)
		goto out_free;

	/* Actual ACL payload, excluding the XDR header before the data */
	acl_len = res.acl_len - res.acl_data_offset;
	if (acl_len > args.acl_len)
		/* Reply was larger than we offered: cache the length only */
		nfs4_write_cached_acl(inode, NULL, acl_len);
	else
		nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
				acl_len);
	if (buf) {
		ret = -ERANGE;
		if (acl_len > buflen)
			goto out_free;
		/* NOTE(review): this copies res.acl_len bytes, but only
		 * acl_len (res.acl_len - res.acl_data_offset) was checked
		 * against buflen — looks like it should copy acl_len;
		 * confirm against _copy_from_pages() semantics. */
		_copy_from_pages(buf, pages, res.acl_data_offset,
				res.acl_len);
	}
	ret = acl_len;
out_free:
	for (i = 0; i < npages; i++)
		if (pages[i])
			__free_page(pages[i]);
	if (args.acl_scratch)
		__free_page(args.acl_scratch);
	return ret;
}
3619 | 3619 | ||
3620 | static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) | 3620 | static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) |
3621 | { | 3621 | { |
3622 | struct nfs4_exception exception = { }; | 3622 | struct nfs4_exception exception = { }; |
3623 | ssize_t ret; | 3623 | ssize_t ret; |
3624 | do { | 3624 | do { |
3625 | ret = __nfs4_get_acl_uncached(inode, buf, buflen); | 3625 | ret = __nfs4_get_acl_uncached(inode, buf, buflen); |
3626 | if (ret >= 0) | 3626 | if (ret >= 0) |
3627 | break; | 3627 | break; |
3628 | ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); | 3628 | ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); |
3629 | } while (exception.retry); | 3629 | } while (exception.retry); |
3630 | return ret; | 3630 | return ret; |
3631 | } | 3631 | } |
3632 | 3632 | ||
3633 | static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) | 3633 | static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) |
3634 | { | 3634 | { |
3635 | struct nfs_server *server = NFS_SERVER(inode); | 3635 | struct nfs_server *server = NFS_SERVER(inode); |
3636 | int ret; | 3636 | int ret; |
3637 | 3637 | ||
3638 | if (!nfs4_server_supports_acls(server)) | 3638 | if (!nfs4_server_supports_acls(server)) |
3639 | return -EOPNOTSUPP; | 3639 | return -EOPNOTSUPP; |
3640 | ret = nfs_revalidate_inode(server, inode); | 3640 | ret = nfs_revalidate_inode(server, inode); |
3641 | if (ret < 0) | 3641 | if (ret < 0) |
3642 | return ret; | 3642 | return ret; |
3643 | if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) | 3643 | if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) |
3644 | nfs_zap_acl_cache(inode); | 3644 | nfs_zap_acl_cache(inode); |
3645 | ret = nfs4_read_cached_acl(inode, buf, buflen); | 3645 | ret = nfs4_read_cached_acl(inode, buf, buflen); |
3646 | if (ret != -ENOENT) | 3646 | if (ret != -ENOENT) |
3647 | /* -ENOENT is returned if there is no ACL or if there is an ACL | 3647 | /* -ENOENT is returned if there is no ACL or if there is an ACL |
3648 | * but no cached acl data, just the acl length */ | 3648 | * but no cached acl data, just the acl length */ |
3649 | return ret; | 3649 | return ret; |
3650 | return nfs4_get_acl_uncached(inode, buf, buflen); | 3650 | return nfs4_get_acl_uncached(inode, buf, buflen); |
3651 | } | 3651 | } |
3652 | 3652 | ||
/*
 * Send the caller's ACL to the server (SETACL) and invalidate the local
 * attribute, access and ACL caches, since the server may derive new
 * attributes (e.g. mode bits) from the ACL.
 *
 * Returns 0 on success or a negative errno / NFSv4 error.
 */
static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4ACL_MAXPAGES];
	struct nfs_setaclargs arg = {
		.fh = NFS_FH(inode),
		.acl_pages = pages,
		.acl_len = buflen,
	};
	struct nfs_setaclres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, i;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	/* Copy the buffer into page-backed memory the RPC layer can send */
	i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
	if (i < 0)
		return i;
	/* Any delegation must be returned before changing the ACL */
	nfs_inode_return_delegation(inode);
	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);

	/*
	 * Free each page after tx, so the only ref left is
	 * held by the network stack
	 */
	for (; i > 0; i--)
		put_page(pages[i-1]);

	/*
	 * Acl update can result in inode attribute update.
	 * so mark the attribute cache invalid.
	 */
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
	spin_unlock(&inode->i_lock);
	nfs_access_zap_cache(inode);
	nfs_zap_acl_cache(inode);
	return ret;
}
3696 | 3696 | ||
3697 | static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) | 3697 | static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) |
3698 | { | 3698 | { |
3699 | struct nfs4_exception exception = { }; | 3699 | struct nfs4_exception exception = { }; |
3700 | int err; | 3700 | int err; |
3701 | do { | 3701 | do { |
3702 | err = nfs4_handle_exception(NFS_SERVER(inode), | 3702 | err = nfs4_handle_exception(NFS_SERVER(inode), |
3703 | __nfs4_proc_set_acl(inode, buf, buflen), | 3703 | __nfs4_proc_set_acl(inode, buf, buflen), |
3704 | &exception); | 3704 | &exception); |
3705 | } while (exception.retry); | 3705 | } while (exception.retry); |
3706 | return err; | 3706 | return err; |
3707 | } | 3707 | } |
3708 | 3708 | ||
/*
 * Common error handler for asynchronous NFSv4 RPCs.
 *
 * Returns 0 when the caller should complete normally (tk_status may be
 * remapped to a generic errno), or -EAGAIN when the task should be
 * retried after the recovery scheduled here (tk_status is then cleared).
 * @state may be NULL for operations with no open/lock state.
 */
static int
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
	struct nfs_client *clp = server->nfs_client;

	if (task->tk_status >= 0)
		return 0;
	switch(task->tk_status) {
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OPENMODE:
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL)
				nfs4_schedule_stateid_recovery(server, state);
			/* fall through: an expired lease also needs recovery */
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR %d, Reset session\n", __func__,
				task->tk_status);
			nfs4_schedule_session_recovery(clp->cl_session);
			task->tk_status = 0;
			return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
		case -NFS4ERR_DELAY:
			nfs_inc_server_stats(server, NFSIOS_DELAY);
			/* fall through: DELAY and GRACE both retry after a pause */
		case -NFS4ERR_GRACE:
		case -EKEYEXPIRED:
			rpc_delay(task, NFS4_POLL_RETRY_MAX);
			task->tk_status = 0;
			return -EAGAIN;
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			/* Retry immediately, no recovery needed */
			task->tk_status = 0;
			return -EAGAIN;
	}
	task->tk_status = nfs4_map_errors(task->tk_status);
	return 0;
wait_on_recovery:
	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
	/* Avoid a missed wakeup if recovery finished before we slept */
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
		rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
	task->tk_status = 0;
	return -EAGAIN;
}
3766 | 3766 | ||
/*
 * Establish a client ID with the server (NFSv4.0 SETCLIENTID).
 *
 * Builds a verifier from the client boot time and a client identifier
 * string from the local/peer addresses, transport and auth flavor, then
 * issues the RPC.  On NFS4ERR_CLID_INUSE the call is retried once after
 * waiting out a lease period; if it is still in use, the uniquifier is
 * bumped so the next attempt presents a different identity.
 *
 * Returns 0 on success or a negative errno / NFSv4 error.
 */
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
		unsigned short port, struct rpc_cred *cred,
		struct nfs4_setclientid_res *res)
{
	nfs4_verifier sc_verifier;
	struct nfs4_setclientid setclientid = {
		.sc_verifier = &sc_verifier,
		.sc_prog = program,
		.sc_cb_ident = clp->cl_cb_ident,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
		.rpc_argp = &setclientid,
		.rpc_resp = res,
		.rpc_cred = cred,
	};
	__be32 *p;
	int loop = 0;
	int status;

	/* Verifier = boot time (seconds, nanoseconds) in network order */
	p = (__be32*)sc_verifier.data;
	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
	*p = htonl((u32)clp->cl_boot_time.tv_nsec);

	for(;;) {
		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
				clp->cl_ipaddr,
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR),
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_PROTO),
				clp->cl_rpcclient->cl_auth->au_ops->au_name,
				clp->cl_id_uniquifier);
		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
				sizeof(setclientid.sc_netid),
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_NETID));
		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
				clp->cl_ipaddr, port >> 8, port & 255);

		status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
		if (status != -NFS4ERR_CLID_INUSE)
			break;
		if (loop != 0) {
			/* Second CLID_INUSE in a row: give up, but change
			 * the uniquifier for the next attempt. */
			++clp->cl_id_uniquifier;
			break;
		}
		++loop;
		/* Wait out one lease period before retrying */
		ssleep(clp->cl_lease_time / HZ + 1);
	}
	return status;
}
3821 | 3821 | ||
3822 | int nfs4_proc_setclientid_confirm(struct nfs_client *clp, | 3822 | int nfs4_proc_setclientid_confirm(struct nfs_client *clp, |
3823 | struct nfs4_setclientid_res *arg, | 3823 | struct nfs4_setclientid_res *arg, |
3824 | struct rpc_cred *cred) | 3824 | struct rpc_cred *cred) |
3825 | { | 3825 | { |
3826 | struct nfs_fsinfo fsinfo; | 3826 | struct nfs_fsinfo fsinfo; |
3827 | struct rpc_message msg = { | 3827 | struct rpc_message msg = { |
3828 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], | 3828 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], |
3829 | .rpc_argp = arg, | 3829 | .rpc_argp = arg, |
3830 | .rpc_resp = &fsinfo, | 3830 | .rpc_resp = &fsinfo, |
3831 | .rpc_cred = cred, | 3831 | .rpc_cred = cred, |
3832 | }; | 3832 | }; |
3833 | unsigned long now; | 3833 | unsigned long now; |
3834 | int status; | 3834 | int status; |
3835 | 3835 | ||
3836 | now = jiffies; | 3836 | now = jiffies; |
3837 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 3837 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
3838 | if (status == 0) { | 3838 | if (status == 0) { |
3839 | spin_lock(&clp->cl_lock); | 3839 | spin_lock(&clp->cl_lock); |
3840 | clp->cl_lease_time = fsinfo.lease_time * HZ; | 3840 | clp->cl_lease_time = fsinfo.lease_time * HZ; |
3841 | clp->cl_last_renewal = now; | 3841 | clp->cl_last_renewal = now; |
3842 | spin_unlock(&clp->cl_lock); | 3842 | spin_unlock(&clp->cl_lock); |
3843 | } | 3843 | } |
3844 | return status; | 3844 | return status; |
3845 | } | 3845 | } |
3846 | 3846 | ||
/*
 * Per-call state for an asynchronous DELEGRETURN request.  Allocated in
 * _nfs4_proc_delegreturn() and freed by the rpc_release callback.
 */
struct nfs4_delegreturndata {
	struct nfs4_delegreturnargs args;
	struct nfs4_delegreturnres res;
	struct nfs_fh fh;		/* private copy of the file handle */
	nfs4_stateid stateid;		/* private copy of the delegation stateid */
	unsigned long timestamp;	/* jiffies when the call was set up */
	struct nfs_fattr fattr;		/* attributes returned by the server */
	int rpc_status;			/* final tk_status, read by sync waiters */
};
3856 | 3856 | ||
/*
 * rpc_call_done callback for DELEGRETURN.  Renews the lease on success —
 * and also on stale/expired stateid errors, since the server answered —
 * then records the final status for any synchronous waiter.
 */
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegreturndata *data = calldata;

	/* Process the session/slot sequence; bail out if a resend is queued. */
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
	case 0:
		renew_lease(data->res.server, data->timestamp);
		break;
	default:
		if (nfs4_async_handle_error(task, data->res.server, NULL) ==
				-EAGAIN) {
			rpc_restart_call_prepare(task);
			/* Don't record the status of a call being retried. */
			return;
		}
	}
	data->rpc_status = task->tk_status;
}
3879 | 3879 | ||
/* rpc_release callback: frees the calldata kzalloc'd in _nfs4_proc_delegreturn(). */
static void nfs4_delegreturn_release(void *calldata)
{
	kfree(calldata);
}
3884 | 3884 | ||
#if defined(CONFIG_NFS_V4_1)
/*
 * rpc_call_prepare callback (NFSv4.1 only): reserve a session slot before
 * transmitting.  A nonzero return from nfs4_setup_sequence() means the
 * task was queued waiting for a slot and will be prepared again later.
 */
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_delegreturndata *d_data;

	d_data = (struct nfs4_delegreturndata *)data;

	if (nfs4_setup_sequence(d_data->res.server,
				&d_data->args.seq_args,
				&d_data->res.seq_res, 1, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
3899 | 3899 | ||
/* Callback table for the asynchronous DELEGRETURN task. */
static const struct rpc_call_ops nfs4_delegreturn_ops = {
#if defined(CONFIG_NFS_V4_1)
	/* v4.0 needs no prepare step; slot setup is a v4.1 session concept. */
	.rpc_call_prepare = nfs4_delegreturn_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs4_delegreturn_done,
	.rpc_release = nfs4_delegreturn_release,
};
3907 | 3907 | ||
/*
 * Launch an asynchronous DELEGRETURN for @inode.  The file handle and
 * @stateid are copied into the calldata, so the caller's references may
 * go away while the task runs.  If @issync is set, wait for completion
 * and refresh the inode from the returned attributes; otherwise return
 * immediately after starting the task (its rpc_release callback frees
 * the calldata).
 */
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs4_delegreturndata *data;
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_delegreturn_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = 0;

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	/* Point the args at our private copies and fill them in. */
	data->args.fhandle = &data->fh;
	data->args.stateid = &data->stateid;
	data->args.bitmask = server->attr_bitmask;
	nfs_copy_fh(&data->fh, NFS_FH(inode));
	memcpy(&data->stateid, stateid, sizeof(data->stateid));
	data->res.fattr = &data->fattr;
	data->res.server = server;
	nfs_fattr_init(data->res.fattr);
	data->timestamp = jiffies;
	data->rpc_status = 0;

	task_setup_data.callback_data = data;
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	/* NOTE(review): on failure rpc_run_task() is expected to release the
	 * calldata via the rpc_release callback — confirm against SUNRPC. */
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (!issync)
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0)
		goto out;
	/* Report the RPC result recorded by nfs4_delegreturn_done(). */
	status = data->rpc_status;
	if (status != 0)
		goto out;
	nfs_refresh_inode(inode, &data->fattr);
out:
	rpc_put_task(task);
	return status;
}
3958 | 3958 | ||
3959 | int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) | 3959 | int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) |
3960 | { | 3960 | { |
3961 | struct nfs_server *server = NFS_SERVER(inode); | 3961 | struct nfs_server *server = NFS_SERVER(inode); |
3962 | struct nfs4_exception exception = { }; | 3962 | struct nfs4_exception exception = { }; |
3963 | int err; | 3963 | int err; |
3964 | do { | 3964 | do { |
3965 | err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); | 3965 | err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); |
3966 | switch (err) { | 3966 | switch (err) { |
3967 | case -NFS4ERR_STALE_STATEID: | 3967 | case -NFS4ERR_STALE_STATEID: |
3968 | case -NFS4ERR_EXPIRED: | 3968 | case -NFS4ERR_EXPIRED: |
3969 | case 0: | 3969 | case 0: |
3970 | return 0; | 3970 | return 0; |
3971 | } | 3971 | } |
3972 | err = nfs4_handle_exception(server, err, &exception); | 3972 | err = nfs4_handle_exception(server, err, &exception); |
3973 | } while (exception.retry); | 3973 | } while (exception.retry); |
3974 | return err; | 3974 | return err; |
3975 | } | 3975 | } |
3976 | 3976 | ||
3977 | #define NFS4_LOCK_MINTIMEOUT (1 * HZ) | 3977 | #define NFS4_LOCK_MINTIMEOUT (1 * HZ) |
3978 | #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) | 3978 | #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) |
3979 | 3979 | ||
3980 | /* | 3980 | /* |
3981 | * sleep, with exponential backoff, and retry the LOCK operation. | 3981 | * sleep, with exponential backoff, and retry the LOCK operation. |
3982 | */ | 3982 | */ |
3983 | static unsigned long | 3983 | static unsigned long |
3984 | nfs4_set_lock_task_retry(unsigned long timeout) | 3984 | nfs4_set_lock_task_retry(unsigned long timeout) |
3985 | { | 3985 | { |
3986 | freezable_schedule_timeout_killable(timeout); | 3986 | freezable_schedule_timeout_killable(timeout); |
3987 | timeout <<= 1; | 3987 | timeout <<= 1; |
3988 | if (timeout > NFS4_LOCK_MAXTIMEOUT) | 3988 | if (timeout > NFS4_LOCK_MAXTIMEOUT) |
3989 | return NFS4_LOCK_MAXTIMEOUT; | 3989 | return NFS4_LOCK_MAXTIMEOUT; |
3990 | return timeout; | 3990 | return timeout; |
3991 | } | 3991 | } |
3992 | 3992 | ||
/*
 * Issue a LOCKT to test whether @request would conflict with an existing
 * lock.  On a clean answer, @request->fl_type is set to F_UNLCK (no
 * conflict); on NFS4ERR_DENIED the server has filled @request with the
 * conflicting lock's details via res.denied, and 0 is returned.
 */
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_lockt_args arg = {
		.fh = NFS_FH(inode),
		.fl = request,
	};
	struct nfs_lockt_res res = {
		.denied = request,	/* conflict details land in @request */
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
		.rpc_argp = &arg,
		.rpc_resp = &res,
		.rpc_cred = state->owner->so_cred,
	};
	struct nfs4_lock_state *lsp;
	int status;

	arg.lock_owner.clientid = clp->cl_clientid;
	/* Attach (or find) the per-owner lock state for this file_lock. */
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	arg.lock_owner.id = lsp->ls_id.id;
	arg.lock_owner.s_dev = server->s_dev;
	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	switch (status) {
	case 0:
		request->fl_type = F_UNLCK;	/* no conflicting lock */
		break;
	case -NFS4ERR_DENIED:
		status = 0;	/* conflict reported through @request */
	}
	request->fl_ops->fl_release_private(request);
out:
	return status;
}
4033 | 4033 | ||
4034 | static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) | 4034 | static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) |
4035 | { | 4035 | { |
4036 | struct nfs4_exception exception = { }; | 4036 | struct nfs4_exception exception = { }; |
4037 | int err; | 4037 | int err; |
4038 | 4038 | ||
4039 | do { | 4039 | do { |
4040 | err = nfs4_handle_exception(NFS_SERVER(state->inode), | 4040 | err = nfs4_handle_exception(NFS_SERVER(state->inode), |
4041 | _nfs4_proc_getlk(state, cmd, request), | 4041 | _nfs4_proc_getlk(state, cmd, request), |
4042 | &exception); | 4042 | &exception); |
4043 | } while (exception.retry); | 4043 | } while (exception.retry); |
4044 | return err; | 4044 | return err; |
4045 | } | 4045 | } |
4046 | 4046 | ||
4047 | static int do_vfs_lock(struct file *file, struct file_lock *fl) | 4047 | static int do_vfs_lock(struct file *file, struct file_lock *fl) |
4048 | { | 4048 | { |
4049 | int res = 0; | 4049 | int res = 0; |
4050 | switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { | 4050 | switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { |
4051 | case FL_POSIX: | 4051 | case FL_POSIX: |
4052 | res = posix_lock_file_wait(file, fl); | 4052 | res = posix_lock_file_wait(file, fl); |
4053 | break; | 4053 | break; |
4054 | case FL_FLOCK: | 4054 | case FL_FLOCK: |
4055 | res = flock_lock_file_wait(file, fl); | 4055 | res = flock_lock_file_wait(file, fl); |
4056 | break; | 4056 | break; |
4057 | default: | 4057 | default: |
4058 | BUG(); | 4058 | BUG(); |
4059 | } | 4059 | } |
4060 | return res; | 4060 | return res; |
4061 | } | 4061 | } |
4062 | 4062 | ||
/*
 * Per-call state for an asynchronous LOCKU (unlock) request.  Freed by
 * nfs4_locku_release_calldata() once the task releases it.
 */
struct nfs4_unlockdata {
	struct nfs_locku_args arg;
	struct nfs_locku_res res;
	struct nfs4_lock_state *lsp;	/* holds a ls_count reference */
	struct nfs_open_context *ctx;	/* pins the open file until done */
	struct file_lock fl;		/* private copy of the unlock request */
	const struct nfs_server *server;
	unsigned long timestamp;	/* jiffies at transmit, for lease renewal */
};
4072 | 4072 | ||
/*
 * Allocate and initialize the calldata for an asynchronous LOCKU.
 * Takes a reference on @lsp and on @ctx; both are dropped (together with
 * @seqid) by nfs4_locku_release_calldata().  Returns NULL on allocation
 * failure, in which case the caller still owns @seqid.
 */
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *p;
	struct inode *inode = lsp->ls_state->inode;

	p = kzalloc(sizeof(*p), GFP_NOFS);
	if (p == NULL)
		return NULL;
	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.seqid = seqid;
	p->res.seqid = seqid;
	p->arg.stateid = &lsp->ls_stateid;
	p->lsp = lsp;
	atomic_inc(&lsp->ls_count);
	/* Ensure we don't close file until we're done freeing locks! */
	p->ctx = get_nfs_open_context(ctx);
	memcpy(&p->fl, fl, sizeof(p->fl));	/* private copy; caller's fl may die */
	p->server = NFS_SERVER(inode);
	return p;
}
4097 | 4097 | ||
/*
 * rpc_release callback for LOCKU: drop everything nfs4_alloc_unlockdata()
 * acquired — the seqid, the lock-state reference, the open context —
 * then free the calldata itself.
 */
static void nfs4_locku_release_calldata(void *data)
{
	struct nfs4_unlockdata *calldata = data;
	nfs_free_seqid(calldata->arg.seqid);
	nfs4_put_lock_state(calldata->lsp);
	put_nfs_open_context(calldata->ctx);
	kfree(calldata);
}
4106 | 4106 | ||
/*
 * rpc_call_done callback for LOCKU.  On success, adopt the new lock
 * stateid from the reply and renew the lease.  Stateid errors are
 * ignored (the lock is effectively gone); anything else goes through
 * the generic async error handler, possibly restarting the call.
 */
static void nfs4_locku_done(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
	switch (task->tk_status) {
	case 0:
		memcpy(calldata->lsp->ls_stateid.data,
				calldata->res.stateid.data,
				sizeof(calldata->lsp->ls_stateid.data));
		renew_lease(calldata->server, calldata->timestamp);
		break;
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OLD_STATEID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
		/* Nothing to do: the server no longer honours this stateid. */
		break;
	default:
		if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
			rpc_restart_call_prepare(task);
	}
}
4130 | 4130 | ||
/*
 * rpc_call_prepare callback for LOCKU: wait our turn on the seqid, skip
 * the RPC entirely if the lock was never confirmed on the server, then
 * reserve a session slot and transmit.
 */
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;
	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
		/* Note: exit _without_ running nfs4_locku_done */
		task->tk_action = NULL;
		return;
	}
	calldata->timestamp = jiffies;	/* transmit time, for lease renewal */
	if (nfs4_setup_sequence(calldata->server,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, 1, task))
		return;
	rpc_call_start(task);
}
4149 | 4149 | ||
/* Callback table for the asynchronous LOCKU task. */
static const struct rpc_call_ops nfs4_locku_ops = {
	.rpc_call_prepare = nfs4_locku_prepare,
	.rpc_call_done = nfs4_locku_done,
	.rpc_release = nfs4_locku_release_calldata,
};
4155 | 4155 | ||
/*
 * Start an asynchronous LOCKU task.  Ownership of @seqid transfers to
 * the calldata on success; if calldata allocation fails, @seqid is freed
 * here and ERR_PTR(-ENOMEM) is returned.  The caller must rpc_put_task()
 * the returned task.
 */
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_locku_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};

	/* Ensure this is an unlock - when canceling a lock, the
	 * canceled lock is passed in, and it won't be an unlock.
	 */
	fl->fl_type = F_UNLCK;

	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
	if (data == NULL) {
		nfs_free_seqid(seqid);	/* calldata never took ownership */
		return ERR_PTR(-ENOMEM);
	}

	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	return rpc_run_task(&task_setup_data);
}
4190 | 4190 | ||
/*
 * Release a byte-range lock.  The local VFS lock is dropped first (under
 * nfsi->rwsem, before the RPC) so the kernel's view never lags the
 * server's; FL_EXISTS makes do_vfs_lock() report -ENOENT when there was
 * no matching local lock, in which case no RPC is needed.  Delegated
 * locks also need no LOCKU on the wire.  @request->fl_flags is restored
 * before returning.
 */
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_seqid *seqid;
	struct nfs4_lock_state *lsp;
	struct rpc_task *task;
	int status = 0;
	unsigned char fl_flags = request->fl_flags;	/* saved for restore below */

	status = nfs4_set_lock_state(state, request);
	/* Unlock _before_ we do the RPC call */
	request->fl_flags |= FL_EXISTS;
	down_read(&nfsi->rwsem);
	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
		up_read(&nfsi->rwsem);
		goto out;	/* no local lock existed: nothing to undo remotely */
	}
	up_read(&nfsi->rwsem);
	if (status != 0)
		goto out;	/* nfs4_set_lock_state() failed earlier */
	/* Is this a delegated lock? */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
	status = -ENOMEM;
	if (seqid == NULL)
		goto out;
	/* nfs4_do_unlck() consumes @seqid, even on failure. */
	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
	status = PTR_ERR(task);
	if (IS_ERR(task))
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	rpc_put_task(task);
out:
	request->fl_flags = fl_flags;
	return status;
}
4229 | 4229 | ||
/*
 * Per-call state for an asynchronous LOCK request, allocated by
 * nfs4_alloc_lockdata().
 */
struct nfs4_lockdata {
	struct nfs_lock_args arg;
	struct nfs_lock_res res;
	struct nfs4_lock_state *lsp;	/* holds a ls_count reference */
	struct nfs_open_context *ctx;	/* pins the open file for the call */
	struct file_lock fl;		/* private copy of the lock request */
	unsigned long timestamp;	/* jiffies at transmit, for lease renewal */
	int rpc_status;			/* final tk_status for sync waiters */
	int cancelled;			/* nonzero if the waiter gave up early */
	struct nfs_server *server;
};
4241 | 4241 | ||
/*
 * Allocate and initialize the calldata for an asynchronous LOCK.
 * Acquires both an open seqid (for the open_to_lock_owner case) and a
 * lock seqid, a reference on @lsp, and a reference on @ctx.  Returns
 * NULL on any allocation failure, with everything already acquired
 * released via the goto cleanup chain.
 */
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
		gfp_t gfp_mask)
{
	struct nfs4_lockdata *p;
	struct inode *inode = lsp->ls_state->inode;
	struct nfs_server *server = NFS_SERVER(inode);

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		return NULL;

	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
	if (p->arg.open_seqid == NULL)
		goto out_free;
	p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
	if (p->arg.lock_seqid == NULL)
		goto out_free_seqid;
	p->arg.lock_stateid = &lsp->ls_stateid;
	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
	p->arg.lock_owner.id = lsp->ls_id.id;
	p->arg.lock_owner.s_dev = server->s_dev;
	p->res.lock_seqid = p->arg.lock_seqid;
	p->lsp = lsp;
	p->server = server;
	atomic_inc(&lsp->ls_count);
	p->ctx = get_nfs_open_context(ctx);
	memcpy(&p->fl, fl, sizeof(p->fl));	/* private copy; caller's fl may die */
	return p;
out_free_seqid:
	nfs_free_seqid(p->arg.open_seqid);
out_free:
	kfree(p);
	return NULL;
}
4279 | 4279 | ||
/*
 * rpc_call_prepare callback for LOCK.  Waits for the lock seqid; if the
 * lock owner hasn't been confirmed by the server yet, also waits for the
 * open seqid and switches the request to the open_to_lock_owner form.
 * Finally reserves a session slot and transmits.
 */
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;
	struct nfs4_state *state = data->lsp->ls_state;

	dprintk("%s: begin!\n", __func__);
	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
		return;
	/* Do we need to do an open_to_lock_owner? */
	if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
			return;
		data->arg.open_stateid = &state->stateid;
		data->arg.new_lock_owner = 1;
		data->res.open_seqid = data->arg.open_seqid;
	} else
		data->arg.new_lock_owner = 0;
	data->timestamp = jiffies;	/* transmit time, for lease renewal */
	if (nfs4_setup_sequence(data->server,
				&data->arg.seq_args,
				&data->res.seq_res, 1, task))
		return;
	rpc_call_start(task);
	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
4305 | 4305 | ||
/*
 * Recovery variant of nfs4_lock_prepare(): mark the task privileged so
 * it may run while the state manager has the session drained, then do
 * the normal LOCK preparation.
 */
static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_lock_prepare(task, calldata);
}
4311 | 4311 | ||
/*
 * LOCK reply handler.  On success the returned lock stateid is copied
 * into the lock state and the lease is renewed; a brand new lock owner
 * is confirmed only when the server replied successfully.
 */
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	dprintk("%s: begin!\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	data->rpc_status = task->tk_status;
	if (data->arg.new_lock_owner != 0) {
		if (data->rpc_status == 0)
			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
		else
			goto out;
	}
	if (data->rpc_status == 0) {
		/* Server granted the lock: record its stateid */
		memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
					sizeof(data->lsp->ls_stateid.data));
		data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
		renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
	}
out:
	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
4337 | 4337 | ||
/*
 * Release callback for the LOCK task: free the seqids and the lockdata.
 * If the request was interrupted after it may have reached the server
 * ("cancelled"), an asynchronous LOCKU is dispatched to undo any lock
 * the server granted; in that case ownership of the lock seqid passes
 * to the unlock task, so it must not be freed here.
 */
static void nfs4_lock_release(void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	dprintk("%s: begin!\n", __func__);
	nfs_free_seqid(data->arg.open_seqid);
	if (data->cancelled != 0) {
		struct rpc_task *task;
		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
				data->arg.lock_seqid);
		if (!IS_ERR(task))
			rpc_put_task_async(task);
		dprintk("%s: cancelling lock!\n", __func__);
	} else
		nfs_free_seqid(data->arg.lock_seqid);
	nfs4_put_lock_state(data->lsp);
	put_nfs_open_context(data->ctx);
	kfree(data);
	dprintk("%s: done!\n", __func__);
}
4358 | 4358 | ||
/* RPC callbacks for a normal (non-recovery) LOCK task */
static const struct rpc_call_ops nfs4_lock_ops = {
	.rpc_call_prepare = nfs4_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};
4364 | 4364 | ||
/* RPC callbacks for a LOCK task issued during state recovery */
static const struct rpc_call_ops nfs4_recover_lock_ops = {
	.rpc_call_prepare = nfs4_recover_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};
4370 | 4370 | ||
4371 | static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) | 4371 | static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) |
4372 | { | 4372 | { |
4373 | switch (error) { | 4373 | switch (error) { |
4374 | case -NFS4ERR_ADMIN_REVOKED: | 4374 | case -NFS4ERR_ADMIN_REVOKED: |
4375 | case -NFS4ERR_BAD_STATEID: | 4375 | case -NFS4ERR_BAD_STATEID: |
4376 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | 4376 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; |
4377 | if (new_lock_owner != 0 || | 4377 | if (new_lock_owner != 0 || |
4378 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) | 4378 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) |
4379 | nfs4_schedule_stateid_recovery(server, lsp->ls_state); | 4379 | nfs4_schedule_stateid_recovery(server, lsp->ls_state); |
4380 | break; | 4380 | break; |
4381 | case -NFS4ERR_STALE_STATEID: | 4381 | case -NFS4ERR_STALE_STATEID: |
4382 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | 4382 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; |
4383 | case -NFS4ERR_EXPIRED: | 4383 | case -NFS4ERR_EXPIRED: |
4384 | nfs4_schedule_lease_recovery(server->nfs_client); | 4384 | nfs4_schedule_lease_recovery(server->nfs_client); |
4385 | }; | 4385 | }; |
4386 | } | 4386 | } |
4387 | 4387 | ||
/*
 * Issue an asynchronous LOCK request and wait for it to complete.
 * @recovery_type selects normal operation (NFS_LOCK_NEW) or reclaim /
 * expired-lease recovery, which uses the privileged RPC callbacks and
 * GFP_NOFS allocation.  Returns 0 on success, or a negative errno /
 * NFS4ERR status.
 */
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
	struct nfs4_lockdata *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_lock_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int ret;

	dprintk("%s: begin!\n", __func__);
	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
			fl->fl_u.nfs4_fl.owner,
			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	if (IS_SETLKW(cmd))
		data->arg.block = 1;
	if (recovery_type > NFS_LOCK_NEW) {
		if (recovery_type == NFS_LOCK_RECLAIM)
			data->arg.reclaim = NFS_LOCK_RECLAIM;
		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
	}
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	ret = nfs4_wait_for_completion_rpc_task(task);
	if (ret == 0) {
		ret = data->rpc_status;
		if (ret)
			nfs4_handle_setlk_error(data->server, data->lsp,
					data->arg.new_lock_owner, ret);
	} else
		/* Interrupted: let rpc_release issue the cancelling LOCKU */
		data->cancelled = 1;
	rpc_put_task(task);
	dprintk("%s: done, ret = %d!\n", __func__, ret);
	return ret;
}
4436 | 4436 | ||
/*
 * Reclaim an existing byte-range lock after a server reboot.  Locks
 * covered by a delegation need no reclaim.  Retries on NFS4ERR_DELAY;
 * any other status ends the loop and is returned to the caller.
 */
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		/* Cache the lock if possible... */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
4454 | 4454 | ||
/*
 * Re-establish a lock whose lease has expired.  Locks covered by a
 * delegation need no recovery.  GRACE and DELAY errors are retried via
 * the exception handler; any other status (including success) ends the
 * loop.
 */
static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	err = nfs4_set_lock_state(state, request);
	if (err != 0)
		return err;
	do {
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			/* Wait, then retry; clear err so a final timeout
			 * without retry reports success of the last pass */
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}
4480 | 4480 | ||
#if defined(CONFIG_NFS_V4_1)
/*
 * NFSv4.1 expired-lock handler: use TEST_STATEID to check whether the
 * stateid is still valid on the server; if it is, nothing to do.
 * Otherwise free it server-side and fall back to the generic
 * expired-lock recovery path.
 */
static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	int status;
	struct nfs_server *server = NFS_SERVER(state->inode);

	status = nfs41_test_stateid(server, state);
	if (status == NFS_OK)
		return 0;
	nfs41_free_stateid(server, state);
	return nfs4_lock_expired(state, request);
}
#endif
4494 | 4494 | ||
/*
 * Set a byte-range lock.  The request is first probed against the local
 * VFS lock database (FL_ACCESS) to catch conflicts cheaply, then either
 * cached locally under a held delegation or sent to the server, and
 * finally recorded in the VFS.  The caller's fl_flags are restored on
 * all exit paths.
 */
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	unsigned char fl_flags = request->fl_flags;
	int status = -ENOLCK;

	if ((fl_flags & FL_POSIX) &&
			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
		goto out;
	/* Is this a delegated open? */
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	/* Probe the VFS for conflicts without actually recording a lock */
	request->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(request->fl_file, request);
	if (status < 0)
		goto out;
	down_read(&nfsi->rwsem);
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		/* Yes: cache locks! */
		/* ...but avoid races with delegation recall... */
		request->fl_flags = fl_flags & ~FL_SLEEP;
		status = do_vfs_lock(request->fl_file, request);
		goto out_unlock;
	}
	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
	if (status != 0)
		goto out_unlock;
	/* Note: we always want to sleep here! */
	request->fl_flags = fl_flags | FL_SLEEP;
	if (do_vfs_lock(request->fl_file, request) < 0)
		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
out_unlock:
	up_read(&nfsi->rwsem);
out:
	/* Restore the caller's original lock flags */
	request->fl_flags = fl_flags;
	return status;
}
4533 | 4533 | ||
4534 | static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) | 4534 | static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) |
4535 | { | 4535 | { |
4536 | struct nfs4_exception exception = { }; | 4536 | struct nfs4_exception exception = { }; |
4537 | int err; | 4537 | int err; |
4538 | 4538 | ||
4539 | do { | 4539 | do { |
4540 | err = _nfs4_proc_setlk(state, cmd, request); | 4540 | err = _nfs4_proc_setlk(state, cmd, request); |
4541 | if (err == -NFS4ERR_DENIED) | 4541 | if (err == -NFS4ERR_DENIED) |
4542 | err = -EAGAIN; | 4542 | err = -EAGAIN; |
4543 | err = nfs4_handle_exception(NFS_SERVER(state->inode), | 4543 | err = nfs4_handle_exception(NFS_SERVER(state->inode), |
4544 | err, &exception); | 4544 | err, &exception); |
4545 | } while (exception.retry); | 4545 | } while (exception.retry); |
4546 | return err; | 4546 | return err; |
4547 | } | 4547 | } |
4548 | 4548 | ||
/*
 * Entry point for the NFSv4 ->lock() file operation.  Dispatches GETLK,
 * unlock and SETLK/SETLKW requests.  Blocking SETLKW requests that are
 * denied are retried with an increasing backoff until granted, signalled
 * or failed with something other than -EAGAIN.
 */
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
	int status;

	/* verify open state */
	ctx = nfs_file_open_context(filp);
	state = ctx->state;

	/* NFSv4 byte ranges are unsigned: negative offsets cannot be sent */
	if (request->fl_start < 0 || request->fl_end < 0)
		return -EINVAL;

	if (IS_GETLK(cmd)) {
		if (state != NULL)
			return nfs4_proc_getlk(state, F_GETLK, request);
		return 0;
	}

	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
		return -EINVAL;

	if (request->fl_type == F_UNLCK) {
		if (state != NULL)
			return nfs4_proc_unlck(state, cmd, request);
		return 0;
	}

	if (state == NULL)
		return -ENOLCK;
	do {
		status = nfs4_proc_setlk(state, cmd, request);
		if ((status != -EAGAIN) || IS_SETLK(cmd))
			break;
		/* Lock is held by someone else: wait and try again */
		timeout = nfs4_set_lock_task_retry(timeout);
		status = -ERESTARTSYS;
		if (signalled())
			break;
	} while(status < 0);
	return status;
}
4592 | 4592 | ||
/*
 * Reacquire on the server a lock that was previously cached locally
 * under a delegation which is now being recalled.  Errors that the
 * state manager can handle are converted to 0 after scheduling the
 * appropriate recovery; NFS4ERR_DELAY is retried.
 */
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	err = nfs4_set_lock_state(state, fl);
	if (err != 0)
		goto out;
	do {
		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
		switch (err) {
		default:
			printk(KERN_ERR "%s: unhandled error %d.\n",
					__func__, err);
			/* Fallthrough: report the error and give up */
		case 0:
		case -ESTALE:
			goto out;
		case -NFS4ERR_EXPIRED:
			nfs4_schedule_stateid_recovery(server, state);
			/* Fallthrough: an expired lease needs recovery too */
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
			nfs4_schedule_lease_recovery(server->nfs_client);
			goto out;
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
			nfs4_schedule_session_recovery(server->nfs_client->cl_session);
			goto out;
		case -ERESTARTSYS:
			/*
			 * The show must go on: exit, but mark the
			 * stateid as needing recovery.
			 */
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OPENMODE:
			nfs4_schedule_stateid_recovery(server, state);
			err = 0;
			goto out;
		case -EKEYEXPIRED:
			/*
			 * User RPCSEC_GSS context has expired.
			 * We cannot recover this stateid now, so
			 * skip it and allow recovery thread to
			 * proceed.
			 */
			err = 0;
			goto out;
		case -ENOMEM:
		case -NFS4ERR_DENIED:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			err = 0;
			goto out;
		case -NFS4ERR_DELAY:
			/* Retry after the exception handler's delay */
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}
4657 | 4657 | ||
/* Free the RELEASE_LOCKOWNER argument buffer once the RPC completes */
static void nfs4_release_lockowner_release(void *calldata)
{
	kfree(calldata);
}
4662 | 4662 | ||
/* Fire-and-forget call: only the release callback is needed */
const struct rpc_call_ops nfs4_release_lockowner_ops = {
	.rpc_release = nfs4_release_lockowner_release,
};
4666 | 4666 | ||
/*
 * Tell the server it may free state associated with this lock owner.
 * RELEASE_LOCKOWNER exists only in NFSv4.0, so this is a no-op for
 * minor versions >= 1.  The args buffer is freed by the rpc_release
 * callback (nfs4_release_lockowner_release).  Best effort: allocation
 * failure simply skips the call.
 */
void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
{
	struct nfs_server *server = lsp->ls_state->owner->so_server;
	struct nfs_release_lockowner_args *args;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
	};

	if (server->nfs_client->cl_mvops->minor_version != 0)
		return;
	args = kmalloc(sizeof(*args), GFP_NOFS);
	if (!args)
		return;
	args->lock_owner.clientid = server->nfs_client->cl_clientid;
	args->lock_owner.id = lsp->ls_id.id;
	args->lock_owner.s_dev = server->s_dev;
	msg.rpc_argp = args;
	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
}
4686 | 4686 | ||
4687 | #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" | 4687 | #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" |
4688 | 4688 | ||
4689 | static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, | 4689 | static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, |
4690 | const void *buf, size_t buflen, | 4690 | const void *buf, size_t buflen, |
4691 | int flags, int type) | 4691 | int flags, int type) |
4692 | { | 4692 | { |
4693 | if (strcmp(key, "") != 0) | 4693 | if (strcmp(key, "") != 0) |
4694 | return -EINVAL; | 4694 | return -EINVAL; |
4695 | 4695 | ||
4696 | return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); | 4696 | return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); |
4697 | } | 4697 | } |
4698 | 4698 | ||
4699 | static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, | 4699 | static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, |
4700 | void *buf, size_t buflen, int type) | 4700 | void *buf, size_t buflen, int type) |
4701 | { | 4701 | { |
4702 | if (strcmp(key, "") != 0) | 4702 | if (strcmp(key, "") != 0) |
4703 | return -EINVAL; | 4703 | return -EINVAL; |
4704 | 4704 | ||
4705 | return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); | 4705 | return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); |
4706 | } | 4706 | } |
4707 | 4707 | ||
4708 | static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, | 4708 | static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, |
4709 | size_t list_len, const char *name, | 4709 | size_t list_len, const char *name, |
4710 | size_t name_len, int type) | 4710 | size_t name_len, int type) |
4711 | { | 4711 | { |
4712 | size_t len = sizeof(XATTR_NAME_NFSV4_ACL); | 4712 | size_t len = sizeof(XATTR_NAME_NFSV4_ACL); |
4713 | 4713 | ||
4714 | if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) | 4714 | if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) |
4715 | return 0; | 4715 | return 0; |
4716 | 4716 | ||
4717 | if (list && len <= list_len) | 4717 | if (list && len <= list_len) |
4718 | memcpy(list, XATTR_NAME_NFSV4_ACL, len); | 4718 | memcpy(list, XATTR_NAME_NFSV4_ACL, len); |
4719 | return len; | 4719 | return len; |
4720 | } | 4720 | } |
4721 | 4721 | ||
4722 | /* | 4722 | /* |
4723 | * nfs_fhget will use either the mounted_on_fileid or the fileid | 4723 | * nfs_fhget will use either the mounted_on_fileid or the fileid |
4724 | */ | 4724 | */ |
4725 | static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) | 4725 | static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) |
4726 | { | 4726 | { |
4727 | if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || | 4727 | if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || |
4728 | (fattr->valid & NFS_ATTR_FATTR_FILEID)) && | 4728 | (fattr->valid & NFS_ATTR_FATTR_FILEID)) && |
4729 | (fattr->valid & NFS_ATTR_FATTR_FSID) && | 4729 | (fattr->valid & NFS_ATTR_FATTR_FSID) && |
4730 | (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) | 4730 | (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) |
4731 | return; | 4731 | return; |
4732 | 4732 | ||
4733 | fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | | 4733 | fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | |
4734 | NFS_ATTR_FATTR_NLINK; | 4734 | NFS_ATTR_FATTR_NLINK; |
4735 | fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; | 4735 | fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; |
4736 | fattr->nlink = 2; | 4736 | fattr->nlink = 2; |
4737 | } | 4737 | } |
4738 | 4738 | ||
/*
 * nfs4_proc_fs_locations - retrieve the fs_locations attribute for @name
 * @dir: parent directory inode
 * @name: entry whose filesystem locations are being queried
 * @fs_locations: pre-allocated result structure, (re)initialized here
 * @page: page backing the XDR-decoded locations data
 *
 * Issues a synchronous FS_LOCATIONS compound; returns its RPC status.
 */
int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
		struct nfs4_fs_locations *fs_locations, struct page *page)
{
	struct nfs_server *server = NFS_SERVER(dir);
	/* Always request the fsid and the locations themselves */
	u32 bitmask[2] = {
		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
	};
	struct nfs4_fs_locations_arg args = {
		.dir_fh = NFS_FH(dir),
		.name = name,
		.page = page,
		.bitmask = bitmask,
	};
	struct nfs4_fs_locations_res res = {
		.fs_locations = fs_locations,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("%s: start\n", __func__);

	/* Ask for the fileid of the absent filesystem if mounted_on_fileid
	 * is not supported */
	if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
		bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
	else
		bitmask[0] |= FATTR4_WORD0_FILEID;

	/* Reset the result fields before the decoder fills them in */
	nfs_fattr_init(&fs_locations->fattr);
	fs_locations->server = server;
	fs_locations->nlocations = 0;
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("%s: returned status = %d\n", __func__, status);
	return status;
}
4778 | 4778 | ||
/*
 * _nfs4_proc_secinfo - one SECINFO attempt, with no exception handling
 * @dir: directory containing @name
 * @name: name whose acceptable security flavors are being queried
 * @flavors: result array filled in by the XDR decoder
 *
 * Callers wrap this in the NFSv4 exception/retry loop; see
 * nfs4_proc_secinfo().
 */
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
	int status;
	struct nfs4_secinfo_arg args = {
		.dir_fh = NFS_FH(dir),
		.name = name,
	};
	struct nfs4_secinfo_res res = {
		.flavors = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	dprintk("NFS call secinfo %s\n", name->name);
	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("NFS reply secinfo: %d\n", status);
	return status;
}
4800 | 4800 | ||
4801 | int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) | 4801 | int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) |
4802 | { | 4802 | { |
4803 | struct nfs4_exception exception = { }; | 4803 | struct nfs4_exception exception = { }; |
4804 | int err; | 4804 | int err; |
4805 | do { | 4805 | do { |
4806 | err = nfs4_handle_exception(NFS_SERVER(dir), | 4806 | err = nfs4_handle_exception(NFS_SERVER(dir), |
4807 | _nfs4_proc_secinfo(dir, name, flavors), | 4807 | _nfs4_proc_secinfo(dir, name, flavors), |
4808 | &exception); | 4808 | &exception); |
4809 | } while (exception.retry); | 4809 | } while (exception.retry); |
4810 | return err; | 4810 | return err; |
4811 | } | 4811 | } |
4812 | 4812 | ||
4813 | #ifdef CONFIG_NFS_V4_1 | 4813 | #ifdef CONFIG_NFS_V4_1 |
4814 | /* | 4814 | /* |
4815 | * Check the exchange flags returned by the server for invalid flags, having | 4815 | * Check the exchange flags returned by the server for invalid flags, having |
4816 | * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or | 4816 | * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or |
4817 | * DS flags set. | 4817 | * DS flags set. |
4818 | */ | 4818 | */ |
4819 | static int nfs4_check_cl_exchange_flags(u32 flags) | 4819 | static int nfs4_check_cl_exchange_flags(u32 flags) |
4820 | { | 4820 | { |
4821 | if (flags & ~EXCHGID4_FLAG_MASK_R) | 4821 | if (flags & ~EXCHGID4_FLAG_MASK_R) |
4822 | goto out_inval; | 4822 | goto out_inval; |
4823 | if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && | 4823 | if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && |
4824 | (flags & EXCHGID4_FLAG_USE_NON_PNFS)) | 4824 | (flags & EXCHGID4_FLAG_USE_NON_PNFS)) |
4825 | goto out_inval; | 4825 | goto out_inval; |
4826 | if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) | 4826 | if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) |
4827 | goto out_inval; | 4827 | goto out_inval; |
4828 | return NFS_OK; | 4828 | return NFS_OK; |
4829 | out_inval: | 4829 | out_inval: |
4830 | return -NFS4ERR_INVAL; | 4830 | return -NFS4ERR_INVAL; |
4831 | } | 4831 | } |
4832 | 4832 | ||
4833 | static bool | 4833 | static bool |
4834 | nfs41_same_server_scope(struct server_scope *a, struct server_scope *b) | 4834 | nfs41_same_server_scope(struct server_scope *a, struct server_scope *b) |
4835 | { | 4835 | { |
4836 | if (a->server_scope_sz == b->server_scope_sz && | 4836 | if (a->server_scope_sz == b->server_scope_sz && |
4837 | memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) | 4837 | memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) |
4838 | return true; | 4838 | return true; |
4839 | 4839 | ||
4840 | return false; | 4840 | return false; |
4841 | } | 4841 | } |
4842 | 4842 | ||
4843 | /* | 4843 | /* |
4844 | * nfs4_proc_exchange_id() | 4844 | * nfs4_proc_exchange_id() |
4845 | * | 4845 | * |
4846 | * Since the clientid has expired, all compounds using sessions | 4846 | * Since the clientid has expired, all compounds using sessions |
4847 | * associated with the stale clientid will be returning | 4847 | * associated with the stale clientid will be returning |
4848 | * NFS4ERR_BADSESSION in the sequence operation, and will therefore | 4848 | * NFS4ERR_BADSESSION in the sequence operation, and will therefore |
4849 | * be in some phase of session reset. | 4849 | * be in some phase of session reset. |
4850 | */ | 4850 | */ |
4851 | int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) | 4851 | int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) |
4852 | { | 4852 | { |
4853 | nfs4_verifier verifier; | 4853 | nfs4_verifier verifier; |
4854 | struct nfs41_exchange_id_args args = { | 4854 | struct nfs41_exchange_id_args args = { |
4855 | .client = clp, | 4855 | .client = clp, |
4856 | .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, | 4856 | .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, |
4857 | }; | 4857 | }; |
4858 | struct nfs41_exchange_id_res res = { | 4858 | struct nfs41_exchange_id_res res = { |
4859 | .client = clp, | 4859 | .client = clp, |
4860 | }; | 4860 | }; |
4861 | int status; | 4861 | int status; |
4862 | struct rpc_message msg = { | 4862 | struct rpc_message msg = { |
4863 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], | 4863 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], |
4864 | .rpc_argp = &args, | 4864 | .rpc_argp = &args, |
4865 | .rpc_resp = &res, | 4865 | .rpc_resp = &res, |
4866 | .rpc_cred = cred, | 4866 | .rpc_cred = cred, |
4867 | }; | 4867 | }; |
4868 | __be32 *p; | 4868 | __be32 *p; |
4869 | 4869 | ||
4870 | dprintk("--> %s\n", __func__); | 4870 | dprintk("--> %s\n", __func__); |
4871 | BUG_ON(clp == NULL); | 4871 | BUG_ON(clp == NULL); |
4872 | 4872 | ||
4873 | p = (u32 *)verifier.data; | 4873 | p = (u32 *)verifier.data; |
4874 | *p++ = htonl((u32)clp->cl_boot_time.tv_sec); | 4874 | *p++ = htonl((u32)clp->cl_boot_time.tv_sec); |
4875 | *p = htonl((u32)clp->cl_boot_time.tv_nsec); | 4875 | *p = htonl((u32)clp->cl_boot_time.tv_nsec); |
4876 | args.verifier = &verifier; | 4876 | args.verifier = &verifier; |
4877 | 4877 | ||
4878 | args.id_len = scnprintf(args.id, sizeof(args.id), | 4878 | args.id_len = scnprintf(args.id, sizeof(args.id), |
4879 | "%s/%s.%s/%u", | 4879 | "%s/%s.%s/%u", |
4880 | clp->cl_ipaddr, | 4880 | clp->cl_ipaddr, |
4881 | init_utsname()->nodename, | 4881 | init_utsname()->nodename, |
4882 | init_utsname()->domainname, | 4882 | init_utsname()->domainname, |
4883 | clp->cl_rpcclient->cl_auth->au_flavor); | 4883 | clp->cl_rpcclient->cl_auth->au_flavor); |
4884 | 4884 | ||
4885 | res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); | 4885 | res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); |
4886 | if (unlikely(!res.server_scope)) | 4886 | if (unlikely(!res.server_scope)) |
4887 | return -ENOMEM; | 4887 | return -ENOMEM; |
4888 | 4888 | ||
4889 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 4889 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
4890 | if (!status) | 4890 | if (!status) |
4891 | status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); | 4891 | status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); |
4892 | 4892 | ||
4893 | if (!status) { | 4893 | if (!status) { |
4894 | if (clp->server_scope && | 4894 | if (clp->server_scope && |
4895 | !nfs41_same_server_scope(clp->server_scope, | 4895 | !nfs41_same_server_scope(clp->server_scope, |
4896 | res.server_scope)) { | 4896 | res.server_scope)) { |
4897 | dprintk("%s: server_scope mismatch detected\n", | 4897 | dprintk("%s: server_scope mismatch detected\n", |
4898 | __func__); | 4898 | __func__); |
4899 | set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); | 4899 | set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); |
4900 | kfree(clp->server_scope); | 4900 | kfree(clp->server_scope); |
4901 | clp->server_scope = NULL; | 4901 | clp->server_scope = NULL; |
4902 | } | 4902 | } |
4903 | 4903 | ||
4904 | if (!clp->server_scope) | 4904 | if (!clp->server_scope) |
4905 | clp->server_scope = res.server_scope; | 4905 | clp->server_scope = res.server_scope; |
4906 | else | 4906 | else |
4907 | kfree(res.server_scope); | 4907 | kfree(res.server_scope); |
4908 | } | 4908 | } |
4909 | 4909 | ||
4910 | dprintk("<-- %s status= %d\n", __func__, status); | 4910 | dprintk("<-- %s status= %d\n", __func__, status); |
4911 | return status; | 4911 | return status; |
4912 | } | 4912 | } |
4913 | 4913 | ||
/* Bundles the argument, result, and client pointers handed to the
 * GET_LEASE_TIME rpc_call_ops callbacks through the task calldata. */
struct nfs4_get_lease_time_data {
	struct nfs4_get_lease_time_args *args;
	struct nfs4_get_lease_time_res *res;
	struct nfs_client *clp;
};
4919 | 4919 | ||
/*
 * rpc_call_prepare callback for GET_LEASE_TIME.
 *
 * Runs the task at privileged RPC priority and sets up the session
 * sequence slot without triggering session recovery, because this
 * call is itself issued from within session recovery.
 */
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
					void *calldata)
{
	int ret;
	struct nfs4_get_lease_time_data *data =
			(struct nfs4_get_lease_time_data *)calldata;

	dprintk("--> %s\n", __func__);
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	/* just setup sequence, do not trigger session recovery
	   since we're invoked within one */
	ret = nfs41_setup_sequence(data->clp->cl_session,
				   &data->args->la_seq_args,
				   &data->res->lr_seq_res, 0, task);

	/* NOTE(review): asserts a slot is always available here —
	 * presumably guaranteed for privileged tasks; confirm. */
	BUG_ON(ret == -EAGAIN);
	rpc_call_start(task);
	dprintk("<-- %s\n", __func__);
}
4939 | 4939 | ||
/*
 * Called from nfs4_state_manager thread for session setup, so don't recover
 * from sequence operation or clientid errors.
 */
static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_get_lease_time_data *data =
			(struct nfs4_get_lease_time_data *)calldata;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
		return;
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		/* Server asked us to back off: delay, clear the error... */
		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
		rpc_delay(task, NFS4_POLL_RETRY_MIN);
		task->tk_status = 0;
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
		/* ...and restart the call from the prepare stage */
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}
4965 | 4965 | ||
/* Callback table for the async GET_LEASE_TIME task.
 * NOTE(review): only referenced via &nfs4_get_lease_time_ops in
 * nfs4_proc_get_lease_time() below — looks like it could be
 * static const; confirm there are no external users. */
struct rpc_call_ops nfs4_get_lease_time_ops = {
	.rpc_call_prepare = nfs4_get_lease_time_prepare,
	.rpc_call_done = nfs4_get_lease_time_done,
};
4970 | 4970 | ||
/*
 * nfs4_proc_get_lease_time - fetch the server's lease time into @fsinfo
 * @clp: client whose session the call is issued on
 * @fsinfo: result buffer (lr_fsinfo) filled by the XDR decoder
 *
 * Runs GET_LEASE_TIME as an rpc_task with the privileged callbacks in
 * nfs4_get_lease_time_ops, then returns the task status.
 */
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
	struct rpc_task *task;
	struct nfs4_get_lease_time_args args;
	struct nfs4_get_lease_time_res res = {
		.lr_fsinfo = fsinfo,
	};
	/* calldata handed to the rpc_call_ops callbacks */
	struct nfs4_get_lease_time_data data = {
		.args = &args,
		.res = &res,
		.clp = clp,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_get_lease_time_ops,
		.callback_data = &data,
		.flags = RPC_TASK_TIMEOUT,
	};
	int status;

	dprintk("--> %s\n", __func__);
	task = rpc_run_task(&task_setup);

	if (IS_ERR(task))
		status = PTR_ERR(task);
	else {
		status = task->tk_status;
		rpc_put_task(task);
	}
	dprintk("<-- %s return %d\n", __func__, status);

	return status;
}
5010 | 5010 | ||
5011 | /* | 5011 | /* |
5012 | * Reset a slot table | 5012 | * Reset a slot table |
5013 | */ | 5013 | */ |
5014 | static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, | 5014 | static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, |
5015 | int ivalue) | 5015 | int ivalue) |
5016 | { | 5016 | { |
5017 | struct nfs4_slot *new = NULL; | 5017 | struct nfs4_slot *new = NULL; |
5018 | int i; | 5018 | int i; |
5019 | int ret = 0; | 5019 | int ret = 0; |
5020 | 5020 | ||
5021 | dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, | 5021 | dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, |
5022 | max_reqs, tbl->max_slots); | 5022 | max_reqs, tbl->max_slots); |
5023 | 5023 | ||
5024 | /* Does the newly negotiated max_reqs match the existing slot table? */ | 5024 | /* Does the newly negotiated max_reqs match the existing slot table? */ |
5025 | if (max_reqs != tbl->max_slots) { | 5025 | if (max_reqs != tbl->max_slots) { |
5026 | ret = -ENOMEM; | 5026 | ret = -ENOMEM; |
5027 | new = kmalloc(max_reqs * sizeof(struct nfs4_slot), | 5027 | new = kmalloc(max_reqs * sizeof(struct nfs4_slot), |
5028 | GFP_NOFS); | 5028 | GFP_NOFS); |
5029 | if (!new) | 5029 | if (!new) |
5030 | goto out; | 5030 | goto out; |
5031 | ret = 0; | 5031 | ret = 0; |
5032 | kfree(tbl->slots); | 5032 | kfree(tbl->slots); |
5033 | } | 5033 | } |
5034 | spin_lock(&tbl->slot_tbl_lock); | 5034 | spin_lock(&tbl->slot_tbl_lock); |
5035 | if (new) { | 5035 | if (new) { |
5036 | tbl->slots = new; | 5036 | tbl->slots = new; |
5037 | tbl->max_slots = max_reqs; | 5037 | tbl->max_slots = max_reqs; |
5038 | } | 5038 | } |
5039 | for (i = 0; i < tbl->max_slots; ++i) | 5039 | for (i = 0; i < tbl->max_slots; ++i) |
5040 | tbl->slots[i].seq_nr = ivalue; | 5040 | tbl->slots[i].seq_nr = ivalue; |
5041 | spin_unlock(&tbl->slot_tbl_lock); | 5041 | spin_unlock(&tbl->slot_tbl_lock); |
5042 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, | 5042 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, |
5043 | tbl, tbl->slots, tbl->max_slots); | 5043 | tbl, tbl->slots, tbl->max_slots); |
5044 | out: | 5044 | out: |
5045 | dprintk("<-- %s: return %d\n", __func__, ret); | 5045 | dprintk("<-- %s: return %d\n", __func__, ret); |
5046 | return ret; | 5046 | return ret; |
5047 | } | 5047 | } |
5048 | 5048 | ||
5049 | /* Destroy the slot table */ | 5049 | /* Destroy the slot table */ |
5050 | static void nfs4_destroy_slot_tables(struct nfs4_session *session) | 5050 | static void nfs4_destroy_slot_tables(struct nfs4_session *session) |
5051 | { | 5051 | { |
5052 | if (session->fc_slot_table.slots != NULL) { | 5052 | if (session->fc_slot_table.slots != NULL) { |
5053 | kfree(session->fc_slot_table.slots); | 5053 | kfree(session->fc_slot_table.slots); |
5054 | session->fc_slot_table.slots = NULL; | 5054 | session->fc_slot_table.slots = NULL; |
5055 | } | 5055 | } |
5056 | if (session->bc_slot_table.slots != NULL) { | 5056 | if (session->bc_slot_table.slots != NULL) { |
5057 | kfree(session->bc_slot_table.slots); | 5057 | kfree(session->bc_slot_table.slots); |
5058 | session->bc_slot_table.slots = NULL; | 5058 | session->bc_slot_table.slots = NULL; |
5059 | } | 5059 | } |
5060 | return; | 5060 | return; |
5061 | } | 5061 | } |
5062 | 5062 | ||
/*
 * Initialize slot table
 * @tbl: table to populate (slots must currently be unallocated)
 * @max_slots: number of slots to allocate
 * @ivalue: requested initial seq_nr
 *
 * Returns 0 or -ENOMEM.
 */
static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
		int max_slots, int ivalue)
{
	struct nfs4_slot *slot;
	int ret = -ENOMEM;

	BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);

	dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);

	/* kcalloc zeroes the array, so every slot's seq_nr starts at 0.
	 * NOTE(review): @ivalue is never applied here, unlike in
	 * nfs4_reset_slot_table() — confirm whether that is intended. */
	slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
	if (!slot)
		goto out;
	ret = 0;

	spin_lock(&tbl->slot_tbl_lock);
	tbl->max_slots = max_slots;
	tbl->slots = slot;
	tbl->highest_used_slotid = -1;  /* no slot is currently used */
	spin_unlock(&tbl->slot_tbl_lock);
	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
		tbl, tbl->slots, tbl->max_slots);
out:
	dprintk("<-- %s: return %d\n", __func__, ret);
	return ret;
}
5092 | 5092 | ||
/*
 * Initialize or reset the forechannel and backchannel tables
 *
 * Called after CREATE_SESSION has negotiated fc_attrs/bc_attrs.
 * First use allocates each table; later calls resize/reset in place.
 * Returns 0 or -ENOMEM.
 */
static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
	struct nfs4_slot_table *tbl;
	int status;

	dprintk("--> %s\n", __func__);
	/* Fore channel */
	tbl = &ses->fc_slot_table;
	if (tbl->slots == NULL) {
		status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
		if (status) /* -ENOMEM */
			return status;
	} else {
		status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
		if (status)
			return status;
	}
	/* Back channel */
	tbl = &ses->bc_slot_table;
	if (tbl->slots == NULL) {
		status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
		if (status)
			/* Fore and back channel share a connection so get
			 * both slot tables or neither */
			nfs4_destroy_slot_tables(ses);
	} else
		status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
	return status;
}
5125 | 5125 | ||
/*
 * nfs4_alloc_session - allocate and minimally initialize a session
 * @clp: owning client
 *
 * Slot tables are left empty here; they are sized later by
 * nfs4_setup_session_slot_tables() once CREATE_SESSION has negotiated
 * the channel attributes. Returns NULL on allocation failure.
 */
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	/* Fore channel: uses a priority wait queue */
	tbl = &session->fc_slot_table;
	tbl->highest_used_slotid = -1;	/* no slot in use yet */
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
	init_completion(&tbl->complete);

	/* Back channel: same setup, but a plain (non-priority) queue */
	tbl = &session->bc_slot_table;
	tbl->highest_used_slotid = -1;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
	init_completion(&tbl->complete);

	session->session_state = 1<<NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}
5152 | 5152 | ||
/*
 * nfs4_destroy_session - tear down a session and free it
 *
 * Sends DESTROY_SESSION to the server, releases the transport's
 * backchannel resources, frees both slot tables, then the session.
 */
void nfs4_destroy_session(struct nfs4_session *session)
{
	nfs4_proc_destroy_session(session);
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, session->clp->cl_rpcclient->cl_xprt);
	xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
				NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_slot_tables(session);
	kfree(session);
}
5163 | 5163 | ||
/*
 * Initialize the values to be used by the client in CREATE_SESSION
 * If nfs4_init_session set the fore channel request and response sizes,
 * use them.
 *
 * Set the back channel max_resp_sz_cached to zero to force the client to
 * always set csa_cachethis to FALSE because the current implementation
 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
 */
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
	struct nfs4_session *session = args->client->cl_session;
	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
		     mxresp_sz = session->fc_attrs.max_resp_sz;

	/* Fall back to the maximum I/O size if no sizes were preset */
	if (mxrqst_sz == 0)
		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
	if (mxresp_sz == 0)
		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
	/* Fore channel attributes */
	args->fc_attrs.max_rqst_sz = mxrqst_sz;
	args->fc_attrs.max_resp_sz = mxresp_sz;
	args->fc_attrs.max_ops = NFS4_MAX_OPS;
	/* Request as many slots as the transport has request buffers */
	args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;

	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_ops=%u max_reqs=%u\n",
		__func__,
		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

	/* Back channel attributes */
	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz_cached = 0;
	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	/* A single backchannel slot: one callback at a time */
	args->bc_attrs.max_reqs = 1;

	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
		__func__,
		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
		args->bc_attrs.max_reqs);
}
5209 | 5209 | ||
5210 | static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) | 5210 | static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) |
5211 | { | 5211 | { |
5212 | struct nfs4_channel_attrs *sent = &args->fc_attrs; | 5212 | struct nfs4_channel_attrs *sent = &args->fc_attrs; |
5213 | struct nfs4_channel_attrs *rcvd = &session->fc_attrs; | 5213 | struct nfs4_channel_attrs *rcvd = &session->fc_attrs; |
5214 | 5214 | ||
5215 | if (rcvd->max_resp_sz > sent->max_resp_sz) | 5215 | if (rcvd->max_resp_sz > sent->max_resp_sz) |
5216 | return -EINVAL; | 5216 | return -EINVAL; |
5217 | /* | 5217 | /* |
5218 | * Our requested max_ops is the minimum we need; we're not | 5218 | * Our requested max_ops is the minimum we need; we're not |
5219 | * prepared to break up compounds into smaller pieces than that. | 5219 | * prepared to break up compounds into smaller pieces than that. |
5220 | * So, no point even trying to continue if the server won't | 5220 | * So, no point even trying to continue if the server won't |
5221 | * cooperate: | 5221 | * cooperate: |
5222 | */ | 5222 | */ |
5223 | if (rcvd->max_ops < sent->max_ops) | 5223 | if (rcvd->max_ops < sent->max_ops) |
5224 | return -EINVAL; | 5224 | return -EINVAL; |
5225 | if (rcvd->max_reqs == 0) | 5225 | if (rcvd->max_reqs == 0) |
5226 | return -EINVAL; | 5226 | return -EINVAL; |
5227 | return 0; | 5227 | return 0; |
5228 | } | 5228 | } |
5229 | 5229 | ||
5230 | static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) | 5230 | static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) |
5231 | { | 5231 | { |
5232 | struct nfs4_channel_attrs *sent = &args->bc_attrs; | 5232 | struct nfs4_channel_attrs *sent = &args->bc_attrs; |
5233 | struct nfs4_channel_attrs *rcvd = &session->bc_attrs; | 5233 | struct nfs4_channel_attrs *rcvd = &session->bc_attrs; |
5234 | 5234 | ||
5235 | if (rcvd->max_rqst_sz > sent->max_rqst_sz) | 5235 | if (rcvd->max_rqst_sz > sent->max_rqst_sz) |
5236 | return -EINVAL; | 5236 | return -EINVAL; |
5237 | if (rcvd->max_resp_sz < sent->max_resp_sz) | 5237 | if (rcvd->max_resp_sz < sent->max_resp_sz) |
5238 | return -EINVAL; | 5238 | return -EINVAL; |
5239 | if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) | 5239 | if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) |
5240 | return -EINVAL; | 5240 | return -EINVAL; |
5241 | /* These would render the backchannel useless: */ | 5241 | /* These would render the backchannel useless: */ |
5242 | if (rcvd->max_ops == 0) | 5242 | if (rcvd->max_ops == 0) |
5243 | return -EINVAL; | 5243 | return -EINVAL; |
5244 | if (rcvd->max_reqs == 0) | 5244 | if (rcvd->max_reqs == 0) |
5245 | return -EINVAL; | 5245 | return -EINVAL; |
5246 | return 0; | 5246 | return 0; |
5247 | } | 5247 | } |
5248 | 5248 | ||
/*
 * Verify both negotiated channels; the fore channel is checked first
 * and a failure there short-circuits the back channel check.
 */
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
				     struct nfs4_session *session)
{
	int status = nfs4_verify_fore_channel_attrs(args, session);

	if (status != 0)
		return status;
	return nfs4_verify_back_channel_attrs(args, session);
}
5259 | 5259 | ||
/*
 * Perform a synchronous CREATE_SESSION call for @clp.  On success the
 * negotiated channel attributes have been verified against what we
 * requested, and clp->cl_seqid has been incremented.
 */
static int _nfs4_proc_create_session(struct nfs_client *clp)
{
	struct nfs4_session *session = clp->cl_session;
	struct nfs41_create_session_args args = {
		.client = clp,
		.cb_program = NFS4_CALLBACK,
	};
	struct nfs41_create_session_res res = {
		.client = clp,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	/* Fill in the fore/back channel attributes we want to request. */
	nfs4_init_channel_attrs(&args);
	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);

	if (!status)
		/* Verify the session's negotiated channel_attrs values */
		status = nfs4_verify_channel_attrs(&args, session);
	if (!status) {
		/* Increment the clientid slot sequence id */
		clp->cl_seqid++;
	}

	return status;
}
5292 | 5292 | ||
/*
 * Issues a CREATE_SESSION operation to the server.
 * It is the responsibility of the caller to verify the session is
 * expired before calling this routine.
 *
 * On success the session slot tables have been (re)initialized.
 */
int nfs4_proc_create_session(struct nfs_client *clp)
{
	int status;
	unsigned *ptr;
	struct nfs4_session *session = clp->cl_session;

	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);

	status = _nfs4_proc_create_session(clp);
	if (status)
		goto out;

	/* Init or reset the session slot tables */
	status = nfs4_setup_session_slot_tables(session);
	dprintk("slot table setup returned %d\n", status);
	if (status)
		goto out;

	/* Debug dump of the raw session id as four host-order words. */
	ptr = (unsigned *)&session->sess_id.data[0];
	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
out:
	dprintk("<-- %s\n", __func__);
	return status;
}
5323 | 5323 | ||
5324 | /* | 5324 | /* |
5325 | * Issue the over-the-wire RPC DESTROY_SESSION. | 5325 | * Issue the over-the-wire RPC DESTROY_SESSION. |
5326 | * The caller must serialize access to this routine. | 5326 | * The caller must serialize access to this routine. |
5327 | */ | 5327 | */ |
5328 | int nfs4_proc_destroy_session(struct nfs4_session *session) | 5328 | int nfs4_proc_destroy_session(struct nfs4_session *session) |
5329 | { | 5329 | { |
5330 | int status = 0; | 5330 | int status = 0; |
5331 | struct rpc_message msg; | 5331 | struct rpc_message msg; |
5332 | 5332 | ||
5333 | dprintk("--> nfs4_proc_destroy_session\n"); | 5333 | dprintk("--> nfs4_proc_destroy_session\n"); |
5334 | 5334 | ||
5335 | /* session is still being setup */ | 5335 | /* session is still being setup */ |
5336 | if (session->clp->cl_cons_state != NFS_CS_READY) | 5336 | if (session->clp->cl_cons_state != NFS_CS_READY) |
5337 | return status; | 5337 | return status; |
5338 | 5338 | ||
5339 | msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION]; | 5339 | msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION]; |
5340 | msg.rpc_argp = session; | 5340 | msg.rpc_argp = session; |
5341 | msg.rpc_resp = NULL; | 5341 | msg.rpc_resp = NULL; |
5342 | msg.rpc_cred = NULL; | 5342 | msg.rpc_cred = NULL; |
5343 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5343 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
5344 | 5344 | ||
5345 | if (status) | 5345 | if (status) |
5346 | printk(KERN_WARNING | 5346 | printk(KERN_WARNING |
5347 | "Got error %d from the server on DESTROY_SESSION. " | 5347 | "Got error %d from the server on DESTROY_SESSION. " |
5348 | "Session has been destroyed regardless...\n", status); | 5348 | "Session has been destroyed regardless...\n", status); |
5349 | 5349 | ||
5350 | dprintk("<-- nfs4_proc_destroy_session\n"); | 5350 | dprintk("<-- nfs4_proc_destroy_session\n"); |
5351 | return status; | 5351 | return status; |
5352 | } | 5352 | } |
5353 | 5353 | ||
/*
 * Initialize the per-server session I/O parameters and make sure the
 * lease is established.  Runs at most once per session: the
 * NFS4_SESSION_INITING bit gates the setup.
 */
int nfs4_init_session(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_session *session;
	unsigned int rsize, wsize;
	int ret;

	if (!nfs4_has_session(clp))
		return 0;

	session = clp->cl_session;
	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
		return 0;

	/* Fall back to the maximum I/O size if the mount didn't set one. */
	rsize = server->rsize;
	if (rsize == 0)
		rsize = NFS_MAX_FILE_IO_SIZE;
	wsize = server->wsize;
	if (wsize == 0)
		wsize = NFS_MAX_FILE_IO_SIZE;

	/* Request buffers big enough for the I/O size plus RPC overhead. */
	session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
	session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;

	ret = nfs4_recover_expired_lease(server);
	if (!ret)
		ret = nfs4_check_client_ready(clp);
	return ret;
}
5383 | 5383 | ||
5384 | int nfs4_init_ds_session(struct nfs_client *clp) | 5384 | int nfs4_init_ds_session(struct nfs_client *clp) |
5385 | { | 5385 | { |
5386 | struct nfs4_session *session = clp->cl_session; | 5386 | struct nfs4_session *session = clp->cl_session; |
5387 | int ret; | 5387 | int ret; |
5388 | 5388 | ||
5389 | if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) | 5389 | if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) |
5390 | return 0; | 5390 | return 0; |
5391 | 5391 | ||
5392 | ret = nfs4_client_recover_expired_lease(clp); | 5392 | ret = nfs4_client_recover_expired_lease(clp); |
5393 | if (!ret) | 5393 | if (!ret) |
5394 | /* Test for the DS role */ | 5394 | /* Test for the DS role */ |
5395 | if (!is_ds_client(clp)) | 5395 | if (!is_ds_client(clp)) |
5396 | ret = -ENODEV; | 5396 | ret = -ENODEV; |
5397 | if (!ret) | 5397 | if (!ret) |
5398 | ret = nfs4_check_client_ready(clp); | 5398 | ret = nfs4_check_client_ready(clp); |
5399 | return ret; | 5399 | return ret; |
5400 | 5400 | ||
5401 | } | 5401 | } |
5402 | EXPORT_SYMBOL_GPL(nfs4_init_ds_session); | 5402 | EXPORT_SYMBOL_GPL(nfs4_init_ds_session); |
5403 | 5403 | ||
5404 | 5404 | ||
/*
 * Renew the cl_session lease.
 */
/* Call data for the asynchronous SEQUENCE (lease renewal) RPC. */
struct nfs4_sequence_data {
	struct nfs_client *clp;		/* client whose lease is being renewed */
	struct nfs4_sequence_args args;	/* SEQUENCE request arguments */
	struct nfs4_sequence_res res;	/* SEQUENCE reply results */
};
5413 | 5413 | ||
/*
 * rpc_release callback for the SEQUENCE renewal task: re-arm the next
 * renewal (unless we hold the last reference to the client), then drop
 * our client reference and free the call data.
 */
static void nfs41_sequence_release(void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	/* Only reschedule renewal if someone besides us still uses clp. */
	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(calldata);
}
5424 | 5424 | ||
5425 | static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) | 5425 | static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) |
5426 | { | 5426 | { |
5427 | switch(task->tk_status) { | 5427 | switch(task->tk_status) { |
5428 | case -NFS4ERR_DELAY: | 5428 | case -NFS4ERR_DELAY: |
5429 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 5429 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
5430 | return -EAGAIN; | 5430 | return -EAGAIN; |
5431 | default: | 5431 | default: |
5432 | nfs4_schedule_lease_recovery(clp); | 5432 | nfs4_schedule_lease_recovery(clp); |
5433 | } | 5433 | } |
5434 | return 0; | 5434 | return 0; |
5435 | } | 5435 | } |
5436 | 5436 | ||
/*
 * rpc_call_done for the asynchronous SEQUENCE renewal: process the
 * sequence result and retry transient failures.
 */
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	/* Handles slot bookkeeping; bail out if the call must be resent. */
	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
		return;

	if (task->tk_status < 0) {
		dprintk("%s ERROR %d\n", __func__, task->tk_status);
		/* If we hold the last client reference, don't bother retrying. */
		if (atomic_read(&clp->cl_count) == 1)
			goto out;

		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
	dprintk("<-- %s\n", __func__);
}
5459 | 5459 | ||
5460 | static void nfs41_sequence_prepare(struct rpc_task *task, void *data) | 5460 | static void nfs41_sequence_prepare(struct rpc_task *task, void *data) |
5461 | { | 5461 | { |
5462 | struct nfs4_sequence_data *calldata = data; | 5462 | struct nfs4_sequence_data *calldata = data; |
5463 | struct nfs_client *clp = calldata->clp; | 5463 | struct nfs_client *clp = calldata->clp; |
5464 | struct nfs4_sequence_args *args; | 5464 | struct nfs4_sequence_args *args; |
5465 | struct nfs4_sequence_res *res; | 5465 | struct nfs4_sequence_res *res; |
5466 | 5466 | ||
5467 | args = task->tk_msg.rpc_argp; | 5467 | args = task->tk_msg.rpc_argp; |
5468 | res = task->tk_msg.rpc_resp; | 5468 | res = task->tk_msg.rpc_resp; |
5469 | 5469 | ||
5470 | if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task)) | 5470 | if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task)) |
5471 | return; | 5471 | return; |
5472 | rpc_call_start(task); | 5472 | rpc_call_start(task); |
5473 | } | 5473 | } |
5474 | 5474 | ||
/* Callback table for the asynchronous SEQUENCE (lease renewal) RPC. */
static const struct rpc_call_ops nfs41_sequence_ops = {
	.rpc_call_done = nfs41_sequence_call_done,
	.rpc_call_prepare = nfs41_sequence_prepare,
	.rpc_release = nfs41_sequence_release,
};
5480 | 5480 | ||
/*
 * Launch an asynchronous SEQUENCE RPC to renew the lease.
 * Takes a reference to @clp; it is dropped in the rpc_release
 * callback (nfs41_sequence_release).  Returns the running task, or
 * an ERR_PTR on failure.
 */
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_sequence_data *calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs41_sequence_ops,
		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
	};

	/* Don't renew a client that is already being torn down. */
	if (!atomic_inc_not_zero(&clp->cl_count))
		return ERR_PTR(-EIO);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL) {
		nfs_put_client(clp);
		return ERR_PTR(-ENOMEM);
	}
	msg.rpc_argp = &calldata->args;
	msg.rpc_resp = &calldata->res;
	calldata->clp = clp;
	task_setup_data.callback_data = calldata;

	return rpc_run_task(&task_setup_data);
}
5509 | 5509 | ||
5510 | static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) | 5510 | static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) |
5511 | { | 5511 | { |
5512 | struct rpc_task *task; | 5512 | struct rpc_task *task; |
5513 | int ret = 0; | 5513 | int ret = 0; |
5514 | 5514 | ||
5515 | if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) | 5515 | if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) |
5516 | return 0; | 5516 | return 0; |
5517 | task = _nfs41_proc_sequence(clp, cred); | 5517 | task = _nfs41_proc_sequence(clp, cred); |
5518 | if (IS_ERR(task)) | 5518 | if (IS_ERR(task)) |
5519 | ret = PTR_ERR(task); | 5519 | ret = PTR_ERR(task); |
5520 | else | 5520 | else |
5521 | rpc_put_task_async(task); | 5521 | rpc_put_task_async(task); |
5522 | dprintk("<-- %s status=%d\n", __func__, ret); | 5522 | dprintk("<-- %s status=%d\n", __func__, ret); |
5523 | return ret; | 5523 | return ret; |
5524 | } | 5524 | } |
5525 | 5525 | ||
/*
 * Synchronous lease renewal: run a SEQUENCE RPC, wait for it, and
 * react to any status flags the server returned in the reply.
 */
static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_task *task;
	int ret;

	task = _nfs41_proc_sequence(clp, cred);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	ret = rpc_wait_for_completion_task(task);
	if (!ret) {
		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;

		/* Handle server-signalled state changes on success. */
		if (task->tk_status == 0)
			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
		ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}
5549 | 5549 | ||
/* Call data for an asynchronous RECLAIM_COMPLETE operation. */
struct nfs4_reclaim_complete_data {
	struct nfs_client *clp;			/* client finishing reclaim */
	struct nfs41_reclaim_complete_args arg;	/* request arguments */
	struct nfs41_reclaim_complete_res res;	/* reply results */
};
5555 | 5555 | ||
/*
 * rpc_call_prepare for RECLAIM_COMPLETE: run at privileged priority
 * (state recovery must not queue behind ordinary RPCs) and claim a
 * session slot before starting the call.
 */
static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	/* If no slot is free, the task is queued and we return early. */
	if (nfs41_setup_sequence(calldata->clp->cl_session,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, 0, task))
		return;

	rpc_call_start(task);
}
5568 | 5568 | ||
/*
 * Map a RECLAIM_COMPLETE status onto a retry decision: returns
 * -EAGAIN when the call should be resent (after a delay for
 * NFS4ERR_DELAY), 0 when processing is finished.
 */
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case 0:
	case -NFS4ERR_COMPLETE_ALREADY:
	case -NFS4ERR_WRONG_CRED: /* What to do here? */
		break;
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
		return -EAGAIN;
	default:
		/* Unexpected error: assume the lease needs recovery. */
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}
5586 | 5586 | ||
/*
 * rpc_call_done for RECLAIM_COMPLETE: restart the call on transient
 * errors; anything else has already been dealt with by
 * nfs41_reclaim_complete_handle_errors().
 */
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_res *res = &calldata->res.seq_res;

	dprintk("--> %s\n", __func__);
	/* Slot bookkeeping; bail out if the call must be resent as-is. */
	if (!nfs41_sequence_done(task, res))
		return;

	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}
5603 | 5603 | ||
/* rpc_release callback: dispose of the RECLAIM_COMPLETE call data. */
static void nfs4_free_reclaim_complete_data(void *data)
{
	kfree(data);
}
5610 | 5610 | ||
/* Callback table for the asynchronous RECLAIM_COMPLETE RPC. */
static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
	.rpc_call_done = nfs4_reclaim_complete_done,
	.rpc_release = nfs4_free_reclaim_complete_data,
};
5616 | 5616 | ||
/*
 * Issue a global reclaim complete.
 */
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
	struct nfs4_reclaim_complete_data *calldata;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_reclaim_complete_call_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	dprintk("--> %s\n", __func__);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL)
		goto out;
	calldata->clp = clp;
	/* one_fs == 0: reclaim is complete for all filesystems (global). */
	calldata->arg.one_fs = 0;

	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	/* calldata is freed by the rpc_release callback. */
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out;
	}
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	rpc_put_task(task);
	/*
	 * NOTE(review): the status computed above is discarded on this path
	 * and 0 is returned unconditionally — presumably callers are meant
	 * to ignore RECLAIM_COMPLETE failures here, but confirm; otherwise
	 * this should be "return status;".
	 */
	return 0;
out:
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}
5659 | 5659 | ||
/*
 * rpc_call_prepare for LAYOUTGET: claim a session slot, then choose
 * the stateid for the request.  If a usable layout stateid already
 * exists, the call is skipped by exiting the task with NFS4_OK.
 */
static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);
	/* Note the is a race here, where a CB_LAYOUTRECALL can come in
	 * right now covering the LAYOUTGET we are about to send.
	 * However, that is not so catastrophic, and there seems
	 * to be no way to prevent it completely.
	 */
	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
				&lgp->res.seq_res, 0, task))
		return;
	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
				NFS_I(lgp->args.inode)->layout,
				lgp->args.ctx->state)) {
		rpc_exit(task, NFS4_OK);
		return;
	}
	rpc_call_start(task);
}
5683 | 5683 | ||
/*
 * rpc_call_done for LAYOUTGET: "try later" style errors are mapped to
 * NFS4ERR_DELAY so the generic async error handler can arrange a
 * delayed retry; other errors go straight to that handler.
 */
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);

	/* Slot bookkeeping; bail out if the call must be resent as-is. */
	if (!nfs4_sequence_done(task, &lgp->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		/* Both mean "ask again later": treat as a plain delay. */
		task->tk_status = -NFS4ERR_DELAY;
		/* Fall through */
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("<-- %s\n", __func__);
}
5709 | 5709 | ||
/*
 * rpc_release callback for LAYOUTGET: drop the open context reference
 * taken for the call and free the nfs4_layoutget argument structure.
 */
static void nfs4_layoutget_release(void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;

	dprintk("--> %s\n", __func__);
	put_nfs_open_context(lgp->args.ctx);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}
5719 | 5719 | ||
/* RPC lifecycle callbacks for the asynchronous LAYOUTGET operation. */
static const struct rpc_call_ops nfs4_layoutget_call_ops = {
	.rpc_call_prepare = nfs4_layoutget_prepare,
	.rpc_call_done = nfs4_layoutget_done,
	.rpc_release = nfs4_layoutget_release,
};
5725 | 5725 | ||
/*
 * Issue a LAYOUTGET to the MDS and wait for it to complete.
 * On RPC success the returned layout is handed to pnfs_layout_process().
 * Returns 0 on success or a negative errno/NFS4 error.
 */
int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
{
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
		.rpc_argp = &lgp->args,
		.rpc_resp = &lgp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutget_call_ops,
		.callback_data = lgp,
		.flags = RPC_TASK_ASYNC,
	};
	int status = 0;

	dprintk("--> %s\n", __func__);

	/* Point the decoder at the caller-provided layout buffer and make
	 * sure the sequence slot starts out unassigned. */
	lgp->res.layoutp = &lgp->args.layout;
	lgp->res.seq_res.sr_slot = NULL;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	if (status == 0)
		status = pnfs_layout_process(lgp);
	rpc_put_task(task);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}
5760 | 5760 | ||
/*
 * rpc_call_prepare callback for LAYOUTRETURN: claim a session slot,
 * then start the call.
 */
static void
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;

	dprintk("--> %s\n", __func__);
	/* Nonzero means no slot was available; the task will be re-prepared
	 * when one frees up, so do not start the call yet. */
	if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
				&lrp->res.seq_res, 0, task))
		return;
	rpc_call_start(task);
}
5772 | 5772 | ||
/*
 * rpc_call_done callback for LAYOUTRETURN: on success, update (or verify
 * emptiness of) the layout state under the inode lock, and release the
 * LAYOUTGET block taken when the return was initiated.
 */
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo = lrp->args.layout;

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lrp->res.seq_res))
		return;

	server = NFS_SERVER(lrp->args.inode);
	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	spin_lock(&lo->plh_inode->i_lock);
	if (task->tk_status == 0) {
		if (lrp->res.lrs_present) {
			/* Server still holds state: record the new stateid. */
			pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
		} else
			/* No state left on the server, so no segments may
			 * remain cached locally either. */
			BUG_ON(!list_empty(&lo->plh_segs));
	}
	/* Allow LAYOUTGETs again now that the return has completed. */
	lo->plh_block_lgets--;
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
}
5800 | 5800 | ||
/*
 * rpc_release callback for LAYOUTRETURN: drop the layout header
 * reference held across the call and free the argument structure.
 */
static void nfs4_layoutreturn_release(void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;

	dprintk("--> %s\n", __func__);
	put_layout_hdr(lrp->args.layout);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}
5810 | 5810 | ||
/* RPC lifecycle callbacks for the LAYOUTRETURN operation. */
static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
	.rpc_call_prepare = nfs4_layoutreturn_prepare,
	.rpc_call_done = nfs4_layoutreturn_done,
	.rpc_release = nfs4_layoutreturn_release,
};
5816 | 5816 | ||
/*
 * Issue a synchronous-style LAYOUTRETURN to the MDS.
 * Returns 0 on success or a negative errno/NFS4 error.
 */
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
		.rpc_argp = &lrp->args,
		.rpc_resp = &lrp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = lrp->clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutreturn_call_ops,
		.callback_data = lrp,
	};
	int status;

	dprintk("--> %s\n", __func__);
	/* No RPC_TASK_ASYNC flag: rpc_run_task() returns only after the
	 * task has run to completion, so tk_status is valid here. */
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	dprintk("<-- %s status=%d\n", __func__, status);
	rpc_put_task(task);
	return status;
}
5842 | 5842 | ||
/*
 * Retrieve the list of Data Server devices from the MDS.
 */
/*
 * Single (non-retrying) GETDEVICELIST call; exceptions are handled by
 * the nfs4_proc_getdevicelist() wrapper.
 */
static int _nfs4_getdevicelist(struct nfs_server *server,
				    const struct nfs_fh *fh,
				    struct pnfs_devicelist *devlist)
{
	struct nfs4_getdevicelist_args args = {
		.fh = fh,
		.layoutclass = server->pnfs_curr_ld->id,
	};
	struct nfs4_getdevicelist_res res = {
		.devlist = devlist,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}
5870 | 5870 | ||
5871 | int nfs4_proc_getdevicelist(struct nfs_server *server, | 5871 | int nfs4_proc_getdevicelist(struct nfs_server *server, |
5872 | const struct nfs_fh *fh, | 5872 | const struct nfs_fh *fh, |
5873 | struct pnfs_devicelist *devlist) | 5873 | struct pnfs_devicelist *devlist) |
5874 | { | 5874 | { |
5875 | struct nfs4_exception exception = { }; | 5875 | struct nfs4_exception exception = { }; |
5876 | int err; | 5876 | int err; |
5877 | 5877 | ||
5878 | do { | 5878 | do { |
5879 | err = nfs4_handle_exception(server, | 5879 | err = nfs4_handle_exception(server, |
5880 | _nfs4_getdevicelist(server, fh, devlist), | 5880 | _nfs4_getdevicelist(server, fh, devlist), |
5881 | &exception); | 5881 | &exception); |
5882 | } while (exception.retry); | 5882 | } while (exception.retry); |
5883 | 5883 | ||
5884 | dprintk("%s: err=%d, num_devs=%u\n", __func__, | 5884 | dprintk("%s: err=%d, num_devs=%u\n", __func__, |
5885 | err, devlist->num_devs); | 5885 | err, devlist->num_devs); |
5886 | 5886 | ||
5887 | return err; | 5887 | return err; |
5888 | } | 5888 | } |
5889 | EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist); | 5889 | EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist); |
5890 | 5890 | ||
/*
 * Single (non-retrying) GETDEVICEINFO call for @pdev; exceptions are
 * handled by the nfs4_proc_getdeviceinfo() wrapper.
 */
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
	};
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}
5913 | 5913 | ||
5914 | int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev) | 5914 | int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev) |
5915 | { | 5915 | { |
5916 | struct nfs4_exception exception = { }; | 5916 | struct nfs4_exception exception = { }; |
5917 | int err; | 5917 | int err; |
5918 | 5918 | ||
5919 | do { | 5919 | do { |
5920 | err = nfs4_handle_exception(server, | 5920 | err = nfs4_handle_exception(server, |
5921 | _nfs4_proc_getdeviceinfo(server, pdev), | 5921 | _nfs4_proc_getdeviceinfo(server, pdev), |
5922 | &exception); | 5922 | &exception); |
5923 | } while (exception.retry); | 5923 | } while (exception.retry); |
5924 | return err; | 5924 | return err; |
5925 | } | 5925 | } |
5926 | EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); | 5926 | EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); |
5927 | 5927 | ||
/*
 * rpc_call_prepare callback for LAYOUTCOMMIT: claim a session slot
 * (cache_this == 1) and start the call.
 */
static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (nfs4_setup_sequence(server, &data->args.seq_args,
				&data->res.seq_res, 1, task))
		return;
	rpc_call_start(task);
}
5938 | 5938 | ||
5939 | static void | 5939 | static void |
5940 | nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) | 5940 | nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) |
5941 | { | 5941 | { |
5942 | struct nfs4_layoutcommit_data *data = calldata; | 5942 | struct nfs4_layoutcommit_data *data = calldata; |
5943 | struct nfs_server *server = NFS_SERVER(data->args.inode); | 5943 | struct nfs_server *server = NFS_SERVER(data->args.inode); |
5944 | 5944 | ||
5945 | if (!nfs4_sequence_done(task, &data->res.seq_res)) | 5945 | if (!nfs4_sequence_done(task, &data->res.seq_res)) |
5946 | return; | 5946 | return; |
5947 | 5947 | ||
5948 | switch (task->tk_status) { /* Just ignore these failures */ | 5948 | switch (task->tk_status) { /* Just ignore these failures */ |
5949 | case NFS4ERR_DELEG_REVOKED: /* layout was recalled */ | 5949 | case NFS4ERR_DELEG_REVOKED: /* layout was recalled */ |
5950 | case NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ | 5950 | case NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ |
5951 | case NFS4ERR_BADLAYOUT: /* no layout */ | 5951 | case NFS4ERR_BADLAYOUT: /* no layout */ |
5952 | case NFS4ERR_GRACE: /* loca_recalim always false */ | 5952 | case NFS4ERR_GRACE: /* loca_recalim always false */ |
5953 | task->tk_status = 0; | 5953 | task->tk_status = 0; |
5954 | } | 5954 | } |
5955 | 5955 | ||
5956 | if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { | 5956 | if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { |
5957 | rpc_restart_call_prepare(task); | 5957 | rpc_restart_call_prepare(task); |
5958 | return; | 5958 | return; |
5959 | } | 5959 | } |
5960 | 5960 | ||
5961 | if (task->tk_status == 0) | 5961 | if (task->tk_status == 0) |
5962 | nfs_post_op_update_inode_force_wcc(data->args.inode, | 5962 | nfs_post_op_update_inode_force_wcc(data->args.inode, |
5963 | data->res.fattr); | 5963 | data->res.fattr); |
5964 | } | 5964 | } |
5965 | 5965 | ||
/*
 * rpc_release callback for LAYOUTCOMMIT: drop the per-lseg references
 * taken at commit setup, clear the LAYOUTCOMMITTING flag, wake any
 * waiters, and free the call data.
 */
static void nfs4_layoutcommit_release(void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct pnfs_layout_segment *lseg, *tmp;
	unsigned long *bitlock = &NFS_I(data->args.inode)->flags;

	pnfs_cleanup_layoutcommit(data);
	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
				       &lseg->pls_flags))
			put_lseg(lseg);
	}

	/* Release the bit and publish it before waking sleepers. */
	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);

	put_rpccred(data->cred);
	kfree(data);
}
5988 | 5988 | ||
/* RPC lifecycle callbacks for the LAYOUTCOMMIT operation. */
static const struct rpc_call_ops nfs4_layoutcommit_ops = {
	.rpc_call_prepare = nfs4_layoutcommit_prepare,
	.rpc_call_done = nfs4_layoutcommit_done,
	.rpc_release = nfs4_layoutcommit_release,
};
5994 | 5994 | ||
5995 | int | 5995 | int |
5996 | nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) | 5996 | nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) |
5997 | { | 5997 | { |
5998 | struct rpc_message msg = { | 5998 | struct rpc_message msg = { |
5999 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], | 5999 | .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], |
6000 | .rpc_argp = &data->args, | 6000 | .rpc_argp = &data->args, |
6001 | .rpc_resp = &data->res, | 6001 | .rpc_resp = &data->res, |
6002 | .rpc_cred = data->cred, | 6002 | .rpc_cred = data->cred, |
6003 | }; | 6003 | }; |
6004 | struct rpc_task_setup task_setup_data = { | 6004 | struct rpc_task_setup task_setup_data = { |
6005 | .task = &data->task, | 6005 | .task = &data->task, |
6006 | .rpc_client = NFS_CLIENT(data->args.inode), | 6006 | .rpc_client = NFS_CLIENT(data->args.inode), |
6007 | .rpc_message = &msg, | 6007 | .rpc_message = &msg, |
6008 | .callback_ops = &nfs4_layoutcommit_ops, | 6008 | .callback_ops = &nfs4_layoutcommit_ops, |
6009 | .callback_data = data, | 6009 | .callback_data = data, |
6010 | .flags = RPC_TASK_ASYNC, | 6010 | .flags = RPC_TASK_ASYNC, |
6011 | }; | 6011 | }; |
6012 | struct rpc_task *task; | 6012 | struct rpc_task *task; |
6013 | int status = 0; | 6013 | int status = 0; |
6014 | 6014 | ||
6015 | dprintk("NFS: %4d initiating layoutcommit call. sync %d " | 6015 | dprintk("NFS: %4d initiating layoutcommit call. sync %d " |
6016 | "lbw: %llu inode %lu\n", | 6016 | "lbw: %llu inode %lu\n", |
6017 | data->task.tk_pid, sync, | 6017 | data->task.tk_pid, sync, |
6018 | data->args.lastbytewritten, | 6018 | data->args.lastbytewritten, |
6019 | data->args.inode->i_ino); | 6019 | data->args.inode->i_ino); |
6020 | 6020 | ||
6021 | task = rpc_run_task(&task_setup_data); | 6021 | task = rpc_run_task(&task_setup_data); |
6022 | if (IS_ERR(task)) | 6022 | if (IS_ERR(task)) |
6023 | return PTR_ERR(task); | 6023 | return PTR_ERR(task); |
6024 | if (sync == false) | 6024 | if (sync == false) |
6025 | goto out; | 6025 | goto out; |
6026 | status = nfs4_wait_for_completion_rpc_task(task); | 6026 | status = nfs4_wait_for_completion_rpc_task(task); |
6027 | if (status != 0) | 6027 | if (status != 0) |
6028 | goto out; | 6028 | goto out; |
6029 | status = task->tk_status; | 6029 | status = task->tk_status; |
6030 | out: | 6030 | out: |
6031 | dprintk("%s: status %d\n", __func__, status); | 6031 | dprintk("%s: status %d\n", __func__, status); |
6032 | rpc_put_task(task); | 6032 | rpc_put_task(task); |
6033 | return status; | 6033 | return status; |
6034 | } | 6034 | } |
6035 | 6035 | ||
/*
 * Single SECINFO_NO_NAME call against the current filehandle; fills in
 * @flavors with the security flavors the server advertises.  Retry
 * policy lives in the nfs41_proc_secinfo_no_name() wrapper.
 */
static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs41_secinfo_no_name_args args = {
		.style = SECINFO_STYLE_CURRENT_FH,
	};
	struct nfs4_secinfo_res res = {
		.flavors = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
6053 | 6053 | ||
/*
 * SECINFO_NO_NAME with the standard NFSv4 exception/retry loop.
 * -NFS4ERR_WRONGSEC and -NFS4ERR_NOTSUPP are passed straight back to
 * the caller (who falls back to guessing flavors) instead of being fed
 * to the exception handler.
 */
static int
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
			   struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
		switch (err) {
		case 0:
		case -NFS4ERR_WRONGSEC:
		case -NFS4ERR_NOTSUPP:
			break;
		default:
			err = nfs4_handle_exception(server, err, &exception);
		}
	} while (exception.retry);
	return err;
}
6073 | 6073 | ||
6074 | static int | 6074 | static int |
6075 | nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, | 6075 | nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, |
6076 | struct nfs_fsinfo *info) | 6076 | struct nfs_fsinfo *info) |
6077 | { | 6077 | { |
6078 | int err; | 6078 | int err; |
6079 | struct page *page; | 6079 | struct page *page; |
6080 | rpc_authflavor_t flavor; | 6080 | rpc_authflavor_t flavor; |
6081 | struct nfs4_secinfo_flavors *flavors; | 6081 | struct nfs4_secinfo_flavors *flavors; |
6082 | 6082 | ||
6083 | page = alloc_page(GFP_KERNEL); | 6083 | page = alloc_page(GFP_KERNEL); |
6084 | if (!page) { | 6084 | if (!page) { |
6085 | err = -ENOMEM; | 6085 | err = -ENOMEM; |
6086 | goto out; | 6086 | goto out; |
6087 | } | 6087 | } |
6088 | 6088 | ||
6089 | flavors = page_address(page); | 6089 | flavors = page_address(page); |
6090 | err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); | 6090 | err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); |
6091 | 6091 | ||
6092 | /* | 6092 | /* |
6093 | * Fall back on "guess and check" method if | 6093 | * Fall back on "guess and check" method if |
6094 | * the server doesn't support SECINFO_NO_NAME | 6094 | * the server doesn't support SECINFO_NO_NAME |
6095 | */ | 6095 | */ |
6096 | if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { | 6096 | if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { |
6097 | err = nfs4_find_root_sec(server, fhandle, info); | 6097 | err = nfs4_find_root_sec(server, fhandle, info); |
6098 | goto out_freepage; | 6098 | goto out_freepage; |
6099 | } | 6099 | } |
6100 | if (err) | 6100 | if (err) |
6101 | goto out_freepage; | 6101 | goto out_freepage; |
6102 | 6102 | ||
6103 | flavor = nfs_find_best_sec(flavors); | 6103 | flavor = nfs_find_best_sec(flavors); |
6104 | if (err == 0) | 6104 | if (err == 0) |
6105 | err = nfs4_lookup_root_sec(server, fhandle, info, flavor); | 6105 | err = nfs4_lookup_root_sec(server, fhandle, info, flavor); |
6106 | 6106 | ||
6107 | out_freepage: | 6107 | out_freepage: |
6108 | put_page(page); | 6108 | put_page(page); |
6109 | if (err == -EACCES) | 6109 | if (err == -EACCES) |
6110 | return -EPERM; | 6110 | return -EPERM; |
6111 | out: | 6111 | out: |
6112 | return err; | 6112 | return err; |
6113 | } | 6113 | } |
/*
 * Single TEST_STATEID call for @state; run without an established
 * session binding (sa_session/sr_session cleared) via the
 * privileged nfs4_call_sync_sequence() path.
 */
static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state)
{
	int status;
	struct nfs41_test_stateid_args args = {
		.stateid = &state->stateid,
	};
	struct nfs41_test_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	args.seq_args.sa_session = res.seq_res.sr_session = NULL;
	status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
	return status;
}
6130 | 6130 | ||
6131 | static int nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state) | 6131 | static int nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state) |
6132 | { | 6132 | { |
6133 | struct nfs4_exception exception = { }; | 6133 | struct nfs4_exception exception = { }; |
6134 | int err; | 6134 | int err; |
6135 | do { | 6135 | do { |
6136 | err = nfs4_handle_exception(server, | 6136 | err = nfs4_handle_exception(server, |
6137 | _nfs41_test_stateid(server, state), | 6137 | _nfs41_test_stateid(server, state), |
6138 | &exception); | 6138 | &exception); |
6139 | } while (exception.retry); | 6139 | } while (exception.retry); |
6140 | return err; | 6140 | return err; |
6141 | } | 6141 | } |
6142 | 6142 | ||
/*
 * Single FREE_STATEID call for @state; run without an established
 * session binding (sa_session/sr_session cleared) via the
 * privileged nfs4_call_sync_sequence() path.
 */
static int _nfs4_free_stateid(struct nfs_server *server, struct nfs4_state *state)
{
	int status;
	struct nfs41_free_stateid_args args = {
		.stateid = &state->stateid,
	};
	struct nfs41_free_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	args.seq_args.sa_session = res.seq_res.sr_session = NULL;
	status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
	return status;
}
6160 | 6160 | ||
6161 | static int nfs41_free_stateid(struct nfs_server *server, struct nfs4_state *state) | 6161 | static int nfs41_free_stateid(struct nfs_server *server, struct nfs4_state *state) |
6162 | { | 6162 | { |
6163 | struct nfs4_exception exception = { }; | 6163 | struct nfs4_exception exception = { }; |
6164 | int err; | 6164 | int err; |
6165 | do { | 6165 | do { |
6166 | err = nfs4_handle_exception(server, | 6166 | err = nfs4_handle_exception(server, |
6167 | _nfs4_free_stateid(server, state), | 6167 | _nfs4_free_stateid(server, state), |
6168 | &exception); | 6168 | &exception); |
6169 | } while (exception.retry); | 6169 | } while (exception.retry); |
6170 | return err; | 6170 | return err; |
6171 | } | 6171 | } |
6172 | #endif /* CONFIG_NFS_V4_1 */ | 6172 | #endif /* CONFIG_NFS_V4_1 */ |
6173 | 6173 | ||
/*
 * NFSv4.0 state recovery after a server reboot: opens and locks are
 * re-established with the RECLAIM variants, and the client ID is
 * re-negotiated via SETCLIENTID (nfs4_init_clientid).
 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
	.recover_open	= nfs4_open_reclaim,
	.recover_lock	= nfs4_lock_reclaim,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred	= nfs4_get_setclientid_cred,
};
6182 | 6182 | ||
6183 | #if defined(CONFIG_NFS_V4_1) | 6183 | #if defined(CONFIG_NFS_V4_1) |
/*
 * NFSv4.1 reboot recovery: same reclaim path as v4.0, but the client ID
 * comes from EXCHANGE_ID and a RECLAIM_COMPLETE is sent once all state
 * has been reclaimed (reclaim_complete hook, v4.1 only).
 */
struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
	.recover_open	= nfs4_open_reclaim,
	.recover_lock	= nfs4_lock_reclaim,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred	= nfs4_get_exchange_id_cred,
	.reclaim_complete = nfs41_proc_reclaim_complete,
};
6193 | #endif /* CONFIG_NFS_V4_1 */ | 6193 | #endif /* CONFIG_NFS_V4_1 */ |
6194 | 6194 | ||
/*
 * NFSv4.0 "no grace period" recovery (expired/revoked state): opens and
 * locks are recovered with the *_expired variants rather than reclaimed.
 */
struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
	.recover_open	= nfs4_open_expired,
	.recover_lock	= nfs4_lock_expired,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred	= nfs4_get_setclientid_cred,
};
6203 | 6203 | ||
6204 | #if defined(CONFIG_NFS_V4_1) | 6204 | #if defined(CONFIG_NFS_V4_1) |
/*
 * NFSv4.1 "no grace period" recovery: v4.1-specific expired-state
 * handlers (which can use TEST_STATEID/FREE_STATEID) plus the
 * EXCHANGE_ID-based client ID establishment.
 */
struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
	.recover_open	= nfs41_open_expired,
	.recover_lock	= nfs41_lock_expired,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred	= nfs4_get_exchange_id_cred,
};
6213 | #endif /* CONFIG_NFS_V4_1 */ | 6213 | #endif /* CONFIG_NFS_V4_1 */ |
6214 | 6214 | ||
/*
 * NFSv4.0 lease maintenance: the lease is kept alive with the RENEW
 * operation, both asynchronously (state manager) and synchronously.
 */
struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
	.sched_state_renewal = nfs4_proc_async_renew,
	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
	.renew_lease = nfs4_proc_renew,
};
6220 | 6220 | ||
6221 | #if defined(CONFIG_NFS_V4_1) | 6221 | #if defined(CONFIG_NFS_V4_1) |
/*
 * NFSv4.1 lease maintenance: RENEW does not exist in v4.1; the lease is
 * renewed implicitly by sending a SEQUENCE operation, using the machine
 * credential.
 */
struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
	.sched_state_renewal = nfs41_proc_async_sequence,
	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
	.renew_lease = nfs4_proc_sequence,
};
6227 | #endif | 6227 | #endif |
6228 | 6228 | ||
/*
 * Dispatch table for NFSv4 minor version 0: sessionless synchronous
 * calls and the v4.0 recovery/renewal op vectors defined above.
 */
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version = 0,
	.call_sync = _nfs4_call_sync,
	.validate_stateid = nfs4_validate_delegation_stateid,
	.find_root_sec = nfs4_find_root_sec,
	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
	.state_renewal_ops = &nfs40_state_renewal_ops,
};
6238 | 6238 | ||
6239 | #if defined(CONFIG_NFS_V4_1) | 6239 | #if defined(CONFIG_NFS_V4_1) |
/*
 * Dispatch table for NFSv4 minor version 1: session-based synchronous
 * calls and the v4.1 recovery/renewal op vectors defined above.
 */
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
	.minor_version = 1,
	.call_sync = _nfs4_call_sync_session,
	.validate_stateid = nfs41_validate_delegation_stateid,
	.find_root_sec = nfs41_find_root_sec,
	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
	.state_renewal_ops = &nfs41_state_renewal_ops,
};
6249 | #endif | 6249 | #endif |
6250 | 6250 | ||
/* Minor-version dispatch: indexed by the NFSv4 minor version number. */
const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
};
6257 | 6257 | ||
/*
 * Inode operations for NFSv4 regular files; the generic_*xattr handlers
 * route through nfs4_xattr_handlers (NFSv4 ACL support) below.
 */
static const struct inode_operations nfs4_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.getxattr	= generic_getxattr,
	.setxattr	= generic_setxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
};
6267 | 6267 | ||
/*
 * The NFSv4 implementation of the generic nfs_rpc_ops vtable: maps each
 * VFS-level operation onto its NFSv4 COMPOUND procedure.  Note that
 * .rmdir reuses nfs4_proc_remove, since REMOVE handles both files and
 * directories in v4.
 */
const struct nfs_rpc_ops nfs_v4_clientops = {
	.version	= 4,			/* protocol version */
	.dentry_ops	= &nfs4_dentry_operations,
	.dir_inode_ops	= &nfs4_dir_inode_operations,
	.file_inode_ops	= &nfs4_file_inode_operations,
	.file_ops	= &nfs4_file_operations,
	.getroot	= nfs4_proc_get_root,
	.getattr	= nfs4_proc_getattr,
	.setattr	= nfs4_proc_setattr,
	.lookup		= nfs4_proc_lookup,
	.access		= nfs4_proc_access,
	.readlink	= nfs4_proc_readlink,
	.create		= nfs4_proc_create,
	.remove		= nfs4_proc_remove,
	.unlink_setup	= nfs4_proc_unlink_setup,
	.unlink_done	= nfs4_proc_unlink_done,
	.rename		= nfs4_proc_rename,
	.rename_setup	= nfs4_proc_rename_setup,
	.rename_done	= nfs4_proc_rename_done,
	.link		= nfs4_proc_link,
	.symlink	= nfs4_proc_symlink,
	.mkdir		= nfs4_proc_mkdir,
	.rmdir		= nfs4_proc_remove,	/* v4 REMOVE covers directories too */
	.readdir	= nfs4_proc_readdir,
	.mknod		= nfs4_proc_mknod,
	.statfs		= nfs4_proc_statfs,
	.fsinfo		= nfs4_proc_fsinfo,
	.pathconf	= nfs4_proc_pathconf,
	.set_capabilities = nfs4_server_capabilities,
	.decode_dirent	= nfs4_decode_dirent,
	.read_setup	= nfs4_proc_read_setup,
	.read_done	= nfs4_read_done,
	.write_setup	= nfs4_proc_write_setup,
	.write_done	= nfs4_write_done,
	.commit_setup	= nfs4_proc_commit_setup,
	.commit_done	= nfs4_commit_done,
	.lock		= nfs4_proc_lock,
	.clear_acl_cache = nfs4_zap_acl_attr,
	.close_context	= nfs4_close_context,
	.open_context	= nfs4_atomic_open,
	.init_client	= nfs4_init_client,
	.secinfo	= nfs4_proc_secinfo,
};
6311 | 6311 | ||
/* xattr handler exposing the NFSv4 ACL under the "system.nfs4_acl" name. */
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
	.prefix	= XATTR_NAME_NFSV4_ACL,
	.list	= nfs4_xattr_list_nfs4_acl,
	.get	= nfs4_xattr_get_nfs4_acl,
	.set	= nfs4_xattr_set_nfs4_acl,
};
6318 | 6318 | ||
/* NULL-terminated xattr handler table installed on NFSv4 superblocks. */
const struct xattr_handler *nfs4_xattr_handlers[] = {
	&nfs4_xattr_nfs4_acl_handler,
	NULL
};
6323 | 6323 | ||
6324 | /* | 6324 | /* |
6325 | * Local variables: | 6325 | * Local variables: |
6326 | * c-basic-offset: 8 | 6326 | * c-basic-offset: 8 |
6327 | * End: | 6327 | * End: |
6328 | */ | 6328 | */ |
6329 | 6329 |