Commit f6d47a1761896dcd89e3184399a8962dff17267d
Committed by: Miklos Szeredi
Parent: 26c3679101
Exists in: master and 7 other branches
fuse: fix poll notify
Move fuse_copy_finish() to before calling fuse_notify_poll_wakeup(). This is not a big issue because fuse_notify_poll_wakeup() should be atomic, but it's cleaner this way, and later uses of notification will need to be able to finish the copying before performing some actions.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
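For clarity, this is roughly what fuse_notify_poll() looks like after the change, reconstructed from the diff below: the copy state is now finished on both the success and error paths before fuse_notify_poll_wakeup() is called, and fuse_dev_write() no longer calls fuse_copy_finish() itself for the notification path.

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	/* finish the copy before acting on the notification */
	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}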
Showing 1 changed file with 9 additions and 4 deletions
fs/fuse/dev.c
1 | /* | 1 | /* |
2 | FUSE: Filesystem in Userspace | 2 | FUSE: Filesystem in Userspace |
3 | Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> | 3 | Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> |
4 | 4 | ||
5 | This program can be distributed under the terms of the GNU GPL. | 5 | This program can be distributed under the terms of the GNU GPL. |
6 | See the file COPYING. | 6 | See the file COPYING. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include "fuse_i.h" | 9 | #include "fuse_i.h" |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/poll.h> | 13 | #include <linux/poll.h> |
14 | #include <linux/uio.h> | 14 | #include <linux/uio.h> |
15 | #include <linux/miscdevice.h> | 15 | #include <linux/miscdevice.h> |
16 | #include <linux/pagemap.h> | 16 | #include <linux/pagemap.h> |
17 | #include <linux/file.h> | 17 | #include <linux/file.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | 19 | ||
20 | MODULE_ALIAS_MISCDEV(FUSE_MINOR); | 20 | MODULE_ALIAS_MISCDEV(FUSE_MINOR); |
21 | 21 | ||
22 | static struct kmem_cache *fuse_req_cachep; | 22 | static struct kmem_cache *fuse_req_cachep; |
23 | 23 | ||
24 | static struct fuse_conn *fuse_get_conn(struct file *file) | 24 | static struct fuse_conn *fuse_get_conn(struct file *file) |
25 | { | 25 | { |
26 | /* | 26 | /* |
27 | * Lockless access is OK, because file->private data is set | 27 | * Lockless access is OK, because file->private data is set |
28 | * once during mount and is valid until the file is released. | 28 | * once during mount and is valid until the file is released. |
29 | */ | 29 | */ |
30 | return file->private_data; | 30 | return file->private_data; |
31 | } | 31 | } |
32 | 32 | ||
33 | static void fuse_request_init(struct fuse_req *req) | 33 | static void fuse_request_init(struct fuse_req *req) |
34 | { | 34 | { |
35 | memset(req, 0, sizeof(*req)); | 35 | memset(req, 0, sizeof(*req)); |
36 | INIT_LIST_HEAD(&req->list); | 36 | INIT_LIST_HEAD(&req->list); |
37 | INIT_LIST_HEAD(&req->intr_entry); | 37 | INIT_LIST_HEAD(&req->intr_entry); |
38 | init_waitqueue_head(&req->waitq); | 38 | init_waitqueue_head(&req->waitq); |
39 | atomic_set(&req->count, 1); | 39 | atomic_set(&req->count, 1); |
40 | } | 40 | } |
41 | 41 | ||
42 | struct fuse_req *fuse_request_alloc(void) | 42 | struct fuse_req *fuse_request_alloc(void) |
43 | { | 43 | { |
44 | struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL); | 44 | struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL); |
45 | if (req) | 45 | if (req) |
46 | fuse_request_init(req); | 46 | fuse_request_init(req); |
47 | return req; | 47 | return req; |
48 | } | 48 | } |
49 | 49 | ||
50 | struct fuse_req *fuse_request_alloc_nofs(void) | 50 | struct fuse_req *fuse_request_alloc_nofs(void) |
51 | { | 51 | { |
52 | struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS); | 52 | struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS); |
53 | if (req) | 53 | if (req) |
54 | fuse_request_init(req); | 54 | fuse_request_init(req); |
55 | return req; | 55 | return req; |
56 | } | 56 | } |
57 | 57 | ||
58 | void fuse_request_free(struct fuse_req *req) | 58 | void fuse_request_free(struct fuse_req *req) |
59 | { | 59 | { |
60 | kmem_cache_free(fuse_req_cachep, req); | 60 | kmem_cache_free(fuse_req_cachep, req); |
61 | } | 61 | } |
62 | 62 | ||
63 | static void block_sigs(sigset_t *oldset) | 63 | static void block_sigs(sigset_t *oldset) |
64 | { | 64 | { |
65 | sigset_t mask; | 65 | sigset_t mask; |
66 | 66 | ||
67 | siginitsetinv(&mask, sigmask(SIGKILL)); | 67 | siginitsetinv(&mask, sigmask(SIGKILL)); |
68 | sigprocmask(SIG_BLOCK, &mask, oldset); | 68 | sigprocmask(SIG_BLOCK, &mask, oldset); |
69 | } | 69 | } |
70 | 70 | ||
71 | static void restore_sigs(sigset_t *oldset) | 71 | static void restore_sigs(sigset_t *oldset) |
72 | { | 72 | { |
73 | sigprocmask(SIG_SETMASK, oldset, NULL); | 73 | sigprocmask(SIG_SETMASK, oldset, NULL); |
74 | } | 74 | } |
75 | 75 | ||
76 | static void __fuse_get_request(struct fuse_req *req) | 76 | static void __fuse_get_request(struct fuse_req *req) |
77 | { | 77 | { |
78 | atomic_inc(&req->count); | 78 | atomic_inc(&req->count); |
79 | } | 79 | } |
80 | 80 | ||
81 | /* Must be called with > 1 refcount */ | 81 | /* Must be called with > 1 refcount */ |
82 | static void __fuse_put_request(struct fuse_req *req) | 82 | static void __fuse_put_request(struct fuse_req *req) |
83 | { | 83 | { |
84 | BUG_ON(atomic_read(&req->count) < 2); | 84 | BUG_ON(atomic_read(&req->count) < 2); |
85 | atomic_dec(&req->count); | 85 | atomic_dec(&req->count); |
86 | } | 86 | } |
87 | 87 | ||
88 | static void fuse_req_init_context(struct fuse_req *req) | 88 | static void fuse_req_init_context(struct fuse_req *req) |
89 | { | 89 | { |
90 | req->in.h.uid = current_fsuid(); | 90 | req->in.h.uid = current_fsuid(); |
91 | req->in.h.gid = current_fsgid(); | 91 | req->in.h.gid = current_fsgid(); |
92 | req->in.h.pid = current->pid; | 92 | req->in.h.pid = current->pid; |
93 | } | 93 | } |
94 | 94 | ||
95 | struct fuse_req *fuse_get_req(struct fuse_conn *fc) | 95 | struct fuse_req *fuse_get_req(struct fuse_conn *fc) |
96 | { | 96 | { |
97 | struct fuse_req *req; | 97 | struct fuse_req *req; |
98 | sigset_t oldset; | 98 | sigset_t oldset; |
99 | int intr; | 99 | int intr; |
100 | int err; | 100 | int err; |
101 | 101 | ||
102 | atomic_inc(&fc->num_waiting); | 102 | atomic_inc(&fc->num_waiting); |
103 | block_sigs(&oldset); | 103 | block_sigs(&oldset); |
104 | intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked); | 104 | intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked); |
105 | restore_sigs(&oldset); | 105 | restore_sigs(&oldset); |
106 | err = -EINTR; | 106 | err = -EINTR; |
107 | if (intr) | 107 | if (intr) |
108 | goto out; | 108 | goto out; |
109 | 109 | ||
110 | err = -ENOTCONN; | 110 | err = -ENOTCONN; |
111 | if (!fc->connected) | 111 | if (!fc->connected) |
112 | goto out; | 112 | goto out; |
113 | 113 | ||
114 | req = fuse_request_alloc(); | 114 | req = fuse_request_alloc(); |
115 | err = -ENOMEM; | 115 | err = -ENOMEM; |
116 | if (!req) | 116 | if (!req) |
117 | goto out; | 117 | goto out; |
118 | 118 | ||
119 | fuse_req_init_context(req); | 119 | fuse_req_init_context(req); |
120 | req->waiting = 1; | 120 | req->waiting = 1; |
121 | return req; | 121 | return req; |
122 | 122 | ||
123 | out: | 123 | out: |
124 | atomic_dec(&fc->num_waiting); | 124 | atomic_dec(&fc->num_waiting); |
125 | return ERR_PTR(err); | 125 | return ERR_PTR(err); |
126 | } | 126 | } |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * Return request in fuse_file->reserved_req. However that may | 129 | * Return request in fuse_file->reserved_req. However that may |
130 | * currently be in use. If that is the case, wait for it to become | 130 | * currently be in use. If that is the case, wait for it to become |
131 | * available. | 131 | * available. |
132 | */ | 132 | */ |
133 | static struct fuse_req *get_reserved_req(struct fuse_conn *fc, | 133 | static struct fuse_req *get_reserved_req(struct fuse_conn *fc, |
134 | struct file *file) | 134 | struct file *file) |
135 | { | 135 | { |
136 | struct fuse_req *req = NULL; | 136 | struct fuse_req *req = NULL; |
137 | struct fuse_file *ff = file->private_data; | 137 | struct fuse_file *ff = file->private_data; |
138 | 138 | ||
139 | do { | 139 | do { |
140 | wait_event(fc->reserved_req_waitq, ff->reserved_req); | 140 | wait_event(fc->reserved_req_waitq, ff->reserved_req); |
141 | spin_lock(&fc->lock); | 141 | spin_lock(&fc->lock); |
142 | if (ff->reserved_req) { | 142 | if (ff->reserved_req) { |
143 | req = ff->reserved_req; | 143 | req = ff->reserved_req; |
144 | ff->reserved_req = NULL; | 144 | ff->reserved_req = NULL; |
145 | get_file(file); | 145 | get_file(file); |
146 | req->stolen_file = file; | 146 | req->stolen_file = file; |
147 | } | 147 | } |
148 | spin_unlock(&fc->lock); | 148 | spin_unlock(&fc->lock); |
149 | } while (!req); | 149 | } while (!req); |
150 | 150 | ||
151 | return req; | 151 | return req; |
152 | } | 152 | } |
153 | 153 | ||
154 | /* | 154 | /* |
155 | * Put stolen request back into fuse_file->reserved_req | 155 | * Put stolen request back into fuse_file->reserved_req |
156 | */ | 156 | */ |
157 | static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) | 157 | static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) |
158 | { | 158 | { |
159 | struct file *file = req->stolen_file; | 159 | struct file *file = req->stolen_file; |
160 | struct fuse_file *ff = file->private_data; | 160 | struct fuse_file *ff = file->private_data; |
161 | 161 | ||
162 | spin_lock(&fc->lock); | 162 | spin_lock(&fc->lock); |
163 | fuse_request_init(req); | 163 | fuse_request_init(req); |
164 | BUG_ON(ff->reserved_req); | 164 | BUG_ON(ff->reserved_req); |
165 | ff->reserved_req = req; | 165 | ff->reserved_req = req; |
166 | wake_up_all(&fc->reserved_req_waitq); | 166 | wake_up_all(&fc->reserved_req_waitq); |
167 | spin_unlock(&fc->lock); | 167 | spin_unlock(&fc->lock); |
168 | fput(file); | 168 | fput(file); |
169 | } | 169 | } |
170 | 170 | ||
171 | /* | 171 | /* |
172 | * Gets a requests for a file operation, always succeeds | 172 | * Gets a requests for a file operation, always succeeds |
173 | * | 173 | * |
174 | * This is used for sending the FLUSH request, which must get to | 174 | * This is used for sending the FLUSH request, which must get to |
175 | * userspace, due to POSIX locks which may need to be unlocked. | 175 | * userspace, due to POSIX locks which may need to be unlocked. |
176 | * | 176 | * |
177 | * If allocation fails due to OOM, use the reserved request in | 177 | * If allocation fails due to OOM, use the reserved request in |
178 | * fuse_file. | 178 | * fuse_file. |
179 | * | 179 | * |
180 | * This is very unlikely to deadlock accidentally, since the | 180 | * This is very unlikely to deadlock accidentally, since the |
181 | * filesystem should not have it's own file open. If deadlock is | 181 | * filesystem should not have it's own file open. If deadlock is |
182 | * intentional, it can still be broken by "aborting" the filesystem. | 182 | * intentional, it can still be broken by "aborting" the filesystem. |
183 | */ | 183 | */ |
184 | struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file) | 184 | struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file) |
185 | { | 185 | { |
186 | struct fuse_req *req; | 186 | struct fuse_req *req; |
187 | 187 | ||
188 | atomic_inc(&fc->num_waiting); | 188 | atomic_inc(&fc->num_waiting); |
189 | wait_event(fc->blocked_waitq, !fc->blocked); | 189 | wait_event(fc->blocked_waitq, !fc->blocked); |
190 | req = fuse_request_alloc(); | 190 | req = fuse_request_alloc(); |
191 | if (!req) | 191 | if (!req) |
192 | req = get_reserved_req(fc, file); | 192 | req = get_reserved_req(fc, file); |
193 | 193 | ||
194 | fuse_req_init_context(req); | 194 | fuse_req_init_context(req); |
195 | req->waiting = 1; | 195 | req->waiting = 1; |
196 | return req; | 196 | return req; |
197 | } | 197 | } |
198 | 198 | ||
199 | void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) | 199 | void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) |
200 | { | 200 | { |
201 | if (atomic_dec_and_test(&req->count)) { | 201 | if (atomic_dec_and_test(&req->count)) { |
202 | if (req->waiting) | 202 | if (req->waiting) |
203 | atomic_dec(&fc->num_waiting); | 203 | atomic_dec(&fc->num_waiting); |
204 | 204 | ||
205 | if (req->stolen_file) | 205 | if (req->stolen_file) |
206 | put_reserved_req(fc, req); | 206 | put_reserved_req(fc, req); |
207 | else | 207 | else |
208 | fuse_request_free(req); | 208 | fuse_request_free(req); |
209 | } | 209 | } |
210 | } | 210 | } |
211 | 211 | ||
212 | static unsigned len_args(unsigned numargs, struct fuse_arg *args) | 212 | static unsigned len_args(unsigned numargs, struct fuse_arg *args) |
213 | { | 213 | { |
214 | unsigned nbytes = 0; | 214 | unsigned nbytes = 0; |
215 | unsigned i; | 215 | unsigned i; |
216 | 216 | ||
217 | for (i = 0; i < numargs; i++) | 217 | for (i = 0; i < numargs; i++) |
218 | nbytes += args[i].size; | 218 | nbytes += args[i].size; |
219 | 219 | ||
220 | return nbytes; | 220 | return nbytes; |
221 | } | 221 | } |
222 | 222 | ||
223 | static u64 fuse_get_unique(struct fuse_conn *fc) | 223 | static u64 fuse_get_unique(struct fuse_conn *fc) |
224 | { | 224 | { |
225 | fc->reqctr++; | 225 | fc->reqctr++; |
226 | /* zero is special */ | 226 | /* zero is special */ |
227 | if (fc->reqctr == 0) | 227 | if (fc->reqctr == 0) |
228 | fc->reqctr = 1; | 228 | fc->reqctr = 1; |
229 | 229 | ||
230 | return fc->reqctr; | 230 | return fc->reqctr; |
231 | } | 231 | } |
232 | 232 | ||
233 | static void queue_request(struct fuse_conn *fc, struct fuse_req *req) | 233 | static void queue_request(struct fuse_conn *fc, struct fuse_req *req) |
234 | { | 234 | { |
235 | req->in.h.unique = fuse_get_unique(fc); | 235 | req->in.h.unique = fuse_get_unique(fc); |
236 | req->in.h.len = sizeof(struct fuse_in_header) + | 236 | req->in.h.len = sizeof(struct fuse_in_header) + |
237 | len_args(req->in.numargs, (struct fuse_arg *) req->in.args); | 237 | len_args(req->in.numargs, (struct fuse_arg *) req->in.args); |
238 | list_add_tail(&req->list, &fc->pending); | 238 | list_add_tail(&req->list, &fc->pending); |
239 | req->state = FUSE_REQ_PENDING; | 239 | req->state = FUSE_REQ_PENDING; |
240 | if (!req->waiting) { | 240 | if (!req->waiting) { |
241 | req->waiting = 1; | 241 | req->waiting = 1; |
242 | atomic_inc(&fc->num_waiting); | 242 | atomic_inc(&fc->num_waiting); |
243 | } | 243 | } |
244 | wake_up(&fc->waitq); | 244 | wake_up(&fc->waitq); |
245 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); | 245 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); |
246 | } | 246 | } |
247 | 247 | ||
248 | static void flush_bg_queue(struct fuse_conn *fc) | 248 | static void flush_bg_queue(struct fuse_conn *fc) |
249 | { | 249 | { |
250 | while (fc->active_background < FUSE_MAX_BACKGROUND && | 250 | while (fc->active_background < FUSE_MAX_BACKGROUND && |
251 | !list_empty(&fc->bg_queue)) { | 251 | !list_empty(&fc->bg_queue)) { |
252 | struct fuse_req *req; | 252 | struct fuse_req *req; |
253 | 253 | ||
254 | req = list_entry(fc->bg_queue.next, struct fuse_req, list); | 254 | req = list_entry(fc->bg_queue.next, struct fuse_req, list); |
255 | list_del(&req->list); | 255 | list_del(&req->list); |
256 | fc->active_background++; | 256 | fc->active_background++; |
257 | queue_request(fc, req); | 257 | queue_request(fc, req); |
258 | } | 258 | } |
259 | } | 259 | } |
260 | 260 | ||
261 | /* | 261 | /* |
262 | * This function is called when a request is finished. Either a reply | 262 | * This function is called when a request is finished. Either a reply |
263 | * has arrived or it was aborted (and not yet sent) or some error | 263 | * has arrived or it was aborted (and not yet sent) or some error |
264 | * occurred during communication with userspace, or the device file | 264 | * occurred during communication with userspace, or the device file |
265 | * was closed. The requester thread is woken up (if still waiting), | 265 | * was closed. The requester thread is woken up (if still waiting), |
266 | * the 'end' callback is called if given, else the reference to the | 266 | * the 'end' callback is called if given, else the reference to the |
267 | * request is released | 267 | * request is released |
268 | * | 268 | * |
269 | * Called with fc->lock, unlocks it | 269 | * Called with fc->lock, unlocks it |
270 | */ | 270 | */ |
271 | static void request_end(struct fuse_conn *fc, struct fuse_req *req) | 271 | static void request_end(struct fuse_conn *fc, struct fuse_req *req) |
272 | __releases(&fc->lock) | 272 | __releases(&fc->lock) |
273 | { | 273 | { |
274 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; | 274 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; |
275 | req->end = NULL; | 275 | req->end = NULL; |
276 | list_del(&req->list); | 276 | list_del(&req->list); |
277 | list_del(&req->intr_entry); | 277 | list_del(&req->intr_entry); |
278 | req->state = FUSE_REQ_FINISHED; | 278 | req->state = FUSE_REQ_FINISHED; |
279 | if (req->background) { | 279 | if (req->background) { |
280 | if (fc->num_background == FUSE_MAX_BACKGROUND) { | 280 | if (fc->num_background == FUSE_MAX_BACKGROUND) { |
281 | fc->blocked = 0; | 281 | fc->blocked = 0; |
282 | wake_up_all(&fc->blocked_waitq); | 282 | wake_up_all(&fc->blocked_waitq); |
283 | } | 283 | } |
284 | if (fc->num_background == FUSE_CONGESTION_THRESHOLD && | 284 | if (fc->num_background == FUSE_CONGESTION_THRESHOLD && |
285 | fc->connected) { | 285 | fc->connected) { |
286 | clear_bdi_congested(&fc->bdi, READ); | 286 | clear_bdi_congested(&fc->bdi, READ); |
287 | clear_bdi_congested(&fc->bdi, WRITE); | 287 | clear_bdi_congested(&fc->bdi, WRITE); |
288 | } | 288 | } |
289 | fc->num_background--; | 289 | fc->num_background--; |
290 | fc->active_background--; | 290 | fc->active_background--; |
291 | flush_bg_queue(fc); | 291 | flush_bg_queue(fc); |
292 | } | 292 | } |
293 | spin_unlock(&fc->lock); | 293 | spin_unlock(&fc->lock); |
294 | wake_up(&req->waitq); | 294 | wake_up(&req->waitq); |
295 | if (end) | 295 | if (end) |
296 | end(fc, req); | 296 | end(fc, req); |
297 | fuse_put_request(fc, req); | 297 | fuse_put_request(fc, req); |
298 | } | 298 | } |
299 | 299 | ||
300 | static void wait_answer_interruptible(struct fuse_conn *fc, | 300 | static void wait_answer_interruptible(struct fuse_conn *fc, |
301 | struct fuse_req *req) | 301 | struct fuse_req *req) |
302 | __releases(&fc->lock) | 302 | __releases(&fc->lock) |
303 | __acquires(&fc->lock) | 303 | __acquires(&fc->lock) |
304 | { | 304 | { |
305 | if (signal_pending(current)) | 305 | if (signal_pending(current)) |
306 | return; | 306 | return; |
307 | 307 | ||
308 | spin_unlock(&fc->lock); | 308 | spin_unlock(&fc->lock); |
309 | wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); | 309 | wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); |
310 | spin_lock(&fc->lock); | 310 | spin_lock(&fc->lock); |
311 | } | 311 | } |
312 | 312 | ||
313 | static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) | 313 | static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) |
314 | { | 314 | { |
315 | list_add_tail(&req->intr_entry, &fc->interrupts); | 315 | list_add_tail(&req->intr_entry, &fc->interrupts); |
316 | wake_up(&fc->waitq); | 316 | wake_up(&fc->waitq); |
317 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); | 317 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); |
318 | } | 318 | } |
319 | 319 | ||
320 | static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) | 320 | static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) |
321 | __releases(&fc->lock) | 321 | __releases(&fc->lock) |
322 | __acquires(&fc->lock) | 322 | __acquires(&fc->lock) |
323 | { | 323 | { |
324 | if (!fc->no_interrupt) { | 324 | if (!fc->no_interrupt) { |
325 | /* Any signal may interrupt this */ | 325 | /* Any signal may interrupt this */ |
326 | wait_answer_interruptible(fc, req); | 326 | wait_answer_interruptible(fc, req); |
327 | 327 | ||
328 | if (req->aborted) | 328 | if (req->aborted) |
329 | goto aborted; | 329 | goto aborted; |
330 | if (req->state == FUSE_REQ_FINISHED) | 330 | if (req->state == FUSE_REQ_FINISHED) |
331 | return; | 331 | return; |
332 | 332 | ||
333 | req->interrupted = 1; | 333 | req->interrupted = 1; |
334 | if (req->state == FUSE_REQ_SENT) | 334 | if (req->state == FUSE_REQ_SENT) |
335 | queue_interrupt(fc, req); | 335 | queue_interrupt(fc, req); |
336 | } | 336 | } |
337 | 337 | ||
338 | if (!req->force) { | 338 | if (!req->force) { |
339 | sigset_t oldset; | 339 | sigset_t oldset; |
340 | 340 | ||
341 | /* Only fatal signals may interrupt this */ | 341 | /* Only fatal signals may interrupt this */ |
342 | block_sigs(&oldset); | 342 | block_sigs(&oldset); |
343 | wait_answer_interruptible(fc, req); | 343 | wait_answer_interruptible(fc, req); |
344 | restore_sigs(&oldset); | 344 | restore_sigs(&oldset); |
345 | 345 | ||
346 | if (req->aborted) | 346 | if (req->aborted) |
347 | goto aborted; | 347 | goto aborted; |
348 | if (req->state == FUSE_REQ_FINISHED) | 348 | if (req->state == FUSE_REQ_FINISHED) |
349 | return; | 349 | return; |
350 | 350 | ||
351 | /* Request is not yet in userspace, bail out */ | 351 | /* Request is not yet in userspace, bail out */ |
352 | if (req->state == FUSE_REQ_PENDING) { | 352 | if (req->state == FUSE_REQ_PENDING) { |
353 | list_del(&req->list); | 353 | list_del(&req->list); |
354 | __fuse_put_request(req); | 354 | __fuse_put_request(req); |
355 | req->out.h.error = -EINTR; | 355 | req->out.h.error = -EINTR; |
356 | return; | 356 | return; |
357 | } | 357 | } |
358 | } | 358 | } |
359 | 359 | ||
360 | /* | 360 | /* |
361 | * Either request is already in userspace, or it was forced. | 361 | * Either request is already in userspace, or it was forced. |
362 | * Wait it out. | 362 | * Wait it out. |
363 | */ | 363 | */ |
364 | spin_unlock(&fc->lock); | 364 | spin_unlock(&fc->lock); |
365 | wait_event(req->waitq, req->state == FUSE_REQ_FINISHED); | 365 | wait_event(req->waitq, req->state == FUSE_REQ_FINISHED); |
366 | spin_lock(&fc->lock); | 366 | spin_lock(&fc->lock); |
367 | 367 | ||
368 | if (!req->aborted) | 368 | if (!req->aborted) |
369 | return; | 369 | return; |
370 | 370 | ||
371 | aborted: | 371 | aborted: |
372 | BUG_ON(req->state != FUSE_REQ_FINISHED); | 372 | BUG_ON(req->state != FUSE_REQ_FINISHED); |
373 | if (req->locked) { | 373 | if (req->locked) { |
374 | /* This is uninterruptible sleep, because data is | 374 | /* This is uninterruptible sleep, because data is |
375 | being copied to/from the buffers of req. During | 375 | being copied to/from the buffers of req. During |
376 | locked state, there mustn't be any filesystem | 376 | locked state, there mustn't be any filesystem |
377 | operation (e.g. page fault), since that could lead | 377 | operation (e.g. page fault), since that could lead |
378 | to deadlock */ | 378 | to deadlock */ |
379 | spin_unlock(&fc->lock); | 379 | spin_unlock(&fc->lock); |
380 | wait_event(req->waitq, !req->locked); | 380 | wait_event(req->waitq, !req->locked); |
381 | spin_lock(&fc->lock); | 381 | spin_lock(&fc->lock); |
382 | } | 382 | } |
383 | } | 383 | } |
384 | 384 | ||
385 | void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) | 385 | void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) |
386 | { | 386 | { |
387 | req->isreply = 1; | 387 | req->isreply = 1; |
388 | spin_lock(&fc->lock); | 388 | spin_lock(&fc->lock); |
389 | if (!fc->connected) | 389 | if (!fc->connected) |
390 | req->out.h.error = -ENOTCONN; | 390 | req->out.h.error = -ENOTCONN; |
391 | else if (fc->conn_error) | 391 | else if (fc->conn_error) |
392 | req->out.h.error = -ECONNREFUSED; | 392 | req->out.h.error = -ECONNREFUSED; |
393 | else { | 393 | else { |
394 | queue_request(fc, req); | 394 | queue_request(fc, req); |
395 | /* acquire extra reference, since request is still needed | 395 | /* acquire extra reference, since request is still needed |
396 | after request_end() */ | 396 | after request_end() */ |
397 | __fuse_get_request(req); | 397 | __fuse_get_request(req); |
398 | 398 | ||
399 | request_wait_answer(fc, req); | 399 | request_wait_answer(fc, req); |
400 | } | 400 | } |
401 | spin_unlock(&fc->lock); | 401 | spin_unlock(&fc->lock); |
402 | } | 402 | } |
403 | 403 | ||
404 | static void fuse_request_send_nowait_locked(struct fuse_conn *fc, | 404 | static void fuse_request_send_nowait_locked(struct fuse_conn *fc, |
405 | struct fuse_req *req) | 405 | struct fuse_req *req) |
406 | { | 406 | { |
407 | req->background = 1; | 407 | req->background = 1; |
408 | fc->num_background++; | 408 | fc->num_background++; |
409 | if (fc->num_background == FUSE_MAX_BACKGROUND) | 409 | if (fc->num_background == FUSE_MAX_BACKGROUND) |
410 | fc->blocked = 1; | 410 | fc->blocked = 1; |
411 | if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { | 411 | if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { |
412 | set_bdi_congested(&fc->bdi, READ); | 412 | set_bdi_congested(&fc->bdi, READ); |
413 | set_bdi_congested(&fc->bdi, WRITE); | 413 | set_bdi_congested(&fc->bdi, WRITE); |
414 | } | 414 | } |
415 | list_add_tail(&req->list, &fc->bg_queue); | 415 | list_add_tail(&req->list, &fc->bg_queue); |
416 | flush_bg_queue(fc); | 416 | flush_bg_queue(fc); |
417 | } | 417 | } |
418 | 418 | ||
419 | static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) | 419 | static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) |
420 | { | 420 | { |
421 | spin_lock(&fc->lock); | 421 | spin_lock(&fc->lock); |
422 | if (fc->connected) { | 422 | if (fc->connected) { |
423 | fuse_request_send_nowait_locked(fc, req); | 423 | fuse_request_send_nowait_locked(fc, req); |
424 | spin_unlock(&fc->lock); | 424 | spin_unlock(&fc->lock); |
425 | } else { | 425 | } else { |
426 | req->out.h.error = -ENOTCONN; | 426 | req->out.h.error = -ENOTCONN; |
427 | request_end(fc, req); | 427 | request_end(fc, req); |
428 | } | 428 | } |
429 | } | 429 | } |
430 | 430 | ||
431 | void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req) | 431 | void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req) |
432 | { | 432 | { |
433 | req->isreply = 0; | 433 | req->isreply = 0; |
434 | fuse_request_send_nowait(fc, req); | 434 | fuse_request_send_nowait(fc, req); |
435 | } | 435 | } |
436 | 436 | ||
437 | void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) | 437 | void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) |
438 | { | 438 | { |
439 | req->isreply = 1; | 439 | req->isreply = 1; |
440 | fuse_request_send_nowait(fc, req); | 440 | fuse_request_send_nowait(fc, req); |
441 | } | 441 | } |
442 | 442 | ||
443 | /* | 443 | /* |
444 | * Called under fc->lock | 444 | * Called under fc->lock |
445 | * | 445 | * |
446 | * fc->connected must have been checked previously | 446 | * fc->connected must have been checked previously |
447 | */ | 447 | */ |
448 | void fuse_request_send_background_locked(struct fuse_conn *fc, | 448 | void fuse_request_send_background_locked(struct fuse_conn *fc, |
449 | struct fuse_req *req) | 449 | struct fuse_req *req) |
450 | { | 450 | { |
451 | req->isreply = 1; | 451 | req->isreply = 1; |
452 | fuse_request_send_nowait_locked(fc, req); | 452 | fuse_request_send_nowait_locked(fc, req); |
453 | } | 453 | } |
454 | 454 | ||
455 | /* | 455 | /* |
456 | * Lock the request. Up to the next unlock_request() there mustn't be | 456 | * Lock the request. Up to the next unlock_request() there mustn't be |
457 | * anything that could cause a page-fault. If the request was already | 457 | * anything that could cause a page-fault. If the request was already |
458 | * aborted bail out. | 458 | * aborted bail out. |
459 | */ | 459 | */ |
460 | static int lock_request(struct fuse_conn *fc, struct fuse_req *req) | 460 | static int lock_request(struct fuse_conn *fc, struct fuse_req *req) |
461 | { | 461 | { |
462 | int err = 0; | 462 | int err = 0; |
463 | if (req) { | 463 | if (req) { |
464 | spin_lock(&fc->lock); | 464 | spin_lock(&fc->lock); |
465 | if (req->aborted) | 465 | if (req->aborted) |
466 | err = -ENOENT; | 466 | err = -ENOENT; |
467 | else | 467 | else |
468 | req->locked = 1; | 468 | req->locked = 1; |
469 | spin_unlock(&fc->lock); | 469 | spin_unlock(&fc->lock); |
470 | } | 470 | } |
471 | return err; | 471 | return err; |
472 | } | 472 | } |
473 | 473 | ||
474 | /* | 474 | /* |
475 | * Unlock request. If it was aborted during being locked, the | 475 | * Unlock request. If it was aborted during being locked, the |
476 | * requester thread is currently waiting for it to be unlocked, so | 476 | * requester thread is currently waiting for it to be unlocked, so |
477 | * wake it up. | 477 | * wake it up. |
478 | */ | 478 | */ |
479 | static void unlock_request(struct fuse_conn *fc, struct fuse_req *req) | 479 | static void unlock_request(struct fuse_conn *fc, struct fuse_req *req) |
480 | { | 480 | { |
481 | if (req) { | 481 | if (req) { |
482 | spin_lock(&fc->lock); | 482 | spin_lock(&fc->lock); |
483 | req->locked = 0; | 483 | req->locked = 0; |
484 | if (req->aborted) | 484 | if (req->aborted) |
485 | wake_up(&req->waitq); | 485 | wake_up(&req->waitq); |
486 | spin_unlock(&fc->lock); | 486 | spin_unlock(&fc->lock); |
487 | } | 487 | } |
488 | } | 488 | } |
489 | 489 | ||
490 | struct fuse_copy_state { | 490 | struct fuse_copy_state { |
491 | struct fuse_conn *fc; | 491 | struct fuse_conn *fc; |
492 | int write; | 492 | int write; |
493 | struct fuse_req *req; | 493 | struct fuse_req *req; |
494 | const struct iovec *iov; | 494 | const struct iovec *iov; |
495 | unsigned long nr_segs; | 495 | unsigned long nr_segs; |
496 | unsigned long seglen; | 496 | unsigned long seglen; |
497 | unsigned long addr; | 497 | unsigned long addr; |
498 | struct page *pg; | 498 | struct page *pg; |
499 | void *mapaddr; | 499 | void *mapaddr; |
500 | void *buf; | 500 | void *buf; |
501 | unsigned len; | 501 | unsigned len; |
502 | }; | 502 | }; |
503 | 503 | ||
504 | static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc, | 504 | static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc, |
505 | int write, struct fuse_req *req, | 505 | int write, struct fuse_req *req, |
506 | const struct iovec *iov, unsigned long nr_segs) | 506 | const struct iovec *iov, unsigned long nr_segs) |
507 | { | 507 | { |
508 | memset(cs, 0, sizeof(*cs)); | 508 | memset(cs, 0, sizeof(*cs)); |
509 | cs->fc = fc; | 509 | cs->fc = fc; |
510 | cs->write = write; | 510 | cs->write = write; |
511 | cs->req = req; | 511 | cs->req = req; |
512 | cs->iov = iov; | 512 | cs->iov = iov; |
513 | cs->nr_segs = nr_segs; | 513 | cs->nr_segs = nr_segs; |
514 | } | 514 | } |
515 | 515 | ||
516 | /* Unmap and put previous page of userspace buffer */ | 516 | /* Unmap and put previous page of userspace buffer */ |
517 | static void fuse_copy_finish(struct fuse_copy_state *cs) | 517 | static void fuse_copy_finish(struct fuse_copy_state *cs) |
518 | { | 518 | { |
519 | if (cs->mapaddr) { | 519 | if (cs->mapaddr) { |
520 | kunmap_atomic(cs->mapaddr, KM_USER0); | 520 | kunmap_atomic(cs->mapaddr, KM_USER0); |
521 | if (cs->write) { | 521 | if (cs->write) { |
522 | flush_dcache_page(cs->pg); | 522 | flush_dcache_page(cs->pg); |
523 | set_page_dirty_lock(cs->pg); | 523 | set_page_dirty_lock(cs->pg); |
524 | } | 524 | } |
525 | put_page(cs->pg); | 525 | put_page(cs->pg); |
526 | cs->mapaddr = NULL; | 526 | cs->mapaddr = NULL; |
527 | } | 527 | } |
528 | } | 528 | } |
529 | 529 | ||
530 | /* | 530 | /* |
531 | * Get another pagefull of userspace buffer, and map it to kernel | 531 | * Get another pagefull of userspace buffer, and map it to kernel |
532 | * address space, and lock request | 532 | * address space, and lock request |
533 | */ | 533 | */ |
534 | static int fuse_copy_fill(struct fuse_copy_state *cs) | 534 | static int fuse_copy_fill(struct fuse_copy_state *cs) |
535 | { | 535 | { |
536 | unsigned long offset; | 536 | unsigned long offset; |
537 | int err; | 537 | int err; |
538 | 538 | ||
539 | unlock_request(cs->fc, cs->req); | 539 | unlock_request(cs->fc, cs->req); |
540 | fuse_copy_finish(cs); | 540 | fuse_copy_finish(cs); |
541 | if (!cs->seglen) { | 541 | if (!cs->seglen) { |
542 | BUG_ON(!cs->nr_segs); | 542 | BUG_ON(!cs->nr_segs); |
543 | cs->seglen = cs->iov[0].iov_len; | 543 | cs->seglen = cs->iov[0].iov_len; |
544 | cs->addr = (unsigned long) cs->iov[0].iov_base; | 544 | cs->addr = (unsigned long) cs->iov[0].iov_base; |
545 | cs->iov++; | 545 | cs->iov++; |
546 | cs->nr_segs--; | 546 | cs->nr_segs--; |
547 | } | 547 | } |
548 | down_read(¤t->mm->mmap_sem); | 548 | down_read(¤t->mm->mmap_sem); |
549 | err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0, | 549 | err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0, |
550 | &cs->pg, NULL); | 550 | &cs->pg, NULL); |
551 | up_read(¤t->mm->mmap_sem); | 551 | up_read(¤t->mm->mmap_sem); |
552 | if (err < 0) | 552 | if (err < 0) |
553 | return err; | 553 | return err; |
554 | BUG_ON(err != 1); | 554 | BUG_ON(err != 1); |
555 | offset = cs->addr % PAGE_SIZE; | 555 | offset = cs->addr % PAGE_SIZE; |
556 | cs->mapaddr = kmap_atomic(cs->pg, KM_USER0); | 556 | cs->mapaddr = kmap_atomic(cs->pg, KM_USER0); |
557 | cs->buf = cs->mapaddr + offset; | 557 | cs->buf = cs->mapaddr + offset; |
558 | cs->len = min(PAGE_SIZE - offset, cs->seglen); | 558 | cs->len = min(PAGE_SIZE - offset, cs->seglen); |
559 | cs->seglen -= cs->len; | 559 | cs->seglen -= cs->len; |
560 | cs->addr += cs->len; | 560 | cs->addr += cs->len; |
561 | 561 | ||
562 | return lock_request(cs->fc, cs->req); | 562 | return lock_request(cs->fc, cs->req); |
563 | } | 563 | } |
564 | 564 | ||
565 | /* Do as much copy to/from userspace buffer as we can */ | 565 | /* Do as much copy to/from userspace buffer as we can */ |
566 | static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) | 566 | static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) |
567 | { | 567 | { |
568 | unsigned ncpy = min(*size, cs->len); | 568 | unsigned ncpy = min(*size, cs->len); |
569 | if (val) { | 569 | if (val) { |
570 | if (cs->write) | 570 | if (cs->write) |
571 | memcpy(cs->buf, *val, ncpy); | 571 | memcpy(cs->buf, *val, ncpy); |
572 | else | 572 | else |
573 | memcpy(*val, cs->buf, ncpy); | 573 | memcpy(*val, cs->buf, ncpy); |
574 | *val += ncpy; | 574 | *val += ncpy; |
575 | } | 575 | } |
576 | *size -= ncpy; | 576 | *size -= ncpy; |
577 | cs->len -= ncpy; | 577 | cs->len -= ncpy; |
578 | cs->buf += ncpy; | 578 | cs->buf += ncpy; |
579 | return ncpy; | 579 | return ncpy; |
580 | } | 580 | } |
581 | 581 | ||
582 | /* | 582 | /* |
583 | * Copy a page in the request to/from the userspace buffer. Must be | 583 | * Copy a page in the request to/from the userspace buffer. Must be |
584 | * done atomically | 584 | * done atomically |
585 | */ | 585 | */ |
586 | static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, | 586 | static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, |
587 | unsigned offset, unsigned count, int zeroing) | 587 | unsigned offset, unsigned count, int zeroing) |
588 | { | 588 | { |
589 | if (page && zeroing && count < PAGE_SIZE) { | 589 | if (page && zeroing && count < PAGE_SIZE) { |
590 | void *mapaddr = kmap_atomic(page, KM_USER1); | 590 | void *mapaddr = kmap_atomic(page, KM_USER1); |
591 | memset(mapaddr, 0, PAGE_SIZE); | 591 | memset(mapaddr, 0, PAGE_SIZE); |
592 | kunmap_atomic(mapaddr, KM_USER1); | 592 | kunmap_atomic(mapaddr, KM_USER1); |
593 | } | 593 | } |
594 | while (count) { | 594 | while (count) { |
595 | if (!cs->len) { | 595 | if (!cs->len) { |
596 | int err = fuse_copy_fill(cs); | 596 | int err = fuse_copy_fill(cs); |
597 | if (err) | 597 | if (err) |
598 | return err; | 598 | return err; |
599 | } | 599 | } |
600 | if (page) { | 600 | if (page) { |
601 | void *mapaddr = kmap_atomic(page, KM_USER1); | 601 | void *mapaddr = kmap_atomic(page, KM_USER1); |
602 | void *buf = mapaddr + offset; | 602 | void *buf = mapaddr + offset; |
603 | offset += fuse_copy_do(cs, &buf, &count); | 603 | offset += fuse_copy_do(cs, &buf, &count); |
604 | kunmap_atomic(mapaddr, KM_USER1); | 604 | kunmap_atomic(mapaddr, KM_USER1); |
605 | } else | 605 | } else |
606 | offset += fuse_copy_do(cs, NULL, &count); | 606 | offset += fuse_copy_do(cs, NULL, &count); |
607 | } | 607 | } |
608 | if (page && !cs->write) | 608 | if (page && !cs->write) |
609 | flush_dcache_page(page); | 609 | flush_dcache_page(page); |
610 | return 0; | 610 | return 0; |
611 | } | 611 | } |
612 | 612 | ||
613 | /* Copy pages in the request to/from userspace buffer */ | 613 | /* Copy pages in the request to/from userspace buffer */ |
614 | static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, | 614 | static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, |
615 | int zeroing) | 615 | int zeroing) |
616 | { | 616 | { |
617 | unsigned i; | 617 | unsigned i; |
618 | struct fuse_req *req = cs->req; | 618 | struct fuse_req *req = cs->req; |
619 | unsigned offset = req->page_offset; | 619 | unsigned offset = req->page_offset; |
620 | unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset); | 620 | unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset); |
621 | 621 | ||
622 | for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) { | 622 | for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) { |
623 | struct page *page = req->pages[i]; | 623 | struct page *page = req->pages[i]; |
624 | int err = fuse_copy_page(cs, page, offset, count, zeroing); | 624 | int err = fuse_copy_page(cs, page, offset, count, zeroing); |
625 | if (err) | 625 | if (err) |
626 | return err; | 626 | return err; |
627 | 627 | ||
628 | nbytes -= count; | 628 | nbytes -= count; |
629 | count = min(nbytes, (unsigned) PAGE_SIZE); | 629 | count = min(nbytes, (unsigned) PAGE_SIZE); |
630 | offset = 0; | 630 | offset = 0; |
631 | } | 631 | } |
632 | return 0; | 632 | return 0; |
633 | } | 633 | } |
634 | 634 | ||
635 | /* Copy a single argument in the request to/from userspace buffer */ | 635 | /* Copy a single argument in the request to/from userspace buffer */ |
636 | static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) | 636 | static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) |
637 | { | 637 | { |
638 | while (size) { | 638 | while (size) { |
639 | if (!cs->len) { | 639 | if (!cs->len) { |
640 | int err = fuse_copy_fill(cs); | 640 | int err = fuse_copy_fill(cs); |
641 | if (err) | 641 | if (err) |
642 | return err; | 642 | return err; |
643 | } | 643 | } |
644 | fuse_copy_do(cs, &val, &size); | 644 | fuse_copy_do(cs, &val, &size); |
645 | } | 645 | } |
646 | return 0; | 646 | return 0; |
647 | } | 647 | } |
648 | 648 | ||
649 | /* Copy request arguments to/from userspace buffer */ | 649 | /* Copy request arguments to/from userspace buffer */ |
650 | static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, | 650 | static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, |
651 | unsigned argpages, struct fuse_arg *args, | 651 | unsigned argpages, struct fuse_arg *args, |
652 | int zeroing) | 652 | int zeroing) |
653 | { | 653 | { |
654 | int err = 0; | 654 | int err = 0; |
655 | unsigned i; | 655 | unsigned i; |
656 | 656 | ||
657 | for (i = 0; !err && i < numargs; i++) { | 657 | for (i = 0; !err && i < numargs; i++) { |
658 | struct fuse_arg *arg = &args[i]; | 658 | struct fuse_arg *arg = &args[i]; |
659 | if (i == numargs - 1 && argpages) | 659 | if (i == numargs - 1 && argpages) |
660 | err = fuse_copy_pages(cs, arg->size, zeroing); | 660 | err = fuse_copy_pages(cs, arg->size, zeroing); |
661 | else | 661 | else |
662 | err = fuse_copy_one(cs, arg->value, arg->size); | 662 | err = fuse_copy_one(cs, arg->value, arg->size); |
663 | } | 663 | } |
664 | return err; | 664 | return err; |
665 | } | 665 | } |
666 | 666 | ||
667 | static int request_pending(struct fuse_conn *fc) | 667 | static int request_pending(struct fuse_conn *fc) |
668 | { | 668 | { |
669 | return !list_empty(&fc->pending) || !list_empty(&fc->interrupts); | 669 | return !list_empty(&fc->pending) || !list_empty(&fc->interrupts); |
670 | } | 670 | } |
671 | 671 | ||
672 | /* Wait until a request is available on the pending list */ | 672 | /* Wait until a request is available on the pending list */ |
673 | static void request_wait(struct fuse_conn *fc) | 673 | static void request_wait(struct fuse_conn *fc) |
674 | __releases(&fc->lock) | 674 | __releases(&fc->lock) |
675 | __acquires(&fc->lock) | 675 | __acquires(&fc->lock) |
676 | { | 676 | { |
677 | DECLARE_WAITQUEUE(wait, current); | 677 | DECLARE_WAITQUEUE(wait, current); |
678 | 678 | ||
679 | add_wait_queue_exclusive(&fc->waitq, &wait); | 679 | add_wait_queue_exclusive(&fc->waitq, &wait); |
680 | while (fc->connected && !request_pending(fc)) { | 680 | while (fc->connected && !request_pending(fc)) { |
681 | set_current_state(TASK_INTERRUPTIBLE); | 681 | set_current_state(TASK_INTERRUPTIBLE); |
682 | if (signal_pending(current)) | 682 | if (signal_pending(current)) |
683 | break; | 683 | break; |
684 | 684 | ||
685 | spin_unlock(&fc->lock); | 685 | spin_unlock(&fc->lock); |
686 | schedule(); | 686 | schedule(); |
687 | spin_lock(&fc->lock); | 687 | spin_lock(&fc->lock); |
688 | } | 688 | } |
689 | set_current_state(TASK_RUNNING); | 689 | set_current_state(TASK_RUNNING); |
690 | remove_wait_queue(&fc->waitq, &wait); | 690 | remove_wait_queue(&fc->waitq, &wait); |
691 | } | 691 | } |
692 | 692 | ||
693 | /* | 693 | /* |
694 | * Transfer an interrupt request to userspace | 694 | * Transfer an interrupt request to userspace |
695 | * | 695 | * |
696 | * Unlike other requests this is assembled on demand, without a need | 696 | * Unlike other requests this is assembled on demand, without a need |
697 | * to allocate a separate fuse_req structure. | 697 | * to allocate a separate fuse_req structure. |
698 | * | 698 | * |
699 | * Called with fc->lock held, releases it | 699 | * Called with fc->lock held, releases it |
700 | */ | 700 | */ |
701 | static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req, | 701 | static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req, |
702 | const struct iovec *iov, unsigned long nr_segs) | 702 | const struct iovec *iov, unsigned long nr_segs) |
703 | __releases(&fc->lock) | 703 | __releases(&fc->lock) |
704 | { | 704 | { |
705 | struct fuse_copy_state cs; | 705 | struct fuse_copy_state cs; |
706 | struct fuse_in_header ih; | 706 | struct fuse_in_header ih; |
707 | struct fuse_interrupt_in arg; | 707 | struct fuse_interrupt_in arg; |
708 | unsigned reqsize = sizeof(ih) + sizeof(arg); | 708 | unsigned reqsize = sizeof(ih) + sizeof(arg); |
709 | int err; | 709 | int err; |
710 | 710 | ||
711 | list_del_init(&req->intr_entry); | 711 | list_del_init(&req->intr_entry); |
712 | req->intr_unique = fuse_get_unique(fc); | 712 | req->intr_unique = fuse_get_unique(fc); |
713 | memset(&ih, 0, sizeof(ih)); | 713 | memset(&ih, 0, sizeof(ih)); |
714 | memset(&arg, 0, sizeof(arg)); | 714 | memset(&arg, 0, sizeof(arg)); |
715 | ih.len = reqsize; | 715 | ih.len = reqsize; |
716 | ih.opcode = FUSE_INTERRUPT; | 716 | ih.opcode = FUSE_INTERRUPT; |
717 | ih.unique = req->intr_unique; | 717 | ih.unique = req->intr_unique; |
718 | arg.unique = req->in.h.unique; | 718 | arg.unique = req->in.h.unique; |
719 | 719 | ||
720 | spin_unlock(&fc->lock); | 720 | spin_unlock(&fc->lock); |
721 | if (iov_length(iov, nr_segs) < reqsize) | 721 | if (iov_length(iov, nr_segs) < reqsize) |
722 | return -EINVAL; | 722 | return -EINVAL; |
723 | 723 | ||
724 | fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs); | 724 | fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs); |
725 | err = fuse_copy_one(&cs, &ih, sizeof(ih)); | 725 | err = fuse_copy_one(&cs, &ih, sizeof(ih)); |
726 | if (!err) | 726 | if (!err) |
727 | err = fuse_copy_one(&cs, &arg, sizeof(arg)); | 727 | err = fuse_copy_one(&cs, &arg, sizeof(arg)); |
728 | fuse_copy_finish(&cs); | 728 | fuse_copy_finish(&cs); |
729 | 729 | ||
730 | return err ? err : reqsize; | 730 | return err ? err : reqsize; |
731 | } | 731 | } |
732 | 732 | ||
733 | /* | 733 | /* |
734 | * Read a single request into the userspace filesystem's buffer. This | 734 | * Read a single request into the userspace filesystem's buffer. This |
735 | * function waits until a request is available, then removes it from | 735 | * function waits until a request is available, then removes it from |
736 | * the pending list and copies request data to userspace buffer. If | 736 | * the pending list and copies request data to userspace buffer. If |
737 | * no reply is needed (FORGET) or request has been aborted or there | 737 | * no reply is needed (FORGET) or request has been aborted or there |
738 | * was an error during the copying then it's finished by calling | 738 | * was an error during the copying then it's finished by calling |
739 | * request_end(). Otherwise add it to the processing list, and set | 739 | * request_end(). Otherwise add it to the processing list, and set |
740 | * the 'sent' flag. | 740 | * the 'sent' flag. |
741 | */ | 741 | */ |
742 | static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, | 742 | static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, |
743 | unsigned long nr_segs, loff_t pos) | 743 | unsigned long nr_segs, loff_t pos) |
744 | { | 744 | { |
745 | int err; | 745 | int err; |
746 | struct fuse_req *req; | 746 | struct fuse_req *req; |
747 | struct fuse_in *in; | 747 | struct fuse_in *in; |
748 | struct fuse_copy_state cs; | 748 | struct fuse_copy_state cs; |
749 | unsigned reqsize; | 749 | unsigned reqsize; |
750 | struct file *file = iocb->ki_filp; | 750 | struct file *file = iocb->ki_filp; |
751 | struct fuse_conn *fc = fuse_get_conn(file); | 751 | struct fuse_conn *fc = fuse_get_conn(file); |
752 | if (!fc) | 752 | if (!fc) |
753 | return -EPERM; | 753 | return -EPERM; |
754 | 754 | ||
755 | restart: | 755 | restart: |
756 | spin_lock(&fc->lock); | 756 | spin_lock(&fc->lock); |
757 | err = -EAGAIN; | 757 | err = -EAGAIN; |
758 | if ((file->f_flags & O_NONBLOCK) && fc->connected && | 758 | if ((file->f_flags & O_NONBLOCK) && fc->connected && |
759 | !request_pending(fc)) | 759 | !request_pending(fc)) |
760 | goto err_unlock; | 760 | goto err_unlock; |
761 | 761 | ||
762 | request_wait(fc); | 762 | request_wait(fc); |
763 | err = -ENODEV; | 763 | err = -ENODEV; |
764 | if (!fc->connected) | 764 | if (!fc->connected) |
765 | goto err_unlock; | 765 | goto err_unlock; |
766 | err = -ERESTARTSYS; | 766 | err = -ERESTARTSYS; |
767 | if (!request_pending(fc)) | 767 | if (!request_pending(fc)) |
768 | goto err_unlock; | 768 | goto err_unlock; |
769 | 769 | ||
770 | if (!list_empty(&fc->interrupts)) { | 770 | if (!list_empty(&fc->interrupts)) { |
771 | req = list_entry(fc->interrupts.next, struct fuse_req, | 771 | req = list_entry(fc->interrupts.next, struct fuse_req, |
772 | intr_entry); | 772 | intr_entry); |
773 | return fuse_read_interrupt(fc, req, iov, nr_segs); | 773 | return fuse_read_interrupt(fc, req, iov, nr_segs); |
774 | } | 774 | } |
775 | 775 | ||
776 | req = list_entry(fc->pending.next, struct fuse_req, list); | 776 | req = list_entry(fc->pending.next, struct fuse_req, list); |
777 | req->state = FUSE_REQ_READING; | 777 | req->state = FUSE_REQ_READING; |
778 | list_move(&req->list, &fc->io); | 778 | list_move(&req->list, &fc->io); |
779 | 779 | ||
780 | in = &req->in; | 780 | in = &req->in; |
781 | reqsize = in->h.len; | 781 | reqsize = in->h.len; |
782 | /* If request is too large, reply with an error and restart the read */ | 782 | /* If request is too large, reply with an error and restart the read */ |
783 | if (iov_length(iov, nr_segs) < reqsize) { | 783 | if (iov_length(iov, nr_segs) < reqsize) { |
784 | req->out.h.error = -EIO; | 784 | req->out.h.error = -EIO; |
785 | /* SETXATTR is special, since it may contain too large data */ | 785 | /* SETXATTR is special, since it may contain too large data */ |
786 | if (in->h.opcode == FUSE_SETXATTR) | 786 | if (in->h.opcode == FUSE_SETXATTR) |
787 | req->out.h.error = -E2BIG; | 787 | req->out.h.error = -E2BIG; |
788 | request_end(fc, req); | 788 | request_end(fc, req); |
789 | goto restart; | 789 | goto restart; |
790 | } | 790 | } |
791 | spin_unlock(&fc->lock); | 791 | spin_unlock(&fc->lock); |
792 | fuse_copy_init(&cs, fc, 1, req, iov, nr_segs); | 792 | fuse_copy_init(&cs, fc, 1, req, iov, nr_segs); |
793 | err = fuse_copy_one(&cs, &in->h, sizeof(in->h)); | 793 | err = fuse_copy_one(&cs, &in->h, sizeof(in->h)); |
794 | if (!err) | 794 | if (!err) |
795 | err = fuse_copy_args(&cs, in->numargs, in->argpages, | 795 | err = fuse_copy_args(&cs, in->numargs, in->argpages, |
796 | (struct fuse_arg *) in->args, 0); | 796 | (struct fuse_arg *) in->args, 0); |
797 | fuse_copy_finish(&cs); | 797 | fuse_copy_finish(&cs); |
798 | spin_lock(&fc->lock); | 798 | spin_lock(&fc->lock); |
799 | req->locked = 0; | 799 | req->locked = 0; |
800 | if (req->aborted) { | 800 | if (req->aborted) { |
801 | request_end(fc, req); | 801 | request_end(fc, req); |
802 | return -ENODEV; | 802 | return -ENODEV; |
803 | } | 803 | } |
804 | if (err) { | 804 | if (err) { |
805 | req->out.h.error = -EIO; | 805 | req->out.h.error = -EIO; |
806 | request_end(fc, req); | 806 | request_end(fc, req); |
807 | return err; | 807 | return err; |
808 | } | 808 | } |
809 | if (!req->isreply) | 809 | if (!req->isreply) |
810 | request_end(fc, req); | 810 | request_end(fc, req); |
811 | else { | 811 | else { |
812 | req->state = FUSE_REQ_SENT; | 812 | req->state = FUSE_REQ_SENT; |
813 | list_move_tail(&req->list, &fc->processing); | 813 | list_move_tail(&req->list, &fc->processing); |
814 | if (req->interrupted) | 814 | if (req->interrupted) |
815 | queue_interrupt(fc, req); | 815 | queue_interrupt(fc, req); |
816 | spin_unlock(&fc->lock); | 816 | spin_unlock(&fc->lock); |
817 | } | 817 | } |
818 | return reqsize; | 818 | return reqsize; |
819 | 819 | ||
820 | err_unlock: | 820 | err_unlock: |
821 | spin_unlock(&fc->lock); | 821 | spin_unlock(&fc->lock); |
822 | return err; | 822 | return err; |
823 | } | 823 | } |
824 | 824 | ||
825 | static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, | 825 | static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, |
826 | struct fuse_copy_state *cs) | 826 | struct fuse_copy_state *cs) |
827 | { | 827 | { |
828 | struct fuse_notify_poll_wakeup_out outarg; | 828 | struct fuse_notify_poll_wakeup_out outarg; |
829 | int err; | 829 | int err = -EINVAL; |
830 | 830 | ||
831 | if (size != sizeof(outarg)) | 831 | if (size != sizeof(outarg)) |
832 | return -EINVAL; | 832 | goto err; |
833 | 833 | ||
834 | err = fuse_copy_one(cs, &outarg, sizeof(outarg)); | 834 | err = fuse_copy_one(cs, &outarg, sizeof(outarg)); |
835 | if (err) | 835 | if (err) |
836 | return err; | 836 | goto err; |
837 | 837 | ||
838 | fuse_copy_finish(cs); | ||
838 | return fuse_notify_poll_wakeup(fc, &outarg); | 839 | return fuse_notify_poll_wakeup(fc, &outarg); |
840 | |||
841 | err: | ||
842 | fuse_copy_finish(cs); | ||
843 | return err; | ||
839 | } | 844 | } |
840 | 845 | ||
841 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, | 846 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, |
842 | unsigned int size, struct fuse_copy_state *cs) | 847 | unsigned int size, struct fuse_copy_state *cs) |
843 | { | 848 | { |
844 | switch (code) { | 849 | switch (code) { |
845 | case FUSE_NOTIFY_POLL: | 850 | case FUSE_NOTIFY_POLL: |
846 | return fuse_notify_poll(fc, size, cs); | 851 | return fuse_notify_poll(fc, size, cs); |
847 | 852 | ||
848 | default: | 853 | default: |
854 | fuse_copy_finish(cs); | ||
849 | return -EINVAL; | 855 | return -EINVAL; |
850 | } | 856 | } |
851 | } | 857 | } |
852 | 858 | ||
853 | /* Look up request on processing list by unique ID */ | 859 | /* Look up request on processing list by unique ID */ |
854 | static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique) | 860 | static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique) |
855 | { | 861 | { |
856 | struct list_head *entry; | 862 | struct list_head *entry; |
857 | 863 | ||
858 | list_for_each(entry, &fc->processing) { | 864 | list_for_each(entry, &fc->processing) { |
859 | struct fuse_req *req; | 865 | struct fuse_req *req; |
860 | req = list_entry(entry, struct fuse_req, list); | 866 | req = list_entry(entry, struct fuse_req, list); |
861 | if (req->in.h.unique == unique || req->intr_unique == unique) | 867 | if (req->in.h.unique == unique || req->intr_unique == unique) |
862 | return req; | 868 | return req; |
863 | } | 869 | } |
864 | return NULL; | 870 | return NULL; |
865 | } | 871 | } |
866 | 872 | ||
867 | static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out, | 873 | static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out, |
868 | unsigned nbytes) | 874 | unsigned nbytes) |
869 | { | 875 | { |
870 | unsigned reqsize = sizeof(struct fuse_out_header); | 876 | unsigned reqsize = sizeof(struct fuse_out_header); |
871 | 877 | ||
872 | if (out->h.error) | 878 | if (out->h.error) |
873 | return nbytes != reqsize ? -EINVAL : 0; | 879 | return nbytes != reqsize ? -EINVAL : 0; |
874 | 880 | ||
875 | reqsize += len_args(out->numargs, out->args); | 881 | reqsize += len_args(out->numargs, out->args); |
876 | 882 | ||
877 | if (reqsize < nbytes || (reqsize > nbytes && !out->argvar)) | 883 | if (reqsize < nbytes || (reqsize > nbytes && !out->argvar)) |
878 | return -EINVAL; | 884 | return -EINVAL; |
879 | else if (reqsize > nbytes) { | 885 | else if (reqsize > nbytes) { |
880 | struct fuse_arg *lastarg = &out->args[out->numargs-1]; | 886 | struct fuse_arg *lastarg = &out->args[out->numargs-1]; |
881 | unsigned diffsize = reqsize - nbytes; | 887 | unsigned diffsize = reqsize - nbytes; |
882 | if (diffsize > lastarg->size) | 888 | if (diffsize > lastarg->size) |
883 | return -EINVAL; | 889 | return -EINVAL; |
884 | lastarg->size -= diffsize; | 890 | lastarg->size -= diffsize; |
885 | } | 891 | } |
886 | return fuse_copy_args(cs, out->numargs, out->argpages, out->args, | 892 | return fuse_copy_args(cs, out->numargs, out->argpages, out->args, |
887 | out->page_zeroing); | 893 | out->page_zeroing); |
888 | } | 894 | } |
889 | 895 | ||
890 | /* | 896 | /* |
891 | * Write a single reply to a request. First the header is copied from | 897 | * Write a single reply to a request. First the header is copied from |
892 | * the write buffer. The request is then searched on the processing | 898 | * the write buffer. The request is then searched on the processing |
893 | * list by the unique ID found in the header. If found, then remove | 899 | * list by the unique ID found in the header. If found, then remove |
894 | * it from the list and copy the rest of the buffer to the request. | 900 | * it from the list and copy the rest of the buffer to the request. |
895 | * The request is finished by calling request_end() | 901 | * The request is finished by calling request_end() |
896 | */ | 902 | */ |
897 | static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, | 903 | static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, |
898 | unsigned long nr_segs, loff_t pos) | 904 | unsigned long nr_segs, loff_t pos) |
899 | { | 905 | { |
900 | int err; | 906 | int err; |
901 | unsigned nbytes = iov_length(iov, nr_segs); | 907 | unsigned nbytes = iov_length(iov, nr_segs); |
902 | struct fuse_req *req; | 908 | struct fuse_req *req; |
903 | struct fuse_out_header oh; | 909 | struct fuse_out_header oh; |
904 | struct fuse_copy_state cs; | 910 | struct fuse_copy_state cs; |
905 | struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); | 911 | struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); |
906 | if (!fc) | 912 | if (!fc) |
907 | return -EPERM; | 913 | return -EPERM; |
908 | 914 | ||
909 | fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs); | 915 | fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs); |
910 | if (nbytes < sizeof(struct fuse_out_header)) | 916 | if (nbytes < sizeof(struct fuse_out_header)) |
911 | return -EINVAL; | 917 | return -EINVAL; |
912 | 918 | ||
913 | err = fuse_copy_one(&cs, &oh, sizeof(oh)); | 919 | err = fuse_copy_one(&cs, &oh, sizeof(oh)); |
914 | if (err) | 920 | if (err) |
915 | goto err_finish; | 921 | goto err_finish; |
916 | 922 | ||
917 | err = -EINVAL; | 923 | err = -EINVAL; |
918 | if (oh.len != nbytes) | 924 | if (oh.len != nbytes) |
919 | goto err_finish; | 925 | goto err_finish; |
920 | 926 | ||
921 | /* | 927 | /* |
922 | * Zero oh.unique indicates unsolicited notification message | 928 | * Zero oh.unique indicates unsolicited notification message |
923 | * and error contains notification code. | 929 | * and error contains notification code. |
924 | */ | 930 | */ |
925 | if (!oh.unique) { | 931 | if (!oh.unique) { |
926 | err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs); | 932 | err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs); |
927 | fuse_copy_finish(&cs); | ||
928 | return err ? err : nbytes; | 933 | return err ? err : nbytes; |
929 | } | 934 | } |
930 | 935 | ||
931 | err = -EINVAL; | 936 | err = -EINVAL; |
932 | if (oh.error <= -1000 || oh.error > 0) | 937 | if (oh.error <= -1000 || oh.error > 0) |
933 | goto err_finish; | 938 | goto err_finish; |
934 | 939 | ||
935 | spin_lock(&fc->lock); | 940 | spin_lock(&fc->lock); |
936 | err = -ENOENT; | 941 | err = -ENOENT; |
937 | if (!fc->connected) | 942 | if (!fc->connected) |
938 | goto err_unlock; | 943 | goto err_unlock; |
939 | 944 | ||
940 | req = request_find(fc, oh.unique); | 945 | req = request_find(fc, oh.unique); |
941 | if (!req) | 946 | if (!req) |
942 | goto err_unlock; | 947 | goto err_unlock; |
943 | 948 | ||
944 | if (req->aborted) { | 949 | if (req->aborted) { |
945 | spin_unlock(&fc->lock); | 950 | spin_unlock(&fc->lock); |
946 | fuse_copy_finish(&cs); | 951 | fuse_copy_finish(&cs); |
947 | spin_lock(&fc->lock); | 952 | spin_lock(&fc->lock); |
948 | request_end(fc, req); | 953 | request_end(fc, req); |
949 | return -ENOENT; | 954 | return -ENOENT; |
950 | } | 955 | } |
951 | /* Is it an interrupt reply? */ | 956 | /* Is it an interrupt reply? */ |
952 | if (req->intr_unique == oh.unique) { | 957 | if (req->intr_unique == oh.unique) { |
953 | err = -EINVAL; | 958 | err = -EINVAL; |
954 | if (nbytes != sizeof(struct fuse_out_header)) | 959 | if (nbytes != sizeof(struct fuse_out_header)) |
955 | goto err_unlock; | 960 | goto err_unlock; |
956 | 961 | ||
957 | if (oh.error == -ENOSYS) | 962 | if (oh.error == -ENOSYS) |
958 | fc->no_interrupt = 1; | 963 | fc->no_interrupt = 1; |
959 | else if (oh.error == -EAGAIN) | 964 | else if (oh.error == -EAGAIN) |
960 | queue_interrupt(fc, req); | 965 | queue_interrupt(fc, req); |
961 | 966 | ||
962 | spin_unlock(&fc->lock); | 967 | spin_unlock(&fc->lock); |
963 | fuse_copy_finish(&cs); | 968 | fuse_copy_finish(&cs); |
964 | return nbytes; | 969 | return nbytes; |
965 | } | 970 | } |
966 | 971 | ||
967 | req->state = FUSE_REQ_WRITING; | 972 | req->state = FUSE_REQ_WRITING; |
968 | list_move(&req->list, &fc->io); | 973 | list_move(&req->list, &fc->io); |
969 | req->out.h = oh; | 974 | req->out.h = oh; |
970 | req->locked = 1; | 975 | req->locked = 1; |
971 | cs.req = req; | 976 | cs.req = req; |
972 | spin_unlock(&fc->lock); | 977 | spin_unlock(&fc->lock); |
973 | 978 | ||
974 | err = copy_out_args(&cs, &req->out, nbytes); | 979 | err = copy_out_args(&cs, &req->out, nbytes); |
975 | fuse_copy_finish(&cs); | 980 | fuse_copy_finish(&cs); |
976 | 981 | ||
977 | spin_lock(&fc->lock); | 982 | spin_lock(&fc->lock); |
978 | req->locked = 0; | 983 | req->locked = 0; |
979 | if (!err) { | 984 | if (!err) { |
980 | if (req->aborted) | 985 | if (req->aborted) |
981 | err = -ENOENT; | 986 | err = -ENOENT; |
982 | } else if (!req->aborted) | 987 | } else if (!req->aborted) |
983 | req->out.h.error = -EIO; | 988 | req->out.h.error = -EIO; |
984 | request_end(fc, req); | 989 | request_end(fc, req); |
985 | 990 | ||
986 | return err ? err : nbytes; | 991 | return err ? err : nbytes; |
987 | 992 | ||
988 | err_unlock: | 993 | err_unlock: |
989 | spin_unlock(&fc->lock); | 994 | spin_unlock(&fc->lock); |
990 | err_finish: | 995 | err_finish: |
991 | fuse_copy_finish(&cs); | 996 | fuse_copy_finish(&cs); |
992 | return err; | 997 | return err; |
993 | } | 998 | } |
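
Seen from the daemon, the checks in fuse_dev_write() reduce to a few rules: each write() carries exactly one fuse_out_header, oh.len must equal the number of bytes written, oh.unique must name an outstanding request (zero marks an unsolicited notification, with the notification code carried in oh.error), and oh.error must be zero or a negative errno above -1000. An error reply is therefore the bare header. A hedged sketch, under the same assumptions and includes as the reply_read() example above:

/* Fail a request with an errno value, e.g. reply_err(fd, unique, ENOENT).
 * The reply consists of the header alone. */
static int reply_err(int fd, uint64_t unique, int err)
{
	struct fuse_out_header oh = {
		.len	= sizeof(oh),
		.error	= -err,		/* negative errno, must stay above -1000 */
		.unique	= unique,
	};

	return write(fd, &oh, sizeof(oh)) == (ssize_t)sizeof(oh) ? 0 : -1;
}
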
994 | 999 | ||
995 | static unsigned fuse_dev_poll(struct file *file, poll_table *wait) | 1000 | static unsigned fuse_dev_poll(struct file *file, poll_table *wait) |
996 | { | 1001 | { |
997 | unsigned mask = POLLOUT | POLLWRNORM; | 1002 | unsigned mask = POLLOUT | POLLWRNORM; |
998 | struct fuse_conn *fc = fuse_get_conn(file); | 1003 | struct fuse_conn *fc = fuse_get_conn(file); |
999 | if (!fc) | 1004 | if (!fc) |
1000 | return POLLERR; | 1005 | return POLLERR; |
1001 | 1006 | ||
1002 | poll_wait(file, &fc->waitq, wait); | 1007 | poll_wait(file, &fc->waitq, wait); |
1003 | 1008 | ||
1004 | spin_lock(&fc->lock); | 1009 | spin_lock(&fc->lock); |
1005 | if (!fc->connected) | 1010 | if (!fc->connected) |
1006 | mask = POLLERR; | 1011 | mask = POLLERR; |
1007 | else if (request_pending(fc)) | 1012 | else if (request_pending(fc)) |
1008 | mask |= POLLIN | POLLRDNORM; | 1013 | mask |= POLLIN | POLLRDNORM; |
1009 | spin_unlock(&fc->lock); | 1014 | spin_unlock(&fc->lock); |
1010 | 1015 | ||
1011 | return mask; | 1016 | return mask; |
1012 | } | 1017 | } |
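
fuse_dev_poll() exists so a daemon can multiplex /dev/fuse with its other descriptors instead of blocking in read(): POLLIN | POLLRDNORM is reported once a request is pending, POLLOUT | POLLWRNORM is always set (replies never block), and POLLERR means the connection is gone. A minimal wait loop built on that contract, again a sketch with fd being the device descriptor:

#include <poll.h>

/* Returns 1 when a request can be read without blocking, 0 on timeout,
 * -1 on error or when the connection has been aborted. */
static int wait_for_request(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;
	if (pfd.revents & POLLERR)
		return -1;
	return (pfd.revents & POLLIN) ? 1 : 0;
}
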
1013 | 1018 | ||
1014 | /* | 1019 | /* |
1015 | * Abort all requests on the given list (pending or processing) | 1020 | * Abort all requests on the given list (pending or processing) |
1016 | * | 1021 | * |
1017 | * This function releases and reacquires fc->lock | 1022 | * This function releases and reacquires fc->lock |
1018 | */ | 1023 | */ |
1019 | static void end_requests(struct fuse_conn *fc, struct list_head *head) | 1024 | static void end_requests(struct fuse_conn *fc, struct list_head *head) |
1020 | __releases(&fc->lock) | 1025 | __releases(&fc->lock) |
1021 | __acquires(&fc->lock) | 1026 | __acquires(&fc->lock) |
1022 | { | 1027 | { |
1023 | while (!list_empty(head)) { | 1028 | while (!list_empty(head)) { |
1024 | struct fuse_req *req; | 1029 | struct fuse_req *req; |
1025 | req = list_entry(head->next, struct fuse_req, list); | 1030 | req = list_entry(head->next, struct fuse_req, list); |
1026 | req->out.h.error = -ECONNABORTED; | 1031 | req->out.h.error = -ECONNABORTED; |
1027 | request_end(fc, req); | 1032 | request_end(fc, req); |
1028 | spin_lock(&fc->lock); | 1033 | spin_lock(&fc->lock); |
1029 | } | 1034 | } |
1030 | } | 1035 | } |
1031 | 1036 | ||
1032 | /* | 1037 | /* |
1033 | * Abort requests under I/O | 1038 | * Abort requests under I/O |
1034 | * | 1039 | * |
1035 | * The requests are set to aborted and finished, and the request | 1040 | * The requests are set to aborted and finished, and the request |
1036 | * waiter is woken up. This will make request_wait_answer() wait | 1041 | * waiter is woken up. This will make request_wait_answer() wait |
1037 | * until the request is unlocked and then return. | 1042 | * until the request is unlocked and then return. |
1038 | * | 1043 | * |
1039 | * If the request is asynchronous, then the end function needs to be | 1044 | * If the request is asynchronous, then the end function needs to be |
1040 | * called after waiting for the request to be unlocked (if it was | 1045 | * called after waiting for the request to be unlocked (if it was |
1041 | * locked). | 1046 | * locked). |
1042 | */ | 1047 | */ |
1043 | static void end_io_requests(struct fuse_conn *fc) | 1048 | static void end_io_requests(struct fuse_conn *fc) |
1044 | __releases(&fc->lock) | 1049 | __releases(&fc->lock) |
1045 | __acquires(&fc->lock) | 1050 | __acquires(&fc->lock) |
1046 | { | 1051 | { |
1047 | while (!list_empty(&fc->io)) { | 1052 | while (!list_empty(&fc->io)) { |
1048 | struct fuse_req *req = | 1053 | struct fuse_req *req = |
1049 | list_entry(fc->io.next, struct fuse_req, list); | 1054 | list_entry(fc->io.next, struct fuse_req, list); |
1050 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; | 1055 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; |
1051 | 1056 | ||
1052 | req->aborted = 1; | 1057 | req->aborted = 1; |
1053 | req->out.h.error = -ECONNABORTED; | 1058 | req->out.h.error = -ECONNABORTED; |
1054 | req->state = FUSE_REQ_FINISHED; | 1059 | req->state = FUSE_REQ_FINISHED; |
1055 | list_del_init(&req->list); | 1060 | list_del_init(&req->list); |
1056 | wake_up(&req->waitq); | 1061 | wake_up(&req->waitq); |
1057 | if (end) { | 1062 | if (end) { |
1058 | req->end = NULL; | 1063 | req->end = NULL; |
1059 | __fuse_get_request(req); | 1064 | __fuse_get_request(req); |
1060 | spin_unlock(&fc->lock); | 1065 | spin_unlock(&fc->lock); |
1061 | wait_event(req->waitq, !req->locked); | 1066 | wait_event(req->waitq, !req->locked); |
1062 | end(fc, req); | 1067 | end(fc, req); |
1063 | fuse_put_request(fc, req); | 1068 | fuse_put_request(fc, req); |
1064 | spin_lock(&fc->lock); | 1069 | spin_lock(&fc->lock); |
1065 | } | 1070 | } |
1066 | } | 1071 | } |
1067 | } | 1072 | } |
1068 | 1073 | ||
1069 | /* | 1074 | /* |
1070 | * Abort all requests. | 1075 | * Abort all requests. |
1071 | * | 1076 | * |
1072 | * Emergency exit in case of a malicious or accidental deadlock, or | 1077 | * Emergency exit in case of a malicious or accidental deadlock, or |
1073 | * just a hung filesystem. | 1078 | * just a hung filesystem. |
1074 | * | 1079 | * |
1075 | * The same effect is usually achievable through killing the | 1080 | * The same effect is usually achievable through killing the |
1076 | * filesystem daemon and all users of the filesystem. The exception | 1081 | * filesystem daemon and all users of the filesystem. The exception |
1077 | * is the combination of an asynchronous request and the tricky | 1082 | * is the combination of an asynchronous request and the tricky |
1078 | * deadlock (see Documentation/filesystems/fuse.txt). | 1083 | * deadlock (see Documentation/filesystems/fuse.txt). |
1079 | * | 1084 | * |
1080 | * During the aborting, progression of requests from the pending and | 1085 | * During the aborting, progression of requests from the pending and |
1081 | * processing lists onto the io list, and progression of new requests | 1086 | * processing lists onto the io list, and progression of new requests |
1082 | * onto the pending list is prevented by fc->connected being false. | 1087 | * onto the pending list is prevented by fc->connected being false. |
1083 | * | 1088 | * |
1084 | * Progression of requests under I/O to the processing list is | 1089 | * Progression of requests under I/O to the processing list is |
1085 | * prevented by the req->aborted flag being true for these requests. | 1090 | * prevented by the req->aborted flag being true for these requests. |
1086 | * For this reason requests on the io list must be aborted first. | 1091 | * For this reason requests on the io list must be aborted first. |
1087 | */ | 1092 | */ |
1088 | void fuse_abort_conn(struct fuse_conn *fc) | 1093 | void fuse_abort_conn(struct fuse_conn *fc) |
1089 | { | 1094 | { |
1090 | spin_lock(&fc->lock); | 1095 | spin_lock(&fc->lock); |
1091 | if (fc->connected) { | 1096 | if (fc->connected) { |
1092 | fc->connected = 0; | 1097 | fc->connected = 0; |
1093 | fc->blocked = 0; | 1098 | fc->blocked = 0; |
1094 | end_io_requests(fc); | 1099 | end_io_requests(fc); |
1095 | end_requests(fc, &fc->pending); | 1100 | end_requests(fc, &fc->pending); |
1096 | end_requests(fc, &fc->processing); | 1101 | end_requests(fc, &fc->processing); |
1097 | wake_up_all(&fc->waitq); | 1102 | wake_up_all(&fc->waitq); |
1098 | wake_up_all(&fc->blocked_waitq); | 1103 | wake_up_all(&fc->blocked_waitq); |
1099 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); | 1104 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); |
1100 | } | 1105 | } |
1101 | spin_unlock(&fc->lock); | 1106 | spin_unlock(&fc->lock); |
1102 | } | 1107 | } |
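
fuse_abort_conn() is normally reached from userspace through the fuse control filesystem (mounted on /sys/fs/fuse/connections), where writing to a connection's abort file tears the mount down even if the daemon itself is deadlocked. A hedged sketch of that path, assuming the connection id (the directory name, i.e. the mount's device id) is already known:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Force-abort a hung fuse connection via fusectl (illustrative helper). */
static int fuse_force_abort(unsigned long conn_id)
{
	char path[64];
	int fd, ret;

	snprintf(path, sizeof(path), "/sys/fs/fuse/connections/%lu/abort",
		 conn_id);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = (write(fd, "1", 1) == 1) ? 0 : -1;
	close(fd);
	return ret;
}
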
1103 | 1108 | ||
1104 | static int fuse_dev_release(struct inode *inode, struct file *file) | 1109 | static int fuse_dev_release(struct inode *inode, struct file *file) |
1105 | { | 1110 | { |
1106 | struct fuse_conn *fc = fuse_get_conn(file); | 1111 | struct fuse_conn *fc = fuse_get_conn(file); |
1107 | if (fc) { | 1112 | if (fc) { |
1108 | spin_lock(&fc->lock); | 1113 | spin_lock(&fc->lock); |
1109 | fc->connected = 0; | 1114 | fc->connected = 0; |
1110 | end_requests(fc, &fc->pending); | 1115 | end_requests(fc, &fc->pending); |
1111 | end_requests(fc, &fc->processing); | 1116 | end_requests(fc, &fc->processing); |
1112 | spin_unlock(&fc->lock); | 1117 | spin_unlock(&fc->lock); |
1113 | fuse_conn_put(fc); | 1118 | fuse_conn_put(fc); |
1114 | } | 1119 | } |
1115 | 1120 | ||
1116 | return 0; | 1121 | return 0; |
1117 | } | 1122 | } |
1118 | 1123 | ||
1119 | static int fuse_dev_fasync(int fd, struct file *file, int on) | 1124 | static int fuse_dev_fasync(int fd, struct file *file, int on) |
1120 | { | 1125 | { |
1121 | struct fuse_conn *fc = fuse_get_conn(file); | 1126 | struct fuse_conn *fc = fuse_get_conn(file); |
1122 | if (!fc) | 1127 | if (!fc) |
1123 | return -EPERM; | 1128 | return -EPERM; |
1124 | 1129 | ||
1125 | /* No locking - fasync_helper does its own locking */ | 1130 | /* No locking - fasync_helper does its own locking */ |
1126 | return fasync_helper(fd, file, on, &fc->fasync); | 1131 | return fasync_helper(fd, file, on, &fc->fasync); |
1127 | } | 1132 | } |
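
The fasync hook is the userspace-visible half of the kill_fasync() calls in this file: a daemon that prefers signal-driven I/O over poll() registers for SIGIO with the usual fcntl() sequence and is then signalled when the device needs attention (for example when the connection is aborted, as above). A sketch, with fd the /dev/fuse descriptor:

#include <fcntl.h>
#include <unistd.h>

/* Ask the kernel to send this process SIGIO when /dev/fuse becomes ready. */
static int enable_sigio(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1 || fcntl(fd, F_SETOWN, getpid()) == -1)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_ASYNC);
}
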
1128 | 1133 | ||
1129 | const struct file_operations fuse_dev_operations = { | 1134 | const struct file_operations fuse_dev_operations = { |
1130 | .owner = THIS_MODULE, | 1135 | .owner = THIS_MODULE, |
1131 | .llseek = no_llseek, | 1136 | .llseek = no_llseek, |
1132 | .read = do_sync_read, | 1137 | .read = do_sync_read, |
1133 | .aio_read = fuse_dev_read, | 1138 | .aio_read = fuse_dev_read, |
1134 | .write = do_sync_write, | 1139 | .write = do_sync_write, |
1135 | .aio_write = fuse_dev_write, | 1140 | .aio_write = fuse_dev_write, |
1136 | .poll = fuse_dev_poll, | 1141 | .poll = fuse_dev_poll, |
1137 | .release = fuse_dev_release, | 1142 | .release = fuse_dev_release, |
1138 | .fasync = fuse_dev_fasync, | 1143 | .fasync = fuse_dev_fasync, |
1139 | }; | 1144 | }; |
1140 | 1145 | ||
1141 | static struct miscdevice fuse_miscdevice = { | 1146 | static struct miscdevice fuse_miscdevice = { |
1142 | .minor = FUSE_MINOR, | 1147 | .minor = FUSE_MINOR, |
1143 | .name = "fuse", | 1148 | .name = "fuse", |
1144 | .fops = &fuse_dev_operations, | 1149 | .fops = &fuse_dev_operations, |
1145 | }; | 1150 | }; |
1146 | 1151 | ||
1147 | int __init fuse_dev_init(void) | 1152 | int __init fuse_dev_init(void) |
1148 | { | 1153 | { |
1149 | int err = -ENOMEM; | 1154 | int err = -ENOMEM; |
1150 | fuse_req_cachep = kmem_cache_create("fuse_request", | 1155 | fuse_req_cachep = kmem_cache_create("fuse_request", |
1151 | sizeof(struct fuse_req), | 1156 | sizeof(struct fuse_req), |
1152 | 0, 0, NULL); | 1157 | 0, 0, NULL); |
1153 | if (!fuse_req_cachep) | 1158 | if (!fuse_req_cachep) |
1154 | goto out; | 1159 | goto out; |
1155 | 1160 | ||
1156 | err = misc_register(&fuse_miscdevice); | 1161 | err = misc_register(&fuse_miscdevice); |
1157 | if (err) | 1162 | if (err) |
1158 | goto out_cache_clean; | 1163 | goto out_cache_clean; |
1159 | 1164 | ||
1160 | return 0; | 1165 | return 0; |
1161 | 1166 | ||
1162 | out_cache_clean: | 1167 | out_cache_clean: |
1163 | kmem_cache_destroy(fuse_req_cachep); | 1168 | kmem_cache_destroy(fuse_req_cachep); |
1164 | out: | 1169 | out: |
1165 | return err; | 1170 | return err; |
1166 | } | 1171 | } |
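
misc_register() is what creates the /dev/fuse character device (misc major, minor FUSE_MINOR). A mount helper opens that node and hands the open descriptor to the kernel via the fd= mount option; the daemon then serves requests on that same descriptor. A rough sketch of the hand-off, with the option string abbreviated (the rootmode/user_id/group_id values below are placeholders and privilege handling is omitted):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mount.h>
#include <unistd.h>

/* Open /dev/fuse and mount a fuse filesystem on 'mnt'; returns the device
 * fd the daemon should read requests from, or -1 on failure. */
static int mount_fuse(const char *mnt)
{
	char opts[128];
	int fd = open("/dev/fuse", O_RDWR);

	if (fd < 0)
		return -1;
	snprintf(opts, sizeof(opts),
		 "fd=%d,rootmode=40000,user_id=0,group_id=0", fd);
	if (mount("fuse", mnt, "fuse", 0, opts) == -1) {
		close(fd);
		return -1;
	}
	return fd;
}
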
1167 | 1172 | ||
1168 | void fuse_dev_cleanup(void) | 1173 | void fuse_dev_cleanup(void) |
1169 | { | 1174 | { |
1170 | misc_deregister(&fuse_miscdevice); | 1175 | misc_deregister(&fuse_miscdevice); |
1171 | kmem_cache_destroy(fuse_req_cachep); | 1176 | kmem_cache_destroy(fuse_req_cachep); |
1172 | } | 1177 | } |