Commit eb4cac10d9f7b006da842e2d37414d13e1333781
Committed by: Linus Torvalds
1 parent: 0492c37137
Exists in: master and in 7 other branches
NFS: Fix a list corruption problem
We must remove the request from whatever list it is currently on before we can add it to the dirty list.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
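The corruption mode is worth spelling out. nfs_mark_request_dirty() splices the request onto the inode's dirty list via nfs_list_add_request(), and, like the kernel's basic list_add() primitives, that path does not first unlink the entry from whatever list it may still be sitting on (for example, the inode's commit list). Re-adding a still-linked entry leaves two lists threaded through the same list_head. The stand-alone user-space sketch below illustrates this; the list helpers are simplified re-implementations in the spirit of <linux/list.h> rather than the kernel's own code, and the commit/dirty/req variables are illustrative stand-ins, not NFS structures.

#include <stdio.h>

/* Minimal doubly linked list, modelled on the kernel's struct list_head. */
struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Splice 'entry' in right after 'head'.  Note: no unlinking is done. */
static void list_add(struct list_head *entry, struct list_head *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

/* Unlink 'entry' from whatever list it is currently on. */
static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

int main(void)
{
        struct list_head commit = LIST_HEAD_INIT(commit);
        struct list_head dirty  = LIST_HEAD_INIT(dirty);
        struct list_head req    = LIST_HEAD_INIT(req);

        list_add(&req, &commit);        /* request queued for commit */

        /* Correct move -- what the patched nfs_page_mark_flush() does:
         * unlink from the current list, then add to the dirty list. */
        list_del(&req);
        list_add(&req, &dirty);
        printf("commit list empty again:      %d\n", commit.next == &commit); /* 1 */

        /* Rewind: put the request back on the commit list. */
        list_del(&req);
        list_add(&req, &commit);

        /* Buggy move -- the pre-patch behaviour: add without unlinking.
         * The commit list still believes req is its first entry, but
         * req's pointers now belong to the dirty list, so a walk of the
         * commit list escapes into the dirty list and never returns to
         * its own head. */
        list_add(&req, &dirty);
        printf("commit still points at req:   %d\n", commit.next == &req);         /* 1 */
        printf("walking commit reaches dirty: %d\n", commit.next->next == &dirty); /* 1 */
        return 0;
}

Both of the final checks print 1: the two lists end up cross-linked, which is exactly the state the added nfs_list_remove_request() call prevents.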
Showing 1 changed file with 3 additions and 1 deletion
fs/nfs/write.c
1 | /* | 1 | /* |
2 | * linux/fs/nfs/write.c | 2 | * linux/fs/nfs/write.c |
3 | * | 3 | * |
4 | * Write file data over NFS. | 4 | * Write file data over NFS. |
5 | * | 5 | * |
6 | * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> | 6 | * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/pagemap.h> | 12 | #include <linux/pagemap.h> |
13 | #include <linux/file.h> | 13 | #include <linux/file.h> |
14 | #include <linux/writeback.h> | 14 | #include <linux/writeback.h> |
15 | #include <linux/swap.h> | 15 | #include <linux/swap.h> |
16 | 16 | ||
17 | #include <linux/sunrpc/clnt.h> | 17 | #include <linux/sunrpc/clnt.h> |
18 | #include <linux/nfs_fs.h> | 18 | #include <linux/nfs_fs.h> |
19 | #include <linux/nfs_mount.h> | 19 | #include <linux/nfs_mount.h> |
20 | #include <linux/nfs_page.h> | 20 | #include <linux/nfs_page.h> |
21 | #include <linux/backing-dev.h> | 21 | #include <linux/backing-dev.h> |
22 | 22 | ||
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <linux/smp_lock.h> | 24 | #include <linux/smp_lock.h> |
25 | 25 | ||
26 | #include "delegation.h" | 26 | #include "delegation.h" |
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | #include "iostat.h" | 28 | #include "iostat.h" |
29 | 29 | ||
30 | #define NFSDBG_FACILITY NFSDBG_PAGECACHE | 30 | #define NFSDBG_FACILITY NFSDBG_PAGECACHE |
31 | 31 | ||
32 | #define MIN_POOL_WRITE (32) | 32 | #define MIN_POOL_WRITE (32) |
33 | #define MIN_POOL_COMMIT (4) | 33 | #define MIN_POOL_COMMIT (4) |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Local function declarations | 36 | * Local function declarations |
37 | */ | 37 | */ |
38 | static struct nfs_page * nfs_update_request(struct nfs_open_context*, | 38 | static struct nfs_page * nfs_update_request(struct nfs_open_context*, |
39 | struct page *, | 39 | struct page *, |
40 | unsigned int, unsigned int); | 40 | unsigned int, unsigned int); |
41 | static void nfs_mark_request_dirty(struct nfs_page *req); | 41 | static void nfs_mark_request_dirty(struct nfs_page *req); |
42 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); | 42 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); |
43 | static const struct rpc_call_ops nfs_write_partial_ops; | 43 | static const struct rpc_call_ops nfs_write_partial_ops; |
44 | static const struct rpc_call_ops nfs_write_full_ops; | 44 | static const struct rpc_call_ops nfs_write_full_ops; |
45 | static const struct rpc_call_ops nfs_commit_ops; | 45 | static const struct rpc_call_ops nfs_commit_ops; |
46 | 46 | ||
47 | static struct kmem_cache *nfs_wdata_cachep; | 47 | static struct kmem_cache *nfs_wdata_cachep; |
48 | static mempool_t *nfs_wdata_mempool; | 48 | static mempool_t *nfs_wdata_mempool; |
49 | static mempool_t *nfs_commit_mempool; | 49 | static mempool_t *nfs_commit_mempool; |
50 | 50 | ||
51 | struct nfs_write_data *nfs_commit_alloc(void) | 51 | struct nfs_write_data *nfs_commit_alloc(void) |
52 | { | 52 | { |
53 | struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS); | 53 | struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS); |
54 | 54 | ||
55 | if (p) { | 55 | if (p) { |
56 | memset(p, 0, sizeof(*p)); | 56 | memset(p, 0, sizeof(*p)); |
57 | INIT_LIST_HEAD(&p->pages); | 57 | INIT_LIST_HEAD(&p->pages); |
58 | } | 58 | } |
59 | return p; | 59 | return p; |
60 | } | 60 | } |
61 | 61 | ||
62 | void nfs_commit_rcu_free(struct rcu_head *head) | 62 | void nfs_commit_rcu_free(struct rcu_head *head) |
63 | { | 63 | { |
64 | struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); | 64 | struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); |
65 | if (p && (p->pagevec != &p->page_array[0])) | 65 | if (p && (p->pagevec != &p->page_array[0])) |
66 | kfree(p->pagevec); | 66 | kfree(p->pagevec); |
67 | mempool_free(p, nfs_commit_mempool); | 67 | mempool_free(p, nfs_commit_mempool); |
68 | } | 68 | } |
69 | 69 | ||
70 | void nfs_commit_free(struct nfs_write_data *wdata) | 70 | void nfs_commit_free(struct nfs_write_data *wdata) |
71 | { | 71 | { |
72 | call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); | 72 | call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); |
73 | } | 73 | } |
74 | 74 | ||
75 | struct nfs_write_data *nfs_writedata_alloc(size_t len) | 75 | struct nfs_write_data *nfs_writedata_alloc(size_t len) |
76 | { | 76 | { |
77 | unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 77 | unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
78 | struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); | 78 | struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); |
79 | 79 | ||
80 | if (p) { | 80 | if (p) { |
81 | memset(p, 0, sizeof(*p)); | 81 | memset(p, 0, sizeof(*p)); |
82 | INIT_LIST_HEAD(&p->pages); | 82 | INIT_LIST_HEAD(&p->pages); |
83 | p->npages = pagecount; | 83 | p->npages = pagecount; |
84 | if (pagecount <= ARRAY_SIZE(p->page_array)) | 84 | if (pagecount <= ARRAY_SIZE(p->page_array)) |
85 | p->pagevec = p->page_array; | 85 | p->pagevec = p->page_array; |
86 | else { | 86 | else { |
87 | p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS); | 87 | p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS); |
88 | if (!p->pagevec) { | 88 | if (!p->pagevec) { |
89 | mempool_free(p, nfs_wdata_mempool); | 89 | mempool_free(p, nfs_wdata_mempool); |
90 | p = NULL; | 90 | p = NULL; |
91 | } | 91 | } |
92 | } | 92 | } |
93 | } | 93 | } |
94 | return p; | 94 | return p; |
95 | } | 95 | } |
96 | 96 | ||
97 | static void nfs_writedata_rcu_free(struct rcu_head *head) | 97 | static void nfs_writedata_rcu_free(struct rcu_head *head) |
98 | { | 98 | { |
99 | struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); | 99 | struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); |
100 | if (p && (p->pagevec != &p->page_array[0])) | 100 | if (p && (p->pagevec != &p->page_array[0])) |
101 | kfree(p->pagevec); | 101 | kfree(p->pagevec); |
102 | mempool_free(p, nfs_wdata_mempool); | 102 | mempool_free(p, nfs_wdata_mempool); |
103 | } | 103 | } |
104 | 104 | ||
105 | static void nfs_writedata_free(struct nfs_write_data *wdata) | 105 | static void nfs_writedata_free(struct nfs_write_data *wdata) |
106 | { | 106 | { |
107 | call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free); | 107 | call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free); |
108 | } | 108 | } |
109 | 109 | ||
110 | void nfs_writedata_release(void *wdata) | 110 | void nfs_writedata_release(void *wdata) |
111 | { | 111 | { |
112 | nfs_writedata_free(wdata); | 112 | nfs_writedata_free(wdata); |
113 | } | 113 | } |
114 | 114 | ||
115 | static struct nfs_page *nfs_page_find_request_locked(struct page *page) | 115 | static struct nfs_page *nfs_page_find_request_locked(struct page *page) |
116 | { | 116 | { |
117 | struct nfs_page *req = NULL; | 117 | struct nfs_page *req = NULL; |
118 | 118 | ||
119 | if (PagePrivate(page)) { | 119 | if (PagePrivate(page)) { |
120 | req = (struct nfs_page *)page_private(page); | 120 | req = (struct nfs_page *)page_private(page); |
121 | if (req != NULL) | 121 | if (req != NULL) |
122 | atomic_inc(&req->wb_count); | 122 | atomic_inc(&req->wb_count); |
123 | } | 123 | } |
124 | return req; | 124 | return req; |
125 | } | 125 | } |
126 | 126 | ||
127 | static struct nfs_page *nfs_page_find_request(struct page *page) | 127 | static struct nfs_page *nfs_page_find_request(struct page *page) |
128 | { | 128 | { |
129 | struct nfs_page *req = NULL; | 129 | struct nfs_page *req = NULL; |
130 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; | 130 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; |
131 | 131 | ||
132 | spin_lock(req_lock); | 132 | spin_lock(req_lock); |
133 | req = nfs_page_find_request_locked(page); | 133 | req = nfs_page_find_request_locked(page); |
134 | spin_unlock(req_lock); | 134 | spin_unlock(req_lock); |
135 | return req; | 135 | return req; |
136 | } | 136 | } |
137 | 137 | ||
138 | /* Adjust the file length if we're writing beyond the end */ | 138 | /* Adjust the file length if we're writing beyond the end */ |
139 | static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) | 139 | static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) |
140 | { | 140 | { |
141 | struct inode *inode = page->mapping->host; | 141 | struct inode *inode = page->mapping->host; |
142 | loff_t end, i_size = i_size_read(inode); | 142 | loff_t end, i_size = i_size_read(inode); |
143 | unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; | 143 | unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; |
144 | 144 | ||
145 | if (i_size > 0 && page->index < end_index) | 145 | if (i_size > 0 && page->index < end_index) |
146 | return; | 146 | return; |
147 | end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count); | 147 | end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count); |
148 | if (i_size >= end) | 148 | if (i_size >= end) |
149 | return; | 149 | return; |
150 | nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); | 150 | nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); |
151 | i_size_write(inode, end); | 151 | i_size_write(inode, end); |
152 | } | 152 | } |
153 | 153 | ||
154 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ | 154 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ |
155 | static void nfs_set_pageerror(struct page *page) | 155 | static void nfs_set_pageerror(struct page *page) |
156 | { | 156 | { |
157 | SetPageError(page); | 157 | SetPageError(page); |
158 | nfs_zap_mapping(page->mapping->host, page->mapping); | 158 | nfs_zap_mapping(page->mapping->host, page->mapping); |
159 | } | 159 | } |
160 | 160 | ||
161 | /* We can set the PG_uptodate flag if we see that a write request | 161 | /* We can set the PG_uptodate flag if we see that a write request |
162 | * covers the full page. | 162 | * covers the full page. |
163 | */ | 163 | */ |
164 | static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count) | 164 | static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count) |
165 | { | 165 | { |
166 | if (PageUptodate(page)) | 166 | if (PageUptodate(page)) |
167 | return; | 167 | return; |
168 | if (base != 0) | 168 | if (base != 0) |
169 | return; | 169 | return; |
170 | if (count != nfs_page_length(page)) | 170 | if (count != nfs_page_length(page)) |
171 | return; | 171 | return; |
172 | if (count != PAGE_CACHE_SIZE) | 172 | if (count != PAGE_CACHE_SIZE) |
173 | memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count); | 173 | memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count); |
174 | SetPageUptodate(page); | 174 | SetPageUptodate(page); |
175 | } | 175 | } |
176 | 176 | ||
177 | static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, | 177 | static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, |
178 | unsigned int offset, unsigned int count) | 178 | unsigned int offset, unsigned int count) |
179 | { | 179 | { |
180 | struct nfs_page *req; | 180 | struct nfs_page *req; |
181 | int ret; | 181 | int ret; |
182 | 182 | ||
183 | for (;;) { | 183 | for (;;) { |
184 | req = nfs_update_request(ctx, page, offset, count); | 184 | req = nfs_update_request(ctx, page, offset, count); |
185 | if (!IS_ERR(req)) | 185 | if (!IS_ERR(req)) |
186 | break; | 186 | break; |
187 | ret = PTR_ERR(req); | 187 | ret = PTR_ERR(req); |
188 | if (ret != -EBUSY) | 188 | if (ret != -EBUSY) |
189 | return ret; | 189 | return ret; |
190 | ret = nfs_wb_page(page->mapping->host, page); | 190 | ret = nfs_wb_page(page->mapping->host, page); |
191 | if (ret != 0) | 191 | if (ret != 0) |
192 | return ret; | 192 | return ret; |
193 | } | 193 | } |
194 | /* Update file length */ | 194 | /* Update file length */ |
195 | nfs_grow_file(page, offset, count); | 195 | nfs_grow_file(page, offset, count); |
196 | /* Set the PG_uptodate flag? */ | 196 | /* Set the PG_uptodate flag? */ |
197 | nfs_mark_uptodate(page, offset, count); | 197 | nfs_mark_uptodate(page, offset, count); |
198 | nfs_unlock_request(req); | 198 | nfs_unlock_request(req); |
199 | return 0; | 199 | return 0; |
200 | } | 200 | } |
201 | 201 | ||
202 | static int wb_priority(struct writeback_control *wbc) | 202 | static int wb_priority(struct writeback_control *wbc) |
203 | { | 203 | { |
204 | if (wbc->for_reclaim) | 204 | if (wbc->for_reclaim) |
205 | return FLUSH_HIGHPRI; | 205 | return FLUSH_HIGHPRI; |
206 | if (wbc->for_kupdate) | 206 | if (wbc->for_kupdate) |
207 | return FLUSH_LOWPRI; | 207 | return FLUSH_LOWPRI; |
208 | return 0; | 208 | return 0; |
209 | } | 209 | } |
210 | 210 | ||
211 | /* | 211 | /* |
212 | * NFS congestion control | 212 | * NFS congestion control |
213 | */ | 213 | */ |
214 | 214 | ||
215 | int nfs_congestion_kb; | 215 | int nfs_congestion_kb; |
216 | 216 | ||
217 | #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10)) | 217 | #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10)) |
218 | #define NFS_CONGESTION_OFF_THRESH \ | 218 | #define NFS_CONGESTION_OFF_THRESH \ |
219 | (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2)) | 219 | (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2)) |
220 | 220 | ||
221 | static int nfs_set_page_writeback(struct page *page) | 221 | static int nfs_set_page_writeback(struct page *page) |
222 | { | 222 | { |
223 | int ret = test_set_page_writeback(page); | 223 | int ret = test_set_page_writeback(page); |
224 | 224 | ||
225 | if (!ret) { | 225 | if (!ret) { |
226 | struct inode *inode = page->mapping->host; | 226 | struct inode *inode = page->mapping->host; |
227 | struct nfs_server *nfss = NFS_SERVER(inode); | 227 | struct nfs_server *nfss = NFS_SERVER(inode); |
228 | 228 | ||
229 | if (atomic_inc_return(&nfss->writeback) > | 229 | if (atomic_inc_return(&nfss->writeback) > |
230 | NFS_CONGESTION_ON_THRESH) | 230 | NFS_CONGESTION_ON_THRESH) |
231 | set_bdi_congested(&nfss->backing_dev_info, WRITE); | 231 | set_bdi_congested(&nfss->backing_dev_info, WRITE); |
232 | } | 232 | } |
233 | return ret; | 233 | return ret; |
234 | } | 234 | } |
235 | 235 | ||
236 | static void nfs_end_page_writeback(struct page *page) | 236 | static void nfs_end_page_writeback(struct page *page) |
237 | { | 237 | { |
238 | struct inode *inode = page->mapping->host; | 238 | struct inode *inode = page->mapping->host; |
239 | struct nfs_server *nfss = NFS_SERVER(inode); | 239 | struct nfs_server *nfss = NFS_SERVER(inode); |
240 | 240 | ||
241 | end_page_writeback(page); | 241 | end_page_writeback(page); |
242 | if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) { | 242 | if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) { |
243 | clear_bdi_congested(&nfss->backing_dev_info, WRITE); | 243 | clear_bdi_congested(&nfss->backing_dev_info, WRITE); |
244 | congestion_end(WRITE); | 244 | congestion_end(WRITE); |
245 | } | 245 | } |
246 | } | 246 | } |
247 | 247 | ||
248 | /* | 248 | /* |
249 | * Find an associated nfs write request, and prepare to flush it out | 249 | * Find an associated nfs write request, and prepare to flush it out |
250 | * Returns 1 if there was no write request, or if the request was | 250 | * Returns 1 if there was no write request, or if the request was |
251 | * already tagged by nfs_set_page_dirty.Returns 0 if the request | 251 | * already tagged by nfs_set_page_dirty.Returns 0 if the request |
252 | * was not tagged. | 252 | * was not tagged. |
253 | * May also return an error if the user signalled nfs_wait_on_request(). | 253 | * May also return an error if the user signalled nfs_wait_on_request(). |
254 | */ | 254 | */ |
255 | static int nfs_page_mark_flush(struct page *page) | 255 | static int nfs_page_mark_flush(struct page *page) |
256 | { | 256 | { |
257 | struct nfs_page *req; | 257 | struct nfs_page *req; |
258 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; | 258 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; |
259 | int ret; | 259 | int ret; |
260 | 260 | ||
261 | spin_lock(req_lock); | 261 | spin_lock(req_lock); |
262 | for(;;) { | 262 | for(;;) { |
263 | req = nfs_page_find_request_locked(page); | 263 | req = nfs_page_find_request_locked(page); |
264 | if (req == NULL) { | 264 | if (req == NULL) { |
265 | spin_unlock(req_lock); | 265 | spin_unlock(req_lock); |
266 | return 1; | 266 | return 1; |
267 | } | 267 | } |
268 | if (nfs_lock_request_dontget(req)) | 268 | if (nfs_lock_request_dontget(req)) |
269 | break; | 269 | break; |
270 | /* Note: If we hold the page lock, as is the case in nfs_writepage, | 270 | /* Note: If we hold the page lock, as is the case in nfs_writepage, |
271 | * then the call to nfs_lock_request_dontget() will always | 271 | * then the call to nfs_lock_request_dontget() will always |
272 | * succeed provided that someone hasn't already marked the | 272 | * succeed provided that someone hasn't already marked the |
273 | * request as dirty (in which case we don't care). | 273 | * request as dirty (in which case we don't care). |
274 | */ | 274 | */ |
275 | spin_unlock(req_lock); | 275 | spin_unlock(req_lock); |
276 | ret = nfs_wait_on_request(req); | 276 | ret = nfs_wait_on_request(req); |
277 | nfs_release_request(req); | 277 | nfs_release_request(req); |
278 | if (ret != 0) | 278 | if (ret != 0) |
279 | return ret; | 279 | return ret; |
280 | spin_lock(req_lock); | 280 | spin_lock(req_lock); |
281 | } | 281 | } |
282 | spin_unlock(req_lock); | 282 | spin_unlock(req_lock); |
283 | if (nfs_set_page_writeback(page) == 0) | 283 | if (nfs_set_page_writeback(page) == 0) { |
284 | nfs_list_remove_request(req); | ||
284 | nfs_mark_request_dirty(req); | 285 | nfs_mark_request_dirty(req); |
286 | } | ||
285 | ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); | 287 | ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); |
286 | nfs_unlock_request(req); | 288 | nfs_unlock_request(req); |
287 | return ret; | 289 | return ret; |
288 | } | 290 | } |
289 | 291 | ||
290 | /* | 292 | /* |
291 | * Write an mmapped page to the server. | 293 | * Write an mmapped page to the server. |
292 | */ | 294 | */ |
293 | static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) | 295 | static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) |
294 | { | 296 | { |
295 | struct nfs_open_context *ctx; | 297 | struct nfs_open_context *ctx; |
296 | struct inode *inode = page->mapping->host; | 298 | struct inode *inode = page->mapping->host; |
297 | unsigned offset; | 299 | unsigned offset; |
298 | int err; | 300 | int err; |
299 | 301 | ||
300 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); | 302 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); |
301 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); | 303 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); |
302 | 304 | ||
303 | err = nfs_page_mark_flush(page); | 305 | err = nfs_page_mark_flush(page); |
304 | if (err <= 0) | 306 | if (err <= 0) |
305 | goto out; | 307 | goto out; |
306 | err = 0; | 308 | err = 0; |
307 | offset = nfs_page_length(page); | 309 | offset = nfs_page_length(page); |
308 | if (!offset) | 310 | if (!offset) |
309 | goto out; | 311 | goto out; |
310 | 312 | ||
311 | ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE); | 313 | ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE); |
312 | if (ctx == NULL) { | 314 | if (ctx == NULL) { |
313 | err = -EBADF; | 315 | err = -EBADF; |
314 | goto out; | 316 | goto out; |
315 | } | 317 | } |
316 | err = nfs_writepage_setup(ctx, page, 0, offset); | 318 | err = nfs_writepage_setup(ctx, page, 0, offset); |
317 | put_nfs_open_context(ctx); | 319 | put_nfs_open_context(ctx); |
318 | if (err != 0) | 320 | if (err != 0) |
319 | goto out; | 321 | goto out; |
320 | err = nfs_page_mark_flush(page); | 322 | err = nfs_page_mark_flush(page); |
321 | if (err > 0) | 323 | if (err > 0) |
322 | err = 0; | 324 | err = 0; |
323 | out: | 325 | out: |
324 | if (!wbc->for_writepages) | 326 | if (!wbc->for_writepages) |
325 | nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc)); | 327 | nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc)); |
326 | return err; | 328 | return err; |
327 | } | 329 | } |
328 | 330 | ||
329 | int nfs_writepage(struct page *page, struct writeback_control *wbc) | 331 | int nfs_writepage(struct page *page, struct writeback_control *wbc) |
330 | { | 332 | { |
331 | int err; | 333 | int err; |
332 | 334 | ||
333 | err = nfs_writepage_locked(page, wbc); | 335 | err = nfs_writepage_locked(page, wbc); |
334 | unlock_page(page); | 336 | unlock_page(page); |
335 | return err; | 337 | return err; |
336 | } | 338 | } |
337 | 339 | ||
338 | int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) | 340 | int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) |
339 | { | 341 | { |
340 | struct inode *inode = mapping->host; | 342 | struct inode *inode = mapping->host; |
341 | int err; | 343 | int err; |
342 | 344 | ||
343 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); | 345 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); |
344 | 346 | ||
345 | err = generic_writepages(mapping, wbc); | 347 | err = generic_writepages(mapping, wbc); |
346 | if (err) | 348 | if (err) |
347 | return err; | 349 | return err; |
348 | err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc)); | 350 | err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc)); |
349 | if (err < 0) | 351 | if (err < 0) |
350 | goto out; | 352 | goto out; |
351 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); | 353 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); |
352 | err = 0; | 354 | err = 0; |
353 | out: | 355 | out: |
354 | return err; | 356 | return err; |
355 | } | 357 | } |
356 | 358 | ||
357 | /* | 359 | /* |
358 | * Insert a write request into an inode | 360 | * Insert a write request into an inode |
359 | */ | 361 | */ |
360 | static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) | 362 | static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) |
361 | { | 363 | { |
362 | struct nfs_inode *nfsi = NFS_I(inode); | 364 | struct nfs_inode *nfsi = NFS_I(inode); |
363 | int error; | 365 | int error; |
364 | 366 | ||
365 | error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req); | 367 | error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req); |
366 | BUG_ON(error == -EEXIST); | 368 | BUG_ON(error == -EEXIST); |
367 | if (error) | 369 | if (error) |
368 | return error; | 370 | return error; |
369 | if (!nfsi->npages) { | 371 | if (!nfsi->npages) { |
370 | igrab(inode); | 372 | igrab(inode); |
371 | nfs_begin_data_update(inode); | 373 | nfs_begin_data_update(inode); |
372 | if (nfs_have_delegation(inode, FMODE_WRITE)) | 374 | if (nfs_have_delegation(inode, FMODE_WRITE)) |
373 | nfsi->change_attr++; | 375 | nfsi->change_attr++; |
374 | } | 376 | } |
375 | SetPagePrivate(req->wb_page); | 377 | SetPagePrivate(req->wb_page); |
376 | set_page_private(req->wb_page, (unsigned long)req); | 378 | set_page_private(req->wb_page, (unsigned long)req); |
377 | nfsi->npages++; | 379 | nfsi->npages++; |
378 | atomic_inc(&req->wb_count); | 380 | atomic_inc(&req->wb_count); |
379 | return 0; | 381 | return 0; |
380 | } | 382 | } |
381 | 383 | ||
382 | /* | 384 | /* |
383 | * Remove a write request from an inode | 385 | * Remove a write request from an inode |
384 | */ | 386 | */ |
385 | static void nfs_inode_remove_request(struct nfs_page *req) | 387 | static void nfs_inode_remove_request(struct nfs_page *req) |
386 | { | 388 | { |
387 | struct inode *inode = req->wb_context->dentry->d_inode; | 389 | struct inode *inode = req->wb_context->dentry->d_inode; |
388 | struct nfs_inode *nfsi = NFS_I(inode); | 390 | struct nfs_inode *nfsi = NFS_I(inode); |
389 | 391 | ||
390 | BUG_ON (!NFS_WBACK_BUSY(req)); | 392 | BUG_ON (!NFS_WBACK_BUSY(req)); |
391 | 393 | ||
392 | spin_lock(&nfsi->req_lock); | 394 | spin_lock(&nfsi->req_lock); |
393 | set_page_private(req->wb_page, 0); | 395 | set_page_private(req->wb_page, 0); |
394 | ClearPagePrivate(req->wb_page); | 396 | ClearPagePrivate(req->wb_page); |
395 | radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); | 397 | radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); |
396 | nfsi->npages--; | 398 | nfsi->npages--; |
397 | if (!nfsi->npages) { | 399 | if (!nfsi->npages) { |
398 | spin_unlock(&nfsi->req_lock); | 400 | spin_unlock(&nfsi->req_lock); |
399 | nfs_end_data_update(inode); | 401 | nfs_end_data_update(inode); |
400 | iput(inode); | 402 | iput(inode); |
401 | } else | 403 | } else |
402 | spin_unlock(&nfsi->req_lock); | 404 | spin_unlock(&nfsi->req_lock); |
403 | nfs_clear_request(req); | 405 | nfs_clear_request(req); |
404 | nfs_release_request(req); | 406 | nfs_release_request(req); |
405 | } | 407 | } |
406 | 408 | ||
407 | /* | 409 | /* |
408 | * Add a request to the inode's dirty list. | 410 | * Add a request to the inode's dirty list. |
409 | */ | 411 | */ |
410 | static void | 412 | static void |
411 | nfs_mark_request_dirty(struct nfs_page *req) | 413 | nfs_mark_request_dirty(struct nfs_page *req) |
412 | { | 414 | { |
413 | struct inode *inode = req->wb_context->dentry->d_inode; | 415 | struct inode *inode = req->wb_context->dentry->d_inode; |
414 | struct nfs_inode *nfsi = NFS_I(inode); | 416 | struct nfs_inode *nfsi = NFS_I(inode); |
415 | 417 | ||
416 | spin_lock(&nfsi->req_lock); | 418 | spin_lock(&nfsi->req_lock); |
417 | radix_tree_tag_set(&nfsi->nfs_page_tree, | 419 | radix_tree_tag_set(&nfsi->nfs_page_tree, |
418 | req->wb_index, NFS_PAGE_TAG_DIRTY); | 420 | req->wb_index, NFS_PAGE_TAG_DIRTY); |
419 | nfs_list_add_request(req, &nfsi->dirty); | 421 | nfs_list_add_request(req, &nfsi->dirty); |
420 | nfsi->ndirty++; | 422 | nfsi->ndirty++; |
421 | spin_unlock(&nfsi->req_lock); | 423 | spin_unlock(&nfsi->req_lock); |
422 | __mark_inode_dirty(inode, I_DIRTY_PAGES); | 424 | __mark_inode_dirty(inode, I_DIRTY_PAGES); |
423 | } | 425 | } |
424 | 426 | ||
425 | static void | 427 | static void |
426 | nfs_redirty_request(struct nfs_page *req) | 428 | nfs_redirty_request(struct nfs_page *req) |
427 | { | 429 | { |
428 | __set_page_dirty_nobuffers(req->wb_page); | 430 | __set_page_dirty_nobuffers(req->wb_page); |
429 | } | 431 | } |
430 | 432 | ||
431 | /* | 433 | /* |
432 | * Check if a request is dirty | 434 | * Check if a request is dirty |
433 | */ | 435 | */ |
434 | static inline int | 436 | static inline int |
435 | nfs_dirty_request(struct nfs_page *req) | 437 | nfs_dirty_request(struct nfs_page *req) |
436 | { | 438 | { |
437 | struct page *page = req->wb_page; | 439 | struct page *page = req->wb_page; |
438 | 440 | ||
439 | if (page == NULL) | 441 | if (page == NULL) |
440 | return 0; | 442 | return 0; |
441 | return !PageWriteback(req->wb_page); | 443 | return !PageWriteback(req->wb_page); |
442 | } | 444 | } |
443 | 445 | ||
444 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 446 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
445 | /* | 447 | /* |
446 | * Add a request to the inode's commit list. | 448 | * Add a request to the inode's commit list. |
447 | */ | 449 | */ |
448 | static void | 450 | static void |
449 | nfs_mark_request_commit(struct nfs_page *req) | 451 | nfs_mark_request_commit(struct nfs_page *req) |
450 | { | 452 | { |
451 | struct inode *inode = req->wb_context->dentry->d_inode; | 453 | struct inode *inode = req->wb_context->dentry->d_inode; |
452 | struct nfs_inode *nfsi = NFS_I(inode); | 454 | struct nfs_inode *nfsi = NFS_I(inode); |
453 | 455 | ||
454 | spin_lock(&nfsi->req_lock); | 456 | spin_lock(&nfsi->req_lock); |
455 | nfs_list_add_request(req, &nfsi->commit); | 457 | nfs_list_add_request(req, &nfsi->commit); |
456 | nfsi->ncommit++; | 458 | nfsi->ncommit++; |
457 | spin_unlock(&nfsi->req_lock); | 459 | spin_unlock(&nfsi->req_lock); |
458 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 460 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
459 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | 461 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
460 | } | 462 | } |
461 | #endif | 463 | #endif |
462 | 464 | ||
463 | /* | 465 | /* |
464 | * Wait for a request to complete. | 466 | * Wait for a request to complete. |
465 | * | 467 | * |
466 | * Interruptible by signals only if mounted with intr flag. | 468 | * Interruptible by signals only if mounted with intr flag. |
467 | */ | 469 | */ |
468 | static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages) | 470 | static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages) |
469 | { | 471 | { |
470 | struct nfs_inode *nfsi = NFS_I(inode); | 472 | struct nfs_inode *nfsi = NFS_I(inode); |
471 | struct nfs_page *req; | 473 | struct nfs_page *req; |
472 | unsigned long idx_end, next; | 474 | unsigned long idx_end, next; |
473 | unsigned int res = 0; | 475 | unsigned int res = 0; |
474 | int error; | 476 | int error; |
475 | 477 | ||
476 | if (npages == 0) | 478 | if (npages == 0) |
477 | idx_end = ~0; | 479 | idx_end = ~0; |
478 | else | 480 | else |
479 | idx_end = idx_start + npages - 1; | 481 | idx_end = idx_start + npages - 1; |
480 | 482 | ||
481 | next = idx_start; | 483 | next = idx_start; |
482 | while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) { | 484 | while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) { |
483 | if (req->wb_index > idx_end) | 485 | if (req->wb_index > idx_end) |
484 | break; | 486 | break; |
485 | 487 | ||
486 | next = req->wb_index + 1; | 488 | next = req->wb_index + 1; |
487 | BUG_ON(!NFS_WBACK_BUSY(req)); | 489 | BUG_ON(!NFS_WBACK_BUSY(req)); |
488 | 490 | ||
489 | atomic_inc(&req->wb_count); | 491 | atomic_inc(&req->wb_count); |
490 | spin_unlock(&nfsi->req_lock); | 492 | spin_unlock(&nfsi->req_lock); |
491 | error = nfs_wait_on_request(req); | 493 | error = nfs_wait_on_request(req); |
492 | nfs_release_request(req); | 494 | nfs_release_request(req); |
493 | spin_lock(&nfsi->req_lock); | 495 | spin_lock(&nfsi->req_lock); |
494 | if (error < 0) | 496 | if (error < 0) |
495 | return error; | 497 | return error; |
496 | res++; | 498 | res++; |
497 | } | 499 | } |
498 | return res; | 500 | return res; |
499 | } | 501 | } |
500 | 502 | ||
501 | static void nfs_cancel_dirty_list(struct list_head *head) | 503 | static void nfs_cancel_dirty_list(struct list_head *head) |
502 | { | 504 | { |
503 | struct nfs_page *req; | 505 | struct nfs_page *req; |
504 | while(!list_empty(head)) { | 506 | while(!list_empty(head)) { |
505 | req = nfs_list_entry(head->next); | 507 | req = nfs_list_entry(head->next); |
506 | nfs_list_remove_request(req); | 508 | nfs_list_remove_request(req); |
507 | nfs_end_page_writeback(req->wb_page); | 509 | nfs_end_page_writeback(req->wb_page); |
508 | nfs_inode_remove_request(req); | 510 | nfs_inode_remove_request(req); |
509 | nfs_clear_page_writeback(req); | 511 | nfs_clear_page_writeback(req); |
510 | } | 512 | } |
511 | } | 513 | } |
512 | 514 | ||
513 | static void nfs_cancel_commit_list(struct list_head *head) | 515 | static void nfs_cancel_commit_list(struct list_head *head) |
514 | { | 516 | { |
515 | struct nfs_page *req; | 517 | struct nfs_page *req; |
516 | 518 | ||
517 | while(!list_empty(head)) { | 519 | while(!list_empty(head)) { |
518 | req = nfs_list_entry(head->next); | 520 | req = nfs_list_entry(head->next); |
519 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 521 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
520 | nfs_list_remove_request(req); | 522 | nfs_list_remove_request(req); |
521 | nfs_inode_remove_request(req); | 523 | nfs_inode_remove_request(req); |
522 | nfs_unlock_request(req); | 524 | nfs_unlock_request(req); |
523 | } | 525 | } |
524 | } | 526 | } |
525 | 527 | ||
526 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 528 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
527 | /* | 529 | /* |
528 | * nfs_scan_commit - Scan an inode for commit requests | 530 | * nfs_scan_commit - Scan an inode for commit requests |
529 | * @inode: NFS inode to scan | 531 | * @inode: NFS inode to scan |
530 | * @dst: destination list | 532 | * @dst: destination list |
531 | * @idx_start: lower bound of page->index to scan. | 533 | * @idx_start: lower bound of page->index to scan. |
532 | * @npages: idx_start + npages sets the upper bound to scan. | 534 | * @npages: idx_start + npages sets the upper bound to scan. |
533 | * | 535 | * |
534 | * Moves requests from the inode's 'commit' request list. | 536 | * Moves requests from the inode's 'commit' request list. |
535 | * The requests are *not* checked to ensure that they form a contiguous set. | 537 | * The requests are *not* checked to ensure that they form a contiguous set. |
536 | */ | 538 | */ |
537 | static int | 539 | static int |
538 | nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) | 540 | nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) |
539 | { | 541 | { |
540 | struct nfs_inode *nfsi = NFS_I(inode); | 542 | struct nfs_inode *nfsi = NFS_I(inode); |
541 | int res = 0; | 543 | int res = 0; |
542 | 544 | ||
543 | if (nfsi->ncommit != 0) { | 545 | if (nfsi->ncommit != 0) { |
544 | res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages); | 546 | res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages); |
545 | nfsi->ncommit -= res; | 547 | nfsi->ncommit -= res; |
546 | if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit)) | 548 | if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit)) |
547 | printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n"); | 549 | printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n"); |
548 | } | 550 | } |
549 | return res; | 551 | return res; |
550 | } | 552 | } |
551 | #else | 553 | #else |
552 | static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) | 554 | static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) |
553 | { | 555 | { |
554 | return 0; | 556 | return 0; |
555 | } | 557 | } |
556 | #endif | 558 | #endif |
557 | 559 | ||
558 | static int nfs_wait_on_write_congestion(struct address_space *mapping) | 560 | static int nfs_wait_on_write_congestion(struct address_space *mapping) |
559 | { | 561 | { |
560 | struct inode *inode = mapping->host; | 562 | struct inode *inode = mapping->host; |
561 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 563 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
562 | int ret = 0; | 564 | int ret = 0; |
563 | 565 | ||
564 | might_sleep(); | 566 | might_sleep(); |
565 | 567 | ||
566 | if (!bdi_write_congested(bdi)) | 568 | if (!bdi_write_congested(bdi)) |
567 | return 0; | 569 | return 0; |
568 | 570 | ||
569 | nfs_inc_stats(inode, NFSIOS_CONGESTIONWAIT); | 571 | nfs_inc_stats(inode, NFSIOS_CONGESTIONWAIT); |
570 | 572 | ||
571 | do { | 573 | do { |
572 | struct rpc_clnt *clnt = NFS_CLIENT(inode); | 574 | struct rpc_clnt *clnt = NFS_CLIENT(inode); |
573 | sigset_t oldset; | 575 | sigset_t oldset; |
574 | 576 | ||
575 | rpc_clnt_sigmask(clnt, &oldset); | 577 | rpc_clnt_sigmask(clnt, &oldset); |
576 | ret = congestion_wait_interruptible(WRITE, HZ/10); | 578 | ret = congestion_wait_interruptible(WRITE, HZ/10); |
577 | rpc_clnt_sigunmask(clnt, &oldset); | 579 | rpc_clnt_sigunmask(clnt, &oldset); |
578 | if (ret == -ERESTARTSYS) | 580 | if (ret == -ERESTARTSYS) |
579 | break; | 581 | break; |
580 | ret = 0; | 582 | ret = 0; |
581 | } while (bdi_write_congested(bdi)); | 583 | } while (bdi_write_congested(bdi)); |
582 | 584 | ||
583 | return ret; | 585 | return ret; |
584 | } | 586 | } |
585 | 587 | ||
586 | /* | 588 | /* |
587 | * Try to update any existing write request, or create one if there is none. | 589 | * Try to update any existing write request, or create one if there is none. |
588 | * In order to match, the request's credentials must match those of | 590 | * In order to match, the request's credentials must match those of |
589 | * the calling process. | 591 | * the calling process. |
590 | * | 592 | * |
591 | * Note: Should always be called with the Page Lock held! | 593 | * Note: Should always be called with the Page Lock held! |
592 | */ | 594 | */ |
593 | static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | 595 | static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, |
594 | struct page *page, unsigned int offset, unsigned int bytes) | 596 | struct page *page, unsigned int offset, unsigned int bytes) |
595 | { | 597 | { |
596 | struct address_space *mapping = page->mapping; | 598 | struct address_space *mapping = page->mapping; |
597 | struct inode *inode = mapping->host; | 599 | struct inode *inode = mapping->host; |
598 | struct nfs_inode *nfsi = NFS_I(inode); | 600 | struct nfs_inode *nfsi = NFS_I(inode); |
599 | struct nfs_page *req, *new = NULL; | 601 | struct nfs_page *req, *new = NULL; |
600 | unsigned long rqend, end; | 602 | unsigned long rqend, end; |
601 | 603 | ||
602 | end = offset + bytes; | 604 | end = offset + bytes; |
603 | 605 | ||
604 | if (nfs_wait_on_write_congestion(mapping)) | 606 | if (nfs_wait_on_write_congestion(mapping)) |
605 | return ERR_PTR(-ERESTARTSYS); | 607 | return ERR_PTR(-ERESTARTSYS); |
606 | for (;;) { | 608 | for (;;) { |
607 | /* Loop over all inode entries and see if we find | 609 | /* Loop over all inode entries and see if we find |
608 | * A request for the page we wish to update | 610 | * A request for the page we wish to update |
609 | */ | 611 | */ |
610 | spin_lock(&nfsi->req_lock); | 612 | spin_lock(&nfsi->req_lock); |
611 | req = nfs_page_find_request_locked(page); | 613 | req = nfs_page_find_request_locked(page); |
612 | if (req) { | 614 | if (req) { |
613 | if (!nfs_lock_request_dontget(req)) { | 615 | if (!nfs_lock_request_dontget(req)) { |
614 | int error; | 616 | int error; |
615 | 617 | ||
616 | spin_unlock(&nfsi->req_lock); | 618 | spin_unlock(&nfsi->req_lock); |
617 | error = nfs_wait_on_request(req); | 619 | error = nfs_wait_on_request(req); |
618 | nfs_release_request(req); | 620 | nfs_release_request(req); |
619 | if (error < 0) { | 621 | if (error < 0) { |
620 | if (new) | 622 | if (new) |
621 | nfs_release_request(new); | 623 | nfs_release_request(new); |
622 | return ERR_PTR(error); | 624 | return ERR_PTR(error); |
623 | } | 625 | } |
624 | continue; | 626 | continue; |
625 | } | 627 | } |
626 | spin_unlock(&nfsi->req_lock); | 628 | spin_unlock(&nfsi->req_lock); |
627 | if (new) | 629 | if (new) |
628 | nfs_release_request(new); | 630 | nfs_release_request(new); |
629 | break; | 631 | break; |
630 | } | 632 | } |
631 | 633 | ||
632 | if (new) { | 634 | if (new) { |
633 | int error; | 635 | int error; |
634 | nfs_lock_request_dontget(new); | 636 | nfs_lock_request_dontget(new); |
635 | error = nfs_inode_add_request(inode, new); | 637 | error = nfs_inode_add_request(inode, new); |
636 | if (error) { | 638 | if (error) { |
637 | spin_unlock(&nfsi->req_lock); | 639 | spin_unlock(&nfsi->req_lock); |
638 | nfs_unlock_request(new); | 640 | nfs_unlock_request(new); |
639 | return ERR_PTR(error); | 641 | return ERR_PTR(error); |
640 | } | 642 | } |
641 | spin_unlock(&nfsi->req_lock); | 643 | spin_unlock(&nfsi->req_lock); |
642 | return new; | 644 | return new; |
643 | } | 645 | } |
644 | spin_unlock(&nfsi->req_lock); | 646 | spin_unlock(&nfsi->req_lock); |
645 | 647 | ||
646 | new = nfs_create_request(ctx, inode, page, offset, bytes); | 648 | new = nfs_create_request(ctx, inode, page, offset, bytes); |
647 | if (IS_ERR(new)) | 649 | if (IS_ERR(new)) |
648 | return new; | 650 | return new; |
649 | } | 651 | } |
650 | 652 | ||
651 | /* We have a request for our page. | 653 | /* We have a request for our page. |
652 | * If the creds don't match, or the | 654 | * If the creds don't match, or the |
653 | * page addresses don't match, | 655 | * page addresses don't match, |
654 | * tell the caller to wait on the conflicting | 656 | * tell the caller to wait on the conflicting |
655 | * request. | 657 | * request. |
656 | */ | 658 | */ |
657 | rqend = req->wb_offset + req->wb_bytes; | 659 | rqend = req->wb_offset + req->wb_bytes; |
658 | if (req->wb_context != ctx | 660 | if (req->wb_context != ctx |
659 | || req->wb_page != page | 661 | || req->wb_page != page |
660 | || !nfs_dirty_request(req) | 662 | || !nfs_dirty_request(req) |
661 | || offset > rqend || end < req->wb_offset) { | 663 | || offset > rqend || end < req->wb_offset) { |
662 | nfs_unlock_request(req); | 664 | nfs_unlock_request(req); |
663 | return ERR_PTR(-EBUSY); | 665 | return ERR_PTR(-EBUSY); |
664 | } | 666 | } |
665 | 667 | ||
666 | /* Okay, the request matches. Update the region */ | 668 | /* Okay, the request matches. Update the region */ |
667 | if (offset < req->wb_offset) { | 669 | if (offset < req->wb_offset) { |
668 | req->wb_offset = offset; | 670 | req->wb_offset = offset; |
669 | req->wb_pgbase = offset; | 671 | req->wb_pgbase = offset; |
670 | req->wb_bytes = rqend - req->wb_offset; | 672 | req->wb_bytes = rqend - req->wb_offset; |
671 | } | 673 | } |
672 | 674 | ||
673 | if (end > rqend) | 675 | if (end > rqend) |
674 | req->wb_bytes = end - req->wb_offset; | 676 | req->wb_bytes = end - req->wb_offset; |
675 | 677 | ||
676 | return req; | 678 | return req; |
677 | } | 679 | } |
678 | 680 | ||
679 | int nfs_flush_incompatible(struct file *file, struct page *page) | 681 | int nfs_flush_incompatible(struct file *file, struct page *page) |
680 | { | 682 | { |
681 | struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; | 683 | struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; |
682 | struct nfs_page *req; | 684 | struct nfs_page *req; |
683 | int do_flush, status; | 685 | int do_flush, status; |
684 | /* | 686 | /* |
685 | * Look for a request corresponding to this page. If there | 687 | * Look for a request corresponding to this page. If there |
686 | * is one, and it belongs to another file, we flush it out | 688 | * is one, and it belongs to another file, we flush it out |
687 | * before we try to copy anything into the page. Do this | 689 | * before we try to copy anything into the page. Do this |
688 | * due to the lack of an ACCESS-type call in NFSv2. | 690 | * due to the lack of an ACCESS-type call in NFSv2. |
689 | * Also do the same if we find a request from an existing | 691 | * Also do the same if we find a request from an existing |
690 | * dropped page. | 692 | * dropped page. |
691 | */ | 693 | */ |
692 | do { | 694 | do { |
693 | req = nfs_page_find_request(page); | 695 | req = nfs_page_find_request(page); |
694 | if (req == NULL) | 696 | if (req == NULL) |
695 | return 0; | 697 | return 0; |
696 | do_flush = req->wb_page != page || req->wb_context != ctx | 698 | do_flush = req->wb_page != page || req->wb_context != ctx |
697 | || !nfs_dirty_request(req); | 699 | || !nfs_dirty_request(req); |
698 | nfs_release_request(req); | 700 | nfs_release_request(req); |
699 | if (!do_flush) | 701 | if (!do_flush) |
700 | return 0; | 702 | return 0; |
701 | status = nfs_wb_page(page->mapping->host, page); | 703 | status = nfs_wb_page(page->mapping->host, page); |
702 | } while (status == 0); | 704 | } while (status == 0); |
703 | return status; | 705 | return status; |
704 | } | 706 | } |
705 | 707 | ||
706 | /* | 708 | /* |
707 | * Update and possibly write a cached page of an NFS file. | 709 | * Update and possibly write a cached page of an NFS file. |
708 | * | 710 | * |
709 | * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad | 711 | * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad |
710 | * things with a page scheduled for an RPC call (e.g. invalidate it). | 712 | * things with a page scheduled for an RPC call (e.g. invalidate it). |
711 | */ | 713 | */ |
712 | int nfs_updatepage(struct file *file, struct page *page, | 714 | int nfs_updatepage(struct file *file, struct page *page, |
713 | unsigned int offset, unsigned int count) | 715 | unsigned int offset, unsigned int count) |
714 | { | 716 | { |
715 | struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; | 717 | struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; |
716 | struct inode *inode = page->mapping->host; | 718 | struct inode *inode = page->mapping->host; |
717 | int status = 0; | 719 | int status = 0; |
718 | 720 | ||
719 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); | 721 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); |
720 | 722 | ||
721 | dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n", | 723 | dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n", |
722 | file->f_path.dentry->d_parent->d_name.name, | 724 | file->f_path.dentry->d_parent->d_name.name, |
723 | file->f_path.dentry->d_name.name, count, | 725 | file->f_path.dentry->d_name.name, count, |
724 | (long long)(page_offset(page) +offset)); | 726 | (long long)(page_offset(page) +offset)); |
725 | 727 | ||
726 | /* If we're not using byte range locks, and we know the page | 728 | /* If we're not using byte range locks, and we know the page |
727 | * is entirely in cache, it may be more efficient to avoid | 729 | * is entirely in cache, it may be more efficient to avoid |
728 | * fragmenting write requests. | 730 | * fragmenting write requests. |
729 | */ | 731 | */ |
730 | if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { | 732 | if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { |
731 | count = max(count + offset, nfs_page_length(page)); | 733 | count = max(count + offset, nfs_page_length(page)); |
732 | offset = 0; | 734 | offset = 0; |
733 | } | 735 | } |
734 | 736 | ||
735 | status = nfs_writepage_setup(ctx, page, offset, count); | 737 | status = nfs_writepage_setup(ctx, page, offset, count); |
736 | __set_page_dirty_nobuffers(page); | 738 | __set_page_dirty_nobuffers(page); |
737 | 739 | ||
738 | dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n", | 740 | dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n", |
739 | status, (long long)i_size_read(inode)); | 741 | status, (long long)i_size_read(inode)); |
740 | if (status < 0) | 742 | if (status < 0) |
741 | nfs_set_pageerror(page); | 743 | nfs_set_pageerror(page); |
742 | return status; | 744 | return status; |
743 | } | 745 | } |
744 | 746 | ||
745 | static void nfs_writepage_release(struct nfs_page *req) | 747 | static void nfs_writepage_release(struct nfs_page *req) |
746 | { | 748 | { |
747 | nfs_end_page_writeback(req->wb_page); | 749 | nfs_end_page_writeback(req->wb_page); |
748 | 750 | ||
749 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 751 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
750 | if (!PageError(req->wb_page)) { | 752 | if (!PageError(req->wb_page)) { |
751 | if (NFS_NEED_RESCHED(req)) { | 753 | if (NFS_NEED_RESCHED(req)) { |
752 | nfs_redirty_request(req); | 754 | nfs_redirty_request(req); |
753 | goto out; | 755 | goto out; |
754 | } else if (NFS_NEED_COMMIT(req)) { | 756 | } else if (NFS_NEED_COMMIT(req)) { |
755 | nfs_mark_request_commit(req); | 757 | nfs_mark_request_commit(req); |
756 | goto out; | 758 | goto out; |
757 | } | 759 | } |
758 | } | 760 | } |
759 | nfs_inode_remove_request(req); | 761 | nfs_inode_remove_request(req); |
760 | 762 | ||
761 | out: | 763 | out: |
762 | nfs_clear_commit(req); | 764 | nfs_clear_commit(req); |
763 | nfs_clear_reschedule(req); | 765 | nfs_clear_reschedule(req); |
764 | #else | 766 | #else |
765 | nfs_inode_remove_request(req); | 767 | nfs_inode_remove_request(req); |
766 | #endif | 768 | #endif |
767 | nfs_clear_page_writeback(req); | 769 | nfs_clear_page_writeback(req); |
768 | } | 770 | } |
769 | 771 | ||
770 | static inline int flush_task_priority(int how) | 772 | static inline int flush_task_priority(int how) |
771 | { | 773 | { |
772 | switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { | 774 | switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { |
773 | case FLUSH_HIGHPRI: | 775 | case FLUSH_HIGHPRI: |
774 | return RPC_PRIORITY_HIGH; | 776 | return RPC_PRIORITY_HIGH; |
775 | case FLUSH_LOWPRI: | 777 | case FLUSH_LOWPRI: |
776 | return RPC_PRIORITY_LOW; | 778 | return RPC_PRIORITY_LOW; |
777 | } | 779 | } |
778 | return RPC_PRIORITY_NORMAL; | 780 | return RPC_PRIORITY_NORMAL; |
779 | } | 781 | } |
780 | 782 | ||
781 | /* | 783 | /* |
782 | * Set up the argument/result storage required for the RPC call. | 784 | * Set up the argument/result storage required for the RPC call. |
783 | */ | 785 | */ |
784 | static void nfs_write_rpcsetup(struct nfs_page *req, | 786 | static void nfs_write_rpcsetup(struct nfs_page *req, |
785 | struct nfs_write_data *data, | 787 | struct nfs_write_data *data, |
786 | const struct rpc_call_ops *call_ops, | 788 | const struct rpc_call_ops *call_ops, |
787 | unsigned int count, unsigned int offset, | 789 | unsigned int count, unsigned int offset, |
788 | int how) | 790 | int how) |
789 | { | 791 | { |
790 | struct inode *inode; | 792 | struct inode *inode; |
791 | int flags; | 793 | int flags; |
792 | 794 | ||
793 | /* Set up the RPC argument and reply structs | 795 | /* Set up the RPC argument and reply structs |
794 | * NB: take care not to mess about with data->commit et al. */ | 796 | * NB: take care not to mess about with data->commit et al. */ |
795 | 797 | ||
796 | data->req = req; | 798 | data->req = req; |
797 | data->inode = inode = req->wb_context->dentry->d_inode; | 799 | data->inode = inode = req->wb_context->dentry->d_inode; |
798 | data->cred = req->wb_context->cred; | 800 | data->cred = req->wb_context->cred; |
799 | 801 | ||
800 | data->args.fh = NFS_FH(inode); | 802 | data->args.fh = NFS_FH(inode); |
801 | data->args.offset = req_offset(req) + offset; | 803 | data->args.offset = req_offset(req) + offset; |
802 | data->args.pgbase = req->wb_pgbase + offset; | 804 | data->args.pgbase = req->wb_pgbase + offset; |
803 | data->args.pages = data->pagevec; | 805 | data->args.pages = data->pagevec; |
804 | data->args.count = count; | 806 | data->args.count = count; |
805 | data->args.context = req->wb_context; | 807 | data->args.context = req->wb_context; |
806 | 808 | ||
807 | data->res.fattr = &data->fattr; | 809 | data->res.fattr = &data->fattr; |
808 | data->res.count = count; | 810 | data->res.count = count; |
809 | data->res.verf = &data->verf; | 811 | data->res.verf = &data->verf; |
810 | nfs_fattr_init(&data->fattr); | 812 | nfs_fattr_init(&data->fattr); |
811 | 813 | ||
812 | /* Set up the initial task struct. */ | 814 | /* Set up the initial task struct. */ |
813 | flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; | 815 | flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; |
814 | rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data); | 816 | rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data); |
815 | NFS_PROTO(inode)->write_setup(data, how); | 817 | NFS_PROTO(inode)->write_setup(data, how); |
816 | 818 | ||
817 | data->task.tk_priority = flush_task_priority(how); | 819 | data->task.tk_priority = flush_task_priority(how); |
818 | data->task.tk_cookie = (unsigned long)inode; | 820 | data->task.tk_cookie = (unsigned long)inode; |
819 | 821 | ||
820 | dprintk("NFS: %5u initiated write call " | 822 | dprintk("NFS: %5u initiated write call " |
821 | "(req %s/%Ld, %u bytes @ offset %Lu)\n", | 823 | "(req %s/%Ld, %u bytes @ offset %Lu)\n", |
822 | data->task.tk_pid, | 824 | data->task.tk_pid, |
823 | inode->i_sb->s_id, | 825 | inode->i_sb->s_id, |
824 | (long long)NFS_FILEID(inode), | 826 | (long long)NFS_FILEID(inode), |
825 | count, | 827 | count, |
826 | (unsigned long long)data->args.offset); | 828 | (unsigned long long)data->args.offset); |
827 | } | 829 | } |
828 | 830 | ||
829 | static void nfs_execute_write(struct nfs_write_data *data) | 831 | static void nfs_execute_write(struct nfs_write_data *data) |
830 | { | 832 | { |
831 | struct rpc_clnt *clnt = NFS_CLIENT(data->inode); | 833 | struct rpc_clnt *clnt = NFS_CLIENT(data->inode); |
832 | sigset_t oldset; | 834 | sigset_t oldset; |
833 | 835 | ||
834 | rpc_clnt_sigmask(clnt, &oldset); | 836 | rpc_clnt_sigmask(clnt, &oldset); |
835 | rpc_execute(&data->task); | 837 | rpc_execute(&data->task); |
836 | rpc_clnt_sigunmask(clnt, &oldset); | 838 | rpc_clnt_sigunmask(clnt, &oldset); |
837 | } | 839 | } |
838 | 840 | ||
839 | /* | 841 | /* |
840 | * Generate multiple small requests to write out a single | 842 | * Generate multiple small requests to write out a single |
841 | * contiguous dirty area on one page. | 843 | * contiguous dirty area on one page. |
842 | */ | 844 | */ |
843 | static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) | 845 | static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) |
844 | { | 846 | { |
845 | struct nfs_page *req = nfs_list_entry(head->next); | 847 | struct nfs_page *req = nfs_list_entry(head->next); |
846 | struct page *page = req->wb_page; | 848 | struct page *page = req->wb_page; |
847 | struct nfs_write_data *data; | 849 | struct nfs_write_data *data; |
848 | size_t wsize = NFS_SERVER(inode)->wsize, nbytes; | 850 | size_t wsize = NFS_SERVER(inode)->wsize, nbytes; |
849 | unsigned int offset; | 851 | unsigned int offset; |
850 | int requests = 0; | 852 | int requests = 0; |
851 | LIST_HEAD(list); | 853 | LIST_HEAD(list); |
852 | 854 | ||
853 | nfs_list_remove_request(req); | 855 | nfs_list_remove_request(req); |
854 | 856 | ||
855 | nbytes = req->wb_bytes; | 857 | nbytes = req->wb_bytes; |
856 | do { | 858 | do { |
857 | size_t len = min(nbytes, wsize); | 859 | size_t len = min(nbytes, wsize); |
858 | 860 | ||
859 | data = nfs_writedata_alloc(len); | 861 | data = nfs_writedata_alloc(len); |
860 | if (!data) | 862 | if (!data) |
861 | goto out_bad; | 863 | goto out_bad; |
862 | list_add(&data->pages, &list); | 864 | list_add(&data->pages, &list); |
863 | requests++; | 865 | requests++; |
864 | nbytes -= len; | 866 | nbytes -= len; |
865 | } while (nbytes != 0); | 867 | } while (nbytes != 0); |
866 | atomic_set(&req->wb_complete, requests); | 868 | atomic_set(&req->wb_complete, requests); |
867 | 869 | ||
868 | ClearPageError(page); | 870 | ClearPageError(page); |
869 | offset = 0; | 871 | offset = 0; |
870 | nbytes = req->wb_bytes; | 872 | nbytes = req->wb_bytes; |
871 | do { | 873 | do { |
872 | data = list_entry(list.next, struct nfs_write_data, pages); | 874 | data = list_entry(list.next, struct nfs_write_data, pages); |
873 | list_del_init(&data->pages); | 875 | list_del_init(&data->pages); |
874 | 876 | ||
875 | data->pagevec[0] = page; | 877 | data->pagevec[0] = page; |
876 | 878 | ||
877 | if (nbytes > wsize) { | 879 | if (nbytes > wsize) { |
878 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, | 880 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, |
879 | wsize, offset, how); | 881 | wsize, offset, how); |
880 | offset += wsize; | 882 | offset += wsize; |
881 | nbytes -= wsize; | 883 | nbytes -= wsize; |
882 | } else { | 884 | } else { |
883 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, | 885 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, |
884 | nbytes, offset, how); | 886 | nbytes, offset, how); |
885 | nbytes = 0; | 887 | nbytes = 0; |
886 | } | 888 | } |
887 | nfs_execute_write(data); | 889 | nfs_execute_write(data); |
888 | } while (nbytes != 0); | 890 | } while (nbytes != 0); |
889 | 891 | ||
890 | return 0; | 892 | return 0; |
891 | 893 | ||
892 | out_bad: | 894 | out_bad: |
893 | while (!list_empty(&list)) { | 895 | while (!list_empty(&list)) { |
894 | data = list_entry(list.next, struct nfs_write_data, pages); | 896 | data = list_entry(list.next, struct nfs_write_data, pages); |
895 | list_del(&data->pages); | 897 | list_del(&data->pages); |
896 | nfs_writedata_release(data); | 898 | nfs_writedata_release(data); |
897 | } | 899 | } |
898 | nfs_end_page_writeback(req->wb_page); | 900 | nfs_end_page_writeback(req->wb_page); |
899 | nfs_redirty_request(req); | 901 | nfs_redirty_request(req); |
900 | nfs_clear_page_writeback(req); | 902 | nfs_clear_page_writeback(req); |
901 | return -ENOMEM; | 903 | return -ENOMEM; |
902 | } | 904 | } |
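
nfs_flush_multi() makes two passes over the request: the first allocates one
nfs_write_data per wsize-sized chunk (counting the chunks into wb_complete), the
second fires one RPC per chunk at increasing offsets. A standalone sketch of the
chunking arithmetic, with hypothetical sizes:

#include <stdio.h>

int main(void)
{
        unsigned int wsize = 8192, nbytes = 20000, offset = 0;
        int requests = 0;

        while (nbytes != 0) {
                unsigned int len = nbytes > wsize ? wsize : nbytes;
                printf("request %d: %u bytes at offset %u\n",
                       ++requests, len, offset);
                offset += len;
                nbytes -= len;
        }
        return 0;       /* prints 8192@0, 8192@8192, 3616@16384 */
}
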
903 | 905 | ||
904 | /* | 906 | /* |
905 | * Create an RPC task for the given write request and kick it. | 907 | * Create an RPC task for the given write request and kick it. |
906 | * The page must have been locked by the caller. | 908 | * The page must have been locked by the caller. |
907 | * | 909 | * |
908 | * It may happen that the page we're passed is not marked dirty. | 910 | * It may happen that the page we're passed is not marked dirty. |
909 | * This is the case if nfs_updatepage detects a conflicting request | 911 | * This is the case if nfs_updatepage detects a conflicting request |
910 | * that has been written but not committed. | 912 | * that has been written but not committed. |
911 | */ | 913 | */ |
912 | static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) | 914 | static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) |
913 | { | 915 | { |
914 | struct nfs_page *req; | 916 | struct nfs_page *req; |
915 | struct page **pages; | 917 | struct page **pages; |
916 | struct nfs_write_data *data; | 918 | struct nfs_write_data *data; |
917 | unsigned int count; | 919 | unsigned int count; |
918 | 920 | ||
919 | data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize); | 921 | data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize); |
920 | if (!data) | 922 | if (!data) |
921 | goto out_bad; | 923 | goto out_bad; |
922 | 924 | ||
923 | pages = data->pagevec; | 925 | pages = data->pagevec; |
924 | count = 0; | 926 | count = 0; |
925 | while (!list_empty(head)) { | 927 | while (!list_empty(head)) { |
926 | req = nfs_list_entry(head->next); | 928 | req = nfs_list_entry(head->next); |
927 | nfs_list_remove_request(req); | 929 | nfs_list_remove_request(req); |
928 | nfs_list_add_request(req, &data->pages); | 930 | nfs_list_add_request(req, &data->pages); |
929 | ClearPageError(req->wb_page); | 931 | ClearPageError(req->wb_page); |
930 | *pages++ = req->wb_page; | 932 | *pages++ = req->wb_page; |
931 | count += req->wb_bytes; | 933 | count += req->wb_bytes; |
932 | } | 934 | } |
933 | req = nfs_list_entry(data->pages.next); | 935 | req = nfs_list_entry(data->pages.next); |
934 | 936 | ||
935 | /* Set up the argument struct */ | 937 | /* Set up the argument struct */ |
936 | nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how); | 938 | nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how); |
937 | 939 | ||
938 | nfs_execute_write(data); | 940 | nfs_execute_write(data); |
939 | return 0; | 941 | return 0; |
940 | out_bad: | 942 | out_bad: |
941 | while (!list_empty(head)) { | 943 | while (!list_empty(head)) { |
942 | struct nfs_page *req = nfs_list_entry(head->next); | 944 | struct nfs_page *req = nfs_list_entry(head->next); |
943 | nfs_list_remove_request(req); | 945 | nfs_list_remove_request(req); |
944 | nfs_end_page_writeback(req->wb_page); | 946 | nfs_end_page_writeback(req->wb_page); |
945 | nfs_redirty_request(req); | 947 | nfs_redirty_request(req); |
946 | nfs_clear_page_writeback(req); | 948 | nfs_clear_page_writeback(req); |
947 | } | 949 | } |
948 | return -ENOMEM; | 950 | return -ENOMEM; |
949 | } | 951 | } |
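
nfs_flush_one() drains the whole list into data->pages, filling the pagevec and
summing each request's wb_bytes so that a single RPC covers every page. A toy
sketch of that accumulation, with made-up per-request sizes:

#include <stdio.h>

int main(void)
{
        unsigned int bytes[] = { 4096, 4096, 1234 };    /* per-request wb_bytes */
        unsigned int count = 0;

        for (int i = 0; i < 3; i++)
                count += bytes[i];
        printf("one RPC covering %u bytes over 3 pages\n", count);  /* 9426 */
        return 0;
}
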
950 | 952 | ||
951 | static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how) | 953 | static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how) |
952 | { | 954 | { |
953 | LIST_HEAD(one_request); | 955 | LIST_HEAD(one_request); |
954 | int (*flush_one)(struct inode *, struct list_head *, int); | 956 | int (*flush_one)(struct inode *, struct list_head *, int); |
955 | struct nfs_page *req; | 957 | struct nfs_page *req; |
956 | int wpages = NFS_SERVER(inode)->wpages; | 958 | int wpages = NFS_SERVER(inode)->wpages; |
957 | int wsize = NFS_SERVER(inode)->wsize; | 959 | int wsize = NFS_SERVER(inode)->wsize; |
958 | int error; | 960 | int error; |
959 | 961 | ||
960 | flush_one = nfs_flush_one; | 962 | flush_one = nfs_flush_one; |
961 | if (wsize < PAGE_CACHE_SIZE) | 963 | if (wsize < PAGE_CACHE_SIZE) |
962 | flush_one = nfs_flush_multi; | 964 | flush_one = nfs_flush_multi; |
963 | /* For single writes, FLUSH_STABLE is more efficient */ | 965 | /* For single writes, FLUSH_STABLE is more efficient */ |
964 | if (npages <= wpages && npages == NFS_I(inode)->npages | 966 | if (npages <= wpages && npages == NFS_I(inode)->npages |
965 | && nfs_list_entry(head->next)->wb_bytes <= wsize) | 967 | && nfs_list_entry(head->next)->wb_bytes <= wsize) |
966 | how |= FLUSH_STABLE; | 968 | how |= FLUSH_STABLE; |
967 | 969 | ||
968 | do { | 970 | do { |
969 | nfs_coalesce_requests(head, &one_request, wpages); | 971 | nfs_coalesce_requests(head, &one_request, wpages); |
970 | req = nfs_list_entry(one_request.next); | 972 | req = nfs_list_entry(one_request.next); |
971 | error = flush_one(inode, &one_request, how); | 973 | error = flush_one(inode, &one_request, how); |
972 | if (error < 0) | 974 | if (error < 0) |
973 | goto out_err; | 975 | goto out_err; |
974 | } while (!list_empty(head)); | 976 | } while (!list_empty(head)); |
975 | return 0; | 977 | return 0; |
976 | out_err: | 978 | out_err: |
977 | while (!list_empty(head)) { | 979 | while (!list_empty(head)) { |
978 | req = nfs_list_entry(head->next); | 980 | req = nfs_list_entry(head->next); |
979 | nfs_list_remove_request(req); | 981 | nfs_list_remove_request(req); |
980 | nfs_end_page_writeback(req->wb_page); | 982 | nfs_end_page_writeback(req->wb_page); |
981 | nfs_redirty_request(req); | 983 | nfs_redirty_request(req); |
982 | nfs_clear_page_writeback(req); | 984 | nfs_clear_page_writeback(req); |
983 | } | 985 | } |
984 | return error; | 986 | return error; |
985 | } | 987 | } |
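
nfs_flush_list() falls back to nfs_flush_multi() only when the server's wsize
cannot cover a full page, and upgrades to FLUSH_STABLE when everything outstanding
fits in a single write RPC, trading one stable write for a later COMMIT round trip.
A sketch of that heuristic (the flag value below is a stand-in, not the real
FLUSH_STABLE, and the parameter names are illustrative):

#include <stdio.h>

static int pick_how(int how, int npages, int inode_npages, int wpages,
                    unsigned int first_bytes, unsigned int wsize)
{
        if (npages <= wpages && npages == inode_npages && first_bytes <= wsize)
                how |= 4;       /* stand-in for FLUSH_STABLE */
        return how;
}

int main(void)
{
        /* 3 dirty pages, all the inode has; server takes 8 pages/64k per RPC */
        printf("how = %d\n", pick_how(0, 3, 3, 8, 4096, 65536));
        return 0;
}
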
986 | 988 | ||
987 | /* | 989 | /* |
988 | * Handle a write reply that flushed part of a page. | 990 | * Handle a write reply that flushed part of a page. |
989 | */ | 991 | */ |
990 | static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) | 992 | static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) |
991 | { | 993 | { |
992 | struct nfs_write_data *data = calldata; | 994 | struct nfs_write_data *data = calldata; |
993 | struct nfs_page *req = data->req; | 995 | struct nfs_page *req = data->req; |
994 | struct page *page = req->wb_page; | 996 | struct page *page = req->wb_page; |
995 | 997 | ||
996 | dprintk("NFS: write (%s/%Ld %d@%Ld)", | 998 | dprintk("NFS: write (%s/%Ld %d@%Ld)", |
997 | req->wb_context->dentry->d_inode->i_sb->s_id, | 999 | req->wb_context->dentry->d_inode->i_sb->s_id, |
998 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 1000 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), |
999 | req->wb_bytes, | 1001 | req->wb_bytes, |
1000 | (long long)req_offset(req)); | 1002 | (long long)req_offset(req)); |
1001 | 1003 | ||
1002 | if (nfs_writeback_done(task, data) != 0) | 1004 | if (nfs_writeback_done(task, data) != 0) |
1003 | return; | 1005 | return; |
1004 | 1006 | ||
1005 | if (task->tk_status < 0) { | 1007 | if (task->tk_status < 0) { |
1006 | nfs_set_pageerror(page); | 1008 | nfs_set_pageerror(page); |
1007 | req->wb_context->error = task->tk_status; | 1009 | req->wb_context->error = task->tk_status; |
1008 | dprintk(", error = %d\n", task->tk_status); | 1010 | dprintk(", error = %d\n", task->tk_status); |
1009 | } else { | 1011 | } else { |
1010 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 1012 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
1011 | if (data->verf.committed < NFS_FILE_SYNC) { | 1013 | if (data->verf.committed < NFS_FILE_SYNC) { |
1012 | if (!NFS_NEED_COMMIT(req)) { | 1014 | if (!NFS_NEED_COMMIT(req)) { |
1013 | nfs_defer_commit(req); | 1015 | nfs_defer_commit(req); |
1014 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1016 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); |
1015 | dprintk(" defer commit\n"); | 1017 | dprintk(" defer commit\n"); |
1016 | } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) { | 1018 | } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) { |
1017 | nfs_defer_reschedule(req); | 1019 | nfs_defer_reschedule(req); |
1018 | dprintk(" server reboot detected\n"); | 1020 | dprintk(" server reboot detected\n"); |
1019 | } | 1021 | } |
1020 | } else | 1022 | } else |
1021 | #endif | 1023 | #endif |
1022 | dprintk(" OK\n"); | 1024 | dprintk(" OK\n"); |
1023 | } | 1025 | } |
1024 | 1026 | ||
1025 | if (atomic_dec_and_test(&req->wb_complete)) | 1027 | if (atomic_dec_and_test(&req->wb_complete)) |
1026 | nfs_writepage_release(req); | 1028 | nfs_writepage_release(req); |
1027 | } | 1029 | } |
1028 | 1030 | ||
1029 | static const struct rpc_call_ops nfs_write_partial_ops = { | 1031 | static const struct rpc_call_ops nfs_write_partial_ops = { |
1030 | .rpc_call_done = nfs_writeback_done_partial, | 1032 | .rpc_call_done = nfs_writeback_done_partial, |
1031 | .rpc_release = nfs_writedata_release, | 1033 | .rpc_release = nfs_writedata_release, |
1032 | }; | 1034 | }; |
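
For partial-page writes, each sub-request decrements req->wb_complete in
nfs_writeback_done_partial(); only the RPC that drops the counter to zero calls
nfs_writepage_release(). A userspace sketch of the same last-one-out counting,
using C11 atomics as a stand-in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        atomic_int complete;
        atomic_init(&complete, 3);      /* three sub-writes outstanding */

        for (int i = 0; i < 3; i++) {
                /* fetch_sub returns the old value; old - 1 == 0 means
                 * this completion was the last one. */
                if (atomic_fetch_sub(&complete, 1) - 1 == 0)
                        printf("sub-write %d was last: release page\n", i);
        }
        return 0;
}
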
1033 | 1035 | ||
1034 | /* | 1036 | /* |
1035 | * Handle a write reply that flushes a whole page. | 1037 | * Handle a write reply that flushes a whole page. |
1036 | * | 1038 | * |
1037 | * FIXME: There is an inherent race with invalidate_inode_pages and | 1039 | * FIXME: There is an inherent race with invalidate_inode_pages and |
1038 | * writebacks since the page->count is kept > 1 for as long | 1040 | * writebacks since the page->count is kept > 1 for as long |
1039 | * as the page has a write request pending. | 1041 | * as the page has a write request pending. |
1040 | */ | 1042 | */ |
1041 | static void nfs_writeback_done_full(struct rpc_task *task, void *calldata) | 1043 | static void nfs_writeback_done_full(struct rpc_task *task, void *calldata) |
1042 | { | 1044 | { |
1043 | struct nfs_write_data *data = calldata; | 1045 | struct nfs_write_data *data = calldata; |
1044 | struct nfs_page *req; | 1046 | struct nfs_page *req; |
1045 | struct page *page; | 1047 | struct page *page; |
1046 | 1048 | ||
1047 | if (nfs_writeback_done(task, data) != 0) | 1049 | if (nfs_writeback_done(task, data) != 0) |
1048 | return; | 1050 | return; |
1049 | 1051 | ||
1050 | /* Update attributes as result of writeback. */ | 1052 | /* Update attributes as result of writeback. */ |
1051 | while (!list_empty(&data->pages)) { | 1053 | while (!list_empty(&data->pages)) { |
1052 | req = nfs_list_entry(data->pages.next); | 1054 | req = nfs_list_entry(data->pages.next); |
1053 | nfs_list_remove_request(req); | 1055 | nfs_list_remove_request(req); |
1054 | page = req->wb_page; | 1056 | page = req->wb_page; |
1055 | 1057 | ||
1056 | dprintk("NFS: write (%s/%Ld %d@%Ld)", | 1058 | dprintk("NFS: write (%s/%Ld %d@%Ld)", |
1057 | req->wb_context->dentry->d_inode->i_sb->s_id, | 1059 | req->wb_context->dentry->d_inode->i_sb->s_id, |
1058 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 1060 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), |
1059 | req->wb_bytes, | 1061 | req->wb_bytes, |
1060 | (long long)req_offset(req)); | 1062 | (long long)req_offset(req)); |
1061 | 1063 | ||
1062 | if (task->tk_status < 0) { | 1064 | if (task->tk_status < 0) { |
1063 | nfs_set_pageerror(page); | 1065 | nfs_set_pageerror(page); |
1064 | req->wb_context->error = task->tk_status; | 1066 | req->wb_context->error = task->tk_status; |
1065 | nfs_end_page_writeback(page); | 1067 | nfs_end_page_writeback(page); |
1066 | nfs_inode_remove_request(req); | 1068 | nfs_inode_remove_request(req); |
1067 | dprintk(", error = %d\n", task->tk_status); | 1069 | dprintk(", error = %d\n", task->tk_status); |
1068 | goto next; | 1070 | goto next; |
1069 | } | 1071 | } |
1070 | nfs_end_page_writeback(page); | 1072 | nfs_end_page_writeback(page); |
1071 | 1073 | ||
1072 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 1074 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
1073 | if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) { | 1075 | if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) { |
1074 | nfs_inode_remove_request(req); | 1076 | nfs_inode_remove_request(req); |
1075 | dprintk(" OK\n"); | 1077 | dprintk(" OK\n"); |
1076 | goto next; | 1078 | goto next; |
1077 | } | 1079 | } |
1078 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1080 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); |
1079 | nfs_mark_request_commit(req); | 1081 | nfs_mark_request_commit(req); |
1080 | dprintk(" marked for commit\n"); | 1082 | dprintk(" marked for commit\n"); |
1081 | #else | 1083 | #else |
1082 | nfs_inode_remove_request(req); | 1084 | nfs_inode_remove_request(req); |
1083 | #endif | 1085 | #endif |
1084 | next: | 1086 | next: |
1085 | nfs_clear_page_writeback(req); | 1087 | nfs_clear_page_writeback(req); |
1086 | } | 1088 | } |
1087 | } | 1089 | } |
1088 | 1090 | ||
1089 | static const struct rpc_call_ops nfs_write_full_ops = { | 1091 | static const struct rpc_call_ops nfs_write_full_ops = { |
1090 | .rpc_call_done = nfs_writeback_done_full, | 1092 | .rpc_call_done = nfs_writeback_done_full, |
1091 | .rpc_release = nfs_writedata_release, | 1093 | .rpc_release = nfs_writedata_release, |
1092 | }; | 1094 | }; |
1093 | 1095 | ||
1094 | 1096 | ||
1095 | /* | 1097 | /* |
1096 | * This function is called when the WRITE call is complete. | 1098 | * This function is called when the WRITE call is complete. |
1097 | */ | 1099 | */ |
1098 | int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) | 1100 | int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) |
1099 | { | 1101 | { |
1100 | struct nfs_writeargs *argp = &data->args; | 1102 | struct nfs_writeargs *argp = &data->args; |
1101 | struct nfs_writeres *resp = &data->res; | 1103 | struct nfs_writeres *resp = &data->res; |
1102 | int status; | 1104 | int status; |
1103 | 1105 | ||
1104 | dprintk("NFS: %5u nfs_writeback_done (status %d)\n", | 1106 | dprintk("NFS: %5u nfs_writeback_done (status %d)\n", |
1105 | task->tk_pid, task->tk_status); | 1107 | task->tk_pid, task->tk_status); |
1106 | 1108 | ||
1107 | /* | 1109 | /* |
1108 | * ->write_done will attempt to use post-op attributes to detect | 1110 | * ->write_done will attempt to use post-op attributes to detect |
1109 | * conflicting writes by other clients. A strict interpretation | 1111 | * conflicting writes by other clients. A strict interpretation |
1110 | * of close-to-open would allow us to continue caching even if | 1112 | * of close-to-open would allow us to continue caching even if |
1111 | * another writer had changed the file, but some applications | 1113 | * another writer had changed the file, but some applications |
1112 | * depend on tighter cache coherency when writing. | 1114 | * depend on tighter cache coherency when writing. |
1113 | */ | 1115 | */ |
1114 | status = NFS_PROTO(data->inode)->write_done(task, data); | 1116 | status = NFS_PROTO(data->inode)->write_done(task, data); |
1115 | if (status != 0) | 1117 | if (status != 0) |
1116 | return status; | 1118 | return status; |
1117 | nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count); | 1119 | nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count); |
1118 | 1120 | ||
1119 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 1121 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
1120 | if (resp->verf->committed < argp->stable && task->tk_status >= 0) { | 1122 | if (resp->verf->committed < argp->stable && task->tk_status >= 0) { |
1121 | /* We tried a write call, but the server did not | 1123 | /* We tried a write call, but the server did not |
1122 | * commit data to stable storage even though we | 1124 | * commit data to stable storage even though we |
1123 | * requested it. | 1125 | * requested it. |
1124 | * Note: There is a known bug in Tru64 < 5.0 in which | 1126 | * Note: There is a known bug in Tru64 < 5.0 in which |
1125 | * the server reports NFS_DATA_SYNC, but performs | 1127 | * the server reports NFS_DATA_SYNC, but performs |
1126 | * NFS_FILE_SYNC. We therefore implement this checking | 1128 | * NFS_FILE_SYNC. We therefore implement this checking |
1127 | * as a dprintk() in order to avoid filling syslog. | 1129 | * as a dprintk() in order to avoid filling syslog. |
1128 | */ | 1130 | */ |
1129 | static unsigned long complain; | 1131 | static unsigned long complain; |
1130 | 1132 | ||
1131 | if (time_before(complain, jiffies)) { | 1133 | if (time_before(complain, jiffies)) { |
1132 | dprintk("NFS: faulty NFS server %s:" | 1134 | dprintk("NFS: faulty NFS server %s:" |
1133 | " (committed = %d) != (stable = %d)\n", | 1135 | " (committed = %d) != (stable = %d)\n", |
1134 | NFS_SERVER(data->inode)->nfs_client->cl_hostname, | 1136 | NFS_SERVER(data->inode)->nfs_client->cl_hostname, |
1135 | resp->verf->committed, argp->stable); | 1137 | resp->verf->committed, argp->stable); |
1136 | complain = jiffies + 300 * HZ; | 1138 | complain = jiffies + 300 * HZ; |
1137 | } | 1139 | } |
1138 | } | 1140 | } |
1139 | #endif | 1141 | #endif |
1140 | /* Is this a short write? */ | 1142 | /* Is this a short write? */ |
1141 | if (task->tk_status >= 0 && resp->count < argp->count) { | 1143 | if (task->tk_status >= 0 && resp->count < argp->count) { |
1142 | static unsigned long complain; | 1144 | static unsigned long complain; |
1143 | 1145 | ||
1144 | nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE); | 1146 | nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE); |
1145 | 1147 | ||
1146 | /* Has the server at least made some progress? */ | 1148 | /* Has the server at least made some progress? */ |
1147 | if (resp->count != 0) { | 1149 | if (resp->count != 0) { |
1148 | /* Was this an NFSv2 write or an NFSv3 stable write? */ | 1150 | /* Was this an NFSv2 write or an NFSv3 stable write? */ |
1149 | if (resp->verf->committed != NFS_UNSTABLE) { | 1151 | if (resp->verf->committed != NFS_UNSTABLE) { |
1150 | /* Resend from where the server left off */ | 1152 | /* Resend from where the server left off */ |
1151 | argp->offset += resp->count; | 1153 | argp->offset += resp->count; |
1152 | argp->pgbase += resp->count; | 1154 | argp->pgbase += resp->count; |
1153 | argp->count -= resp->count; | 1155 | argp->count -= resp->count; |
1154 | } else { | 1156 | } else { |
1155 | /* Resend as a stable write in order to avoid | 1157 | /* Resend as a stable write in order to avoid |
1156 | * headaches in the case of a server crash. | 1158 | * headaches in the case of a server crash. |
1157 | */ | 1159 | */ |
1158 | argp->stable = NFS_FILE_SYNC; | 1160 | argp->stable = NFS_FILE_SYNC; |
1159 | } | 1161 | } |
1160 | rpc_restart_call(task); | 1162 | rpc_restart_call(task); |
1161 | return -EAGAIN; | 1163 | return -EAGAIN; |
1162 | } | 1164 | } |
1163 | if (time_before(complain, jiffies)) { | 1165 | if (time_before(complain, jiffies)) { |
1164 | printk(KERN_WARNING | 1166 | printk(KERN_WARNING |
1165 | "NFS: Server wrote zero bytes, expected %u.\n", | 1167 | "NFS: Server wrote zero bytes, expected %u.\n", |
1166 | argp->count); | 1168 | argp->count); |
1167 | complain = jiffies + 300 * HZ; | 1169 | complain = jiffies + 300 * HZ; |
1168 | } | 1170 | } |
1169 | /* Can't do anything about it except throw an error. */ | 1171 | /* Can't do anything about it except throw an error. */ |
1170 | task->tk_status = -EIO; | 1172 | task->tk_status = -EIO; |
1171 | } | 1173 | } |
1172 | return 0; | 1174 | return 0; |
1173 | } | 1175 | } |
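
When the reply is short (resp->count < argp->count), nfs_writeback_done() slides
the argument window forward by what the server acknowledged and restarts the call;
an unstable short write is instead retried as NFS_FILE_SYNC so a server crash
cannot lose the remainder. A worked sketch of the window arithmetic, with
hypothetical numbers:

#include <stdio.h>

int main(void)
{
        unsigned int count = 16384, pgbase = 0, resp_count = 4096;
        unsigned long long offset = 65536;

        offset += resp_count;   /* resend from where the server left off */
        pgbase += resp_count;
        count  -= resp_count;
        printf("retry: %u bytes at offset %llu (pgbase %u)\n",
               count, offset, pgbase);
        return 0;               /* retry: 12288 bytes at offset 69632 */
}
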
1174 | 1176 | ||
1175 | 1177 | ||
1176 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 1178 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
1177 | void nfs_commit_release(void *wdata) | 1179 | void nfs_commit_release(void *wdata) |
1178 | { | 1180 | { |
1179 | nfs_commit_free(wdata); | 1181 | nfs_commit_free(wdata); |
1180 | } | 1182 | } |
1181 | 1183 | ||
1182 | /* | 1184 | /* |
1183 | * Set up the argument/result storage required for the RPC call. | 1185 | * Set up the argument/result storage required for the RPC call. |
1184 | */ | 1186 | */ |
1185 | static void nfs_commit_rpcsetup(struct list_head *head, | 1187 | static void nfs_commit_rpcsetup(struct list_head *head, |
1186 | struct nfs_write_data *data, | 1188 | struct nfs_write_data *data, |
1187 | int how) | 1189 | int how) |
1188 | { | 1190 | { |
1189 | struct nfs_page *first; | 1191 | struct nfs_page *first; |
1190 | struct inode *inode; | 1192 | struct inode *inode; |
1191 | int flags; | 1193 | int flags; |
1192 | 1194 | ||
1193 | /* Set up the RPC argument and reply structs | 1195 | /* Set up the RPC argument and reply structs |
1194 | * NB: take care not to mess about with data->commit et al. */ | 1196 | * NB: take care not to mess about with data->commit et al. */ |
1195 | 1197 | ||
1196 | list_splice_init(head, &data->pages); | 1198 | list_splice_init(head, &data->pages); |
1197 | first = nfs_list_entry(data->pages.next); | 1199 | first = nfs_list_entry(data->pages.next); |
1198 | inode = first->wb_context->dentry->d_inode; | 1200 | inode = first->wb_context->dentry->d_inode; |
1199 | 1201 | ||
1200 | data->inode = inode; | 1202 | data->inode = inode; |
1201 | data->cred = first->wb_context->cred; | 1203 | data->cred = first->wb_context->cred; |
1202 | 1204 | ||
1203 | data->args.fh = NFS_FH(data->inode); | 1205 | data->args.fh = NFS_FH(data->inode); |
1204 | /* Note: we always request a commit of the entire inode */ | 1206 | /* Note: we always request a commit of the entire inode */ |
1205 | data->args.offset = 0; | 1207 | data->args.offset = 0; |
1206 | data->args.count = 0; | 1208 | data->args.count = 0; |
1207 | data->res.count = 0; | 1209 | data->res.count = 0; |
1208 | data->res.fattr = &data->fattr; | 1210 | data->res.fattr = &data->fattr; |
1209 | data->res.verf = &data->verf; | 1211 | data->res.verf = &data->verf; |
1210 | nfs_fattr_init(&data->fattr); | 1212 | nfs_fattr_init(&data->fattr); |
1211 | 1213 | ||
1212 | /* Set up the initial task struct. */ | 1214 | /* Set up the initial task struct. */ |
1213 | flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; | 1215 | flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; |
1214 | rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data); | 1216 | rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data); |
1215 | NFS_PROTO(inode)->commit_setup(data, how); | 1217 | NFS_PROTO(inode)->commit_setup(data, how); |
1216 | 1218 | ||
1217 | data->task.tk_priority = flush_task_priority(how); | 1219 | data->task.tk_priority = flush_task_priority(how); |
1218 | data->task.tk_cookie = (unsigned long)inode; | 1220 | data->task.tk_cookie = (unsigned long)inode; |
1219 | 1221 | ||
1220 | dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid); | 1222 | dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid); |
1221 | } | 1223 | } |
1222 | 1224 | ||
1223 | /* | 1225 | /* |
1224 | * Commit dirty pages | 1226 | * Commit dirty pages |
1225 | */ | 1227 | */ |
1226 | static int | 1228 | static int |
1227 | nfs_commit_list(struct inode *inode, struct list_head *head, int how) | 1229 | nfs_commit_list(struct inode *inode, struct list_head *head, int how) |
1228 | { | 1230 | { |
1229 | struct nfs_write_data *data; | 1231 | struct nfs_write_data *data; |
1230 | struct nfs_page *req; | 1232 | struct nfs_page *req; |
1231 | 1233 | ||
1232 | data = nfs_commit_alloc(); | 1234 | data = nfs_commit_alloc(); |
1233 | 1235 | ||
1234 | if (!data) | 1236 | if (!data) |
1235 | goto out_bad; | 1237 | goto out_bad; |
1236 | 1238 | ||
1237 | /* Set up the argument struct */ | 1239 | /* Set up the argument struct */ |
1238 | nfs_commit_rpcsetup(head, data, how); | 1240 | nfs_commit_rpcsetup(head, data, how); |
1239 | 1241 | ||
1240 | nfs_execute_write(data); | 1242 | nfs_execute_write(data); |
1241 | return 0; | 1243 | return 0; |
1242 | out_bad: | 1244 | out_bad: |
1243 | while (!list_empty(head)) { | 1245 | while (!list_empty(head)) { |
1244 | req = nfs_list_entry(head->next); | 1246 | req = nfs_list_entry(head->next); |
1245 | nfs_list_remove_request(req); | 1247 | nfs_list_remove_request(req); |
1246 | nfs_mark_request_commit(req); | 1248 | nfs_mark_request_commit(req); |
1247 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 1249 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
1248 | nfs_clear_page_writeback(req); | 1250 | nfs_clear_page_writeback(req); |
1249 | } | 1251 | } |
1250 | return -ENOMEM; | 1252 | return -ENOMEM; |
1251 | } | 1253 | } |
1252 | 1254 | ||
1253 | /* | 1255 | /* |
1254 | * COMMIT call returned | 1256 | * COMMIT call returned |
1255 | */ | 1257 | */ |
1256 | static void nfs_commit_done(struct rpc_task *task, void *calldata) | 1258 | static void nfs_commit_done(struct rpc_task *task, void *calldata) |
1257 | { | 1259 | { |
1258 | struct nfs_write_data *data = calldata; | 1260 | struct nfs_write_data *data = calldata; |
1259 | struct nfs_page *req; | 1261 | struct nfs_page *req; |
1260 | 1262 | ||
1261 | dprintk("NFS: %5u nfs_commit_done (status %d)\n", | 1263 | dprintk("NFS: %5u nfs_commit_done (status %d)\n", |
1262 | task->tk_pid, task->tk_status); | 1264 | task->tk_pid, task->tk_status); |
1263 | 1265 | ||
1264 | /* Call the NFS version-specific code */ | 1266 | /* Call the NFS version-specific code */ |
1265 | if (NFS_PROTO(data->inode)->commit_done(task, data) != 0) | 1267 | if (NFS_PROTO(data->inode)->commit_done(task, data) != 0) |
1266 | return; | 1268 | return; |
1267 | 1269 | ||
1268 | while (!list_empty(&data->pages)) { | 1270 | while (!list_empty(&data->pages)) { |
1269 | req = nfs_list_entry(data->pages.next); | 1271 | req = nfs_list_entry(data->pages.next); |
1270 | nfs_list_remove_request(req); | 1272 | nfs_list_remove_request(req); |
1271 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 1273 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
1272 | 1274 | ||
1273 | dprintk("NFS: commit (%s/%Ld %d@%Ld)", | 1275 | dprintk("NFS: commit (%s/%Ld %d@%Ld)", |
1274 | req->wb_context->dentry->d_inode->i_sb->s_id, | 1276 | req->wb_context->dentry->d_inode->i_sb->s_id, |
1275 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 1277 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), |
1276 | req->wb_bytes, | 1278 | req->wb_bytes, |
1277 | (long long)req_offset(req)); | 1279 | (long long)req_offset(req)); |
1278 | if (task->tk_status < 0) { | 1280 | if (task->tk_status < 0) { |
1279 | req->wb_context->error = task->tk_status; | 1281 | req->wb_context->error = task->tk_status; |
1280 | nfs_inode_remove_request(req); | 1282 | nfs_inode_remove_request(req); |
1281 | dprintk(", error = %d\n", task->tk_status); | 1283 | dprintk(", error = %d\n", task->tk_status); |
1282 | goto next; | 1284 | goto next; |
1283 | } | 1285 | } |
1284 | 1286 | ||
1285 | /* Okay, COMMIT succeeded, apparently. Check the verifier | 1287 | /* Okay, COMMIT succeeded, apparently. Check the verifier |
1286 | * returned by the server against all stored verfs. */ | 1288 | * returned by the server against all stored verfs. */ |
1287 | if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) { | 1289 | if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) { |
1288 | /* We have a match */ | 1290 | /* We have a match */ |
1289 | nfs_inode_remove_request(req); | 1291 | nfs_inode_remove_request(req); |
1290 | dprintk(" OK\n"); | 1292 | dprintk(" OK\n"); |
1291 | goto next; | 1293 | goto next; |
1292 | } | 1294 | } |
1293 | /* We have a mismatch. Write the page again */ | 1295 | /* We have a mismatch. Write the page again */ |
1294 | dprintk(" mismatch\n"); | 1296 | dprintk(" mismatch\n"); |
1295 | nfs_redirty_request(req); | 1297 | nfs_redirty_request(req); |
1296 | next: | 1298 | next: |
1297 | nfs_clear_page_writeback(req); | 1299 | nfs_clear_page_writeback(req); |
1298 | } | 1300 | } |
1299 | } | 1301 | } |
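
nfs_commit_done() compares the verifier saved from each request's WRITE reply
against the verifier in the COMMIT reply; equality proves the server still held
the unstable data, while a mismatch (typically a server reboot) sends the request
back to the dirty list. A minimal sketch of the comparison, with an illustrative
verifier type:

#include <string.h>

struct verf { unsigned char verifier[8]; };

static int commit_covers_write(const struct verf *wb, const struct verf *commit)
{
        return memcmp(wb->verifier, commit->verifier,
                      sizeof(commit->verifier)) == 0;   /* match: safe to drop */
}

int main(void)
{
        struct verf a = { { 1, 2, 3, 4, 5, 6, 7, 8 } }, b = a;
        return commit_covers_write(&a, &b) ? 0 : 1;     /* match: exit 0 */
}
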
1300 | 1302 | ||
1301 | static const struct rpc_call_ops nfs_commit_ops = { | 1303 | static const struct rpc_call_ops nfs_commit_ops = { |
1302 | .rpc_call_done = nfs_commit_done, | 1304 | .rpc_call_done = nfs_commit_done, |
1303 | .rpc_release = nfs_commit_release, | 1305 | .rpc_release = nfs_commit_release, |
1304 | }; | 1306 | }; |
1305 | #else | 1307 | #else |
1306 | static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) | 1308 | static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) |
1307 | { | 1309 | { |
1308 | return 0; | 1310 | return 0; |
1309 | } | 1311 | } |
1310 | #endif | 1312 | #endif |
1311 | 1313 | ||
1312 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) | 1314 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) |
1313 | { | 1315 | { |
1314 | struct nfs_inode *nfsi = NFS_I(mapping->host); | 1316 | struct nfs_inode *nfsi = NFS_I(mapping->host); |
1315 | LIST_HEAD(head); | 1317 | LIST_HEAD(head); |
1316 | long res; | 1318 | long res; |
1317 | 1319 | ||
1318 | spin_lock(&nfsi->req_lock); | 1320 | spin_lock(&nfsi->req_lock); |
1319 | res = nfs_scan_dirty(mapping, wbc, &head); | 1321 | res = nfs_scan_dirty(mapping, wbc, &head); |
1320 | spin_unlock(&nfsi->req_lock); | 1322 | spin_unlock(&nfsi->req_lock); |
1321 | if (res) { | 1323 | if (res) { |
1322 | int error = nfs_flush_list(mapping->host, &head, res, how); | 1324 | int error = nfs_flush_list(mapping->host, &head, res, how); |
1323 | if (error < 0) | 1325 | if (error < 0) |
1324 | return error; | 1326 | return error; |
1325 | } | 1327 | } |
1326 | return res; | 1328 | return res; |
1327 | } | 1329 | } |
1328 | 1330 | ||
1329 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 1331 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
1330 | int nfs_commit_inode(struct inode *inode, int how) | 1332 | int nfs_commit_inode(struct inode *inode, int how) |
1331 | { | 1333 | { |
1332 | struct nfs_inode *nfsi = NFS_I(inode); | 1334 | struct nfs_inode *nfsi = NFS_I(inode); |
1333 | LIST_HEAD(head); | 1335 | LIST_HEAD(head); |
1334 | int res; | 1336 | int res; |
1335 | 1337 | ||
1336 | spin_lock(&nfsi->req_lock); | 1338 | spin_lock(&nfsi->req_lock); |
1337 | res = nfs_scan_commit(inode, &head, 0, 0); | 1339 | res = nfs_scan_commit(inode, &head, 0, 0); |
1338 | spin_unlock(&nfsi->req_lock); | 1340 | spin_unlock(&nfsi->req_lock); |
1339 | if (res) { | 1341 | if (res) { |
1340 | int error = nfs_commit_list(inode, &head, how); | 1342 | int error = nfs_commit_list(inode, &head, how); |
1341 | if (error < 0) | 1343 | if (error < 0) |
1342 | return error; | 1344 | return error; |
1343 | } | 1345 | } |
1344 | return res; | 1346 | return res; |
1345 | } | 1347 | } |
1346 | #endif | 1348 | #endif |
1347 | 1349 | ||
1348 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) | 1350 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) |
1349 | { | 1351 | { |
1350 | struct inode *inode = mapping->host; | 1352 | struct inode *inode = mapping->host; |
1351 | struct nfs_inode *nfsi = NFS_I(inode); | 1353 | struct nfs_inode *nfsi = NFS_I(inode); |
1352 | unsigned long idx_start, idx_end; | 1354 | unsigned long idx_start, idx_end; |
1353 | unsigned int npages = 0; | 1355 | unsigned int npages = 0; |
1354 | LIST_HEAD(head); | 1356 | LIST_HEAD(head); |
1355 | int nocommit = how & FLUSH_NOCOMMIT; | 1357 | int nocommit = how & FLUSH_NOCOMMIT; |
1356 | long pages, ret; | 1358 | long pages, ret; |
1357 | 1359 | ||
1358 | /* FIXME */ | 1360 | /* FIXME */ |
1359 | if (wbc->range_cyclic) | 1361 | if (wbc->range_cyclic) |
1360 | idx_start = 0; | 1362 | idx_start = 0; |
1361 | else { | 1363 | else { |
1362 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; | 1364 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; |
1363 | idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; | 1365 | idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; |
1364 | if (idx_end > idx_start) { | 1366 | if (idx_end > idx_start) { |
1365 | unsigned long l_npages = 1 + idx_end - idx_start; | 1367 | unsigned long l_npages = 1 + idx_end - idx_start; |
1366 | npages = l_npages; | 1368 | npages = l_npages; |
1367 | if (sizeof(npages) != sizeof(l_npages) && | 1369 | if (sizeof(npages) != sizeof(l_npages) && |
1368 | (unsigned long)npages != l_npages) | 1370 | (unsigned long)npages != l_npages) |
1369 | npages = 0; | 1371 | npages = 0; |
1370 | } | 1372 | } |
1371 | } | 1373 | } |
1372 | how &= ~FLUSH_NOCOMMIT; | 1374 | how &= ~FLUSH_NOCOMMIT; |
1373 | spin_lock(&nfsi->req_lock); | 1375 | spin_lock(&nfsi->req_lock); |
1374 | do { | 1376 | do { |
1375 | wbc->pages_skipped = 0; | 1377 | wbc->pages_skipped = 0; |
1376 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); | 1378 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); |
1377 | if (ret != 0) | 1379 | if (ret != 0) |
1378 | continue; | 1380 | continue; |
1379 | pages = nfs_scan_dirty(mapping, wbc, &head); | 1381 | pages = nfs_scan_dirty(mapping, wbc, &head); |
1380 | if (pages != 0) { | 1382 | if (pages != 0) { |
1381 | spin_unlock(&nfsi->req_lock); | 1383 | spin_unlock(&nfsi->req_lock); |
1382 | if (how & FLUSH_INVALIDATE) { | 1384 | if (how & FLUSH_INVALIDATE) { |
1383 | nfs_cancel_dirty_list(&head); | 1385 | nfs_cancel_dirty_list(&head); |
1384 | ret = pages; | 1386 | ret = pages; |
1385 | } else | 1387 | } else |
1386 | ret = nfs_flush_list(inode, &head, pages, how); | 1388 | ret = nfs_flush_list(inode, &head, pages, how); |
1387 | spin_lock(&nfsi->req_lock); | 1389 | spin_lock(&nfsi->req_lock); |
1388 | continue; | 1390 | continue; |
1389 | } | 1391 | } |
1390 | if (wbc->pages_skipped != 0) | 1392 | if (wbc->pages_skipped != 0) |
1391 | continue; | 1393 | continue; |
1392 | if (nocommit) | 1394 | if (nocommit) |
1393 | break; | 1395 | break; |
1394 | pages = nfs_scan_commit(inode, &head, idx_start, npages); | 1396 | pages = nfs_scan_commit(inode, &head, idx_start, npages); |
1395 | if (pages == 0) { | 1397 | if (pages == 0) { |
1396 | if (wbc->pages_skipped != 0) | 1398 | if (wbc->pages_skipped != 0) |
1397 | continue; | 1399 | continue; |
1398 | break; | 1400 | break; |
1399 | } | 1401 | } |
1400 | if (how & FLUSH_INVALIDATE) { | 1402 | if (how & FLUSH_INVALIDATE) { |
1401 | spin_unlock(&nfsi->req_lock); | 1403 | spin_unlock(&nfsi->req_lock); |
1402 | nfs_cancel_commit_list(&head); | 1404 | nfs_cancel_commit_list(&head); |
1403 | ret = pages; | 1405 | ret = pages; |
1404 | spin_lock(&nfsi->req_lock); | 1406 | spin_lock(&nfsi->req_lock); |
1405 | continue; | 1407 | continue; |
1406 | } | 1408 | } |
1407 | pages += nfs_scan_commit(inode, &head, 0, 0); | 1409 | pages += nfs_scan_commit(inode, &head, 0, 0); |
1408 | spin_unlock(&nfsi->req_lock); | 1410 | spin_unlock(&nfsi->req_lock); |
1409 | ret = nfs_commit_list(inode, &head, how); | 1411 | ret = nfs_commit_list(inode, &head, how); |
1410 | spin_lock(&nfsi->req_lock); | 1412 | spin_lock(&nfsi->req_lock); |
1411 | } while (ret >= 0); | 1413 | } while (ret >= 0); |
1412 | spin_unlock(&nfsi->req_lock); | 1414 | spin_unlock(&nfsi->req_lock); |
1413 | return ret; | 1415 | return ret; |
1414 | } | 1416 | } |
1415 | 1417 | ||
1416 | /* | 1418 | /* |
1417 | * flush the inode to disk. | 1419 | * flush the inode to disk. |
1418 | */ | 1420 | */ |
1419 | int nfs_wb_all(struct inode *inode) | 1421 | int nfs_wb_all(struct inode *inode) |
1420 | { | 1422 | { |
1421 | struct address_space *mapping = inode->i_mapping; | 1423 | struct address_space *mapping = inode->i_mapping; |
1422 | struct writeback_control wbc = { | 1424 | struct writeback_control wbc = { |
1423 | .bdi = mapping->backing_dev_info, | 1425 | .bdi = mapping->backing_dev_info, |
1424 | .sync_mode = WB_SYNC_ALL, | 1426 | .sync_mode = WB_SYNC_ALL, |
1425 | .nr_to_write = LONG_MAX, | 1427 | .nr_to_write = LONG_MAX, |
1426 | .for_writepages = 1, | 1428 | .for_writepages = 1, |
1427 | .range_cyclic = 1, | 1429 | .range_cyclic = 1, |
1428 | }; | 1430 | }; |
1429 | int ret; | 1431 | int ret; |
1430 | 1432 | ||
1431 | ret = generic_writepages(mapping, &wbc); | 1433 | ret = generic_writepages(mapping, &wbc); |
1432 | if (ret < 0) | 1434 | if (ret < 0) |
1433 | goto out; | 1435 | goto out; |
1434 | ret = nfs_sync_mapping_wait(mapping, &wbc, 0); | 1436 | ret = nfs_sync_mapping_wait(mapping, &wbc, 0); |
1435 | if (ret >= 0) | 1437 | if (ret >= 0) |
1436 | return 0; | 1438 | return 0; |
1437 | out: | 1439 | out: |
1438 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | 1440 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); |
1439 | return ret; | 1441 | return ret; |
1440 | } | 1442 | } |
1441 | 1443 | ||
1442 | int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how) | 1444 | int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how) |
1443 | { | 1445 | { |
1444 | struct writeback_control wbc = { | 1446 | struct writeback_control wbc = { |
1445 | .bdi = mapping->backing_dev_info, | 1447 | .bdi = mapping->backing_dev_info, |
1446 | .sync_mode = WB_SYNC_ALL, | 1448 | .sync_mode = WB_SYNC_ALL, |
1447 | .nr_to_write = LONG_MAX, | 1449 | .nr_to_write = LONG_MAX, |
1448 | .range_start = range_start, | 1450 | .range_start = range_start, |
1449 | .range_end = range_end, | 1451 | .range_end = range_end, |
1450 | .for_writepages = 1, | 1452 | .for_writepages = 1, |
1451 | }; | 1453 | }; |
1452 | int ret; | 1454 | int ret; |
1453 | 1455 | ||
1454 | if (!(how & FLUSH_NOWRITEPAGE)) { | 1456 | if (!(how & FLUSH_NOWRITEPAGE)) { |
1455 | ret = generic_writepages(mapping, &wbc); | 1457 | ret = generic_writepages(mapping, &wbc); |
1456 | if (ret < 0) | 1458 | if (ret < 0) |
1457 | goto out; | 1459 | goto out; |
1458 | } | 1460 | } |
1459 | ret = nfs_sync_mapping_wait(mapping, &wbc, how); | 1461 | ret = nfs_sync_mapping_wait(mapping, &wbc, how); |
1460 | if (ret >= 0) | 1462 | if (ret >= 0) |
1461 | return 0; | 1463 | return 0; |
1462 | out: | 1464 | out: |
1463 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | 1465 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); |
1464 | return ret; | 1466 | return ret; |
1465 | } | 1467 | } |
1466 | 1468 | ||
1467 | int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) | 1469 | int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) |
1468 | { | 1470 | { |
1469 | loff_t range_start = page_offset(page); | 1471 | loff_t range_start = page_offset(page); |
1470 | loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); | 1472 | loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); |
1471 | struct writeback_control wbc = { | 1473 | struct writeback_control wbc = { |
1472 | .bdi = page->mapping->backing_dev_info, | 1474 | .bdi = page->mapping->backing_dev_info, |
1473 | .sync_mode = WB_SYNC_ALL, | 1475 | .sync_mode = WB_SYNC_ALL, |
1474 | .nr_to_write = LONG_MAX, | 1476 | .nr_to_write = LONG_MAX, |
1475 | .range_start = range_start, | 1477 | .range_start = range_start, |
1476 | .range_end = range_end, | 1478 | .range_end = range_end, |
1477 | }; | 1479 | }; |
1478 | int ret; | 1480 | int ret; |
1479 | 1481 | ||
1480 | BUG_ON(!PageLocked(page)); | 1482 | BUG_ON(!PageLocked(page)); |
1481 | if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) { | 1483 | if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) { |
1482 | ret = nfs_writepage_locked(page, &wbc); | 1484 | ret = nfs_writepage_locked(page, &wbc); |
1483 | if (ret < 0) | 1485 | if (ret < 0) |
1484 | goto out; | 1486 | goto out; |
1485 | } | 1487 | } |
1486 | if (!PagePrivate(page)) | 1488 | if (!PagePrivate(page)) |
1487 | return 0; | 1489 | return 0; |
1488 | ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); | 1490 | ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); |
1489 | if (ret >= 0) | 1491 | if (ret >= 0) |
1490 | return 0; | 1492 | return 0; |
1491 | out: | 1493 | out: |
1492 | __mark_inode_dirty(inode, I_DIRTY_PAGES); | 1494 | __mark_inode_dirty(inode, I_DIRTY_PAGES); |
1493 | return ret; | 1495 | return ret; |
1494 | } | 1496 | } |
1495 | 1497 | ||
1496 | /* | 1498 | /* |
1497 | * Write back all requests on one page - we do this before reading it. | 1499 | * Write back all requests on one page - we do this before reading it. |
1498 | */ | 1500 | */ |
1499 | int nfs_wb_page(struct inode *inode, struct page* page) | 1501 | int nfs_wb_page(struct inode *inode, struct page* page) |
1500 | { | 1502 | { |
1501 | return nfs_wb_page_priority(inode, page, FLUSH_STABLE); | 1503 | return nfs_wb_page_priority(inode, page, FLUSH_STABLE); |
1502 | } | 1504 | } |
1503 | 1505 | ||
1504 | int nfs_set_page_dirty(struct page *page) | 1506 | int nfs_set_page_dirty(struct page *page) |
1505 | { | 1507 | { |
1506 | struct nfs_page *req; | 1508 | struct nfs_page *req; |
1507 | 1509 | ||
1508 | req = nfs_page_find_request(page); | 1510 | req = nfs_page_find_request(page); |
1509 | if (req != NULL) { | 1511 | if (req != NULL) { |
1510 | /* Mark any existing write requests for flushing */ | 1512 | /* Mark any existing write requests for flushing */ |
1511 | set_bit(PG_NEED_FLUSH, &req->wb_flags); | 1513 | set_bit(PG_NEED_FLUSH, &req->wb_flags); |
1512 | nfs_release_request(req); | 1514 | nfs_release_request(req); |
1513 | } | 1515 | } |
1514 | return __set_page_dirty_nobuffers(page); | 1516 | return __set_page_dirty_nobuffers(page); |
1515 | } | 1517 | } |
1516 | 1518 | ||
1517 | 1519 | ||
1518 | int __init nfs_init_writepagecache(void) | 1520 | int __init nfs_init_writepagecache(void) |
1519 | { | 1521 | { |
1520 | nfs_wdata_cachep = kmem_cache_create("nfs_write_data", | 1522 | nfs_wdata_cachep = kmem_cache_create("nfs_write_data", |
1521 | sizeof(struct nfs_write_data), | 1523 | sizeof(struct nfs_write_data), |
1522 | 0, SLAB_HWCACHE_ALIGN, | 1524 | 0, SLAB_HWCACHE_ALIGN, |
1523 | NULL, NULL); | 1525 | NULL, NULL); |
1524 | if (nfs_wdata_cachep == NULL) | 1526 | if (nfs_wdata_cachep == NULL) |
1525 | return -ENOMEM; | 1527 | return -ENOMEM; |
1526 | 1528 | ||
1527 | nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, | 1529 | nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, |
1528 | nfs_wdata_cachep); | 1530 | nfs_wdata_cachep); |
1529 | if (nfs_wdata_mempool == NULL) | 1531 | if (nfs_wdata_mempool == NULL) |
1530 | return -ENOMEM; | 1532 | return -ENOMEM; |
1531 | 1533 | ||
1532 | nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, | 1534 | nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, |
1533 | nfs_wdata_cachep); | 1535 | nfs_wdata_cachep); |
1534 | if (nfs_commit_mempool == NULL) | 1536 | if (nfs_commit_mempool == NULL) |
1535 | return -ENOMEM; | 1537 | return -ENOMEM; |
1536 | 1538 | ||
1537 | /* | 1539 | /* |
1538 | * NFS congestion size, scale with available memory. | 1540 | * NFS congestion size, scale with available memory. |
1539 | * | 1541 | * |
1540 | * 64MB: 8192k | 1542 | * 64MB: 8192k |
1541 | * 128MB: 11585k | 1543 | * 128MB: 11585k |
1542 | * 256MB: 16384k | 1544 | * 256MB: 16384k |
1543 | * 512MB: 23170k | 1545 | * 512MB: 23170k |
1544 | * 1GB: 32768k | 1546 | * 1GB: 32768k |
1545 | * 2GB: 46340k | 1547 | * 2GB: 46340k |
1546 | * 4GB: 65536k | 1548 | * 4GB: 65536k |
1547 | * 8GB: 92681k | 1549 | * 8GB: 92681k |
1548 | * 16GB: 131072k | 1550 | * 16GB: 131072k |
1549 | * | 1551 | * |
1550 | * This allows larger machines to have larger/more transfers. | 1552 | * This allows larger machines to have larger/more transfers. |
1551 | * Limit the default to 256M | 1553 | * Limit the default to 256M |
1552 | */ | 1554 | */ |
1553 | nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); | 1555 | nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); |
1554 | if (nfs_congestion_kb > 256*1024) | 1556 | if (nfs_congestion_kb > 256*1024) |
1555 | nfs_congestion_kb = 256*1024; | 1557 | nfs_congestion_kb = 256*1024; |
1556 | 1558 | ||
1557 | return 0; | 1559 | return 0; |
1558 | } | 1560 | } |
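
The table in the comment follows from nfs_congestion_kb =
(16 * sqrt(totalram_pages)) << (PAGE_SHIFT - 10), clamped at 256 MB. A standalone
check, assuming 4 KiB pages so PAGE_SHIFT is 12, with memory sizes chosen so the
integer square root is exact (build with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
        unsigned long mb[] = { 64, 256, 1024, 4096 };

        for (int i = 0; i < 4; i++) {
                unsigned long pages = mb[i] << (20 - 12);   /* totalram_pages */
                unsigned long kb = (16 * (unsigned long)sqrt((double)pages))
                                        << (12 - 10);
                if (kb > 256 * 1024)
                        kb = 256 * 1024;                    /* 256 MB cap */
                printf("%4luMB: %luk\n", mb[i], kb);
        }
        return 0;  /* 64MB: 8192k, 256MB: 16384k, 1024MB: 32768k, 4096MB: 65536k */
}
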
1559 | 1561 | ||
1560 | void nfs_destroy_writepagecache(void) | 1562 | void nfs_destroy_writepagecache(void) |
1561 | { | 1563 | { |
1562 | mempool_destroy(nfs_commit_mempool); | 1564 | mempool_destroy(nfs_commit_mempool); |
1563 | mempool_destroy(nfs_wdata_mempool); | 1565 | mempool_destroy(nfs_wdata_mempool); |
1564 | kmem_cache_destroy(nfs_wdata_cachep); | 1566 | kmem_cache_destroy(nfs_wdata_cachep); |
1565 | } | 1567 | } |
1566 | 1568 | ||
1567 | 1569 ||