fs/afs/write.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
				&afs_req_ops, NULL);
	if (ret < 0)
		return ret;

	index = page->index;
	from = pos - index * PAGE_SIZE;
	to = from + len;
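
	/* The page's private word, when set, encodes the dirty byte range as
	 * [f, t) via afs_page_dirty_from()/afs_page_dirty_to().  A new write
	 * that overlaps or abuts that range can simply widen it; anything
	 * else must force the old data out to the server first.
	 */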
try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		ASSERTCMP(f, <=, t);

		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
			goto flush_conflicting_write;
		}

		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = pos & (thp_size(page) - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		SetPageUptodate(page);
	}

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;
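
	/* The write may extend the file.  Re-check i_size under the vnode's
	 * callback lock so that expanding it is serialised against other
	 * size updates.
	 */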
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			afs_set_i_size(vnode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}
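
	/* Record the newly dirtied byte range in the page's private word,
	 * merging it with any range that is already marked dirty there.
	 */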
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(page, f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
	} else {
		priv = afs_page_dirty(page, from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
	}

	if (set_page_dirty(page))
		_debug("dirtied %lx", page->index);

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("kill %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			ClearPageUptodate(page);
			end_page_writeback(page);
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("redirty %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct page *page;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, page, end) {
		if (!PageWriteback(page)) {
			kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end);
			ASSERT(PageWriteback(page));
		}

		trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
		detach_page_private(page);
		page_endio(page, true, 0);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return 0;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct page *page;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);
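
	/* Repeatedly gather a batch of contiguous dirty pages under the RCU
	 * read lock and then, outside it, switch each batch from dirty to
	 * writeback, until we hit a page we can't use or run out of quota.
	 */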
	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, page, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, page))
				continue;
			if (xa_is_value(page))
				break;
			if (page->index != index)
				break;

			if (!page_cache_get_speculative(page)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(page != xas_reload(&xas))) {
				put_page(page);
				break;
			}

			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				put_page(page);
				break;
			}

			psize = thp_size(page);
			priv = page_private(page);
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
			if (f != 0 && !new_content) {
				unlock_page(page);
				put_page(page);
				break;
			}

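			/* Count this page's dirty data into the region length.
			 * Only the bytes up to the end of its dirty range (t)
			 * are included now; the rest of the page is noted as
			 * filler and only added in if a further dirty page
			 * follows it.
			 */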
			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += thp_nr_pages(page);
			if (!pagevec_add(&pvec, page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			page = pvec.pages[i];
			trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();

			*_count -= thp_nr_pages(page);
			unlock_page(page);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
					       struct writeback_control *wbc,
					       struct page *page,
					       loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", page->index, start, end);

	if (test_set_page_writeback(page))
		BUG();

	count -= thp_nr_pages(page);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = page_private(page);
	offset = afs_page_dirty_from(page, priv);
	to = afs_page_dirty_to(page, priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"), page);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == thp_size(page) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(page);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}
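
	/* Permission failures and out-of-space conditions leave the pages
	 * dirty so that the write can be retried later; unrecoverable errors
	 * discard the pages instead.
	 */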
	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
	case -ENETRESET:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", page->index);

	start = page->index * PAGE_SIZE;
	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct page *page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */

		_debug("wback %lx", page->index);

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
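		/* For data-integrity writeback we must wait for the page
		 * lock; for background writeback we don't wait, but give up
		 * if the page can't be locked immediately.
		 */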
  		if (wbc->sync_mode != WB_SYNC_NONE) {
  			ret = lock_page_killable(page);
  			if (ret < 0) {
  				put_page(page);
  				return ret;
  			}
  		} else {
  			if (!trylock_page(page)) {
  				put_page(page);
  				return 0;
  			}
4343d0087   David Howells   afs: Get rid of t...
698
  		}
31143d5d5   David Howells   AFS: implement ba...
699

c5051c7bc   David Howells   afs: Don't wait f...
700
  		if (page->mapping != mapping || !PageDirty(page)) {
e87b03f58   David Howells   afs: Prepare for ...
701
  			start += thp_size(page);
31143d5d5   David Howells   AFS: implement ba...
702
  			unlock_page(page);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
703
  			put_page(page);
31143d5d5   David Howells   AFS: implement ba...
704
705
  			continue;
  		}
c5051c7bc   David Howells   afs: Don't wait f...
706
  		if (PageWriteback(page)) {
31143d5d5   David Howells   AFS: implement ba...
707
  			unlock_page(page);
c5051c7bc   David Howells   afs: Don't wait f...
708
709
  			if (wbc->sync_mode != WB_SYNC_NONE)
  				wait_on_page_writeback(page);
29c8bbbd6   David Howells   afs: Fix missing ...
710
  			put_page(page);
31143d5d5   David Howells   AFS: implement ba...
711
712
  			continue;
  		}
65a151094   David Howells   afs: ->writepage(...
713
714
  		if (!clear_page_dirty_for_io(page))
  			BUG();
e87b03f58   David Howells   afs: Prepare for ...
715
  		ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
716
  		put_page(page);
31143d5d5   David Howells   AFS: implement ba...
717
  		if (ret < 0) {
e87b03f58   David Howells   afs: Prepare for ...
718
  			_leave(" = %zd", ret);
31143d5d5   David Howells   AFS: implement ba...
719
720
  			return ret;
  		}
dc2557308   Marc Dionne   afs: Fix partial ...
721
  		start += ret;
31143d5d5   David Howells   AFS: implement ba...
722

31143d5d5   David Howells   AFS: implement ba...
723
  		cond_resched();
e87b03f58   David Howells   afs: Prepare for ...
724
  	} while (wbc->nr_to_write > 0);
31143d5d5   David Howells   AFS: implement ba...
725

e87b03f58   David Howells   afs: Prepare for ...
726
727
  	*_next = start;
  	_leave(" = 0 [%llx]", *_next);
31143d5d5   David Howells   AFS: implement ba...
728
729
730
731
732
733
734
735
736
  	return 0;
  }
  
  /*
   * write some of the pending data back to the server
   */
  int afs_writepages(struct address_space *mapping,
  		   struct writeback_control *wbc)
  {
ec0fa0b65   David Howells   afs: Fix deadlock...
737
  	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
e87b03f58   David Howells   afs: Prepare for ...
738
  	loff_t start, next;
31143d5d5   David Howells   AFS: implement ba...
739
740
741
  	int ret;
  
  	_enter("");
ec0fa0b65   David Howells   afs: Fix deadlock...
742
743
744
745
746
747
748
749
  	/* We have to be careful as we can end up racing with setattr()
  	 * truncating the pagecache since the caller doesn't take a lock here
  	 * to prevent it.
  	 */
  	if (wbc->sync_mode == WB_SYNC_ALL)
  		down_read(&vnode->validate_lock);
  	else if (!down_read_trylock(&vnode->validate_lock))
  		return 0;
31143d5d5   David Howells   AFS: implement ba...
750
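
	/* Writeback either resumes from where we previously left off (wrapping
	 * back to the start of the file once the end is reached), covers the
	 * whole file, or covers just the byte range that was asked for.
	 */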
  	if (wbc->range_cyclic) {
e87b03f58   David Howells   afs: Prepare for ...
751
752
  		start = mapping->writeback_index * PAGE_SIZE;
  		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
afe694986   Tom Rix   afs: check functi...
753
754
755
756
757
758
759
760
761
762
  		if (ret == 0) {
  			mapping->writeback_index = next / PAGE_SIZE;
  			if (start > 0 && wbc->nr_to_write > 0) {
  				ret = afs_writepages_region(mapping, wbc, 0,
  							    start, &next);
  				if (ret == 0)
  					mapping->writeback_index =
  						next / PAGE_SIZE;
  			}
  		}
31143d5d5   David Howells   AFS: implement ba...
763
  	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
e87b03f58   David Howells   afs: Prepare for ...
764
  		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
afe694986   Tom Rix   afs: check functi...
765
  		if (wbc->nr_to_write > 0 && ret == 0)
5a972474c   David Howells   afs: Fix setting ...
766
  			mapping->writeback_index = next / PAGE_SIZE;
31143d5d5   David Howells   AFS: implement ba...
767
  	} else {
e87b03f58   David Howells   afs: Prepare for ...
768
769
  		ret = afs_writepages_region(mapping, wbc,
  					    wbc->range_start, wbc->range_end, &next);
31143d5d5   David Howells   AFS: implement ba...
770
  	}
ec0fa0b65   David Howells   afs: Fix deadlock...
771
  	up_read(&vnode->validate_lock);
31143d5d5   David Howells   AFS: implement ba...
772
773
774
775
776
  	_leave(" = %d", ret);
  	return ret;
  }
  
  /*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = thp_head(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);

	afs_validate(vnode, af->key);
	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    wait_on_page_fscache_killable(page) < 0)
		goto out;
#endif

	if (wait_on_page_writeback_killable(page))
		goto out;

	if (lock_page_killable(page) < 0)
		goto out;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (wait_on_page_writeback_killable(page) < 0) {
		unlock_page(page);
		goto out;
	}

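	/* A write through an mmapped page can touch any part of it, so mark
	 * the whole page dirty in its private word and flag it as mmapped.
	 */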
	priv = afs_page_dirty(page, 0, thp_size(page));
	priv = afs_page_dirty_mmapped(priv);
	if (PagePrivate(page)) {
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
	} else {
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = thp_size(page);
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
		}
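
		/* Write just the dirty span of the page back synchronously,
		 * using a single-element bvec iterator; passing laundering as
		 * true stops the completion path from touching the page
		 * again.
		 */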
		bv[0].bv_page = page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
		ret = afs_store_data(vnode, &iter, page_offset(page) + f, true);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
	detach_page_private(page);
	wait_on_page_fscache(page);
	return ret;
}