fs/nfsd/nfs4layouts.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Copyright (c) 2014 Christoph Hellwig.
   */
  #include <linux/blkdev.h>
  #include <linux/kmod.h>
  #include <linux/file.h>
  #include <linux/jhash.h>
  #include <linux/sched.h>
  #include <linux/sunrpc/addr.h>
  
  #include "pnfs.h"
  #include "netns.h"
  #include "trace.h"
  
  #define NFSDDBG_FACILITY                NFSDDBG_PNFS
  
  struct nfs4_layout {
  	struct list_head		lo_perstate;
  	struct nfs4_layout_stateid	*lo_state;
  	struct nfsd4_layout_seg		lo_seg;
  };
  
  static struct kmem_cache *nfs4_layout_cache;
  static struct kmem_cache *nfs4_layout_stateid_cache;
  static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
  static const struct lock_manager_operations nfsd4_layouts_lm_ops;
  const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] =  {
  #ifdef CONFIG_NFSD_FLEXFILELAYOUT
  	[LAYOUT_FLEX_FILES]	= &ff_layout_ops,
  #endif
  #ifdef CONFIG_NFSD_BLOCKLAYOUT
  	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
  #endif
  #ifdef CONFIG_NFSD_SCSILAYOUT
  	[LAYOUT_SCSI]		= &scsi_layout_ops,
  #endif
  };
  
  /* pNFS device ID to export fsid mapping */
  #define DEVID_HASH_BITS	8
  #define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
  #define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
  static u64 nfsd_devid_seq = 1;
  static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
  static DEFINE_SPINLOCK(nfsd_devid_lock);
  
  static inline u32 devid_hashfn(u64 idx)
  {
  	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
  }
  
  static void
  nfsd4_alloc_devid_map(const struct svc_fh *fhp)
  {
  	const struct knfsd_fh *fh = &fhp->fh_handle;
  	size_t fsid_len = key_len(fh->fh_fsid_type);
  	struct nfsd4_deviceid_map *map, *old;
  	int i;
  
  	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
  	if (!map)
  		return;
  
  	map->fsid_type = fh->fh_fsid_type;
  	memcpy(&map->fsid, fh->fh_fsid, fsid_len);
  
  	spin_lock(&nfsd_devid_lock);
  	if (fhp->fh_export->ex_devid_map)
  		goto out_unlock;
  
  	for (i = 0; i < DEVID_HASH_SIZE; i++) {
  		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
  			if (old->fsid_type != fh->fh_fsid_type)
  				continue;
  			if (memcmp(old->fsid, fh->fh_fsid,
  					key_len(old->fsid_type)))
  				continue;
  
  			fhp->fh_export->ex_devid_map = old;
  			goto out_unlock;
  		}
  	}
  
  	map->idx = nfsd_devid_seq++;
  	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
  	fhp->fh_export->ex_devid_map = map;
  	map = NULL;
  
  out_unlock:
  	spin_unlock(&nfsd_devid_lock);
  	kfree(map);
  }
  
  struct nfsd4_deviceid_map *
  nfsd4_find_devid_map(int idx)
  {
  	struct nfsd4_deviceid_map *map, *ret = NULL;
  
  	rcu_read_lock();
  	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
  		if (map->idx == idx)
  			ret = map;
  	rcu_read_unlock();
  
  	return ret;
  }
  
  int
  nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
  		u32 device_generation)
  {
  	if (!fhp->fh_export->ex_devid_map) {
  		nfsd4_alloc_devid_map(fhp);
  		if (!fhp->fh_export->ex_devid_map)
  			return -ENOMEM;
  	}
  
  	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
  	id->generation = device_generation;
  	id->pad = 0;
  	return 0;
  }
  
  void nfsd4_setup_layout_type(struct svc_export *exp)
  {
  #if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
  	struct super_block *sb = exp->ex_path.mnt->mnt_sb;
  #endif

  	if (!(exp->ex_flags & NFSEXP_PNFS))
  		return;

  #ifdef CONFIG_NFSD_FLEXFILELAYOUT
  	exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
  #endif
  #ifdef CONFIG_NFSD_BLOCKLAYOUT
  	if (sb->s_export_op->get_uuid &&
  	    sb->s_export_op->map_blocks &&
  	    sb->s_export_op->commit_blocks)
  		exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
  #endif
  #ifdef CONFIG_NFSD_SCSILAYOUT
  	if (sb->s_export_op->map_blocks &&
  	    sb->s_export_op->commit_blocks &&
  	    sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops &&
  		blk_queue_scsi_passthrough(sb->s_bdev->bd_disk->queue))
  		exp->ex_layout_types |= 1 << LAYOUT_SCSI;
  #endif
  }
  
  static void
  nfsd4_free_layout_stateid(struct nfs4_stid *stid)
  {
  	struct nfs4_layout_stateid *ls = layoutstateid(stid);
  	struct nfs4_client *clp = ls->ls_stid.sc_client;
  	struct nfs4_file *fp = ls->ls_stid.sc_file;
  	trace_nfsd_layoutstate_free(&ls->ls_stid.sc_stateid);

  	spin_lock(&clp->cl_lock);
  	list_del_init(&ls->ls_perclnt);
  	spin_unlock(&clp->cl_lock);
  
  	spin_lock(&fp->fi_lock);
  	list_del_init(&ls->ls_perfile);
  	spin_unlock(&fp->fi_lock);
  	if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
  		vfs_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls);
  	nfsd_file_put(ls->ls_file);
  
  	if (ls->ls_recalled)
  		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);
  	kmem_cache_free(nfs4_layout_stateid_cache, ls);
  }
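/*
 * Take an FL_LAYOUT lease on the backing file.  A break of this lease
 * (e.g. by a conflicting local access) triggers a layout recall through
 * nfsd4_layout_lm_break().
 */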
  static int
  nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
  {
  	struct file_lock *fl;
  	int status;
  	if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
  		return 0;
  	fl = locks_alloc_lock();
  	if (!fl)
  		return -ENOMEM;
  	locks_init_lock(fl);
  	fl->fl_lmops = &nfsd4_layouts_lm_ops;
  	fl->fl_flags = FL_LAYOUT;
  	fl->fl_type = F_RDLCK;
  	fl->fl_end = OFFSET_MAX;
  	fl->fl_owner = ls;
  	fl->fl_pid = current->tgid;
  	fl->fl_file = ls->ls_file->nf_file;
  
  	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
  	if (status) {
  		locks_free_lock(fl);
  		return status;
  	}
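	/*
	 * Each layout stateid is its own lease owner, so on success the
	 * lease code must have taken ownership of fl and cleared it.
	 */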
  	BUG_ON(fl != NULL);
  	return 0;
  }
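/*
 * Allocate a new layout stateid for this client/file, derived from the
 * open, lock or delegation stateid in @parent, and set up the recall
 * callback and layout lease for it.
 */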
  static struct nfs4_layout_stateid *
  nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
  		struct nfs4_stid *parent, u32 layout_type)
  {
  	struct nfs4_client *clp = cstate->clp;
  	struct nfs4_file *fp = parent->sc_file;
  	struct nfs4_layout_stateid *ls;
  	struct nfs4_stid *stp;
  	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
  					nfsd4_free_layout_stateid);
  	if (!stp)
  		return NULL;

  	get_nfs4_file(fp);
  	stp->sc_file = fp;
  
  	ls = layoutstateid(stp);
  	INIT_LIST_HEAD(&ls->ls_perclnt);
  	INIT_LIST_HEAD(&ls->ls_perfile);
  	spin_lock_init(&ls->ls_lock);
  	INIT_LIST_HEAD(&ls->ls_layouts);
  	mutex_init(&ls->ls_mutex);
  	ls->ls_layout_type = layout_type;
  	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
  			NFSPROC4_CLNT_CB_LAYOUT);
  
  	if (parent->sc_type == NFS4_DELEG_STID)
  		ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
  	else
  		ls->ls_file = find_any_file(fp);
  	BUG_ON(!ls->ls_file);
  
  	if (nfsd4_layout_setlease(ls)) {
  		nfsd_file_put(ls->ls_file);
  		put_nfs4_file(fp);
  		kmem_cache_free(nfs4_layout_stateid_cache, ls);
  		return NULL;
  	}
  
  	spin_lock(&clp->cl_lock);
  	stp->sc_type = NFS4_LAYOUT_STID;
  	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
  	spin_unlock(&clp->cl_lock);
  
  	spin_lock(&fp->fi_lock);
  	list_add(&ls->ls_perfile, &fp->fi_lo_states);
  	spin_unlock(&fp->fi_lock);
  	trace_nfsd_layoutstate_alloc(&ls->ls_stid.sc_stateid);
  	return ls;
  }
  
  __be32
  nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
  		struct nfsd4_compound_state *cstate, stateid_t *stateid,
  		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
  {
  	struct nfs4_layout_stateid *ls;
  	struct nfs4_stid *stid;
  	unsigned char typemask = NFS4_LAYOUT_STID;
  	__be32 status;
  
  	if (create)
  		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);
  
  	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
  			net_generic(SVC_NET(rqstp), nfsd_net_id));
  	if (status)
  		goto out;
  
  	if (!fh_match(&cstate->current_fh.fh_handle,
  		      &stid->sc_file->fi_fhandle)) {
  		status = nfserr_bad_stateid;
  		goto out_put_stid;
  	}
  
  	if (stid->sc_type != NFS4_LAYOUT_STID) {
  		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
  		nfs4_put_stid(stid);
  
  		status = nfserr_jukebox;
  		if (!ls)
  			goto out;
  		mutex_lock(&ls->ls_mutex);
  	} else {
  		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);
  
  		status = nfserr_bad_stateid;
  		mutex_lock(&ls->ls_mutex);
  		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
  			goto out_unlock_stid;
  		if (layout_type != ls->ls_layout_type)
  			goto out_unlock_stid;
  	}
  
  	*lsp = ls;
  	return 0;
  out_unlock_stid:
  	mutex_unlock(&ls->ls_mutex);
  out_put_stid:
  	nfs4_put_stid(stid);
  out:
  	return status;
  }
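/*
 * Send a CB_LAYOUTRECALL for all layouts held on this stateid, unless a
 * recall is already outstanding.
 */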
  static void
  nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
  {
  	spin_lock(&ls->ls_lock);
  	if (ls->ls_recalled)
  		goto out_unlock;
  
  	ls->ls_recalled = true;
  	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
  	if (list_empty(&ls->ls_layouts))
  		goto out_unlock;
  	trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);

  	refcount_inc(&ls->ls_stid.sc_count);
  	nfsd4_run_cb(&ls->ls_recall);
  
  out_unlock:
  	spin_unlock(&ls->ls_lock);
  }
  static inline u64
  layout_end(struct nfsd4_layout_seg *seg)
  {
  	u64 end = seg->offset + seg->length;
  	return end >= seg->offset ? end : NFS4_MAX_UINT64;
  }
  
  static void
  layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
  {
  	if (end == NFS4_MAX_UINT64)
  		lo->length = NFS4_MAX_UINT64;
  	else
  		lo->length = end - lo->offset;
  }
  
  static bool
  layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
  {
  	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
  		return false;
  	if (layout_end(&lo->lo_seg) <= s->offset)
  		return false;
  	if (layout_end(s) <= lo->lo_seg.offset)
  		return false;
  	return true;
  }
  
  static bool
  layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
  {
  	if (lo->iomode != new->iomode)
  		return false;
  	if (layout_end(new) < lo->offset)
  		return false;
  	if (layout_end(lo) < new->offset)
  		return false;
  
  	lo->offset = min(lo->offset, new->offset);
  	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
  	return true;
  }
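/*
 * Layouts on this file held by other stateids conflict with the new
 * request: recall them and return NFS4ERR_RECALLCONFLICT so the client
 * retries.  Called with fp->fi_lock held.
 */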
  static __be32
  nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
  {
  	struct nfs4_file *fp = ls->ls_stid.sc_file;
  	struct nfs4_layout_stateid *l, *n;
  	__be32 nfserr = nfs_ok;
  
  	assert_spin_locked(&fp->fi_lock);
  
  	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
  		if (l != ls) {
  			nfsd4_recall_file_layout(l);
  			nfserr = nfserr_recallconflict;
  		}
  	}
  
  	return nfserr;
  }
  __be32
  nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
  {
  	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
  	struct nfs4_file *fp = ls->ls_stid.sc_file;
  	struct nfs4_layout *lp, *new = NULL;
  	__be32 nfserr;

  	spin_lock(&fp->fi_lock);
  	nfserr = nfsd4_recall_conflict(ls);
  	if (nfserr)
  		goto out;
  	spin_lock(&ls->ls_lock);
  	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
  		if (layouts_try_merge(&lp->lo_seg, seg))
  			goto done;
  	}
  	spin_unlock(&ls->ls_lock);
  	spin_unlock(&fp->fi_lock);
  
  	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
  	if (!new)
  		return nfserr_jukebox;
  	memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
  	new->lo_state = ls;
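	/*
	 * The locks were dropped for the allocation above, so check for
	 * conflicting layouts again before inserting the new segment.
	 */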
  	spin_lock(&fp->fi_lock);
  	nfserr = nfsd4_recall_conflict(ls);
  	if (nfserr)
  		goto out;
  	spin_lock(&ls->ls_lock);
  	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
  		if (layouts_try_merge(&lp->lo_seg, seg))
  			goto done;
  	}
  	refcount_inc(&ls->ls_stid.sc_count);
  	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
  	new = NULL;
  done:
  	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
  	spin_unlock(&ls->ls_lock);
  out:
  	spin_unlock(&fp->fi_lock);
  	if (new)
  		kmem_cache_free(nfs4_layout_cache, new);
  	return nfserr;
  }
  
  static void
  nfsd4_free_layouts(struct list_head *reaplist)
  {
  	while (!list_empty(reaplist)) {
  		struct nfs4_layout *lp = list_first_entry(reaplist,
  				struct nfs4_layout, lo_perstate);
  
  		list_del(&lp->lo_perstate);
  		nfs4_put_stid(&lp->lo_state->ls_stid);
  		kmem_cache_free(nfs4_layout_cache, lp);
  	}
  }
  
  static void
  nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
  		struct list_head *reaplist)
  {
  	struct nfsd4_layout_seg *lo = &lp->lo_seg;
  	u64 end = layout_end(lo);
  
  	if (seg->offset <= lo->offset) {
  		if (layout_end(seg) >= end) {
  			list_move_tail(&lp->lo_perstate, reaplist);
  			return;
  		}
  		lo->offset = layout_end(seg);
  	} else {
  		/* retain the whole layout segment on a split. */
  		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
  			return;
  		}
  		end = seg->offset;
  	}
  
  	layout_update_len(lo, end);
  }
  
  __be32
  nfsd4_return_file_layouts(struct svc_rqst *rqstp,
  		struct nfsd4_compound_state *cstate,
  		struct nfsd4_layoutreturn *lrp)
  {
  	struct nfs4_layout_stateid *ls;
  	struct nfs4_layout *lp, *n;
  	LIST_HEAD(reaplist);
  	__be32 nfserr;
  	int found = 0;
  
  	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
  						false, lrp->lr_layout_type,
  						&ls);
  	if (nfserr) {
  		trace_nfsd_layout_return_lookup_fail(&lrp->lr_sid);
  		return nfserr;
  	}
  
  	spin_lock(&ls->ls_lock);
  	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
  		if (layouts_overlapping(lp, &lrp->lr_seg)) {
  			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
  			found++;
  		}
  	}
  	if (!list_empty(&ls->ls_layouts)) {
  		if (found)
  			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
  		lrp->lrs_present = 1;
  	} else {
  		trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
  		nfs4_unhash_stid(&ls->ls_stid);
  		lrp->lrs_present = 0;
  	}
  	spin_unlock(&ls->ls_lock);
  	mutex_unlock(&ls->ls_mutex);
  	nfs4_put_stid(&ls->ls_stid);
  	nfsd4_free_layouts(&reaplist);
  	return nfs_ok;
  }
  
  __be32
  nfsd4_return_client_layouts(struct svc_rqst *rqstp,
  		struct nfsd4_compound_state *cstate,
  		struct nfsd4_layoutreturn *lrp)
  {
  	struct nfs4_layout_stateid *ls, *n;
  	struct nfs4_client *clp = cstate->clp;
  	struct nfs4_layout *lp, *t;
  	LIST_HEAD(reaplist);
  
  	lrp->lrs_present = 0;
  
  	spin_lock(&clp->cl_lock);
  	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
  		if (ls->ls_layout_type != lrp->lr_layout_type)
  			continue;
  		if (lrp->lr_return_type == RETURN_FSID &&
  		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
  				   &cstate->current_fh.fh_handle))
  			continue;
  
  		spin_lock(&ls->ls_lock);
  		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
  			if (lrp->lr_seg.iomode == IOMODE_ANY ||
  			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
  				list_move_tail(&lp->lo_perstate, &reaplist);
  		}
  		spin_unlock(&ls->ls_lock);
  	}
  	spin_unlock(&clp->cl_lock);
  
  	nfsd4_free_layouts(&reaplist);
  	return 0;
  }
  
  static void
  nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
  		struct list_head *reaplist)
  {
  	spin_lock(&ls->ls_lock);
  	list_splice_init(&ls->ls_layouts, reaplist);
  	spin_unlock(&ls->ls_lock);
  }
  
  void
  nfsd4_return_all_client_layouts(struct nfs4_client *clp)
  {
  	struct nfs4_layout_stateid *ls, *n;
  	LIST_HEAD(reaplist);
  
  	spin_lock(&clp->cl_lock);
  	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
  		nfsd4_return_all_layouts(ls, &reaplist);
  	spin_unlock(&clp->cl_lock);
  
  	nfsd4_free_layouts(&reaplist);
  }
  
  void
  nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
  {
  	struct nfs4_layout_stateid *ls, *n;
  	LIST_HEAD(reaplist);
  
  	spin_lock(&fp->fi_lock);
  	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
  		if (ls->ls_stid.sc_client == clp)
  			nfsd4_return_all_layouts(ls, &reaplist);
  	}
  	spin_unlock(&fp->fi_lock);
  
  	nfsd4_free_layouts(&reaplist);
  }
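/*
 * The client failed to return its layouts in time.  Fence it by running
 * /sbin/nfsd-recall-failed with the client address and the s_id of the
 * containing filesystem as arguments.
 */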
  static void
  nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
  {
  	struct nfs4_client *clp = ls->ls_stid.sc_client;
  	char addr_str[INET6_ADDRSTRLEN];
  	static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed";
  	static char *envp[] = {
  		"HOME=/",
  		"TERM=linux",
  		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
  		NULL
  	};
  	char *argv[8];
  	int error;
  
  	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
  
	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"  Fencing..\n", addr_str);
  	argv[0] = (char *)nfsd_recall_failed;
  	argv[1] = addr_str;
  	argv[2] = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_id;
  	argv[3] = NULL;
  	error = call_usermodehelper(nfsd_recall_failed, argv, envp,
  				    UMH_WAIT_PROC);
  	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
  			addr_str, error);
  	}
  }
  static void
  nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
  {
  	struct nfs4_layout_stateid *ls =
  		container_of(cb, struct nfs4_layout_stateid, ls_recall);
  
  	mutex_lock(&ls->ls_mutex);
  	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
  	mutex_unlock(&ls->ls_mutex);
  }
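/*
 * Handle the result of a CB_LAYOUTRECALL: poll a client returning
 * NFS4ERR_DELAY for up to two lease periods, treat
 * NFS4ERR_NOMATCHING_LAYOUT as completion, and fence the client on any
 * other error.
 */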
  static int
  nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
  {
  	struct nfs4_layout_stateid *ls =
  		container_of(cb, struct nfs4_layout_stateid, ls_recall);
  	struct nfsd_net *nn;
  	ktime_t now, cutoff;
  	const struct nfsd4_layout_ops *ops;
  	switch (task->tk_status) {
  	case 0:
  	case -NFS4ERR_DELAY:
  		/*
  		 * Anything left? If not, then call it done. Note that we don't
  		 * take the spinlock since this is an optimization and nothing
  		 * should get added until the cb counter goes to zero.
  		 */
  		if (list_empty(&ls->ls_layouts))
  			return 1;
  
  		/* Poll the client until it's done with the layout */
  		now = ktime_get();
  		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);
  
  		/* Client gets 2 lease periods to return it */
  		cutoff = ktime_add_ns(task->tk_start,
  					 (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);
  
  		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
  			return 0;
  		}
  		fallthrough;
  	default:
  		/*
  		 * Unknown error or non-responding client, we'll need to fence.
  		 */
  		trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid);
  
  		ops = nfsd4_layout_ops[ls->ls_layout_type];
  		if (ops->fence_client)
  			ops->fence_client(ls);
  		else
  			nfsd4_cb_layout_fail(ls);
  		return 1;
  	case -NFS4ERR_NOMATCHING_LAYOUT:
  		trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid);
  		task->tk_status = 0;
  		return 1;
  	}
  }
  
  static void
  nfsd4_cb_layout_release(struct nfsd4_callback *cb)
  {
  	struct nfs4_layout_stateid *ls =
  		container_of(cb, struct nfs4_layout_stateid, ls_recall);
  	LIST_HEAD(reaplist);
  	trace_nfsd_layout_recall_release(&ls->ls_stid.sc_stateid);

  	nfsd4_return_all_layouts(ls, &reaplist);
  	nfsd4_free_layouts(&reaplist);
  	nfs4_put_stid(&ls->ls_stid);
  }
  static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
  	.prepare	= nfsd4_cb_layout_prepare,
  	.done		= nfsd4_cb_layout_done,
  	.release	= nfsd4_cb_layout_release,
  };
  
  static bool
  nfsd4_layout_lm_break(struct file_lock *fl)
  {
  	/*
  	 * We don't want the locks code to timeout the lease for us;
  	 * we'll remove it ourself if a layout isn't returned
  	 * in time:
  	 */
  	fl->fl_break_time = 0;
  	nfsd4_recall_file_layout(fl->fl_owner);
  	return false;
  }
  
  static int
  nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
  		struct list_head *dispose)
  {
  	BUG_ON(!(arg & F_UNLCK));
  	return lease_modify(onlist, arg, dispose);
  }
  
  static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
  	.lm_break	= nfsd4_layout_lm_break,
  	.lm_change	= nfsd4_layout_lm_change,
  };
  int
  nfsd4_init_pnfs(void)
  {
  	int i;
  
  	for (i = 0; i < DEVID_HASH_SIZE; i++)
  		INIT_LIST_HEAD(&nfsd_devid_hash[i]);
  
  	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
  			sizeof(struct nfs4_layout), 0, 0, NULL);
  	if (!nfs4_layout_cache)
  		return -ENOMEM;
  
  	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
  			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
  	if (!nfs4_layout_stateid_cache) {
  		kmem_cache_destroy(nfs4_layout_cache);
  		return -ENOMEM;
  	}
  	return 0;
  }
  
  void
  nfsd4_exit_pnfs(void)
  {
  	int i;
  
  	kmem_cache_destroy(nfs4_layout_cache);
  	kmem_cache_destroy(nfs4_layout_stateid_cache);
  
  	for (i = 0; i < DEVID_HASH_SIZE; i++) {
  		struct nfsd4_deviceid_map *map, *n;
  
  		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
  			kfree(map);
  	}
  }