  /*
   * linux/net/sunrpc/svc_xprt.c
   *
   * Author: Tom Tucker <tom@opengridcomputing.com>
   */
  
  #include <linux/sched.h>
  #include <linux/errno.h>
  #include <linux/freezer.h>
  #include <linux/kthread.h>
  #include <linux/slab.h>
  #include <net/sock.h>
  #include <linux/sunrpc/addr.h>
  #include <linux/sunrpc/stats.h>
  #include <linux/sunrpc/svc_xprt.h>
  #include <linux/sunrpc/svcsock.h>
  #include <linux/sunrpc/xprt.h>
  #include <linux/module.h>
  #include <linux/netdevice.h>
  #include <trace/events/sunrpc.h>
  
  #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
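
  /*
   * Optional cap on the number of in-flight RPC requests per connection;
   * the default of 0 means no limit (see svc_xprt_slots_in_range() below).
   */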
  static unsigned int svc_rpc_per_connection_limit __read_mostly;
  module_param(svc_rpc_per_connection_limit, uint, 0644);
  static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
  static int svc_deferred_recv(struct svc_rqst *rqstp);
  static struct cache_deferred_req *svc_defer(struct cache_req *req);
  static void svc_age_temp_xprts(unsigned long closure);
  static void svc_delete_xprt(struct svc_xprt *xprt);
  
  /* apparently the "standard" is that clients close
   * idle connections after 5 minutes, servers after
   * 6 minutes
   *   http://www.connectathon.org/talks96/nfstcp.pdf
   */
  static int svc_conn_age_period = 6*60;
  /* List of registered transport classes */
  static DEFINE_SPINLOCK(svc_xprt_class_lock);
  static LIST_HEAD(svc_xprt_class_list);
  /* SMP locking strategy:
   *
   *	svc_pool->sp_lock protects most of the fields of that pool.
   *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
   *	when both need to be taken (rare), svc_serv->sv_lock is first.
   *
   *	The "service mutex" protects svc_serv->sv_nrthreads.
   *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
   *             and the ->sk_info_authunix cache.
   *
   *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
   *	enqueued multiply. During normal transport processing this bit
   *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
   *	Providers should not manipulate this bit directly.
   *
   *	Some flags can be set to certain values at any time
   *	providing that certain rules are followed:
   *
   *	XPT_CONN, XPT_DATA:
   *		- Can be set or cleared at any time.
   *		- After a set, svc_xprt_enqueue must be called to enqueue
   *		  the transport for processing.
   *		- After a clear, the transport must be read/accepted.
   *		  If this succeeds, it must be set again.
   *	XPT_CLOSE:
   *		- Can be set at any time. It is never cleared.
   *      XPT_DEAD:
   *		- Can only be set while XPT_BUSY is held which ensures
   *		  that no other thread will be using the transport or will
   *		  try to set XPT_DEAD.
   */
  int svc_reg_xprt_class(struct svc_xprt_class *xcl)
  {
  	struct svc_xprt_class *cl;
  	int res = -EEXIST;
  
  	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);
  
  	INIT_LIST_HEAD(&xcl->xcl_list);
  	spin_lock(&svc_xprt_class_lock);
  	/* Make sure there isn't already a class with the same name */
  	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
  		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
  			goto out;
  	}
  	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
  	res = 0;
  out:
  	spin_unlock(&svc_xprt_class_lock);
  	return res;
  }
  EXPORT_SYMBOL_GPL(svc_reg_xprt_class);
  
  void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
  {
  	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
  	spin_lock(&svc_xprt_class_lock);
  	list_del_init(&xcl->xcl_list);
  	spin_unlock(&svc_xprt_class_lock);
  }
  EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
  /*
   * Format the transport list for printing
   */
  int svc_print_xprts(char *buf, int maxlen)
  {
  	struct svc_xprt_class *xcl;
  	char tmpstr[80];
  	int len = 0;
  	buf[0] = '\0';

  	spin_lock(&svc_xprt_class_lock);
  	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
  		int slen;

  		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
  		slen = strlen(tmpstr);
  		if (len + slen > maxlen)
  			break;
  		len += slen;
  		strcat(buf, tmpstr);
  	}
  	spin_unlock(&svc_xprt_class_lock);
  
  	return len;
  }
  static void svc_xprt_free(struct kref *kref)
  {
  	struct svc_xprt *xprt =
  		container_of(kref, struct svc_xprt, xpt_ref);
  	struct module *owner = xprt->xpt_class->xcl_owner;
  	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
  		svcauth_unix_info_release(xprt);
  	put_net(xprt->xpt_net);
  	/* See comment on corresponding get in xs_setup_bc_tcp(): */
  	if (xprt->xpt_bc_xprt)
  		xprt_put(xprt->xpt_bc_xprt);
  	if (xprt->xpt_bc_xps)
  		xprt_switch_put(xprt->xpt_bc_xps);
  	xprt->xpt_ops->xpo_free(xprt);
  	module_put(owner);
  }
  
  void svc_xprt_put(struct svc_xprt *xprt)
  {
  	kref_put(&xprt->xpt_ref, svc_xprt_free);
  }
  EXPORT_SYMBOL_GPL(svc_xprt_put);
  /*
   * Called by transport drivers to initialize the transport independent
   * portion of the transport instance.
   */
  void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
  		   struct svc_xprt *xprt, struct svc_serv *serv)
  {
  	memset(xprt, 0, sizeof(*xprt));
  	xprt->xpt_class = xcl;
  	xprt->xpt_ops = xcl->xcl_ops;
  	kref_init(&xprt->xpt_ref);
  	xprt->xpt_server = serv;
  	INIT_LIST_HEAD(&xprt->xpt_list);
  	INIT_LIST_HEAD(&xprt->xpt_ready);
  	INIT_LIST_HEAD(&xprt->xpt_deferred);
  	INIT_LIST_HEAD(&xprt->xpt_users);
  	mutex_init(&xprt->xpt_mutex);
  	spin_lock_init(&xprt->xpt_lock);
  	set_bit(XPT_BUSY, &xprt->xpt_flags);
  	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
  	xprt->xpt_net = get_net(net);
  }
  EXPORT_SYMBOL_GPL(svc_xprt_init);
  static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
  					 struct svc_serv *serv,
  					 struct net *net,
  					 const int family,
  					 const unsigned short port,
  					 int flags)
  {
  	struct sockaddr_in sin = {
  		.sin_family		= AF_INET,
  		.sin_addr.s_addr	= htonl(INADDR_ANY),
  		.sin_port		= htons(port),
  	};
  #if IS_ENABLED(CONFIG_IPV6)
  	struct sockaddr_in6 sin6 = {
  		.sin6_family		= AF_INET6,
  		.sin6_addr		= IN6ADDR_ANY_INIT,
  		.sin6_port		= htons(port),
  	};
  #endif
  	struct sockaddr *sap;
  	size_t len;

  	switch (family) {
  	case PF_INET:
  		sap = (struct sockaddr *)&sin;
  		len = sizeof(sin);
  		break;
  #if IS_ENABLED(CONFIG_IPV6)
  	case PF_INET6:
  		sap = (struct sockaddr *)&sin6;
  		len = sizeof(sin6);
  		break;
  #endif
  	default:
  		return ERR_PTR(-EAFNOSUPPORT);
  	}

  	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
  }
  /*
   * svc_xprt_received conditionally queues the transport for processing
   * by another thread. The caller must hold the XPT_BUSY bit and must
   * not thereafter touch transport data.
   *
   * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
   * insufficient) data.
   */
  static void svc_xprt_received(struct svc_xprt *xprt)
  {
  	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
  		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
  		return;
  	}
  	/* As soon as we clear busy, the xprt could be closed and
  	 * 'put', so we need a reference to call svc_enqueue_xprt with:
  	 */
  	svc_xprt_get(xprt);
  	smp_mb__before_atomic();
  	clear_bit(XPT_BUSY, &xprt->xpt_flags);
  	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
  	svc_xprt_put(xprt);
  }
  void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
  {
  	clear_bit(XPT_TEMP, &new->xpt_flags);
  	spin_lock_bh(&serv->sv_lock);
  	list_add(&new->xpt_list, &serv->sv_permsocks);
  	spin_unlock_bh(&serv->sv_lock);
  	svc_xprt_received(new);
  }

  static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
  		    struct net *net, const int family,
  		    const unsigned short port, int flags)
  {
  	struct svc_xprt_class *xcl;

  	spin_lock(&svc_xprt_class_lock);
  	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
  		struct svc_xprt *newxprt;
  		unsigned short newport;
  
  		if (strcmp(xprt_name, xcl->xcl_name))
  			continue;
  
  		if (!try_module_get(xcl->xcl_owner))
  			goto err;
  
  		spin_unlock(&svc_xprt_class_lock);
  		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
  		if (IS_ERR(newxprt)) {
  			module_put(xcl->xcl_owner);
  			return PTR_ERR(newxprt);
  		}
  		svc_add_new_perm_xprt(serv, newxprt);
  		newport = svc_xprt_local_port(newxprt);
  		return newport;
  	}
   err:
  	spin_unlock(&svc_xprt_class_lock);
  	/* This errno is exposed to user space.  Provide a reasonable
  	 * perror msg for a bad transport. */
  	return -EPROTONOSUPPORT;
  }
  
  int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
  		    struct net *net, const int family,
  		    const unsigned short port, int flags)
  {
  	int err;
  
  	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
  	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
  	if (err == -EPROTONOSUPPORT) {
  		request_module("svc%s", xprt_name);
  		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
  	}
  	if (err)
  		dprintk("svc: transport %s not found, err %d\n",
  			xprt_name, err);
  	return err;
  }
  EXPORT_SYMBOL_GPL(svc_create_xprt);
  
  /*
   * Copy the local and remote xprt addresses to the rqstp structure
   */
  void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
  {
  	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
  	rqstp->rq_addrlen = xprt->xpt_remotelen;
  
  	/*
  	 * Destination address in request is needed for binding the
  	 * source address in RPC replies/callbacks later.
  	 */
  	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
  	rqstp->rq_daddrlen = xprt->xpt_locallen;
  }
  EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
  /**
   * svc_print_addr - Format rq_addr field for printing
   * @rqstp: svc_rqst struct containing address to print
   * @buf: target buffer for formatted address
   * @len: length of target buffer
   *
   */
  char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
  {
  	return __svc_print_addr(svc_addr(rqstp), buf, len);
  }
  EXPORT_SYMBOL_GPL(svc_print_addr);
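
  /*
   * Per-connection slot accounting: a request that has received data
   * holds one slot (RQ_DATA, xpt_nr_rqsts) until svc_xprt_release_slot()
   * gives it back and re-enqueues the transport.
   */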
  static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
  {
  	unsigned int limit = svc_rpc_per_connection_limit;
  	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);
  
  	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
  }
  
  static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
  {
  	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
  		if (!svc_xprt_slots_in_range(xprt))
  			return false;
  		atomic_inc(&xprt->xpt_nr_rqsts);
  		set_bit(RQ_DATA, &rqstp->rq_flags);
  	}
  	return true;
  }
  
  static void svc_xprt_release_slot(struct svc_rqst *rqstp)
  {
  	struct svc_xprt	*xprt = rqstp->rq_xprt;
  	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
  		atomic_dec(&xprt->xpt_nr_rqsts);
  		svc_xprt_enqueue(xprt);
  	}
  }
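
  /*
   * A transport needs servicing if it has a pending connection or close;
   * pending data or deferred requests only count when the transport also
   * has write space and a free request slot.
   */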
  static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
  {
  	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
  		return true;
  	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
  		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
  		    svc_xprt_slots_in_range(xprt))
  			return true;
  		trace_svc_xprt_no_write_space(xprt);
  		return false;
  	}
  	return false;
  }
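
  /*
   * Queue a transport for processing: mark it XPT_BUSY, then either hand
   * it straight to an idle thread found on the pool's RCU thread list or,
   * failing that, put it on the pool's sp_sockets queue and wake a thread
   * to come and get it.
   */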
  void svc_xprt_do_enqueue(struct svc_xprt *xprt)
  {
  	struct svc_pool *pool;
  	struct svc_rqst	*rqstp = NULL;
  	int cpu;
  	bool queued = false;

  	if (!svc_xprt_has_something_to_do(xprt))
  		goto out;

  	/* Mark transport as busy. It will remain in this state until
  	 * the provider calls svc_xprt_received. We update XPT_BUSY
  	 * atomically because it also guards against trying to enqueue
  	 * the transport twice.
  	 */
  	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
  		/* Don't enqueue transport while already enqueued */
  		dprintk("svc: transport %p busy, not enqueued\n", xprt);
  		goto out;
  	}

  	cpu = get_cpu();
  	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

  	atomic_long_inc(&pool->sp_stats.packets);

  redo_search:
  	/* find a thread for this xprt */
  	rcu_read_lock();
  	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
  		/* Do a lockless check first */
  		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
  			continue;
  
  		/*
  		 * Once the xprt has been queued, it can only be dequeued by
  		 * the task that intends to service it. All we can do at that
  		 * point is to try to wake this thread back up so that it can
  		 * do so.
  		 */
  		if (!queued) {
  			spin_lock_bh(&rqstp->rq_lock);
  			if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
  				/* already busy, move on... */
  				spin_unlock_bh(&rqstp->rq_lock);
  				continue;
  			}
  
  			/* this one will do */
  			rqstp->rq_xprt = xprt;
  			svc_xprt_get(xprt);
  			spin_unlock_bh(&rqstp->rq_lock);
  		}
  		rcu_read_unlock();
  		atomic_long_inc(&pool->sp_stats.threads_woken);
  		wake_up_process(rqstp->rq_task);
  		put_cpu();
  		goto out;
  	}
  	rcu_read_unlock();
  
  	/*
  	 * We didn't find an idle thread to use, so we need to queue the xprt.
  	 * Do so and then search again. If we find one, we can't hook this one
  	 * up to it directly but we can wake the thread up in the hopes that it
  	 * will pick it up once it searches for a xprt to service.
  	 */
  	if (!queued) {
  		queued = true;
  		dprintk("svc: transport %p put into queue\n", xprt);
  		spin_lock_bh(&pool->sp_lock);
  		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
  		pool->sp_stats.sockets_queued++;
  		spin_unlock_bh(&pool->sp_lock);
  		goto redo_search;
  	}
  	rqstp = NULL;
  	put_cpu();
  out:
  	trace_svc_xprt_do_enqueue(xprt, rqstp);
  }
  EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
  
  /*
   * Queue up a transport with data pending. If there are idle nfsd
   * processes, wake 'em up.
   *
   */
  void svc_xprt_enqueue(struct svc_xprt *xprt)
  {
  	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
  		return;
  	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
  }
  EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
  
  /*
   * Dequeue the first transport, if there is one.
   */
  static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
  {
  	struct svc_xprt	*xprt = NULL;
  
  	if (list_empty(&pool->sp_sockets))
  		goto out;

  	spin_lock_bh(&pool->sp_lock);
  	if (likely(!list_empty(&pool->sp_sockets))) {
  		xprt = list_first_entry(&pool->sp_sockets,
  					struct svc_xprt, xpt_ready);
  		list_del_init(&xprt->xpt_ready);
  		svc_xprt_get(xprt);

  		dprintk("svc: transport %p dequeued, inuse=%d\n",
  			xprt, kref_read(&xprt->xpt_ref));
  	}
  	spin_unlock_bh(&pool->sp_lock);
  out:
  	trace_svc_xprt_dequeue(xprt);
  	return xprt;
  }
  /**
   * svc_reserve - change the space reserved for the reply to a request.
   * @rqstp:  The request in question
   * @space: new max space to reserve
   *
   * Each request reserves some space on the output queue of the transport
   * to make sure the reply fits.  This function reduces that reserved
   * space to be the amount of space used already, plus @space.
   *
   */
  void svc_reserve(struct svc_rqst *rqstp, int space)
  {
  	space += rqstp->rq_res.head[0].iov_len;
  
  	if (space < rqstp->rq_reserved) {
  		struct svc_xprt *xprt = rqstp->rq_xprt;
  		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
  		rqstp->rq_reserved = space;
  
  		svc_xprt_enqueue(xprt);
  	}
  }
  EXPORT_SYMBOL_GPL(svc_reserve);
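
  /*
   * Drop everything a request holds on its transport: the received skb,
   * any deferred request, the response pages, the reserved reply space,
   * the request slot, and finally the reference on the xprt itself.
   */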
  static void svc_xprt_release(struct svc_rqst *rqstp)
  {
  	struct svc_xprt	*xprt = rqstp->rq_xprt;
  
  	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
  	kfree(rqstp->rq_deferred);
  	rqstp->rq_deferred = NULL;
  	svc_free_res_pages(rqstp);
  	rqstp->rq_res.page_len = 0;
  	rqstp->rq_res.page_base = 0;
  
  	/* Reset response buffer and release
  	 * the reservation.
  	 * But first, check that enough space was reserved
  	 * for the reply, otherwise we have a bug!
  	 */
  	if ((rqstp->rq_res.len) >  rqstp->rq_reserved)
  		printk(KERN_ERR "RPC request reserved %d but used %d\n",
  		       rqstp->rq_reserved,
  		       rqstp->rq_res.len);
  
  	rqstp->rq_res.head[0].iov_len = 0;
  	svc_reserve(rqstp, 0);
  	svc_xprt_release_slot(rqstp);
  	rqstp->rq_xprt = NULL;
  	svc_xprt_put(xprt);
  }
  
  /*
   * Some svc_serv's will have occasional work to do, even when a xprt is not
   * waiting to be serviced. This function is there to "kick" a task in one of
   * those services so that it can wake up and do that work. Note that we only
   * bother with pool 0 as we don't need to wake up more than one thread for
   * this purpose.
   */
  void svc_wake_up(struct svc_serv *serv)
  {
  	struct svc_rqst	*rqstp;
  	struct svc_pool *pool;
  	pool = &serv->sv_pools[0];
  	rcu_read_lock();
  	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
  		/* skip any that aren't queued */
  		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
  			continue;
  		rcu_read_unlock();
  		dprintk("svc: daemon %p woken up.\n", rqstp);
  		wake_up_process(rqstp->rq_task);
  		trace_svc_wake_up(rqstp->rq_task->pid);
  		return;
  	}
  	rcu_read_unlock();
  
  	/* No free entries available */
  	set_bit(SP_TASK_PENDING, &pool->sp_flags);
  	smp_wmb();
  	trace_svc_wake_up(0);
  }
  EXPORT_SYMBOL_GPL(svc_wake_up);
  
  int svc_port_is_privileged(struct sockaddr *sin)
  {
  	switch (sin->sa_family) {
  	case AF_INET:
  		return ntohs(((struct sockaddr_in *)sin)->sin_port)
  			< PROT_SOCK;
  	case AF_INET6:
  		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
  			< PROT_SOCK;
  	default:
  		return 0;
  	}
  }
  
  /*
   * Make sure that we don't have too many active connections. If we have,
   * something must be dropped. It's not clear what will happen if we allow
   * "too many" connections, but when dealing with network-facing software,
   * we have to code defensively. Here we do that by imposing hard limits.
   *
   * There's no point in trying to do random drop here for DoS
   * prevention. The NFS client does 1 reconnect in 15 seconds. An
   * attacker can easily beat that.
   *
   * The only somewhat efficient mechanism would be to drop old
   * connections from the same IP first. But right now we don't even
   * record the client IP in svc_sock.
   *
   * single-threaded services that expect a lot of clients will probably
   * need to set sv_maxconn to override the default value which is based
   * on the number of threads
   */
  static void svc_check_conn_limits(struct svc_serv *serv)
  {
  	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
  				(serv->sv_nrthreads+3) * 20;
  
  	if (serv->sv_tmpcnt > limit) {
  		struct svc_xprt *xprt = NULL;
  		spin_lock_bh(&serv->sv_lock);
  		if (!list_empty(&serv->sv_tempsocks)) {
  			/* Try to help the admin */
  			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
  					       serv->sv_name, serv->sv_maxconn ?
  					       "max number of connections" :
  					       "number of threads");
  			/*
  			 * Always select the oldest connection. It's not fair,
  			 * but so is life
  			 */
  			xprt = list_entry(serv->sv_tempsocks.prev,
  					  struct svc_xprt,
  					  xpt_list);
  			set_bit(XPT_CLOSE, &xprt->xpt_flags);
  			svc_xprt_get(xprt);
  		}
  		spin_unlock_bh(&serv->sv_lock);
  
  		if (xprt) {
  			svc_xprt_enqueue(xprt);
  			svc_xprt_put(xprt);
  		}
  	}
  }
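
  /*
   * Allocate the pages backing rq_arg for the next request, sleeping
   * briefly (and checking for signals/kthread_stop) when allocation
   * fails, then point rq_arg's head, pages and tail at them.
   */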
  static int svc_alloc_arg(struct svc_rqst *rqstp)
  {
  	struct svc_serv *serv = rqstp->rq_server;
  	struct xdr_buf *arg;
  	int pages;
  	int i;
  
  	/* now allocate needed pages.  If we get a failure, sleep briefly */
  	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
  	if (pages > RPCSVC_MAXPAGES) {
  		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
  			     pages, RPCSVC_MAXPAGES);
  		/* use as many pages as possible */
  		pages = RPCSVC_MAXPAGES;
  	}
  	for (i = 0; i < pages ; i++)
  		while (rqstp->rq_pages[i] == NULL) {
  			struct page *p = alloc_page(GFP_KERNEL);
  			if (!p) {
  				set_current_state(TASK_INTERRUPTIBLE);
  				if (signalled() || kthread_should_stop()) {
  					set_current_state(TASK_RUNNING);
  					return -EINTR;
  				}
  				schedule_timeout(msecs_to_jiffies(500));
  			}
  			rqstp->rq_pages[i] = p;
  		}
  	rqstp->rq_page_end = &rqstp->rq_pages[i];
  	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
  
  	/* Make arg->head point to first page and arg->pages point to rest */
  	arg = &rqstp->rq_arg;
  	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
  	arg->head[0].iov_len = PAGE_SIZE;
  	arg->pages = rqstp->rq_pages + 1;
  	arg->page_base = 0;
  	/* save at least one page for response */
  	arg->page_len = (pages-2)*PAGE_SIZE;
  	arg->len = (pages-1)*PAGE_SIZE;
  	arg->tail[0].iov_len = 0;
  	return 0;
  }

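  /*
   * A thread may go to sleep only if it has not been poked via
   * svc_wake_up(), no transport is queued on its pool, and we are
   * neither shutting down nor freezing.
   */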
  static bool
  rqst_should_sleep(struct svc_rqst *rqstp)
  {
  	struct svc_pool		*pool = rqstp->rq_pool;
  
  	/* did someone call svc_wake_up? */
  	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
  		return false;
  
  	/* was a socket queued? */
  	if (!list_empty(&pool->sp_sockets))
  		return false;
  
  	/* are we shutting down? */
  	if (signalled() || kthread_should_stop())
  		return false;
  
  	/* are we freezing? */
  	if (freezing(current))
  		return false;
  
  	return true;
  }
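
  /*
   * Find the next transport for this thread to service: take one off the
   * pool's queue if available, otherwise sleep for at most @timeout and
   * wait for svc_xprt_do_enqueue() to hand us one directly.
   */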
  static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
  {
  	struct svc_xprt *xprt;
  	struct svc_pool		*pool = rqstp->rq_pool;
  	long			time_left = 0;

  	/* rq_xprt should be clear on entry */
  	WARN_ON_ONCE(rqstp->rq_xprt);
  	/* Normally we will wait up to 5 seconds for any required
  	 * cache information to be provided.
  	 */
  	rqstp->rq_chandle.thread_wait = 5*HZ;
  	xprt = svc_xprt_dequeue(pool);
  	if (xprt) {
  		rqstp->rq_xprt = xprt;

  		/* As there is a shortage of threads and this request
  		 * had to be queued, don't allow the thread to wait so
  		 * long for cache updates.
  		 */
  		rqstp->rq_chandle.thread_wait = 1*HZ;
  		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
  		return xprt;
  	}

  	/*
  	 * We have to be able to interrupt this wait
  	 * to bring down the daemons ...
  	 */
  	set_current_state(TASK_INTERRUPTIBLE);
  	clear_bit(RQ_BUSY, &rqstp->rq_flags);
  	smp_mb();

  	if (likely(rqst_should_sleep(rqstp)))
  		time_left = schedule_timeout(timeout);
  	else
  		__set_current_state(TASK_RUNNING);

  	try_to_freeze();

  	spin_lock_bh(&rqstp->rq_lock);
  	set_bit(RQ_BUSY, &rqstp->rq_flags);
  	spin_unlock_bh(&rqstp->rq_lock);

  	xprt = rqstp->rq_xprt;
  	if (xprt != NULL)
  		return xprt;
  
  	if (!time_left)
  		atomic_long_inc(&pool->sp_stats.threads_timedout);
  
  	if (signalled() || kthread_should_stop())
  		return ERR_PTR(-EINTR);
  	return ERR_PTR(-EAGAIN);
  }
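
  /*
   * Add a freshly accepted connection to sv_tempsocks, arm the ageing
   * timer on first use, and let svc_xprt_received() clear XPT_BUSY so
   * the new transport can be serviced.
   */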
  static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
  {
  	spin_lock_bh(&serv->sv_lock);
  	set_bit(XPT_TEMP, &newxpt->xpt_flags);
  	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
  	serv->sv_tmpcnt++;
  	if (serv->sv_temptimer.function == NULL) {
  		/* setup timer to age temp transports */
  		setup_timer(&serv->sv_temptimer, svc_age_temp_xprts,
  			    (unsigned long)serv);
  		mod_timer(&serv->sv_temptimer,
  			  jiffies + svc_conn_age_period * HZ);
  	}
  	spin_unlock_bh(&serv->sv_lock);
  	svc_xprt_received(newxpt);
  }
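
  /*
   * Per-transport work for one pass of svc_recv(): delete a transport
   * marked for closing, accept on a listener, or receive a (possibly
   * deferred) request, then clear XPT_BUSY via svc_xprt_received().
   */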
  static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
  {
  	struct svc_serv *serv = rqstp->rq_server;
  	int len = 0;

  	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
  		dprintk("svc_recv: found XPT_CLOSE\n");
  		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
  			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
  		svc_delete_xprt(xprt);
  		/* Leave XPT_BUSY set on the dead xprt: */
  		goto out;
  	}
  	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
  		struct svc_xprt *newxpt;
  		/*
  		 * We know this module_get will succeed because the
  		 * listener holds a reference too
  		 */
  		__module_get(xprt->xpt_class->xcl_owner);
  		svc_check_conn_limits(xprt->xpt_server);
  		newxpt = xprt->xpt_ops->xpo_accept(xprt);
  		if (newxpt)
  			svc_add_new_temp_xprt(serv, newxpt);
  		else
  			module_put(xprt->xpt_class->xcl_owner);
  	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
  		/* XPT_DATA|XPT_DEFERRED case: */
  		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
  			rqstp, rqstp->rq_pool->sp_id, xprt,
  			kref_read(&xprt->xpt_ref));
  		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
  		if (rqstp->rq_deferred)
  			len = svc_deferred_recv(rqstp);
  		else
  			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
  		dprintk("svc: got len=%d\n", len);
  		rqstp->rq_reserved = serv->sv_max_mesg;
  		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
  	}
  	/* clear XPT_BUSY: */
  	svc_xprt_received(xprt);
  out:
  	trace_svc_handle_xprt(xprt, len);
  	return len;
  }
  
  /*
   * Receive the next request on any transport.  This code is carefully
   * organised not to touch any cachelines in the shared svc_serv
   * structure, only cachelines in the local svc_pool.
   */
  int svc_recv(struct svc_rqst *rqstp, long timeout)
  {
  	struct svc_xprt		*xprt = NULL;
  	struct svc_serv		*serv = rqstp->rq_server;
  	int			len, err;
  
  	dprintk("svc: server %p waiting for data (to = %ld)\n",
  		rqstp, timeout);
  
  	if (rqstp->rq_xprt)
  		printk(KERN_ERR
  			"svc_recv: service %p, transport not NULL!\n",
  			 rqstp);

  	err = svc_alloc_arg(rqstp);
  	if (err)
  		goto out;
  
  	try_to_freeze();
  	cond_resched();
  	err = -EINTR;
  	if (signalled() || kthread_should_stop())
  		goto out;
  
  	xprt = svc_get_next_xprt(rqstp, timeout);
  	if (IS_ERR(xprt)) {
  		err = PTR_ERR(xprt);
  		goto out;
  	}
  
  	len = svc_handle_xprt(rqstp, xprt);
  
  	/* No data, incomplete (TCP) read, or accept() */
  	err = -EAGAIN;
  	if (len <= 0)
  		goto out_release;

  	clear_bit(XPT_OLD, &xprt->xpt_flags);
  	if (xprt->xpt_ops->xpo_secure_port(rqstp))
  		set_bit(RQ_SECURE, &rqstp->rq_flags);
  	else
  		clear_bit(RQ_SECURE, &rqstp->rq_flags);
  	rqstp->rq_chandle.defer = svc_defer;
  	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);
  
  	if (serv->sv_stats)
  		serv->sv_stats->netcnt++;
  	trace_svc_recv(rqstp, len);
  	return len;
  out_release:
  	rqstp->rq_res.len = 0;
  	svc_xprt_release(rqstp);
  out:
  	trace_svc_recv(rqstp, err);
  	return err;
  }
  EXPORT_SYMBOL_GPL(svc_recv);
  
  /*
   * Drop request
   */
  void svc_drop(struct svc_rqst *rqstp)
  {
  	trace_svc_drop(rqstp);
  	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
  	svc_xprt_release(rqstp);
  }
  EXPORT_SYMBOL_GPL(svc_drop);
  
  /*
   * Return reply to client.
   */
  int svc_send(struct svc_rqst *rqstp)
  {
  	struct svc_xprt	*xprt;
  	int		len = -EFAULT;
  	struct xdr_buf	*xb;
  
  	xprt = rqstp->rq_xprt;
  	if (!xprt)
  		goto out;
  
  	/* release the receive skb before sending the reply */
  	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
  
  	/* calculate over-all length */
  	xb = &rqstp->rq_res;
  	xb->len = xb->head[0].iov_len +
  		xb->page_len +
  		xb->tail[0].iov_len;
  
  	/* Grab mutex to serialize outgoing data. */
  	mutex_lock(&xprt->xpt_mutex);
  	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
  			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
  		len = -ENOTCONN;
  	else
  		len = xprt->xpt_ops->xpo_sendto(rqstp);
  	mutex_unlock(&xprt->xpt_mutex);
  	rpc_wake_up(&xprt->xpt_bc_pending);
  	svc_xprt_release(rqstp);
  
  	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
  		len = 0;
  out:
  	trace_svc_send(rqstp, len);
  	return len;
  }
  
  /*
   * Timer function to close old temporary transports, using
   * a mark-and-sweep algorithm.
   */
  static void svc_age_temp_xprts(unsigned long closure)
  {
  	struct svc_serv *serv = (struct svc_serv *)closure;
  	struct svc_xprt *xprt;
  	struct list_head *le, *next;
  
  	dprintk("svc_age_temp_xprts\n");
  
  	if (!spin_trylock_bh(&serv->sv_lock)) {
  		/* busy, try again 1 sec later */
  		dprintk("svc_age_temp_xprts: busy\n");
  		mod_timer(&serv->sv_temptimer, jiffies + HZ);
  		return;
  	}
  
  	list_for_each_safe(le, next, &serv->sv_tempsocks) {
  		xprt = list_entry(le, struct svc_xprt, xpt_list);
  
  		/* First time through, just mark it OLD. Second time
  		 * through, close it. */
  		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
  			continue;
  		if (kref_read(&xprt->xpt_ref) > 1 ||
  		    test_bit(XPT_BUSY, &xprt->xpt_flags))
  			continue;
  		list_del_init(le);
  		set_bit(XPT_CLOSE, &xprt->xpt_flags);
  		dprintk("queuing xprt %p for closing\n", xprt);
  
  		/* a thread will dequeue and close it soon */
  		svc_xprt_enqueue(xprt);
  	}
  	spin_unlock_bh(&serv->sv_lock);
  
  	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
  }
  /* Close temporary transports whose xpt_local matches server_addr immediately
   * instead of waiting for them to be picked up by the timer.
   *
   * This is meant to be called from a notifier_block that runs when an ip
   * address is deleted.
   */
  void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
  {
  	struct svc_xprt *xprt;
  	struct list_head *le, *next;
  	LIST_HEAD(to_be_closed);
  
  	spin_lock_bh(&serv->sv_lock);
  	list_for_each_safe(le, next, &serv->sv_tempsocks) {
  		xprt = list_entry(le, struct svc_xprt, xpt_list);
  		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
  				&xprt->xpt_local)) {
  			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
  			list_move(le, &to_be_closed);
  		}
  	}
  	spin_unlock_bh(&serv->sv_lock);
  
  	while (!list_empty(&to_be_closed)) {
  		le = to_be_closed.next;
  		list_del_init(le);
  		xprt = list_entry(le, struct svc_xprt, xpt_list);
  		set_bit(XPT_CLOSE, &xprt->xpt_flags);
  		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
  		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
  				xprt);
  		svc_xprt_enqueue(xprt);
  	}
  }
  EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
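
  /*
   * Run any callbacks registered on the transport's xpt_users list;
   * called from svc_delete_xprt() when the transport is torn down.
   */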
  static void call_xpt_users(struct svc_xprt *xprt)
  {
  	struct svc_xpt_user *u;
  
  	spin_lock(&xprt->xpt_lock);
  	while (!list_empty(&xprt->xpt_users)) {
  		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
  		list_del(&u->list);
  		u->callback(u);
  	}
  	spin_unlock(&xprt->xpt_lock);
  }
  /*
   * Remove a dead transport
   */
  static void svc_delete_xprt(struct svc_xprt *xprt)
  {
  	struct svc_serv	*serv = xprt->xpt_server;
  	struct svc_deferred_req *dr;
  
  	/* Only do this once */
  	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
  		BUG();

  	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
  	xprt->xpt_ops->xpo_detach(xprt);
  
  	spin_lock_bh(&serv->sv_lock);
  	list_del_init(&xprt->xpt_list);
  	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
  	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
  		serv->sv_tmpcnt--;
  	spin_unlock_bh(&serv->sv_lock);

  	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
  		kfree(dr);

  	call_xpt_users(xprt);
  	svc_xprt_put(xprt);
  }
  
  void svc_close_xprt(struct svc_xprt *xprt)
  {
  	set_bit(XPT_CLOSE, &xprt->xpt_flags);
  	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
  		/* someone else will have to effect the close */
  		return;
  	/*
  	 * We expect svc_close_xprt() to work even when no threads are
  	 * running (e.g., while configuring the server before starting
  	 * any threads), so if the transport isn't busy, we delete
  	 * it ourself:
  	 */
  	svc_delete_xprt(xprt);
  }
  EXPORT_SYMBOL_GPL(svc_close_xprt);

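  /*
   * Flag every transport on @xprt_list that belongs to @net for closing
   * and enqueue it; returns the number of transports flagged.
   */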
  static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
  {
  	struct svc_xprt *xprt;
  	int ret = 0;

  	spin_lock(&serv->sv_lock);
  	list_for_each_entry(xprt, xprt_list, xpt_list) {
  		if (xprt->xpt_net != net)
  			continue;
  		ret++;
  		set_bit(XPT_CLOSE, &xprt->xpt_flags);
  		svc_xprt_enqueue(xprt);
  	}
  	spin_unlock(&serv->sv_lock);
  	return ret;
  }
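
  /*
   * Pull the next transport belonging to @net off any pool's ready
   * queue, or return NULL if none remain.
   */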
  static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
  {
  	struct svc_pool *pool;
  	struct svc_xprt *xprt;
  	struct svc_xprt *tmp;
  	int i;
  	for (i = 0; i < serv->sv_nrpools; i++) {
  		pool = &serv->sv_pools[i];
  
  		spin_lock_bh(&pool->sp_lock);
  		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
  			if (xprt->xpt_net != net)
  				continue;
  			list_del_init(&xprt->xpt_ready);
  			spin_unlock_bh(&pool->sp_lock);
  			return xprt;
  		}
  		spin_unlock_bh(&pool->sp_lock);
  	}
  	return NULL;
  }
  static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
  {
  	struct svc_xprt *xprt;

  	while ((xprt = svc_dequeue_net(serv, net))) {
  		set_bit(XPT_CLOSE, &xprt->xpt_flags);
  		svc_delete_xprt(xprt);
  	}
  }
  /*
   * Server threads may still be running (especially in the case where the
   * service is still running in other network namespaces).
   *
   * So we shut down sockets the same way we would on a running server, by
   * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
   * the close.  In the case there are no such other threads
   * running, svc_clean_up_xprts() does a simple version of a
   * server's main event loop, and in the case where there are other
   * threads, we may need to wait a little while and then check again to
   * see if they're done.
   */
  void svc_close_net(struct svc_serv *serv, struct net *net)
  {
  	int delay = 0;

  	while (svc_close_list(serv, &serv->sv_permsocks, net) +
  	       svc_close_list(serv, &serv->sv_tempsocks, net)) {
  
  		svc_clean_up_xprts(serv, net);
  		msleep(delay++);
  	}
  }
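
  /*
   * Illustrative sketch only (not part of this file): how a per-network-
   * namespace teardown path might drive svc_close_net().  The wrapper
   * name is hypothetical; real callers pass the svc_serv they obtained
   * from their own pernet data for @net.
   */
  static inline void example_shut_down_rpc_service(struct svc_serv *serv,
  						 struct net *net)
  {
  	/*
  	 * svc_close_net() marks every permanent and temporary transport
  	 * owned by @net with XPT_CLOSE and keeps retrying (with an
  	 * increasing msleep() delay) until none remain on the lists.
  	 */
  	svc_close_net(serv, net);
  }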
  
  /*
   * Handle defer and revisit of requests
   */
  
  static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
  {
  	struct svc_deferred_req *dr =
  		container_of(dreq, struct svc_deferred_req, handle);
  	struct svc_xprt *xprt = dr->xprt;
22945e4a1   Tom Tucker   svc: Clean up def...
1162
1163
1164
1165
1166
1167
  	spin_lock(&xprt->xpt_lock);
  	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
  	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
  		spin_unlock(&xprt->xpt_lock);
  		dprintk("revisit canceled\n");
0f0257eaa   Tom Tucker   svc: Move the xpr...
1168
  		svc_xprt_put(xprt);
104f6351f   Trond Myklebust   SUNRPC: Add trace...
1169
  		trace_svc_drop_deferred(dr);
0f0257eaa   Tom Tucker   svc: Move the xpr...
1170
1171
1172
1173
1174
1175
  		kfree(dr);
  		return;
  	}
  	dprintk("revisit queued\n");
  	dr->xprt = NULL;
0f0257eaa   Tom Tucker   svc: Move the xpr...
1176
1177
  	list_add(&dr->handle.recent, &xprt->xpt_deferred);
  	spin_unlock(&xprt->xpt_lock);
0f0257eaa   Tom Tucker   svc: Move the xpr...
1178
1179
1180
  	svc_xprt_enqueue(xprt);
  	svc_xprt_put(xprt);
  }
260c1d129   Tom Tucker   svc: Add transpor...
1181
1182
1183
1184
1185
1186
1187
1188
1189
  /*
   * Save the request off for later processing. The request buffer looks
   * like this:
   *
   * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
   *
   * This code can only handle requests that consist of an xprt-header
   * and rpc-header.
   */
0f0257eaa   Tom Tucker   svc: Move the xpr...
1190
1191
1192
  static struct cache_deferred_req *svc_defer(struct cache_req *req)
  {
  	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
0f0257eaa   Tom Tucker   svc: Move the xpr...
1193
  	struct svc_deferred_req *dr;
30660e04b   Jeff Layton   sunrpc: move rq_u...
1194
  	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
0f0257eaa   Tom Tucker   svc: Move the xpr...
1195
1196
1197
1198
1199
  		return NULL; /* if more than a page, give up FIXME */
  	if (rqstp->rq_deferred) {
  		dr = rqstp->rq_deferred;
  		rqstp->rq_deferred = NULL;
  	} else {
260c1d129   Tom Tucker   svc: Add transpor...
1200
1201
  		size_t skip;
  		size_t size;
0f0257eaa   Tom Tucker   svc: Move the xpr...
1202
  		/* FIXME maybe discard if size too large */
260c1d129   Tom Tucker   svc: Add transpor...
1203
  		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
0f0257eaa   Tom Tucker   svc: Move the xpr...
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
  		dr = kmalloc(size, GFP_KERNEL);
  		if (dr == NULL)
  			return NULL;
  
  		dr->handle.owner = rqstp->rq_server;
  		dr->prot = rqstp->rq_prot;
  		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
  		dr->addrlen = rqstp->rq_addrlen;
  		dr->daddr = rqstp->rq_daddr;
  		dr->argslen = rqstp->rq_arg.len >> 2;
260c1d129   Tom Tucker   svc: Add transpor...
1214
1215
1216
1217
1218
1219
  		dr->xprt_hlen = rqstp->rq_xprt_hlen;
  
  		/* back up head to the start of the buffer and copy */
  		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
  		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
  		       dr->argslen << 2);
0f0257eaa   Tom Tucker   svc: Move the xpr...
1220
1221
1222
  	}
  	svc_xprt_get(rqstp->rq_xprt);
  	dr->xprt = rqstp->rq_xprt;
78b65eb3f   Jeff Layton   sunrpc: move rq_d...
1223
  	set_bit(RQ_DROPME, &rqstp->rq_flags);
0f0257eaa   Tom Tucker   svc: Move the xpr...
1224
1225
  
  	dr->handle.revisit = svc_revisit;
104f6351f   Trond Myklebust   SUNRPC: Add trace...
1226
  	trace_svc_defer(rqstp);
0f0257eaa   Tom Tucker   svc: Move the xpr...
1227
1228
1229
1230
1231
1232
1233
1234
1235
  	return &dr->handle;
  }
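
  /*
   * Worked example of the copy arithmetic above (the numbers are
   * hypothetical, for illustration only).  Suppose the transport header
   * is 24 bytes and the RPC header occupies 96 bytes, all held in
   * rq_arg.head[0]:
   *
   *	rq_arg.len             = 120  (transport header + RPC header)
   *	rq_arg.head[0].iov_len =  96  (iov_base points past the header)
   *	skip                   =  24  (120 - 96, backs up over the header)
   *	dr->argslen            =  30  (120 >> 2, length in words)
   *
   * The memcpy() then copies 30 << 2 = 120 bytes starting 24 bytes before
   * iov_base, so dr->args ends up holding <xprt-header><rpc-header>.  On
   * replay, svc_deferred_recv() below rebuilds head[0] as
   * dr->args + (24 >> 2) with length 120 - 24 = 96 bytes.
   */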
  
  /*
   * recv data from a deferred request into an active one
   */
  static int svc_deferred_recv(struct svc_rqst *rqstp)
  {
  	struct svc_deferred_req *dr = rqstp->rq_deferred;
260c1d129   Tom Tucker   svc: Add transpor...
1236
1237
1238
1239
  	/* setup iov_base past transport header */
  	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
  	/* The iov_len does not include the transport header bytes */
  	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
0f0257eaa   Tom Tucker   svc: Move the xpr...
1240
  	rqstp->rq_arg.page_len = 0;
260c1d129   Tom Tucker   svc: Add transpor...
1241
1242
  	/* The rq_arg.len includes the transport header bytes */
  	rqstp->rq_arg.len     = dr->argslen<<2;
0f0257eaa   Tom Tucker   svc: Move the xpr...
1243
1244
1245
  	rqstp->rq_prot        = dr->prot;
  	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
  	rqstp->rq_addrlen     = dr->addrlen;
260c1d129   Tom Tucker   svc: Add transpor...
1246
1247
  	/* Save off transport header len in case we get deferred again */
  	rqstp->rq_xprt_hlen   = dr->xprt_hlen;
0f0257eaa   Tom Tucker   svc: Move the xpr...
1248
1249
  	rqstp->rq_daddr       = dr->daddr;
  	rqstp->rq_respages    = rqstp->rq_pages;
260c1d129   Tom Tucker   svc: Add transpor...
1250
  	return (dr->argslen<<2) - dr->xprt_hlen;
0f0257eaa   Tom Tucker   svc: Move the xpr...
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
  }
  
  
  static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
  {
  	struct svc_deferred_req *dr = NULL;
  
  	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
  		return NULL;
  	spin_lock(&xprt->xpt_lock);
0f0257eaa   Tom Tucker   svc: Move the xpr...
1261
1262
1263
1264
1265
  	if (!list_empty(&xprt->xpt_deferred)) {
  		dr = list_entry(xprt->xpt_deferred.next,
  				struct svc_deferred_req,
  				handle.recent);
  		list_del_init(&dr->handle.recent);
104f6351f   Trond Myklebust   SUNRPC: Add trace...
1266
  		trace_svc_revisit_deferred(dr);
62bac4af3   J. Bruce Fields   svcrpc: don't set...
1267
1268
  	} else
  		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
0f0257eaa   Tom Tucker   svc: Move the xpr...
1269
1270
1271
  	spin_unlock(&xprt->xpt_lock);
  	return dr;
  }
7fcb98d58   Tom Tucker   svc: Add svc API ...
1272

156e62094   Chuck Lever   SUNRPC: Clean up ...
1273
1274
1275
1276
  /**
   * svc_find_xprt - find an RPC transport instance
   * @serv: pointer to svc_serv to search
   * @xcl_name: C string containing transport's class name
4cb54ca20   Stanislav Kinsbursky   SUNRPC: search fo...
1277
   * @net: owner net pointer
156e62094   Chuck Lever   SUNRPC: Clean up ...
1278
1279
1280
   * @af: Address family of transport's local address
   * @port: transport's IP port number
   *
7fcb98d58   Tom Tucker   svc: Add svc API ...
1281
1282
1283
1284
1285
1286
1287
1288
   * Return the transport instance pointer for the endpoint accepting
   * connections/peer traffic from the specified transport class,
   * address family and port.
   *
   * Specifying 0 for the address family or port is effectively a
   * wild-card, and will result in matching the first transport in the
   * service's list that has a matching class name.
   */
156e62094   Chuck Lever   SUNRPC: Clean up ...
1289
  struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
4cb54ca20   Stanislav Kinsbursky   SUNRPC: search fo...
1290
1291
  			       struct net *net, const sa_family_t af,
  			       const unsigned short port)
7fcb98d58   Tom Tucker   svc: Add svc API ...
1292
1293
1294
1295
1296
  {
  	struct svc_xprt *xprt;
  	struct svc_xprt *found = NULL;
  
  	/* Sanity check the args */
156e62094   Chuck Lever   SUNRPC: Clean up ...
1297
  	if (serv == NULL || xcl_name == NULL)
7fcb98d58   Tom Tucker   svc: Add svc API ...
1298
1299
1300
1301
  		return found;
  
  	spin_lock_bh(&serv->sv_lock);
  	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
4cb54ca20   Stanislav Kinsbursky   SUNRPC: search fo...
1302
1303
  		if (xprt->xpt_net != net)
  			continue;
7fcb98d58   Tom Tucker   svc: Add svc API ...
1304
1305
1306
1307
  		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
  			continue;
  		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
  			continue;
156e62094   Chuck Lever   SUNRPC: Clean up ...
1308
  		if (port != 0 && port != svc_xprt_local_port(xprt))
7fcb98d58   Tom Tucker   svc: Add svc API ...
1309
1310
  			continue;
  		found = xprt;
a217813f9   Tom Tucker   knfsd: Support ad...
1311
  		svc_xprt_get(xprt);
7fcb98d58   Tom Tucker   svc: Add svc API ...
1312
1313
1314
1315
1316
1317
  		break;
  	}
  	spin_unlock_bh(&serv->sv_lock);
  	return found;
  }
  EXPORT_SYMBOL_GPL(svc_find_xprt);
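
  /*
   * Illustrative sketch only (not part of this file): looking up a
   * service's TCP listener in a given namespace with svc_find_xprt().
   * The wrapper name is hypothetical; the lookup and reference handling
   * follow the implementation above.
   */
  static inline unsigned short example_tcp_listener_port(struct svc_serv *serv,
  							struct net *net)
  {
  	struct svc_xprt *xprt;
  	unsigned short port = 0;

  	/* af == AF_UNSPEC and port == 0 act as wild-cards */
  	xprt = svc_find_xprt(serv, "tcp", net, AF_UNSPEC, 0);
  	if (xprt) {
  		port = svc_xprt_local_port(xprt);
  		/* svc_find_xprt() took a reference; drop it when done */
  		svc_xprt_put(xprt);
  	}
  	return port;
  }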
9571af18f   Tom Tucker   svc: Add svc_xprt...
1318

335c54bdc   Chuck Lever   NFSD: Prevent a b...
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
  static int svc_one_xprt_name(const struct svc_xprt *xprt,
  			     char *pos, int remaining)
  {
  	int len;
  
  	len = snprintf(pos, remaining, "%s %u\n",
  			xprt->xpt_class->xcl_name,
  			svc_xprt_local_port(xprt));
  	if (len >= remaining)
  		return -ENAMETOOLONG;
  	return len;
  }
  
  /**
   * svc_xprt_names - format a buffer with a list of transport names
   * @serv: pointer to an RPC service
   * @buf: pointer to a buffer to be filled in
   * @buflen: length of buffer to be filled in
   *
   * Fills in @buf with a string containing a list of transport names,
   * each name terminated with '\n'.
   *
   * Returns positive length of the filled-in string on success; otherwise
   * a negative errno value is returned if an error occurs.
9571af18f   Tom Tucker   svc: Add svc_xprt...
1345
   */
335c54bdc   Chuck Lever   NFSD: Prevent a b...
1346
  int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
9571af18f   Tom Tucker   svc: Add svc_xprt...
1347
1348
  {
  	struct svc_xprt *xprt;
335c54bdc   Chuck Lever   NFSD: Prevent a b...
1349
1350
  	int len, totlen;
  	char *pos;
9571af18f   Tom Tucker   svc: Add svc_xprt...
1351
1352
1353
1354
1355
1356
  
  	/* Sanity check args */
  	if (!serv)
  		return 0;
  
  	spin_lock_bh(&serv->sv_lock);
335c54bdc   Chuck Lever   NFSD: Prevent a b...
1357
1358
1359
  
  	pos = buf;
  	totlen = 0;
9571af18f   Tom Tucker   svc: Add svc_xprt...
1360
  	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
335c54bdc   Chuck Lever   NFSD: Prevent a b...
1361
1362
1363
1364
1365
1366
  		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
  		if (len < 0) {
  			*buf = '\0';
  			totlen = len;
  		}
  		if (len <= 0)
9571af18f   Tom Tucker   svc: Add svc_xprt...
1367
  			break;
335c54bdc   Chuck Lever   NFSD: Prevent a b...
1368
1369
  
  		pos += len;
9571af18f   Tom Tucker   svc: Add svc_xprt...
1370
1371
  		totlen += len;
  	}
335c54bdc   Chuck Lever   NFSD: Prevent a b...
1372

9571af18f   Tom Tucker   svc: Add svc_xprt...
1373
1374
1375
1376
  	spin_unlock_bh(&serv->sv_lock);
  	return totlen;
  }
  EXPORT_SYMBOL_GPL(svc_xprt_names);
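
  /*
   * Illustrative sketch only (not part of this file): dumping the
   * permanent transport list into a small on-stack buffer.  The buffer
   * size and wrapper name are hypothetical.  For a server listening on
   * TCP and UDP port 2049 the buffer might contain "tcp 2049\nudp 2049\n"
   * and the return value would be the length of that string.
   */
  static inline int example_dump_xprt_names(struct svc_serv *serv)
  {
  	char buf[128];
  	int len;

  	len = svc_xprt_names(serv, buf, sizeof(buf));
  	if (len < 0)	/* e.g. -ENAMETOOLONG if buf is too small */
  		return len;
  	pr_info("%s", buf);
  	return len;
  }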
03cf6c9f4   Greg Banks   knfsd: add file t...
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
  
  
  /*----------------------------------------------------------------------------*/
  
  static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
  {
  	unsigned int pidx = (unsigned int)*pos;
  	struct svc_serv *serv = m->private;
  
  	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
03cf6c9f4   Greg Banks   knfsd: add file t...
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
  	if (!pidx)
  		return SEQ_START_TOKEN;
  	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
  }
  
  static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	struct svc_pool *pool = p;
  	struct svc_serv *serv = m->private;
  
  	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
  
  	if (p == SEQ_START_TOKEN) {
  		pool = &serv->sv_pools[0];
  	} else {
  		unsigned int pidx = (pool - &serv->sv_pools[0]);
  		if (pidx < serv->sv_nrpools-1)
  			pool = &serv->sv_pools[pidx+1];
  		else
  			pool = NULL;
  	}
  	++*pos;
  	return pool;
  }
  
  static void svc_pool_stats_stop(struct seq_file *m, void *p)
  {
03cf6c9f4   Greg Banks   knfsd: add file t...
1416
1417
1418
1419
1420
1421
1422
  }
  
  static int svc_pool_stats_show(struct seq_file *m, void *p)
  {
  	struct svc_pool *pool = p;
  
  	if (p == SEQ_START_TOKEN) {
78c210efd   J. Bruce Fields   Revert "knfsd: av...
1423
1424
  		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
03cf6c9f4   Greg Banks   knfsd: add file t...
1425
1426
  		return 0;
  	}
78c210efd   J. Bruce Fields   Revert "knfsd: av...
1427
1428
  	seq_printf(m, "%u %lu %lu %lu %lu\n",
03cf6c9f4   Greg Banks   knfsd: add file t...
1429
  		pool->sp_id,
403c7b444   Jeff Layton   sunrpc: fix poten...
1430
  		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
03cf6c9f4   Greg Banks   knfsd: add file t...
1431
  		pool->sp_stats.sockets_queued,
403c7b444   Jeff Layton   sunrpc: fix poten...
1432
1433
  		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
  		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
03cf6c9f4   Greg Banks   knfsd: add file t...
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
  
  	return 0;
  }
  
  static const struct seq_operations svc_pool_stats_seq_ops = {
  	.start	= svc_pool_stats_start,
  	.next	= svc_pool_stats_next,
  	.stop	= svc_pool_stats_stop,
  	.show	= svc_pool_stats_show,
  };
  
  int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
  {
  	int err;
  
  	err = seq_open(file, &svc_pool_stats_seq_ops);
  	if (!err)
  		((struct seq_file *) file->private_data)->private = serv;
  	return err;
  }
  EXPORT_SYMBOL(svc_pool_stats_open);
  
  /*----------------------------------------------------------------------------*/