Commit c36adb2a7f9132b37d4b669b2e2c04e46d5188b2

Authored by Tom Tucker
Committed by J. Bruce Fields
1 parent eab996d4ac

svc: Make svc_recv transport neutral

All of the transport fields and functions used by svc_recv are now
transport independent. Change the svc_recv function to use the svc_xprt
structure directly instead of the transport specific svc_sock structure.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>

Showing 1 changed file with 32 additions and 31 deletions Side-by-side Diff

net/sunrpc/svcsock.c
... ... @@ -310,22 +310,21 @@
310 310 /*
311 311 * Dequeue the first socket. Must be called with the pool->sp_lock held.
312 312 */
313   -static inline struct svc_sock *
314   -svc_sock_dequeue(struct svc_pool *pool)
  313 +static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
315 314 {
316   - struct svc_sock *svsk;
  315 + struct svc_xprt *xprt;
317 316  
318 317 if (list_empty(&pool->sp_sockets))
319 318 return NULL;
320 319  
321   - svsk = list_entry(pool->sp_sockets.next,
322   - struct svc_sock, sk_xprt.xpt_ready);
323   - list_del_init(&svsk->sk_xprt.xpt_ready);
  320 + xprt = list_entry(pool->sp_sockets.next,
  321 + struct svc_xprt, xpt_ready);
  322 + list_del_init(&xprt->xpt_ready);
324 323  
325   - dprintk("svc: socket %p dequeued, inuse=%d\n",
326   - svsk->sk_sk, atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
  324 + dprintk("svc: transport %p dequeued, inuse=%d\n",
  325 + xprt, atomic_read(&xprt->xpt_ref.refcount));
327 326  
328   - return svsk;
  327 + return xprt;
329 328 }
330 329  
331 330 /*
332 331  
333 332  
334 333  
... ... @@ -1475,20 +1474,20 @@
1475 1474 int
1476 1475 svc_recv(struct svc_rqst *rqstp, long timeout)
1477 1476 {
1478   - struct svc_sock *svsk = NULL;
  1477 + struct svc_xprt *xprt = NULL;
1479 1478 struct svc_serv *serv = rqstp->rq_server;
1480 1479 struct svc_pool *pool = rqstp->rq_pool;
1481 1480 int len, i;
1482   - int pages;
  1481 + int pages;
1483 1482 struct xdr_buf *arg;
1484 1483 DECLARE_WAITQUEUE(wait, current);
1485 1484  
1486 1485 dprintk("svc: server %p waiting for data (to = %ld)\n",
1487 1486 rqstp, timeout);
1488 1487  
1489   - if (rqstp->rq_sock)
  1488 + if (rqstp->rq_xprt)
1490 1489 printk(KERN_ERR
1491   - "svc_recv: service %p, socket not NULL!\n",
  1490 + "svc_recv: service %p, transport not NULL!\n",
1492 1491 rqstp);
1493 1492 if (waitqueue_active(&rqstp->rq_wait))
1494 1493 printk(KERN_ERR
1495 1494  
... ... @@ -1525,11 +1524,12 @@
1525 1524 return -EINTR;
1526 1525  
1527 1526 spin_lock_bh(&pool->sp_lock);
1528   - if ((svsk = svc_sock_dequeue(pool)) != NULL) {
1529   - rqstp->rq_sock = svsk;
1530   - svc_xprt_get(&svsk->sk_xprt);
  1527 + xprt = svc_xprt_dequeue(pool);
  1528 + if (xprt) {
  1529 + rqstp->rq_xprt = xprt;
  1530 + svc_xprt_get(xprt);
1531 1531 rqstp->rq_reserved = serv->sv_max_mesg;
1532   - atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
  1532 + atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
1533 1533 } else {
1534 1534 /* No data pending. Go to sleep */
1535 1535 svc_thread_enqueue(pool, rqstp);
... ... @@ -1549,7 +1549,8 @@
1549 1549 spin_lock_bh(&pool->sp_lock);
1550 1550 remove_wait_queue(&rqstp->rq_wait, &wait);
1551 1551  
1552   - if (!(svsk = rqstp->rq_sock)) {
  1552 + xprt = rqstp->rq_xprt;
  1553 + if (!xprt) {
1553 1554 svc_thread_dequeue(pool, rqstp);
1554 1555 spin_unlock_bh(&pool->sp_lock);
1555 1556 dprintk("svc: server %p, no data yet\n", rqstp);
1556 1557  
1557 1558  
1558 1559  
1559 1560  
1560 1561  
1561 1562  
1562 1563  
... ... @@ -1559,32 +1560,32 @@
1559 1560 spin_unlock_bh(&pool->sp_lock);
1560 1561  
1561 1562 len = 0;
1562   - if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
  1563 + if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
1563 1564 dprintk("svc_recv: found XPT_CLOSE\n");
1564   - svc_delete_xprt(&svsk->sk_xprt);
1565   - } else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
  1565 + svc_delete_xprt(xprt);
  1566 + } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
1566 1567 struct svc_xprt *newxpt;
1567   - newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
  1568 + newxpt = xprt->xpt_ops->xpo_accept(xprt);
1568 1569 if (newxpt) {
1569 1570 /*
1570 1571 * We know this module_get will succeed because the
1571 1572 * listener holds a reference too
1572 1573 */
1573 1574 __module_get(newxpt->xpt_class->xcl_owner);
1574   - svc_check_conn_limits(svsk->sk_xprt.xpt_server);
  1575 + svc_check_conn_limits(xprt->xpt_server);
1575 1576 svc_xprt_received(newxpt);
1576 1577 }
1577   - svc_xprt_received(&svsk->sk_xprt);
  1578 + svc_xprt_received(xprt);
1578 1579 } else {
1579   - dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
1580   - rqstp, pool->sp_id, svsk,
1581   - atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
1582   - rqstp->rq_deferred = svc_deferred_dequeue(&svsk->sk_xprt);
  1580 + dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
  1581 + rqstp, pool->sp_id, xprt,
  1582 + atomic_read(&xprt->xpt_ref.refcount));
  1583 + rqstp->rq_deferred = svc_deferred_dequeue(xprt);
1583 1584 if (rqstp->rq_deferred) {
1584   - svc_xprt_received(&svsk->sk_xprt);
  1585 + svc_xprt_received(xprt);
1585 1586 len = svc_deferred_recv(rqstp);
1586 1587 } else
1587   - len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
  1588 + len = xprt->xpt_ops->xpo_recvfrom(rqstp);
1588 1589 dprintk("svc: got len=%d\n", len);
1589 1590 }
1590 1591  
... ... @@ -1594,7 +1595,7 @@
1594 1595 svc_xprt_release(rqstp);
1595 1596 return -EAGAIN;
1596 1597 }
1597   - clear_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags);
  1598 + clear_bit(XPT_OLD, &xprt->xpt_flags);
1598 1599  
1599 1600 rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
1600 1601 rqstp->rq_chandle.defer = svc_defer;