Commit 4cfc7e6019caa3e97d2a81c48c8d575d7b38d751

Authored by Rahul Iyer
Committed by J. Bruce Fields
1 parent 6951867b99

nfsd41: sunrpc: Added rpc server-side backchannel handling

When the call direction is a reply, copy the xid and call direction into
req->rq_private_buf.head[0].iov_base; otherwise, rpc_verify_header returns
rpc_garbage.

Signed-off-by: Rahul Iyer <iyer@netapp.com>
Signed-off-by: Mike Sager <sager@netapp.com>
Signed-off-by: Marc Eshel <eshel@almaden.ibm.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Signed-off-by: Ricardo Labiaga <Ricardo.Labiaga@netapp.com>
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[get rid of CONFIG_NFSD_V4_1]
[sunrpc: refactoring of svc_tcp_recvfrom]
[nfsd41: sunrpc: create common send routine for the fore and the back channels]
[nfsd41: sunrpc: Use free_page() to free server backchannel pages]
[nfsd41: sunrpc: Document server backchannel locking]
[nfsd41: sunrpc: remove bc_connect_worker()]
[nfsd41: sunrpc: Define xprt_server_backchannel()]
[nfsd41: sunrpc: remove bc_close and bc_init_auto_disconnect dummy functions]
[nfsd41: sunrpc: eliminate unneeded switch statement in xs_setup_tcp()]
[nfsd41: sunrpc: Don't auto close the server backchannel connection]
[nfsd41: sunrpc: Remove unused functions]
Signed-off-by: Alexandros Batsakis <batsakis@netapp.com>
Signed-off-by: Ricardo Labiaga <Ricardo.Labiaga@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[nfsd41: change bc_sock to bc_xprt]
[nfsd41: sunrpc: move struct rpc_buffer def into a common header file]
[nfsd41: sunrpc: use rpc_sleep in bc_send_request so not to block on mutex]
[removed cosmetic changes]
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[sunrpc: add new xprt class for nfsv4.1 backchannel]
[sunrpc: v2.1 change handling of auto_close and init_auto_disconnect operations for the nfsv4.1 backchannel]
Signed-off-by: Alexandros Batsakis <batsakis@netapp.com>
[reverted more cosmetic leftovers]
[got rid of xprt_server_backchannel]
[separated "nfsd41: sunrpc: add new xprt class for nfsv4.1 backchannel"]
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Cc: Trond Myklebust <trond.myklebust@netapp.com>
[sunrpc: change idle timeout value for the backchannel]
Signed-off-by: Alexandros Batsakis <batsakis@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Acked-by: Trond Myklebust <trond.myklebust@netapp.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>

Showing 8 changed files with 303 additions and 39 deletions (side-by-side diff)

include/linux/sunrpc/svc_xprt.h
... ... @@ -65,6 +65,7 @@
65 65 size_t xpt_locallen; /* length of address */
66 66 struct sockaddr_storage xpt_remote; /* remote peer's address */
67 67 size_t xpt_remotelen; /* length of address */
  68 + struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
68 69 };
69 70  
70 71 int svc_reg_xprt_class(struct svc_xprt_class *);
include/linux/sunrpc/svcsock.h
... ... @@ -28,6 +28,7 @@
28 28 /* private TCP part */
29 29 u32 sk_reclen; /* length of record */
30 30 u32 sk_tcplen; /* current read length */
  31 + struct rpc_xprt *sk_bc_xprt; /* NFSv4.1 backchannel xprt */
31 32 };
32 33  
33 34 /*
include/linux/sunrpc/xprt.h
... ... @@ -179,6 +179,7 @@
179 179 spinlock_t reserve_lock; /* lock slot table */
180 180 u32 xid; /* Next XID value to use */
181 181 struct rpc_task * snd_task; /* Task blocked in send */
  182 + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
182 183 #if defined(CONFIG_NFS_V4_1)
183 184 struct svc_serv *bc_serv; /* The RPC service which will */
184 185 /* process the callback */
... ... @@ -43,5 +43,9 @@
43 43 (task->tk_msg.rpc_proc->p_decode != NULL);
44 44 }
45 45  
  46 +int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
  47 + struct page *headpage, unsigned long headoffset,
  48 + struct page *tailpage, unsigned long tailoffset);
  49 +
46 50 #endif /* _NET_SUNRPC_SUNRPC_H */
net/sunrpc/svc_xprt.c
... ... @@ -160,6 +160,7 @@
160 160 mutex_init(&xprt->xpt_mutex);
161 161 spin_lock_init(&xprt->xpt_lock);
162 162 set_bit(XPT_BUSY, &xprt->xpt_flags);
  163 + rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
163 164 }
164 165 EXPORT_SYMBOL_GPL(svc_xprt_init);
165 166  
... ... @@ -810,6 +811,7 @@
810 811 else
811 812 len = xprt->xpt_ops->xpo_sendto(rqstp);
812 813 mutex_unlock(&xprt->xpt_mutex);
  814 + rpc_wake_up(&xprt->xpt_bc_pending);
813 815 svc_xprt_release(rqstp);
814 816  
815 817 if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
net/sunrpc/svcsock.c
... ... @@ -49,6 +49,7 @@
49 49 #include <linux/sunrpc/msg_prot.h>
50 50 #include <linux/sunrpc/svcsock.h>
51 51 #include <linux/sunrpc/stats.h>
  52 +#include <linux/sunrpc/xprt.h>
52 53  
53 54 #define RPCDBG_FACILITY RPCDBG_SVCXPRT
54 55  
55 56  
56 57  
57 58  
58 59  
59 60  
... ... @@ -153,49 +154,27 @@
153 154 }
154 155  
155 156 /*
156   - * Generic sendto routine
  157 + * send routine intended to be shared by the fore- and back-channel
157 158 */
158   -static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
  159 +int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
  160 + struct page *headpage, unsigned long headoffset,
  161 + struct page *tailpage, unsigned long tailoffset)
159 162 {
160   - struct svc_sock *svsk =
161   - container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
162   - struct socket *sock = svsk->sk_sock;
163   - int slen;
164   - union {
165   - struct cmsghdr hdr;
166   - long all[SVC_PKTINFO_SPACE / sizeof(long)];
167   - } buffer;
168   - struct cmsghdr *cmh = &buffer.hdr;
169   - int len = 0;
170 163 int result;
171 164 int size;
172 165 struct page **ppage = xdr->pages;
173 166 size_t base = xdr->page_base;
174 167 unsigned int pglen = xdr->page_len;
175 168 unsigned int flags = MSG_MORE;
176   - RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
  169 + int slen;
  170 + int len = 0;
177 171  
178 172 slen = xdr->len;
179 173  
180   - if (rqstp->rq_prot == IPPROTO_UDP) {
181   - struct msghdr msg = {
182   - .msg_name = &rqstp->rq_addr,
183   - .msg_namelen = rqstp->rq_addrlen,
184   - .msg_control = cmh,
185   - .msg_controllen = sizeof(buffer),
186   - .msg_flags = MSG_MORE,
187   - };
188   -
189   - svc_set_cmsg_data(rqstp, cmh);
190   -
191   - if (sock_sendmsg(sock, &msg, 0) < 0)
192   - goto out;
193   - }
194   -
195 174 /* send head */
196 175 if (slen == xdr->head[0].iov_len)
197 176 flags = 0;
198   - len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
  177 + len = kernel_sendpage(sock, headpage, headoffset,
199 178 xdr->head[0].iov_len, flags);
200 179 if (len != xdr->head[0].iov_len)
201 180 goto out;
202 181  
203 182  
204 183  
... ... @@ -219,17 +198,59 @@
219 198 base = 0;
220 199 ppage++;
221 200 }
  201 +
222 202 /* send tail */
223 203 if (xdr->tail[0].iov_len) {
224   - result = kernel_sendpage(sock, rqstp->rq_respages[0],
225   - ((unsigned long)xdr->tail[0].iov_base)
226   - & (PAGE_SIZE-1),
227   - xdr->tail[0].iov_len, 0);
228   -
  204 + result = kernel_sendpage(sock, tailpage, tailoffset,
  205 + xdr->tail[0].iov_len, 0);
229 206 if (result > 0)
230 207 len += result;
231 208 }
  209 +
232 210 out:
  211 + return len;
  212 +}
  213 +
  214 +
  215 +/*
  216 + * Generic sendto routine
  217 + */
  218 +static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
  219 +{
  220 + struct svc_sock *svsk =
  221 + container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
  222 + struct socket *sock = svsk->sk_sock;
  223 + union {
  224 + struct cmsghdr hdr;
  225 + long all[SVC_PKTINFO_SPACE / sizeof(long)];
  226 + } buffer;
  227 + struct cmsghdr *cmh = &buffer.hdr;
  228 + int len = 0;
  229 + unsigned long tailoff;
  230 + unsigned long headoff;
  231 + RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
  232 +
  233 + if (rqstp->rq_prot == IPPROTO_UDP) {
  234 + struct msghdr msg = {
  235 + .msg_name = &rqstp->rq_addr,
  236 + .msg_namelen = rqstp->rq_addrlen,
  237 + .msg_control = cmh,
  238 + .msg_controllen = sizeof(buffer),
  239 + .msg_flags = MSG_MORE,
  240 + };
  241 +
  242 + svc_set_cmsg_data(rqstp, cmh);
  243 +
  244 + if (sock_sendmsg(sock, &msg, 0) < 0)
  245 + goto out;
  246 + }
  247 +
  248 + tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1);
  249 + headoff = 0;
  250 + len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff,
  251 + rqstp->rq_respages[0], tailoff);
  252 +
  253 +out:
233 254 dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
234 255 svsk, xdr->head[0].iov_base, xdr->head[0].iov_len,
235 256 xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));
... ... @@ -951,6 +972,57 @@
951 972 return -EAGAIN;
952 973 }
953 974  
  975 +static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
  976 + struct rpc_rqst **reqpp, struct kvec *vec)
  977 +{
  978 + struct rpc_rqst *req = NULL;
  979 + u32 *p;
  980 + u32 xid;
  981 + u32 calldir;
  982 + int len;
  983 +
  984 + len = svc_recvfrom(rqstp, vec, 1, 8);
  985 + if (len < 0)
  986 + goto error;
  987 +
  988 + p = (u32 *)rqstp->rq_arg.head[0].iov_base;
  989 + xid = *p++;
  990 + calldir = *p;
  991 +
  992 + if (calldir == 0) {
  993 + /* REQUEST is the most common case */
  994 + vec[0] = rqstp->rq_arg.head[0];
  995 + } else {
  996 + /* REPLY */
  997 + if (svsk->sk_bc_xprt)
  998 + req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
  999 +
  1000 + if (!req) {
  1001 + printk(KERN_NOTICE
  1002 + "%s: Got unrecognized reply: "
  1003 + "calldir 0x%x sk_bc_xprt %p xid %08x\n",
  1004 + __func__, ntohl(calldir),
  1005 + svsk->sk_bc_xprt, xid);
  1006 + vec[0] = rqstp->rq_arg.head[0];
  1007 + goto out;
  1008 + }
  1009 +
  1010 + memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
  1011 + sizeof(struct xdr_buf));
  1012 + /* copy the xid and call direction */
  1013 + memcpy(req->rq_private_buf.head[0].iov_base,
  1014 + rqstp->rq_arg.head[0].iov_base, 8);
  1015 + vec[0] = req->rq_private_buf.head[0];
  1016 + }
  1017 + out:
  1018 + vec[0].iov_base += 8;
  1019 + vec[0].iov_len -= 8;
  1020 + len = svsk->sk_reclen - 8;
  1021 + error:
  1022 + *reqpp = req;
  1023 + return len;
  1024 +}
  1025 +
954 1026 /*
955 1027 * Receive data from a TCP socket.
956 1028 */
... ... @@ -962,6 +1034,7 @@
962 1034 int len;
963 1035 struct kvec *vec;
964 1036 int pnum, vlen;
  1037 + struct rpc_rqst *req = NULL;
965 1038  
966 1039 dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
967 1040 svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
968 1041  
... ... @@ -975,9 +1048,27 @@
975 1048 vec = rqstp->rq_vec;
976 1049 vec[0] = rqstp->rq_arg.head[0];
977 1050 vlen = PAGE_SIZE;
  1051 +
  1052 + /*
  1053 + * We have enough data for the whole tcp record. Let's try and read the
  1054 + * first 8 bytes to get the xid and the call direction. We can use this
  1055 + * to figure out if this is a call or a reply to a callback. If
  1056 + * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
  1057 + * In that case, don't bother with the calldir and just read the data.
  1058 + * It will be rejected in svc_process.
  1059 + */
  1060 + if (len >= 8) {
  1061 + len = svc_process_calldir(svsk, rqstp, &req, vec);
  1062 + if (len < 0)
  1063 + goto err_again;
  1064 + vlen -= 8;
  1065 + }
  1066 +
978 1067 pnum = 1;
979 1068 while (vlen < len) {
980   - vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
  1069 + vec[pnum].iov_base = (req) ?
  1070 + page_address(req->rq_private_buf.pages[pnum - 1]) :
  1071 + page_address(rqstp->rq_pages[pnum]);
981 1072 vec[pnum].iov_len = PAGE_SIZE;
982 1073 pnum++;
983 1074 vlen += PAGE_SIZE;
... ... @@ -989,6 +1080,16 @@
989 1080 if (len < 0)
990 1081 goto err_again;
991 1082  
  1083 + /*
  1084 + * Account for the 8 bytes we read earlier
  1085 + */
  1086 + len += 8;
  1087 +
  1088 + if (req) {
  1089 + xprt_complete_rqst(req->rq_task, len);
  1090 + len = 0;
  1091 + goto out;
  1092 + }
992 1093 dprintk("svc: TCP complete record (%d bytes)\n", len);
993 1094 rqstp->rq_arg.len = len;
994 1095 rqstp->rq_arg.page_base = 0;
... ... @@ -1002,6 +1103,7 @@
1002 1103 rqstp->rq_xprt_ctxt = NULL;
1003 1104 rqstp->rq_prot = IPPROTO_TCP;
1004 1105  
  1106 +out:
1005 1107 /* Reset TCP read info */
1006 1108 svsk->sk_reclen = 0;
1007 1109 svsk->sk_tcplen = 0;
... ... @@ -832,6 +832,11 @@
832 832 spin_unlock_bh(&xprt->transport_lock);
833 833 }
834 834  
  835 +static inline int xprt_has_timer(struct rpc_xprt *xprt)
  836 +{
  837 + return xprt->idle_timeout != 0;
  838 +}
  839 +
835 840 /**
836 841 * xprt_prepare_transmit - reserve the transport before sending a request
837 842 * @task: RPC task about to send a request
... ... @@ -1013,7 +1018,7 @@
1013 1018 if (!list_empty(&req->rq_list))
1014 1019 list_del(&req->rq_list);
1015 1020 xprt->last_used = jiffies;
1016   - if (list_empty(&xprt->recv))
  1021 + if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
1017 1022 mod_timer(&xprt->timer,
1018 1023 xprt->last_used + xprt->idle_timeout);
1019 1024 spin_unlock_bh(&xprt->transport_lock);
... ... @@ -1082,8 +1087,11 @@
1082 1087 #endif /* CONFIG_NFS_V4_1 */
1083 1088  
1084 1089 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1085   - setup_timer(&xprt->timer, xprt_init_autodisconnect,
1086   - (unsigned long)xprt);
  1090 + if (xprt_has_timer(xprt))
  1091 + setup_timer(&xprt->timer, xprt_init_autodisconnect,
  1092 + (unsigned long)xprt);
  1093 + else
  1094 + init_timer(&xprt->timer);
1087 1095 xprt->last_used = jiffies;
1088 1096 xprt->cwnd = RPC_INITCWND;
1089 1097 xprt->bind_index = 0;
... ... @@ -1102,7 +1110,6 @@
1102 1110  
1103 1111 dprintk("RPC: created transport %p with %u slots\n", xprt,
1104 1112 xprt->max_reqs);
1105   -
1106 1113 return xprt;
1107 1114 }
1108 1115  
net/sunrpc/xprtsock.c
... ... @@ -32,6 +32,7 @@
32 32 #include <linux/tcp.h>
33 33 #include <linux/sunrpc/clnt.h>
34 34 #include <linux/sunrpc/sched.h>
  35 +#include <linux/sunrpc/svcsock.h>
35 36 #include <linux/sunrpc/xprtsock.h>
36 37 #include <linux/file.h>
37 38 #ifdef CONFIG_NFS_V4_1
... ... @@ -43,6 +44,7 @@
43 44 #include <net/udp.h>
44 45 #include <net/tcp.h>
45 46  
  47 +#include "sunrpc.h"
46 48 /*
47 49 * xprtsock tunables
48 50 */
... ... @@ -2098,6 +2100,134 @@
2098 2100 xprt->stat.bklog_u);
2099 2101 }
2100 2102  
  2103 +/*
  2104 + * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
  2105 + * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
  2106 + * to use the server side send routines.
  2107 + */
  2108 +void *bc_malloc(struct rpc_task *task, size_t size)
  2109 +{
  2110 + struct page *page;
  2111 + struct rpc_buffer *buf;
  2112 +
  2113 + BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
  2114 + page = alloc_page(GFP_KERNEL);
  2115 +
  2116 + if (!page)
  2117 + return NULL;
  2118 +
  2119 + buf = page_address(page);
  2120 + buf->len = PAGE_SIZE;
  2121 +
  2122 + return buf->data;
  2123 +}
  2124 +
  2125 +/*
  2126 + * Free the space allocated in the bc_alloc routine
  2127 + */
  2128 +void bc_free(void *buffer)
  2129 +{
  2130 + struct rpc_buffer *buf;
  2131 +
  2132 + if (!buffer)
  2133 + return;
  2134 +
  2135 + buf = container_of(buffer, struct rpc_buffer, data);
  2136 + free_page((unsigned long)buf);
  2137 +}
  2138 +
  2139 +/*
  2140 + * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
  2141 + * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
  2142 + */
  2143 +static int bc_sendto(struct rpc_rqst *req)
  2144 +{
  2145 + int len;
  2146 + struct xdr_buf *xbufp = &req->rq_snd_buf;
  2147 + struct rpc_xprt *xprt = req->rq_xprt;
  2148 + struct sock_xprt *transport =
  2149 + container_of(xprt, struct sock_xprt, xprt);
  2150 + struct socket *sock = transport->sock;
  2151 + unsigned long headoff;
  2152 + unsigned long tailoff;
  2153 +
  2154 + /*
  2155 + * Set up the rpc header and record marker stuff
  2156 + */
  2157 + xs_encode_tcp_record_marker(xbufp);
  2158 +
  2159 + tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
  2160 + headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
  2161 + len = svc_send_common(sock, xbufp,
  2162 + virt_to_page(xbufp->head[0].iov_base), headoff,
  2163 + xbufp->tail[0].iov_base, tailoff);
  2164 +
  2165 + if (len != xbufp->len) {
  2166 + printk(KERN_NOTICE "Error sending entire callback!\n");
  2167 + len = -EAGAIN;
  2168 + }
  2169 +
  2170 + return len;
  2171 +}
  2172 +
  2173 +/*
  2174 + * The send routine. Borrows from svc_send
  2175 + */
  2176 +static int bc_send_request(struct rpc_task *task)
  2177 +{
  2178 + struct rpc_rqst *req = task->tk_rqstp;
  2179 + struct svc_xprt *xprt;
  2180 + struct svc_sock *svsk;
  2181 + u32 len;
  2182 +
  2183 + dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
  2184 + /*
  2185 + * Get the server socket associated with this callback xprt
  2186 + */
  2187 + xprt = req->rq_xprt->bc_xprt;
  2188 + svsk = container_of(xprt, struct svc_sock, sk_xprt);
  2189 +
  2190 + /*
  2191 + * Grab the mutex to serialize data as the connection is shared
  2192 + * with the fore channel
  2193 + */
  2194 + if (!mutex_trylock(&xprt->xpt_mutex)) {
  2195 + rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
  2196 + if (!mutex_trylock(&xprt->xpt_mutex))
  2197 + return -EAGAIN;
  2198 + rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
  2199 + }
  2200 + if (test_bit(XPT_DEAD, &xprt->xpt_flags))
  2201 + len = -ENOTCONN;
  2202 + else
  2203 + len = bc_sendto(req);
  2204 + mutex_unlock(&xprt->xpt_mutex);
  2205 +
  2206 + if (len > 0)
  2207 + len = 0;
  2208 +
  2209 + return len;
  2210 +}
  2211 +
  2212 +/*
  2213 + * The close routine. Since this is client initiated, we do nothing
  2214 + */
  2215 +
  2216 +static void bc_close(struct rpc_xprt *xprt)
  2217 +{
  2218 + return;
  2219 +}
  2220 +
  2221 +/*
  2222 + * The xprt destroy routine. Again, because this connection is client
  2223 + * initiated, we do nothing
  2224 + */
  2225 +
  2226 +static void bc_destroy(struct rpc_xprt *xprt)
  2227 +{
  2228 + return;
  2229 +}
  2230 +
2101 2231 static struct rpc_xprt_ops xs_udp_ops = {
2102 2232 .set_buffer_size = xs_udp_set_buffer_size,
2103 2233 .reserve_xprt = xprt_reserve_xprt_cong,
... ... @@ -2131,6 +2261,22 @@
2131 2261 #endif /* CONFIG_NFS_V4_1 */
2132 2262 .close = xs_tcp_close,
2133 2263 .destroy = xs_destroy,
  2264 + .print_stats = xs_tcp_print_stats,
  2265 +};
  2266 +
  2267 +/*
  2268 + * The rpc_xprt_ops for the server backchannel
  2269 + */
  2270 +
  2271 +static struct rpc_xprt_ops bc_tcp_ops = {
  2272 + .reserve_xprt = xprt_reserve_xprt,
  2273 + .release_xprt = xprt_release_xprt,
  2274 + .buf_alloc = bc_malloc,
  2275 + .buf_free = bc_free,
  2276 + .send_request = bc_send_request,
  2277 + .set_retrans_timeout = xprt_set_retrans_timeout_def,
  2278 + .close = bc_close,
  2279 + .destroy = bc_destroy,
2134 2280 .print_stats = xs_tcp_print_stats,
2135 2281 };
2136 2282