Blame view
net/caif/caif_socket.c
26.6 KB
e6f95ec8d
|
1 2 |
/* * Copyright (C) ST-Ericsson AB 2010 |
26ee65e68
|
3 |
* Author: Sjur Brendeland |
e6f95ec8d
|
4 5 |
* License terms: GNU General Public License (GPL) version 2 */ |
b31fa5bad
|
6 |
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ |
e6f95ec8d
|
7 8 9 10 11 12 13 14 15 16 17 |
#include <linux/fs.h> #include <linux/init.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/poll.h> #include <linux/tcp.h> #include <linux/uaccess.h> |
bece7b239
|
18 |
#include <linux/debugfs.h> |
e6f95ec8d
|
19 |
#include <linux/caif/caif_socket.h> |
447648128
|
20 |
#include <linux/pkt_sched.h> |
bece7b239
|
21 22 |
#include <net/sock.h> #include <net/tcp_states.h> |
e6f95ec8d
|
23 24 25 26 27 |
#include <net/caif/caif_layer.h> #include <net/caif/caif_dev.h> #include <net/caif/cfpkt.h> MODULE_LICENSE("GPL"); |
bece7b239
|
28 |
MODULE_ALIAS_NETPROTO(AF_CAIF); |
bece7b239
|
29 30 31 32 33 34 35 36 37 38 39 40 41 |
/*
 * CAIF state is re-using the TCP socket states.
 * caif_states stored in sk_state reflect the state as reported by
 * the CAIF stack, while sk_socket->state is the state of the socket.
 */
enum caif_states {
	CAIF_CONNECTED		= TCP_ESTABLISHED,
	CAIF_CONNECTING		= TCP_SYN_SENT,
	CAIF_DISCONNECTED	= TCP_CLOSE
};

/* Bit numbers in caifsock->flow_state, used with test_bit/set_bit/clear_bit */
#define TX_FLOW_ON_BIT	1
#define RX_FLOW_ON_BIT	2

/* Per-socket state: embeds struct sock and the CAIF stack layer. */
struct caifsock {
	struct sock sk; /* must be first member */
	struct cflayer layer;			/* this socket's layer in the CAIF stack */
	u32 flow_state;				/* TX/RX flow-control flag bits (see above) */
	struct caif_connect_request conn_req;	/* parameters for caif_connect_client() */
	struct mutex readlock;			/* serializes stream readers (caif_read_lock) */
	struct dentry *debugfs_socket_dir;
	/* Link-layer geometry cached by caif_connect() */
	int headroom, tailroom, maxframe;
};
bece7b239
|
52 53 54 55 56 57 58 59 60 61 62 |
/* Nonzero when local receive flow is on (we accept packets from the modem). */
static int rx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(RX_FLOW_ON_BIT,
			(void *) &cf_sk->flow_state);
}

/* Nonzero when transmit flow is on (the modem accepts packets from us). */
static int tx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(TX_FLOW_ON_BIT,
			(void *) &cf_sk->flow_state);
}
e6f95ec8d
|
63 |
|
bece7b239
|
64 |
/* Mark local receive flow as OFF; peers are told separately via caif_flow_ctrl(). */
static void set_rx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(RX_FLOW_ON_BIT,
		  (void *) &cf_sk->flow_state);
}
e6f95ec8d
|
69 |
|
bece7b239
|
70 71 72 73 74 |
/* Mark local receive flow as ON (we may queue incoming packets again). */
static void set_rx_flow_on(struct caifsock *cf_sk)
{
	set_bit(RX_FLOW_ON_BIT,
		(void *) &cf_sk->flow_state);
}
e6f95ec8d
|
75 |
|
bece7b239
|
76 77 78 79 80 |
/* Mark transmit flow as OFF (modem asked us to stop sending). */
static void set_tx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(TX_FLOW_ON_BIT,
		  (void *) &cf_sk->flow_state);
}
e6f95ec8d
|
81 |
|
bece7b239
|
82 83 84 85 86 |
/* Mark transmit flow as ON (modem is ready for traffic again). */
static void set_tx_flow_on(struct caifsock *cf_sk)
{
	set_bit(TX_FLOW_ON_BIT,
		(void *) &cf_sk->flow_state);
}
e6f95ec8d
|
87 |
|
bece7b239
|
88 89 90 91 92 93 |
/* Take the per-socket reader mutex that keeps stream readers serialized. */
static void caif_read_lock(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	mutex_lock(&cf_sk->readlock);
}
e6f95ec8d
|
94 |
|
bece7b239
|
95 96 97 98 99 100 |
/* Release the per-socket reader mutex taken by caif_read_lock(). */
static void caif_read_unlock(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	mutex_unlock(&cf_sk->readlock);
}
e6f95ec8d
|
101 |
|
a9a8f1070
|
102 |
static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) |
bece7b239
|
103 104 105 106 |
{ /* A quarter of full buffer is used a low water mark */ return cf_sk->sk.sk_rcvbuf / 4; } |
e6f95ec8d
|
107 |
|
a9a8f1070
|
108 |
static void caif_flow_ctrl(struct sock *sk, int mode) |
bece7b239
|
109 110 111 |
{ struct caifsock *cf_sk; cf_sk = container_of(sk, struct caifsock, sk); |
ca6a09f25
|
112 |
if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) |
bece7b239
|
113 114 |
cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); } |
e6f95ec8d
|
115 |
|
bece7b239
|
116 117 118 119 |
/*
 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
 * not dropped, but CAIF is sending flow off instead.
 *
 * Queues @skb on @sk's receive queue.  When the queue crosses sk_rcvbuf
 * (or sk_rmem_schedule fails), RX flow is switched off and a FLOW_OFF
 * request is sent down the stack instead of dropping the packet.
 * Returns 0, or a negative sk_filter() error (skb then still owned by
 * the caller per sk_filter contract).
 */
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	/* Over the rcvbuf limit: throttle the sender rather than drop. */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
		net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
				    atomic_read(&cf_sk->sk.sk_rmem_alloc),
				    sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;
	/* Account receive memory; on failure throttle instead of dropping. */
	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	/* SOCK_DEAD is checked under the queue lock: caif_release() sets the
	 * flag holding the same lock, so no packet can be queued after close.
	 */
	spin_lock_irqsave(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	else
		kfree_skb(skb);
	return 0;
}
bece7b239
|
164 165 |
/* Packet Receive Callback function called from CAIF Stack */
/*
 * Converts the CAIF packet back to its underlying sk_buff and queues it
 * on the socket; packets arriving while not CAIF_CONNECTED are freed.
 * Always returns 0 (the queueing error from caif_queue_rcv_skb is ignored;
 * flow-off throttling is the intended backpressure mechanism).
 */
static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
	struct caifsock *cf_sk;
	struct sk_buff *skb;

	cf_sk = container_of(layr, struct caifsock, layer);
	skb = cfpkt_tonative(pkt);

	if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
		kfree_skb(skb);
		return 0;
	}
	caif_queue_rcv_skb(&cf_sk->sk, skb);
	return 0;
}
e6f95ec8d
|
180 |
|
b3ccfbe40
|
181 182 183 184 185 186 187 188 189 190 191 |
/*
 * Refcount adapters handed to the CAIF core via caif_client_register_refcnt():
 * they pin/unpin the socket while the stack holds a reference to our layer.
 */
static void cfsk_hold(struct cflayer *layr)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	sock_hold(&cf_sk->sk);
}

static void cfsk_put(struct cflayer *layr)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	sock_put(&cf_sk->sk);
}
bece7b239
|
192 193 |
/* Packet Control Callback function called from CAIF */
/*
 * Drives the socket state machine from stack events: updates sk_state,
 * the TX/RX flow bits and sk_shutdown, then wakes waiters through
 * sk_state_change()/sk_error_report().  @phyid is unused here.
 */
static void caif_ctrl_cb(struct cflayer *layr,
			 enum caif_ctrlcmd flow,
			 int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to shut up */
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		caif_client_register_refcnt(&cf_sk->layer,
						cfsk_hold, cfsk_put);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_shutdown = 0;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed */
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		/*
		 * Socket "standards" seems to require POLLOUT to
		 * be set at connect failure.
		 */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		/* Re-enable RX flow so pending readers can drain the queue. */
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;

	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}
bece7b239
|
253 |
static void caif_check_flow_release(struct sock *sk) |
e6f95ec8d
|
254 |
{ |
bece7b239
|
255 |
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); |
e6f95ec8d
|
256 |
|
bece7b239
|
257 258 |
if (rx_flow_is_on(cf_sk)) return; |
e6f95ec8d
|
259 |
|
bece7b239
|
260 |
if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { |
bece7b239
|
261 |
set_rx_flow_on(cf_sk); |
ca6a09f25
|
262 |
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); |
bece7b239
|
263 264 |
} } |
dcda138d2
|
265 |
|
bece7b239
|
266 |
/* |
dcda138d2
|
267 268 |
* Copied from unix_dgram_recvmsg, but removed credit checks, * changed locking, address handling and added MSG_TRUNC. |
bece7b239
|
269 |
*/ |
bece7b239
|
270 |
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, |
3bffc475f
|
271 |
struct msghdr *m, size_t len, int flags) |
e6f95ec8d
|
272 273 274 |
{ struct sock *sk = sock->sk; |
e6f95ec8d
|
275 |
struct sk_buff *skb; |
dcda138d2
|
276 277 |
int ret; int copylen; |
e6f95ec8d
|
278 |
|
dcda138d2
|
279 280 281 |
ret = -EOPNOTSUPP; if (m->msg_flags&MSG_OOB) goto read_error; |
e6f95ec8d
|
282 |
|
bece7b239
|
283 284 |
skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) |
e6f95ec8d
|
285 |
goto read_error; |
dcda138d2
|
286 287 288 289 |
copylen = skb->len; if (len < copylen) { m->msg_flags |= MSG_TRUNC; copylen = len; |
e6f95ec8d
|
290 |
} |
dcda138d2
|
291 |
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); |
bece7b239
|
292 |
if (ret) |
dcda138d2
|
293 |
goto out_free; |
e6f95ec8d
|
294 |
|
dcda138d2
|
295 296 |
ret = (flags & MSG_TRUNC) ? skb->len : copylen; out_free: |
bece7b239
|
297 |
skb_free_datagram(sk, skb); |
bece7b239
|
298 |
caif_check_flow_release(sk); |
dcda138d2
|
299 |
return ret; |
e6f95ec8d
|
300 |
|
bece7b239
|
301 302 303 |
read_error: return ret; } |
e6f95ec8d
|
304 |
|
e6f95ec8d
|
305 |
|
bece7b239
|
306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 |
/* Copied from unix_stream_wait_data, identical except for lock call. */
/*
 * Sleep (interruptibly, up to @timeo) until data arrives or the socket
 * errors, disconnects, dies or is shut down for receive.  Called with the
 * socket UNlocked; takes and drops the lock internally and returns the
 * remaining timeout.
 */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	lock_sock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		/* Any of these conditions ends the wait. */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
			sk->sk_err ||
			sk->sk_state != CAIF_CONNECTED ||
			sock_flag(sk, SOCK_DEAD) ||
			(sk->sk_shutdown & RCV_SHUTDOWN) ||
			signal_pending(current) ||
			!timeo)
			break;

		/* Drop the lock while actually sleeping. */
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	release_sock(sk);
	return timeo;
}
e6f95ec8d
|
334 |
|
bece7b239
|
335 336 337 338 339 |
/*
 * Copied from unix_stream_recvmsg, but removed credit checks,
 * changed locking calls, changed address handling.
 *
 * Stream receive: copies up to @size bytes, blocking (per timeout) until
 * at least the SO_RCVLOWAT target is met.  cf_sk->readlock serializes
 * concurrent readers; the socket lock only guards queue manipulation.
 * Returns bytes copied, or a negative errno if nothing was copied.
 */
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock *sk = sock->sk;
	int copied = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	/*
	 * Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */
	err = -EAGAIN;
	if (sk->sk_state == CAIF_CONNECTING)
		goto out;

	caif_read_lock(sk);
	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	do {
		int chunk;
		struct sk_buff *skb;

		lock_sock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		caif_check_flow_release(sk);

		if (skb == NULL) {
			/* Queue empty: decide whether to stop, fail or wait. */
			if (copied >= target)
				goto unlock;
			/*
			 *	POSIX 1003.1g mandates this order.
			 */
			err = sock_error(sk);
			if (err)
				goto unlock;
			err = -ECONNRESET;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			err = -EPIPE;
			if (sk->sk_state != CAIF_CONNECTED)
				goto unlock;
			if (sock_flag(sk, SOCK_DEAD))
				goto unlock;

			release_sock(sk);

			err = -EAGAIN;
			if (!timeo)
				break;

			/* Drop the reader lock while sleeping for data. */
			caif_read_unlock(sk);

			timeo = caif_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			caif_read_lock(sk);
			continue;
unlock:
			release_sock(sk);
			break;
		}
		release_sock(sk);
		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			/* Copy fault: requeue the skb; report EFAULT only if
			 * nothing was delivered yet. */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			/* put the skb back if we didn't use it up. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/*
			 * It is questionable, see note in unix_dgram_recvmsg.
			 */
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);
	caif_read_unlock(sk);

out:
	return copied ? : err;
}
e6f95ec8d
|
448 |
|
bece7b239
|
449 450 451 452 453 |
/*
 * Copied from sock.c:sock_wait_for_wmem, but change to wait for
 * CAIF flow-on and sock_writable.
 *
 * Waits until TX flow is on (and, if @wait_writeable, sndbuf space is
 * available), a timeout/signal occurs, or the socket errors/shuts down.
 * *err is overwritten on every loop iteration so it reflects the LAST
 * failed check; it is 0 only on the success exit.  Returns remaining
 * timeout.
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
				  int wait_writeable, long timeo, int *err)
{
	struct sock *sk = &cf_sk->sk;
	DEFINE_WAIT(wait);
	for (;;) {
		*err = 0;
		if (tx_flow_is_on(cf_sk) &&
			(!wait_writeable || sock_writeable(&cf_sk->sk)))
			break;
		*err = -ETIMEDOUT;
		if (!timeo)
			break;
		*err = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		*err = -ECONNRESET;
		if (sk->sk_shutdown & SHUTDOWN_MASK)
			break;
		*err = -sk->sk_err;
		if (sk->sk_err)
			break;
		*err = -EPIPE;
		if (cf_sk->sk.sk_state != CAIF_CONNECTED)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}
e6f95ec8d
|
484 |
|
bece7b239
|
485 486 487 488 489 490 491 492 |
/*
 * Transmit a SKB. The device may temporarily request re-transmission
 * by returning EAGAIN.
 *
 * Ownership of @skb passes to this function: it is freed here when no
 * downstream layer exists, otherwise consumed by the transmit hook.
 * NOTE(review): @noblock and @timeo are unused in this version.
 * NOTE(review): the kfree_skb() on the dn == NULL path presumably also
 * releases the cfpkt created by cfpkt_fromnative (it appears to wrap the
 * same skb) -- confirm against cfpkt.c.
 */
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;

	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
	/* skb->cb carries struct caif_payload_info for the stack; clear it. */
	memset(skb->cb, 0, sizeof(struct caif_payload_info));
	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);

	if (cf_sk->layer.dn == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
e6f95ec8d
|
505 |
|
bece7b239
|
506 507 |
/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
/*
 * Sends one datagram of @len bytes.  Waits for TX flow-on (honouring the
 * send timeout), enforces the link maxframe (except for RFM sockets),
 * and reserves link-layer head/tailroom around the payload.
 * Returns @len on success or a negative errno.
 */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int buffer_size;
	int ret = 0;
	struct sk_buff *skb = NULL;
	int noblock;
	long timeo;
	caif_assert(cf_sk);
	ret = sock_error(sk);
	if (ret)
		goto err;

	ret = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto err;

	ret = -EOPNOTSUPP;
	if (msg->msg_namelen)
		goto err;

	ret = -EINVAL;
	if (unlikely(msg->msg_iov->iov_base == NULL))
		goto err;
	noblock = msg->msg_flags & MSG_DONTWAIT;

	timeo = sock_sndtimeo(sk, noblock);
	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
				1, timeo, &ret);

	if (ret)
		goto err;
	ret = -EPIPE;
	/* NOTE(review): checking RCV_SHUTDOWN on the send path looks odd
	 * (caif_stream_sendmsg checks SEND_SHUTDOWN); in practice CAIF sets
	 * SHUTDOWN_MASK on remote shutdown so both bits rise together --
	 * confirm before changing. */
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
		sock_flag(sk, SOCK_DEAD) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;

	/* Error if trying to write more than maximum frame size. */
	ret = -EMSGSIZE;
	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
		goto err;

	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;

	ret = -ENOMEM;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);

	if (!skb || skb_tailroom(skb) < buffer_size)
		goto err;

	skb_reserve(skb, cf_sk->headroom);

	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		/* skb is already freed */
		return ret;

	return len;
err:
	kfree_skb(skb);
	return ret;
}
e6f95ec8d
|
571 |
|
bece7b239
|
572 573 574 575 576 577 |
/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * Changed removed permission handling and added waiting for flow on
 * and other minor adaptations.
 *
 * Stream send: chops the payload into frames bounded by maxframe,
 * half of sndbuf and SKB_MAX_ALLOC, transmitting each in turn.
 * Returns bytes sent, or a negative errno if nothing was sent.
 * NOTE(review): the err value produced by caif_wait_for_flow_on() is not
 * checked before entering the send loop -- on timeout/signal the code
 * still attempts to send; verify this is intentional.
 */
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (unlikely(msg->msg_flags&MSG_OOB))
		goto out_err;

	if (unlikely(msg->msg_namelen))
		goto out_err;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);

	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		goto pipe_err;

	/* NOTE(review): sent is int, len is size_t -- the comparison promotes
	 * sent to unsigned; fine for sane lengths, but worth confirming. */
	while (sent < len) {

		size = len-sent;

		if (size > cf_sk->maxframe)
			size = cf_sk->maxframe;

		/* If size is more than half of sndbuf, chop up message */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		skb = sock_alloc_send_skb(sk,
					size + cf_sk->headroom +
					cf_sk->tailroom,
					msg->msg_flags&MSG_DONTWAIT,
					&err);
		if (skb == NULL)
			goto out_err;

		skb_reserve(skb, cf_sk->headroom);
		/*
		 * If you pass two values to the sock_alloc_send_skb
		 * it tries to grab the large buffer with GFP_NOFS
		 * (which can fail easily), and if it fails grab the
		 * fallback size buffer which is under a page and will
		 * succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}
		err = transmit_skb(skb, cf_sk,
				msg->msg_flags&MSG_DONTWAIT, timeo);
		if (err < 0)
			/* skb is already freed */
			goto pipe_err;

		sent += size;
	}

	return sent;

pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	return sent ? : err;
e6f95ec8d
|
654 655 656 |
} /* end caif_stream_sendmsg */

/*
 * SOL_CAIF setsockopt: CAIFSO_LINK_SELECT and CAIFSO_REQ_PARAM, both
 * only accepted while the socket is still SS_UNCONNECTED (they feed
 * conn_req, consumed at connect time).
 * NOTE(review): copy_from_user failures return -EINVAL here where -EFAULT
 * is conventional -- kept as-is, userspace may depend on it.
 */
static int setsockopt(struct socket *sock,
		      int lvl, int opt, char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int linksel;

	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
		return -ENOPROTOOPT;

	switch (opt) {
	case CAIFSO_LINK_SELECT:
		if (ol < sizeof(int))
			return -EINVAL;
		if (lvl != SOL_CAIF)
			goto bad_sol;
		if (copy_from_user(&linksel, ov, sizeof(int)))
			return -EINVAL;
		lock_sock(&(cf_sk->sk));
		cf_sk->conn_req.link_selector = linksel;
		release_sock(&cf_sk->sk);
		return 0;
	case CAIFSO_REQ_PARAM:
		if (lvl != SOL_CAIF)
			goto bad_sol;
		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
			return -ENOPROTOOPT;
		lock_sock(&(cf_sk->sk));
		if (ol > sizeof(cf_sk->conn_req.param.data) ||
			copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
			release_sock(&cf_sk->sk);
			return -EINVAL;
		}
		cf_sk->conn_req.param.size = ol;
		release_sock(&cf_sk->sk);
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
bad_sol:
	return -ENOPROTOOPT;
}
bece7b239
|
702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 |
/*
 * caif_connect() - Connect a CAIF Socket
 * Copied and modified af_irda.c:irda_connect().
 *
 * Note : by consulting "errno", the user space caller may learn the cause
 * of the failure. Most of them are visible in the function, others may come
 * from subroutines called and are listed here :
 *  o -EAFNOSUPPORT: bad socket family or type.
 *  o -ESOCKTNOSUPPORT: bad socket type or protocol
 *  o -EINVAL: bad socket address, or CAIF link type
 *  o -ECONNREFUSED: remote end refused the connection.
 *  o -EINPROGRESS: connect request sent but timed out (or non-blocking)
 *  o -EISCONN: already connected.
 *  o -ETIMEDOUT: Connection timed out (send timeout)
 *  o -ENODEV: No link layer to send request
 *  o -ECONNRESET: Received Shutdown indication or lost link layer
 *  o -ENOMEM: Out of memory
 *
 * State Strategy:
 *  o sk_state: holds the CAIF_* protocol state, it's updated by
 *    caif_ctrl_cb.
 *  o sock->state: holds the SS_* socket state and is updated by connect and
 *    disconnect.
 */
static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	long timeo;
	int err;
	int ifindex, headroom, tailroom;
	unsigned int mtu;
	struct net_device *dev;

	lock_sock(sk);

	err = -EAFNOSUPPORT;
	if (uaddr->sa_family != AF_CAIF)
		goto out;

	/* Decide, from the socket state, whether a (re)connect is legal. */
	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Normal case, a fresh connect */
		caif_assert(sk->sk_state == CAIF_DISCONNECTED);
		break;
	case SS_CONNECTING:
		switch (sk->sk_state) {
		case CAIF_CONNECTED:
			sock->state = SS_CONNECTED;
			err = -EISCONN;
			goto out;
		case CAIF_DISCONNECTED:
			/* Reconnect allowed */
			break;
		case CAIF_CONNECTING:
			err = -EALREADY;
			if (flags & O_NONBLOCK)
				goto out;
			goto wait_connect;
		}
		break;
	case SS_CONNECTED:
		caif_assert(sk->sk_state == CAIF_CONNECTED ||
				sk->sk_state == CAIF_DISCONNECTED);
		if (sk->sk_shutdown & SHUTDOWN_MASK) {
			/* Allow re-connect after SHUTDOWN_IND */
			caif_disconnect_client(sock_net(sk), &cf_sk->layer);
			caif_free_client(&cf_sk->layer);
			break;
		}
		/* No reconnect on a seqpacket socket */
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
	case SS_FREE:
		/* NOTE(review): caif_assert(1) is a no-op -- the comment says
		 * this branch should never happen, so caif_assert(0) was
		 * presumably intended; confirm before changing (it would BUG).
		 */
		caif_assert(1); /*Should never happen */
		break;
	}
	sk->sk_state = CAIF_DISCONNECTED;
	sock->state = SS_UNCONNECTED;
	sk_stream_kill_queues(&cf_sk->sk);

	err = -EINVAL;
	if (addr_len != sizeof(struct sockaddr_caif))
		goto out;

	memcpy(&cf_sk->conn_req.sockaddr, uaddr,
		sizeof(struct sockaddr_caif));

	/* Move to connecting socket, start sending Connect Requests */
	sock->state = SS_CONNECTING;
	sk->sk_state = CAIF_CONNECTING;

	/* Check priority value comming from socket */
	/* if priority value is out of range it will be ajusted */
	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
	else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
		cf_sk->conn_req.priority = CAIF_PRIO_MIN;
	else
		cf_sk->conn_req.priority = cf_sk->sk.sk_priority;

	/*ifindex = id of the interface.*/
	cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;

	cf_sk->layer.receive = caif_sktrecv_cb;

	err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
				&cf_sk->layer, &ifindex, &headroom, &tailroom);

	if (err < 0) {
		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		goto out;
	}

	/* Cache the device's head/tailroom and MTU for the send path. */
	err = -ENODEV;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
	if (!dev) {
		rcu_read_unlock();
		goto out;
	}
	cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
	mtu = dev->mtu;
	rcu_read_unlock();

	cf_sk->tailroom = tailroom;
	cf_sk->maxframe = mtu - (headroom + tailroom);
	if (cf_sk->maxframe < 1) {
		/* NOTE(review): dev->mtu is read here after rcu_read_unlock();
		 * the cached 'mtu' should probably be used instead. */
		pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
		err = -ENODEV;
		goto out;
	}

	err = -EINPROGRESS;
wait_connect:

	if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	release_sock(sk);
	err = -ERESTARTSYS;
	timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
			sk->sk_state != CAIF_CONNECTING,
			timeo);
	lock_sock(sk);
	if (timeo < 0)
		goto out; /* -ERESTARTSYS */

	err = -ETIMEDOUT;
	if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
		goto out;
	if (sk->sk_state != CAIF_CONNECTED) {
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);
		if (!err)
			err = -ECONNREFUSED;
		goto out;
	}
	sock->state = SS_CONNECTED;
	err = 0;
out:
	release_sock(sk);
	return err;
}
bece7b239
|
866 867 868 869 870 |
/*
 * caif_release() - Disconnect a CAIF Socket
 * Copied and modified af_irda.c:irda_release().
 *
 * Marks the socket dead under the receive-queue lock (pairs with the
 * SOCK_DEAD check in caif_queue_rcv_skb), tears down debugfs, tells the
 * CAIF stack to disconnect, wakes pollers and drops the final reference.
 */
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (!sk)
		return 0;

	set_tx_flow_off(cf_sk);

	/*
	 * Ensure that packets are not queued after this point in time.
	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
	 * this ensures no packets when sock is dead.
	 */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	sock->sk = NULL;

	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

	lock_sock(&(cf_sk->sk));
	sk->sk_state = CAIF_DISCONNECTED;
	sk->sk_shutdown = SHUTDOWN_MASK;

	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);

	sock_orphan(sk);
	sk_stream_kill_queues(&cf_sk->sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
e6f95ec8d
|
907 |
|
bece7b239
|
908 909 |
/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
/*
 * poll/select callback: standard error/hangup/readable bits, plus
 * writability which additionally requires CAIF TX flow to be on.
 */
static unsigned int caif_poll(struct file *file,
			      struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
bece7b239
|
941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 |
/* Operations for SOCK_SEQPACKET CAIF sockets: one CAIF packet per call. */
static const struct proto_ops caif_seqpacket_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_seqpkt_sendmsg,
	.recvmsg = caif_seqpkt_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

/* Operations for SOCK_STREAM CAIF sockets: byte-stream send/receive. */
static const struct proto_ops caif_stream_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_stream_sendmsg,
	.recvmsg = caif_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

/* This function is called when a socket is finally destroyed. */
static void caif_sock_destructor(struct sock *sk)
{
bece7b239
|
986 |
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	/* Sanity checks: no pending TX memory, socket unhashed, no
	 * struct socket still attached. */
	caif_assert(!atomic_read(&sk->sk_wmem_alloc));
	caif_assert(sk_unhashed(sk));
	caif_assert(!sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		/* Destructor ran on a socket that was never orphaned:
		 * bail out rather than freeing live state. */
		pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
		return;
	}
	sk_stream_kill_queues(&cf_sk->sk);
	caif_free_client(&cf_sk->layer);
}

/*
 * socket(2) backend for PF_CAIF: allocate and initialize a new CAIF
 * socket. Supports SOCK_SEQPACKET and SOCK_STREAM; @protocol selects
 * the CAIF service (CAIFPROTO_*). Returns 0 or a negative errno.
 */
static int caif_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk = NULL;
	struct caifsock *cf_sk = NULL;
	static struct proto prot = {.name = "PF_CAIF",
		.owner = THIS_MODULE,
		.obj_size = sizeof(struct caifsock),
	};

	/* CAIF sockets are restricted to privileged processes. */
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
		return -EPERM;
	/*
	 * The sock->type specifies the socket type to use.
	 * The CAIF socket is a packet stream in the sense
	 * that it is packet based. CAIF trusts the reliability
	 * of the link, no resending is implemented.
	 */
	if (sock->type == SOCK_SEQPACKET)
		sock->ops = &caif_seqpacket_ops;
	else if (sock->type == SOCK_STREAM)
		sock->ops = &caif_stream_ops;
	else
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= CAIFPROTO_MAX)
		return -EPROTONOSUPPORT;
	/*
	 * Set the socket state to unconnected. The socket state
	 * is really not used at all in the net/core or socket.c but the
	 * initialization makes sure that sock->state is not uninitialized.
	 */
	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
	if (!sk)
		return -ENOMEM;

	cf_sk = container_of(sk, struct caifsock, sk);

	/* Store the protocol */
	sk->sk_protocol = (unsigned char) protocol;

	/* Initialize default priority for well-known cases */
	switch (protocol) {
	case CAIFPROTO_AT:
		sk->sk_priority = TC_PRIO_CONTROL;
		break;
	case CAIFPROTO_RFM:
		sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
		break;
	default:
		sk->sk_priority = TC_PRIO_BESTEFFORT;
	}

	/*
	 * Lock in order to try to stop someone from opening the socket
	 * too early.
	 */
	lock_sock(&(cf_sk->sk));

	/* Initialize the nozero default sock structure data. */
	sock_init_data(sock, sk);
	sk->sk_destruct = caif_sock_destructor;

	mutex_init(&cf_sk->readlock); /* single task reading lock */
	cf_sk->layer.ctrlcmd = caif_ctrl_cb;
	cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
	cf_sk->sk.sk_state = CAIF_DISCONNECTED;

	/* Start with TX flow off until the CAIF stack reports flow-on. */
	set_tx_flow_off(cf_sk);
	set_rx_flow_on(cf_sk);

	/* Set default options on configuration */
	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
	cf_sk->conn_req.protocol = protocol;
	release_sock(&cf_sk->sk);
	return 0;
}
bece7b239
|
1072 |
|
e6f95ec8d
|
1073 1074 1075 1076 1077 |
/* Protocol-family registration record passed to sock_register(). */
static struct net_proto_family caif_family_ops = {
	.family = PF_CAIF,
	.create = caif_create,
	.owner = THIS_MODULE,
};
4a695823b
|
1078 |
/*
 * Module init: register the CAIF protocol family with the socket layer.
 * Returns 0 on success or the negative errno from sock_register().
 *
 * Bug fix: the previous code did
 *	int err = sock_register(&caif_family_ops);
 *	if (!err)
 *		return err;
 *	return 0;
 * which returns 0 on BOTH paths, so a registration failure was
 * silently reported as success and the module loaded in a broken
 * state. Propagate the error instead.
 */
static int __init caif_sktinit_module(void)
{
	return sock_register(&caif_family_ops);
}
e6f95ec8d
|
1085 1086 1087 |
/* Module exit: unregister the PF_CAIF protocol family. */
static void __exit caif_sktexit_module(void)
{
	sock_unregister(PF_CAIF);
}
e6f95ec8d
|
1089 1090 |
/* Module entry/exit points. */
module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);