Commit c85c2951d4da1236e32f1858db418221e624aba5

Authored by sjur.brandeland@stericsson.com
Committed by David S. Miller
1 parent bee925db9a

caif: Handle dev_queue_xmit errors.

Do proper handling of dev_queue_xmit errors in order to
avoid double free of skb and leaks in error conditions.
In cfctrl, pending requests are removed when the CAIF link layer goes down.

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 7 changed files with 119 additions and 47 deletions Inline Diff

include/net/caif/cfctrl.h
1 /* 1 /*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7 #ifndef CFCTRL_H_ 7 #ifndef CFCTRL_H_
8 #define CFCTRL_H_ 8 #define CFCTRL_H_
9 #include <net/caif/caif_layer.h> 9 #include <net/caif/caif_layer.h>
10 #include <net/caif/cfsrvl.h> 10 #include <net/caif/cfsrvl.h>
11 11
12 /* CAIF Control packet commands */ 12 /* CAIF Control packet commands */
13 enum cfctrl_cmd { 13 enum cfctrl_cmd {
14 CFCTRL_CMD_LINK_SETUP = 0, 14 CFCTRL_CMD_LINK_SETUP = 0,
15 CFCTRL_CMD_LINK_DESTROY = 1, 15 CFCTRL_CMD_LINK_DESTROY = 1,
16 CFCTRL_CMD_LINK_ERR = 2, 16 CFCTRL_CMD_LINK_ERR = 2,
17 CFCTRL_CMD_ENUM = 3, 17 CFCTRL_CMD_ENUM = 3,
18 CFCTRL_CMD_SLEEP = 4, 18 CFCTRL_CMD_SLEEP = 4,
19 CFCTRL_CMD_WAKE = 5, 19 CFCTRL_CMD_WAKE = 5,
20 CFCTRL_CMD_LINK_RECONF = 6, 20 CFCTRL_CMD_LINK_RECONF = 6,
21 CFCTRL_CMD_START_REASON = 7, 21 CFCTRL_CMD_START_REASON = 7,
22 CFCTRL_CMD_RADIO_SET = 8, 22 CFCTRL_CMD_RADIO_SET = 8,
23 CFCTRL_CMD_MODEM_SET = 9, 23 CFCTRL_CMD_MODEM_SET = 9,
24 CFCTRL_CMD_MASK = 0xf 24 CFCTRL_CMD_MASK = 0xf
25 }; 25 };
26 26
27 /* Channel types */ 27 /* Channel types */
28 enum cfctrl_srv { 28 enum cfctrl_srv {
29 CFCTRL_SRV_DECM = 0, 29 CFCTRL_SRV_DECM = 0,
30 CFCTRL_SRV_VEI = 1, 30 CFCTRL_SRV_VEI = 1,
31 CFCTRL_SRV_VIDEO = 2, 31 CFCTRL_SRV_VIDEO = 2,
32 CFCTRL_SRV_DBG = 3, 32 CFCTRL_SRV_DBG = 3,
33 CFCTRL_SRV_DATAGRAM = 4, 33 CFCTRL_SRV_DATAGRAM = 4,
34 CFCTRL_SRV_RFM = 5, 34 CFCTRL_SRV_RFM = 5,
35 CFCTRL_SRV_UTIL = 6, 35 CFCTRL_SRV_UTIL = 6,
36 CFCTRL_SRV_MASK = 0xf 36 CFCTRL_SRV_MASK = 0xf
37 }; 37 };
38 38
39 #define CFCTRL_RSP_BIT 0x20 39 #define CFCTRL_RSP_BIT 0x20
40 #define CFCTRL_ERR_BIT 0x10 40 #define CFCTRL_ERR_BIT 0x10
41 41
42 struct cfctrl_rsp { 42 struct cfctrl_rsp {
43 void (*linksetup_rsp)(struct cflayer *layer, u8 linkid, 43 void (*linksetup_rsp)(struct cflayer *layer, u8 linkid,
44 enum cfctrl_srv serv, u8 phyid, 44 enum cfctrl_srv serv, u8 phyid,
45 struct cflayer *adapt_layer); 45 struct cflayer *adapt_layer);
46 void (*linkdestroy_rsp)(struct cflayer *layer, u8 linkid); 46 void (*linkdestroy_rsp)(struct cflayer *layer, u8 linkid);
47 void (*linkerror_ind)(void); 47 void (*linkerror_ind)(void);
48 void (*enum_rsp)(void); 48 void (*enum_rsp)(void);
49 void (*sleep_rsp)(void); 49 void (*sleep_rsp)(void);
50 void (*wake_rsp)(void); 50 void (*wake_rsp)(void);
51 void (*restart_rsp)(void); 51 void (*restart_rsp)(void);
52 void (*radioset_rsp)(void); 52 void (*radioset_rsp)(void);
53 void (*reject_rsp)(struct cflayer *layer, u8 linkid, 53 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54 struct cflayer *client_layer); 54 struct cflayer *client_layer);
55 }; 55 };
56 56
57 /* Link Setup Parameters for CAIF-Links. */ 57 /* Link Setup Parameters for CAIF-Links. */
58 struct cfctrl_link_param { 58 struct cfctrl_link_param {
59 enum cfctrl_srv linktype;/* (T3,T0) Type of Channel */ 59 enum cfctrl_srv linktype;/* (T3,T0) Type of Channel */
60 u8 priority; /* (P4,P0) Priority of the channel */ 60 u8 priority; /* (P4,P0) Priority of the channel */
61 u8 phyid; /* (U2-U0) Physical interface to connect */ 61 u8 phyid; /* (U2-U0) Physical interface to connect */
62 u8 endpoint; /* (E1,E0) Endpoint for data channels */ 62 u8 endpoint; /* (E1,E0) Endpoint for data channels */
63 u8 chtype; /* (H1,H0) Channel-Type, applies to 63 u8 chtype; /* (H1,H0) Channel-Type, applies to
64 * VEI, DEBUG */ 64 * VEI, DEBUG */
65 union { 65 union {
66 struct { 66 struct {
67 u8 connid; /* (D7,D0) Video LinkId */ 67 u8 connid; /* (D7,D0) Video LinkId */
68 } video; 68 } video;
69 69
70 struct { 70 struct {
71 u32 connid; /* (N31,Ngit0) Connection ID used 71 u32 connid; /* (N31,Ngit0) Connection ID used
72 * for Datagram */ 72 * for Datagram */
73 } datagram; 73 } datagram;
74 74
75 struct { 75 struct {
76 u32 connid; /* Connection ID used for RFM */ 76 u32 connid; /* Connection ID used for RFM */
77 char volume[20]; /* Volume to mount for RFM */ 77 char volume[20]; /* Volume to mount for RFM */
78 } rfm; /* Configuration for RFM */ 78 } rfm; /* Configuration for RFM */
79 79
80 struct { 80 struct {
81 u16 fifosize_kb; /* Psock FIFO size in KB */ 81 u16 fifosize_kb; /* Psock FIFO size in KB */
82 u16 fifosize_bufs; /* Psock # signal buffers */ 82 u16 fifosize_bufs; /* Psock # signal buffers */
83 char name[16]; /* Name of the PSOCK service */ 83 char name[16]; /* Name of the PSOCK service */
84 u8 params[255]; /* Link setup Parameters> */ 84 u8 params[255]; /* Link setup Parameters> */
85 u16 paramlen; /* Length of Link Setup 85 u16 paramlen; /* Length of Link Setup
86 * Parameters */ 86 * Parameters */
87 } utility; /* Configuration for Utility Links (Psock) */ 87 } utility; /* Configuration for Utility Links (Psock) */
88 } u; 88 } u;
89 }; 89 };
90 90
91 /* This structure is used internally in CFCTRL */ 91 /* This structure is used internally in CFCTRL */
92 struct cfctrl_request_info { 92 struct cfctrl_request_info {
93 int sequence_no; 93 int sequence_no;
94 enum cfctrl_cmd cmd; 94 enum cfctrl_cmd cmd;
95 u8 channel_id; 95 u8 channel_id;
96 struct cfctrl_link_param param; 96 struct cfctrl_link_param param;
97 struct cflayer *client_layer; 97 struct cflayer *client_layer;
98 struct list_head list; 98 struct list_head list;
99 }; 99 };
100 100
101 struct cfctrl { 101 struct cfctrl {
102 struct cfsrvl serv; 102 struct cfsrvl serv;
103 struct cfctrl_rsp res; 103 struct cfctrl_rsp res;
104 atomic_t req_seq_no; 104 atomic_t req_seq_no;
105 atomic_t rsp_seq_no; 105 atomic_t rsp_seq_no;
106 struct list_head list; 106 struct list_head list;
107 /* Protects from simultaneous access to first_req list */ 107 /* Protects from simultaneous access to first_req list */
108 spinlock_t info_list_lock; 108 spinlock_t info_list_lock;
109 #ifndef CAIF_NO_LOOP 109 #ifndef CAIF_NO_LOOP
110 u8 loop_linkid; 110 u8 loop_linkid;
111 int loop_linkused[256]; 111 int loop_linkused[256];
112 /* Protects simultaneous access to loop_linkid and loop_linkused */ 112 /* Protects simultaneous access to loop_linkid and loop_linkused */
113 spinlock_t loop_linkid_lock; 113 spinlock_t loop_linkid_lock;
114 #endif 114 #endif
115 115
116 }; 116 };
117 117
118 void cfctrl_enum_req(struct cflayer *cfctrl, u8 physlinkid); 118 void cfctrl_enum_req(struct cflayer *cfctrl, u8 physlinkid);
119 int cfctrl_linkup_request(struct cflayer *cfctrl, 119 int cfctrl_linkup_request(struct cflayer *cfctrl,
120 struct cfctrl_link_param *param, 120 struct cfctrl_link_param *param,
121 struct cflayer *user_layer); 121 struct cflayer *user_layer);
122 int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid, 122 int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid,
123 struct cflayer *client); 123 struct cflayer *client);
124 124
125 struct cflayer *cfctrl_create(void); 125 struct cflayer *cfctrl_create(void);
126 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer); 126 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer);
127 void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer); 127 int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer);
128 void cfctrl_remove(struct cflayer *layr);
128 129
129 #endif /* CFCTRL_H_ */ 130 #endif /* CFCTRL_H_ */
130 131
1 /* 1 /*
2 * CAIF Interface registration. 2 * CAIF Interface registration.
3 * Copyright (C) ST-Ericsson AB 2010 3 * Copyright (C) ST-Ericsson AB 2010
4 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 4 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 * 6 *
7 * Borrowed heavily from file: pn_dev.c. Thanks to 7 * Borrowed heavily from file: pn_dev.c. Thanks to
8 * Remi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * and Sakari Ailus <sakari.ailus@nokia.com> 9 * and Sakari Ailus <sakari.ailus@nokia.com>
10 */ 10 */
11 11
12 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 12 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
13 13
14 #include <linux/version.h> 14 #include <linux/version.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/if_arp.h> 16 #include <linux/if_arp.h>
17 #include <linux/net.h> 17 #include <linux/net.h>
18 #include <linux/netdevice.h> 18 #include <linux/netdevice.h>
19 #include <linux/mutex.h> 19 #include <linux/mutex.h>
20 #include <net/netns/generic.h> 20 #include <net/netns/generic.h>
21 #include <net/net_namespace.h> 21 #include <net/net_namespace.h>
22 #include <net/pkt_sched.h> 22 #include <net/pkt_sched.h>
23 #include <net/caif/caif_device.h> 23 #include <net/caif/caif_device.h>
24 #include <net/caif/caif_layer.h> 24 #include <net/caif/caif_layer.h>
25 #include <net/caif/cfpkt.h> 25 #include <net/caif/cfpkt.h>
26 #include <net/caif/cfcnfg.h> 26 #include <net/caif/cfcnfg.h>
27 27
28 MODULE_LICENSE("GPL"); 28 MODULE_LICENSE("GPL");
29 29
30 /* Used for local tracking of the CAIF net devices */ 30 /* Used for local tracking of the CAIF net devices */
31 struct caif_device_entry { 31 struct caif_device_entry {
32 struct cflayer layer; 32 struct cflayer layer;
33 struct list_head list; 33 struct list_head list;
34 struct net_device *netdev; 34 struct net_device *netdev;
35 int __percpu *pcpu_refcnt; 35 int __percpu *pcpu_refcnt;
36 }; 36 };
37 37
38 struct caif_device_entry_list { 38 struct caif_device_entry_list {
39 struct list_head list; 39 struct list_head list;
40 /* Protects simulanous deletes in list */ 40 /* Protects simulanous deletes in list */
41 struct mutex lock; 41 struct mutex lock;
42 }; 42 };
43 43
44 struct caif_net { 44 struct caif_net {
45 struct cfcnfg *cfg; 45 struct cfcnfg *cfg;
46 struct caif_device_entry_list caifdevs; 46 struct caif_device_entry_list caifdevs;
47 }; 47 };
48 48
49 static int caif_net_id; 49 static int caif_net_id;
50 50
51 struct cfcnfg *get_cfcnfg(struct net *net) 51 struct cfcnfg *get_cfcnfg(struct net *net)
52 { 52 {
53 struct caif_net *caifn; 53 struct caif_net *caifn;
54 BUG_ON(!net); 54 BUG_ON(!net);
55 caifn = net_generic(net, caif_net_id); 55 caifn = net_generic(net, caif_net_id);
56 BUG_ON(!caifn); 56 BUG_ON(!caifn);
57 return caifn->cfg; 57 return caifn->cfg;
58 } 58 }
59 EXPORT_SYMBOL(get_cfcnfg); 59 EXPORT_SYMBOL(get_cfcnfg);
60 60
61 static struct caif_device_entry_list *caif_device_list(struct net *net) 61 static struct caif_device_entry_list *caif_device_list(struct net *net)
62 { 62 {
63 struct caif_net *caifn; 63 struct caif_net *caifn;
64 BUG_ON(!net); 64 BUG_ON(!net);
65 caifn = net_generic(net, caif_net_id); 65 caifn = net_generic(net, caif_net_id);
66 BUG_ON(!caifn); 66 BUG_ON(!caifn);
67 return &caifn->caifdevs; 67 return &caifn->caifdevs;
68 } 68 }
69 69
70 static void caifd_put(struct caif_device_entry *e) 70 static void caifd_put(struct caif_device_entry *e)
71 { 71 {
72 irqsafe_cpu_dec(*e->pcpu_refcnt); 72 irqsafe_cpu_dec(*e->pcpu_refcnt);
73 } 73 }
74 74
75 static void caifd_hold(struct caif_device_entry *e) 75 static void caifd_hold(struct caif_device_entry *e)
76 { 76 {
77 irqsafe_cpu_inc(*e->pcpu_refcnt); 77 irqsafe_cpu_inc(*e->pcpu_refcnt);
78 } 78 }
79 79
80 static int caifd_refcnt_read(struct caif_device_entry *e) 80 static int caifd_refcnt_read(struct caif_device_entry *e)
81 { 81 {
82 int i, refcnt = 0; 82 int i, refcnt = 0;
83 for_each_possible_cpu(i) 83 for_each_possible_cpu(i)
84 refcnt += *per_cpu_ptr(e->pcpu_refcnt, i); 84 refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
85 return refcnt; 85 return refcnt;
86 } 86 }
87 87
88 /* Allocate new CAIF device. */ 88 /* Allocate new CAIF device. */
89 static struct caif_device_entry *caif_device_alloc(struct net_device *dev) 89 static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
90 { 90 {
91 struct caif_device_entry_list *caifdevs; 91 struct caif_device_entry_list *caifdevs;
92 struct caif_device_entry *caifd; 92 struct caif_device_entry *caifd;
93 93
94 caifdevs = caif_device_list(dev_net(dev)); 94 caifdevs = caif_device_list(dev_net(dev));
95 BUG_ON(!caifdevs); 95 BUG_ON(!caifdevs);
96 96
97 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); 97 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
98 if (!caifd) 98 if (!caifd)
99 return NULL; 99 return NULL;
100 caifd->pcpu_refcnt = alloc_percpu(int); 100 caifd->pcpu_refcnt = alloc_percpu(int);
101 caifd->netdev = dev; 101 caifd->netdev = dev;
102 dev_hold(dev); 102 dev_hold(dev);
103 return caifd; 103 return caifd;
104 } 104 }
105 105
106 static struct caif_device_entry *caif_get(struct net_device *dev) 106 static struct caif_device_entry *caif_get(struct net_device *dev)
107 { 107 {
108 struct caif_device_entry_list *caifdevs = 108 struct caif_device_entry_list *caifdevs =
109 caif_device_list(dev_net(dev)); 109 caif_device_list(dev_net(dev));
110 struct caif_device_entry *caifd; 110 struct caif_device_entry *caifd;
111 BUG_ON(!caifdevs); 111 BUG_ON(!caifdevs);
112 list_for_each_entry_rcu(caifd, &caifdevs->list, list) { 112 list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
113 if (caifd->netdev == dev) 113 if (caifd->netdev == dev)
114 return caifd; 114 return caifd;
115 } 115 }
116 return NULL; 116 return NULL;
117 } 117 }
118 118
119 static int transmit(struct cflayer *layer, struct cfpkt *pkt) 119 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
120 { 120 {
121 int err;
121 struct caif_device_entry *caifd = 122 struct caif_device_entry *caifd =
122 container_of(layer, struct caif_device_entry, layer); 123 container_of(layer, struct caif_device_entry, layer);
123 struct sk_buff *skb; 124 struct sk_buff *skb;
124 125
125 skb = cfpkt_tonative(pkt); 126 skb = cfpkt_tonative(pkt);
126 skb->dev = caifd->netdev; 127 skb->dev = caifd->netdev;
127 128
128 dev_queue_xmit(skb); 129 err = dev_queue_xmit(skb);
130 if (err > 0)
131 err = -EIO;
129 132
130 return 0; 133 return err;
131 } 134 }
132 135
133 /* 136 /*
134 * Stuff received packets into the CAIF stack. 137 * Stuff received packets into the CAIF stack.
135 * On error, returns non-zero and releases the skb. 138 * On error, returns non-zero and releases the skb.
136 */ 139 */
137 static int receive(struct sk_buff *skb, struct net_device *dev, 140 static int receive(struct sk_buff *skb, struct net_device *dev,
138 struct packet_type *pkttype, struct net_device *orig_dev) 141 struct packet_type *pkttype, struct net_device *orig_dev)
139 { 142 {
140 struct cfpkt *pkt; 143 struct cfpkt *pkt;
141 struct caif_device_entry *caifd; 144 struct caif_device_entry *caifd;
142 145
143 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); 146 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
144 147
145 rcu_read_lock(); 148 rcu_read_lock();
146 caifd = caif_get(dev); 149 caifd = caif_get(dev);
147 150
148 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive || 151 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
149 !netif_oper_up(caifd->netdev)) { 152 !netif_oper_up(caifd->netdev)) {
150 rcu_read_unlock(); 153 rcu_read_unlock();
151 kfree_skb(skb); 154 kfree_skb(skb);
152 return NET_RX_DROP; 155 return NET_RX_DROP;
153 } 156 }
154 157
155 /* Hold reference to netdevice while using CAIF stack */ 158 /* Hold reference to netdevice while using CAIF stack */
156 caifd_hold(caifd); 159 caifd_hold(caifd);
157 rcu_read_unlock(); 160 rcu_read_unlock();
158 161
159 caifd->layer.up->receive(caifd->layer.up, pkt); 162 caifd->layer.up->receive(caifd->layer.up, pkt);
160 163
161 /* Release reference to stack upwards */ 164 /* Release reference to stack upwards */
162 caifd_put(caifd); 165 caifd_put(caifd);
163 return 0; 166 return 0;
164 } 167 }
165 168
166 static struct packet_type caif_packet_type __read_mostly = { 169 static struct packet_type caif_packet_type __read_mostly = {
167 .type = cpu_to_be16(ETH_P_CAIF), 170 .type = cpu_to_be16(ETH_P_CAIF),
168 .func = receive, 171 .func = receive,
169 }; 172 };
170 173
171 static void dev_flowctrl(struct net_device *dev, int on) 174 static void dev_flowctrl(struct net_device *dev, int on)
172 { 175 {
173 struct caif_device_entry *caifd; 176 struct caif_device_entry *caifd;
174 177
175 rcu_read_lock(); 178 rcu_read_lock();
176 179
177 caifd = caif_get(dev); 180 caifd = caif_get(dev);
178 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) { 181 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
179 rcu_read_unlock(); 182 rcu_read_unlock();
180 return; 183 return;
181 } 184 }
182 185
183 caifd_hold(caifd); 186 caifd_hold(caifd);
184 rcu_read_unlock(); 187 rcu_read_unlock();
185 188
186 caifd->layer.up->ctrlcmd(caifd->layer.up, 189 caifd->layer.up->ctrlcmd(caifd->layer.up,
187 on ? 190 on ?
188 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND : 191 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
189 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, 192 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
190 caifd->layer.id); 193 caifd->layer.id);
191 caifd_put(caifd); 194 caifd_put(caifd);
192 } 195 }
193 196
194 /* notify Caif of device events */ 197 /* notify Caif of device events */
195 static int caif_device_notify(struct notifier_block *me, unsigned long what, 198 static int caif_device_notify(struct notifier_block *me, unsigned long what,
196 void *arg) 199 void *arg)
197 { 200 {
198 struct net_device *dev = arg; 201 struct net_device *dev = arg;
199 struct caif_device_entry *caifd = NULL; 202 struct caif_device_entry *caifd = NULL;
200 struct caif_dev_common *caifdev; 203 struct caif_dev_common *caifdev;
201 enum cfcnfg_phy_preference pref; 204 enum cfcnfg_phy_preference pref;
202 enum cfcnfg_phy_type phy_type; 205 enum cfcnfg_phy_type phy_type;
203 struct cfcnfg *cfg; 206 struct cfcnfg *cfg;
204 struct caif_device_entry_list *caifdevs = 207 struct caif_device_entry_list *caifdevs =
205 caif_device_list(dev_net(dev)); 208 caif_device_list(dev_net(dev));
206 209
207 if (dev->type != ARPHRD_CAIF) 210 if (dev->type != ARPHRD_CAIF)
208 return 0; 211 return 0;
209 212
210 cfg = get_cfcnfg(dev_net(dev)); 213 cfg = get_cfcnfg(dev_net(dev));
211 if (cfg == NULL) 214 if (cfg == NULL)
212 return 0; 215 return 0;
213 216
214 switch (what) { 217 switch (what) {
215 case NETDEV_REGISTER: 218 case NETDEV_REGISTER:
216 caifd = caif_device_alloc(dev); 219 caifd = caif_device_alloc(dev);
217 if (!caifd) 220 if (!caifd)
218 return 0; 221 return 0;
219 222
220 caifdev = netdev_priv(dev); 223 caifdev = netdev_priv(dev);
221 caifdev->flowctrl = dev_flowctrl; 224 caifdev->flowctrl = dev_flowctrl;
222 225
223 caifd->layer.transmit = transmit; 226 caifd->layer.transmit = transmit;
224 227
225 if (caifdev->use_frag) 228 if (caifdev->use_frag)
226 phy_type = CFPHYTYPE_FRAG; 229 phy_type = CFPHYTYPE_FRAG;
227 else 230 else
228 phy_type = CFPHYTYPE_CAIF; 231 phy_type = CFPHYTYPE_CAIF;
229 232
230 switch (caifdev->link_select) { 233 switch (caifdev->link_select) {
231 case CAIF_LINK_HIGH_BANDW: 234 case CAIF_LINK_HIGH_BANDW:
232 pref = CFPHYPREF_HIGH_BW; 235 pref = CFPHYPREF_HIGH_BW;
233 break; 236 break;
234 case CAIF_LINK_LOW_LATENCY: 237 case CAIF_LINK_LOW_LATENCY:
235 pref = CFPHYPREF_LOW_LAT; 238 pref = CFPHYPREF_LOW_LAT;
236 break; 239 break;
237 default: 240 default:
238 pref = CFPHYPREF_HIGH_BW; 241 pref = CFPHYPREF_HIGH_BW;
239 break; 242 break;
240 } 243 }
241 strncpy(caifd->layer.name, dev->name, 244 strncpy(caifd->layer.name, dev->name,
242 sizeof(caifd->layer.name) - 1); 245 sizeof(caifd->layer.name) - 1);
243 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; 246 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
244 247
245 mutex_lock(&caifdevs->lock); 248 mutex_lock(&caifdevs->lock);
246 list_add_rcu(&caifd->list, &caifdevs->list); 249 list_add_rcu(&caifd->list, &caifdevs->list);
247 250
248 cfcnfg_add_phy_layer(cfg, 251 cfcnfg_add_phy_layer(cfg,
249 phy_type, 252 phy_type,
250 dev, 253 dev,
251 &caifd->layer, 254 &caifd->layer,
252 pref, 255 pref,
253 caifdev->use_fcs, 256 caifdev->use_fcs,
254 caifdev->use_stx); 257 caifdev->use_stx);
255 mutex_unlock(&caifdevs->lock); 258 mutex_unlock(&caifdevs->lock);
256 break; 259 break;
257 260
258 case NETDEV_UP: 261 case NETDEV_UP:
259 rcu_read_lock(); 262 rcu_read_lock();
260 263
261 caifd = caif_get(dev); 264 caifd = caif_get(dev);
262 if (caifd == NULL) { 265 if (caifd == NULL) {
263 rcu_read_unlock(); 266 rcu_read_unlock();
264 break; 267 break;
265 } 268 }
266 269
267 cfcnfg_set_phy_state(cfg, &caifd->layer, true); 270 cfcnfg_set_phy_state(cfg, &caifd->layer, true);
268 rcu_read_unlock(); 271 rcu_read_unlock();
269 272
270 break; 273 break;
271 274
272 case NETDEV_DOWN: 275 case NETDEV_DOWN:
273 rcu_read_lock(); 276 rcu_read_lock();
274 277
275 caifd = caif_get(dev); 278 caifd = caif_get(dev);
276 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) { 279 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
277 rcu_read_unlock(); 280 rcu_read_unlock();
278 return -EINVAL; 281 return -EINVAL;
279 } 282 }
280 283
281 cfcnfg_set_phy_state(cfg, &caifd->layer, false); 284 cfcnfg_set_phy_state(cfg, &caifd->layer, false);
282 caifd_hold(caifd); 285 caifd_hold(caifd);
283 rcu_read_unlock(); 286 rcu_read_unlock();
284 287
285 caifd->layer.up->ctrlcmd(caifd->layer.up, 288 caifd->layer.up->ctrlcmd(caifd->layer.up,
286 _CAIF_CTRLCMD_PHYIF_DOWN_IND, 289 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
287 caifd->layer.id); 290 caifd->layer.id);
288 caifd_put(caifd); 291 caifd_put(caifd);
289 break; 292 break;
290 293
291 case NETDEV_UNREGISTER: 294 case NETDEV_UNREGISTER:
292 mutex_lock(&caifdevs->lock); 295 mutex_lock(&caifdevs->lock);
293 296
294 caifd = caif_get(dev); 297 caifd = caif_get(dev);
295 if (caifd == NULL) { 298 if (caifd == NULL) {
296 mutex_unlock(&caifdevs->lock); 299 mutex_unlock(&caifdevs->lock);
297 break; 300 break;
298 } 301 }
299 list_del_rcu(&caifd->list); 302 list_del_rcu(&caifd->list);
300 303
301 /* 304 /*
302 * NETDEV_UNREGISTER is called repeatedly until all reference 305 * NETDEV_UNREGISTER is called repeatedly until all reference
303 * counts for the net-device are released. If references to 306 * counts for the net-device are released. If references to
304 * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for 307 * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for
305 * the next call to NETDEV_UNREGISTER. 308 * the next call to NETDEV_UNREGISTER.
306 * 309 *
307 * If any packets are in flight down the CAIF Stack, 310 * If any packets are in flight down the CAIF Stack,
308 * cfcnfg_del_phy_layer will return nonzero. 311 * cfcnfg_del_phy_layer will return nonzero.
309 * If no packets are in flight, the CAIF Stack associated 312 * If no packets are in flight, the CAIF Stack associated
310 * with the net-device un-registering is freed. 313 * with the net-device un-registering is freed.
311 */ 314 */
312 315
313 if (caifd_refcnt_read(caifd) != 0 || 316 if (caifd_refcnt_read(caifd) != 0 ||
314 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) { 317 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
315 318
316 pr_info("Wait for device inuse\n"); 319 pr_info("Wait for device inuse\n");
317 /* Enrole device if CAIF Stack is still in use */ 320 /* Enrole device if CAIF Stack is still in use */
318 list_add_rcu(&caifd->list, &caifdevs->list); 321 list_add_rcu(&caifd->list, &caifdevs->list);
319 mutex_unlock(&caifdevs->lock); 322 mutex_unlock(&caifdevs->lock);
320 break; 323 break;
321 } 324 }
322 325
323 synchronize_rcu(); 326 synchronize_rcu();
324 dev_put(caifd->netdev); 327 dev_put(caifd->netdev);
325 free_percpu(caifd->pcpu_refcnt); 328 free_percpu(caifd->pcpu_refcnt);
326 kfree(caifd); 329 kfree(caifd);
327 330
328 mutex_unlock(&caifdevs->lock); 331 mutex_unlock(&caifdevs->lock);
329 break; 332 break;
330 } 333 }
331 return 0; 334 return 0;
332 } 335 }
333 336
334 static struct notifier_block caif_device_notifier = { 337 static struct notifier_block caif_device_notifier = {
335 .notifier_call = caif_device_notify, 338 .notifier_call = caif_device_notify,
336 .priority = 0, 339 .priority = 0,
337 }; 340 };
338 341
339 /* Per-namespace Caif devices handling */ 342 /* Per-namespace Caif devices handling */
340 static int caif_init_net(struct net *net) 343 static int caif_init_net(struct net *net)
341 { 344 {
342 struct caif_net *caifn = net_generic(net, caif_net_id); 345 struct caif_net *caifn = net_generic(net, caif_net_id);
343 BUG_ON(!caifn); 346 BUG_ON(!caifn);
344 INIT_LIST_HEAD(&caifn->caifdevs.list); 347 INIT_LIST_HEAD(&caifn->caifdevs.list);
345 mutex_init(&caifn->caifdevs.lock); 348 mutex_init(&caifn->caifdevs.lock);
346 349
347 caifn->cfg = cfcnfg_create(); 350 caifn->cfg = cfcnfg_create();
348 if (!caifn->cfg) { 351 if (!caifn->cfg) {
349 pr_warn("can't create cfcnfg\n"); 352 pr_warn("can't create cfcnfg\n");
350 return -ENOMEM; 353 return -ENOMEM;
351 } 354 }
352 355
353 return 0; 356 return 0;
354 } 357 }
355 358
356 static void caif_exit_net(struct net *net) 359 static void caif_exit_net(struct net *net)
357 { 360 {
358 struct caif_device_entry *caifd, *tmp; 361 struct caif_device_entry *caifd, *tmp;
359 struct caif_device_entry_list *caifdevs = 362 struct caif_device_entry_list *caifdevs =
360 caif_device_list(net); 363 caif_device_list(net);
361 struct cfcnfg *cfg; 364 struct cfcnfg *cfg;
362 365
363 rtnl_lock(); 366 rtnl_lock();
364 mutex_lock(&caifdevs->lock); 367 mutex_lock(&caifdevs->lock);
365 368
366 cfg = get_cfcnfg(net); 369 cfg = get_cfcnfg(net);
367 if (cfg == NULL) { 370 if (cfg == NULL) {
368 mutex_unlock(&caifdevs->lock); 371 mutex_unlock(&caifdevs->lock);
369 return; 372 return;
370 } 373 }
371 374
372 list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) { 375 list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
373 int i = 0; 376 int i = 0;
374 list_del_rcu(&caifd->list); 377 list_del_rcu(&caifd->list);
375 cfcnfg_set_phy_state(cfg, &caifd->layer, false); 378 cfcnfg_set_phy_state(cfg, &caifd->layer, false);
376 379
377 while (i < 10 && 380 while (i < 10 &&
378 (caifd_refcnt_read(caifd) != 0 || 381 (caifd_refcnt_read(caifd) != 0 ||
379 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) { 382 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
380 383
381 pr_info("Wait for device inuse\n"); 384 pr_info("Wait for device inuse\n");
382 msleep(250); 385 msleep(250);
383 i++; 386 i++;
384 } 387 }
385 synchronize_rcu(); 388 synchronize_rcu();
386 dev_put(caifd->netdev); 389 dev_put(caifd->netdev);
387 free_percpu(caifd->pcpu_refcnt); 390 free_percpu(caifd->pcpu_refcnt);
388 kfree(caifd); 391 kfree(caifd);
389 } 392 }
390 cfcnfg_remove(cfg); 393 cfcnfg_remove(cfg);
391 394
392 mutex_unlock(&caifdevs->lock); 395 mutex_unlock(&caifdevs->lock);
393 rtnl_unlock(); 396 rtnl_unlock();
394 } 397 }
395 398
396 static struct pernet_operations caif_net_ops = { 399 static struct pernet_operations caif_net_ops = {
397 .init = caif_init_net, 400 .init = caif_init_net,
398 .exit = caif_exit_net, 401 .exit = caif_exit_net,
399 .id = &caif_net_id, 402 .id = &caif_net_id,
400 .size = sizeof(struct caif_net), 403 .size = sizeof(struct caif_net),
401 }; 404 };
402 405
403 /* Initialize Caif devices list */ 406 /* Initialize Caif devices list */
404 static int __init caif_device_init(void) 407 static int __init caif_device_init(void)
405 { 408 {
406 int result; 409 int result;
407 410
408 result = register_pernet_device(&caif_net_ops); 411 result = register_pernet_device(&caif_net_ops);
409 412
410 if (result) 413 if (result)
411 return result; 414 return result;
412 415
413 register_netdevice_notifier(&caif_device_notifier); 416 register_netdevice_notifier(&caif_device_notifier);
414 dev_add_pack(&caif_packet_type); 417 dev_add_pack(&caif_packet_type);
415 418
416 return result; 419 return result;
417 } 420 }
418 421
419 static void __exit caif_device_exit(void) 422 static void __exit caif_device_exit(void)
420 { 423 {
421 unregister_pernet_device(&caif_net_ops); 424 unregister_pernet_device(&caif_net_ops);
422 unregister_netdevice_notifier(&caif_device_notifier); 425 unregister_netdevice_notifier(&caif_device_notifier);
423 dev_remove_pack(&caif_packet_type); 426 dev_remove_pack(&caif_packet_type);
424 } 427 }
425 428
426 module_init(caif_device_init); 429 module_init(caif_device_init);
427 module_exit(caif_device_exit); 430 module_exit(caif_device_exit);
428 431
net/caif/caif_socket.c
1 /* 1 /*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8 8
9 #include <linux/fs.h> 9 #include <linux/fs.h>
10 #include <linux/init.h> 10 #include <linux/init.h>
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/sched.h> 12 #include <linux/sched.h>
13 #include <linux/spinlock.h> 13 #include <linux/spinlock.h>
14 #include <linux/mutex.h> 14 #include <linux/mutex.h>
15 #include <linux/list.h> 15 #include <linux/list.h>
16 #include <linux/wait.h> 16 #include <linux/wait.h>
17 #include <linux/poll.h> 17 #include <linux/poll.h>
18 #include <linux/tcp.h> 18 #include <linux/tcp.h>
19 #include <linux/uaccess.h> 19 #include <linux/uaccess.h>
20 #include <linux/debugfs.h> 20 #include <linux/debugfs.h>
21 #include <linux/caif/caif_socket.h> 21 #include <linux/caif/caif_socket.h>
22 #include <asm/atomic.h> 22 #include <asm/atomic.h>
23 #include <net/sock.h> 23 #include <net/sock.h>
24 #include <net/tcp_states.h> 24 #include <net/tcp_states.h>
25 #include <net/caif/caif_layer.h> 25 #include <net/caif/caif_layer.h>
26 #include <net/caif/caif_dev.h> 26 #include <net/caif/caif_dev.h>
27 #include <net/caif/cfpkt.h> 27 #include <net/caif/cfpkt.h>
28 28
29 MODULE_LICENSE("GPL"); 29 MODULE_LICENSE("GPL");
30 MODULE_ALIAS_NETPROTO(AF_CAIF); 30 MODULE_ALIAS_NETPROTO(AF_CAIF);
31 31
32 /* 32 /*
33 * CAIF state is re-using the TCP socket states. 33 * CAIF state is re-using the TCP socket states.
34 * caif_states stored in sk_state reflect the state as reported by 34 * caif_states stored in sk_state reflect the state as reported by
35 * the CAIF stack, while sk_socket->state is the state of the socket. 35 * the CAIF stack, while sk_socket->state is the state of the socket.
36 */ 36 */
37 enum caif_states { 37 enum caif_states {
38 CAIF_CONNECTED = TCP_ESTABLISHED, 38 CAIF_CONNECTED = TCP_ESTABLISHED,
39 CAIF_CONNECTING = TCP_SYN_SENT, 39 CAIF_CONNECTING = TCP_SYN_SENT,
40 CAIF_DISCONNECTED = TCP_CLOSE 40 CAIF_DISCONNECTED = TCP_CLOSE
41 }; 41 };
42 42
/* Bit numbers in caifsock->flow_state, used with set_bit()/test_bit(). */
#define TX_FLOW_ON_BIT 1	/* set: modem allows us to transmit */
#define RX_FLOW_ON_BIT 2	/* set: we allow the modem to transmit */

/* Root debugfs directory for the CAIF socket layer. */
static struct dentry *debugfsdir;

#ifdef CONFIG_DEBUG_FS
/* Global event counters exported through debugfs. */
struct debug_fs_counter {
	atomic_t caif_nr_socks;			/* currently open CAIF sockets */
	atomic_t num_connect_req;		/* connect requests sent */
	atomic_t num_connect_resp;		/* successful connect responses */
	atomic_t num_connect_fail_resp;		/* failed connect responses */
	atomic_t num_disconnect;		/* disconnects requested */
	atomic_t num_remote_shutdown_ind;	/* remote shutdown indications */
	atomic_t num_tx_flow_off_ind;		/* TX flow-off from modem */
	atomic_t num_tx_flow_on_ind;		/* TX flow-on from modem */
	atomic_t num_rx_flow_off;		/* RX flow-off requests we sent */
	atomic_t num_rx_flow_on;		/* RX flow-on requests we sent */
};
static struct debug_fs_counter cnt;
/* Counter updates compile to nothing when debugfs is not configured. */
#define	dbfs_atomic_inc(v) atomic_inc(v)
#define	dbfs_atomic_dec(v) atomic_dec(v)
#else
#define	dbfs_atomic_inc(v)
#define	dbfs_atomic_dec(v)
#endif
68 68
/* Per-socket state; embeds struct sock and the CAIF protocol layer. */
struct caifsock {
	struct sock sk; /* must be first member */
	struct cflayer layer;		/* this socket's layer in the CAIF stack */
	char name[CAIF_LAYER_NAME_SZ];	/* Used for debugging */
	u32 flow_state;			/* TX/RX_FLOW_ON_BIT flags */
	struct caif_connect_request conn_req;	/* parameters used at connect */
	struct mutex readlock;		/* serializes stream readers */
	struct dentry *debugfs_socket_dir;	/* per-socket debugfs dir */
	int headroom, tailroom, maxframe;	/* link-layer framing limits */
};
79 79
80 static int rx_flow_is_on(struct caifsock *cf_sk) 80 static int rx_flow_is_on(struct caifsock *cf_sk)
81 { 81 {
82 return test_bit(RX_FLOW_ON_BIT, 82 return test_bit(RX_FLOW_ON_BIT,
83 (void *) &cf_sk->flow_state); 83 (void *) &cf_sk->flow_state);
84 } 84 }
85 85
86 static int tx_flow_is_on(struct caifsock *cf_sk) 86 static int tx_flow_is_on(struct caifsock *cf_sk)
87 { 87 {
88 return test_bit(TX_FLOW_ON_BIT, 88 return test_bit(TX_FLOW_ON_BIT,
89 (void *) &cf_sk->flow_state); 89 (void *) &cf_sk->flow_state);
90 } 90 }
91 91
92 static void set_rx_flow_off(struct caifsock *cf_sk) 92 static void set_rx_flow_off(struct caifsock *cf_sk)
93 { 93 {
94 clear_bit(RX_FLOW_ON_BIT, 94 clear_bit(RX_FLOW_ON_BIT,
95 (void *) &cf_sk->flow_state); 95 (void *) &cf_sk->flow_state);
96 } 96 }
97 97
98 static void set_rx_flow_on(struct caifsock *cf_sk) 98 static void set_rx_flow_on(struct caifsock *cf_sk)
99 { 99 {
100 set_bit(RX_FLOW_ON_BIT, 100 set_bit(RX_FLOW_ON_BIT,
101 (void *) &cf_sk->flow_state); 101 (void *) &cf_sk->flow_state);
102 } 102 }
103 103
104 static void set_tx_flow_off(struct caifsock *cf_sk) 104 static void set_tx_flow_off(struct caifsock *cf_sk)
105 { 105 {
106 clear_bit(TX_FLOW_ON_BIT, 106 clear_bit(TX_FLOW_ON_BIT,
107 (void *) &cf_sk->flow_state); 107 (void *) &cf_sk->flow_state);
108 } 108 }
109 109
110 static void set_tx_flow_on(struct caifsock *cf_sk) 110 static void set_tx_flow_on(struct caifsock *cf_sk)
111 { 111 {
112 set_bit(TX_FLOW_ON_BIT, 112 set_bit(TX_FLOW_ON_BIT,
113 (void *) &cf_sk->flow_state); 113 (void *) &cf_sk->flow_state);
114 } 114 }
115 115
116 static void caif_read_lock(struct sock *sk) 116 static void caif_read_lock(struct sock *sk)
117 { 117 {
118 struct caifsock *cf_sk; 118 struct caifsock *cf_sk;
119 cf_sk = container_of(sk, struct caifsock, sk); 119 cf_sk = container_of(sk, struct caifsock, sk);
120 mutex_lock(&cf_sk->readlock); 120 mutex_lock(&cf_sk->readlock);
121 } 121 }
122 122
123 static void caif_read_unlock(struct sock *sk) 123 static void caif_read_unlock(struct sock *sk)
124 { 124 {
125 struct caifsock *cf_sk; 125 struct caifsock *cf_sk;
126 cf_sk = container_of(sk, struct caifsock, sk); 126 cf_sk = container_of(sk, struct caifsock, sk);
127 mutex_unlock(&cf_sk->readlock); 127 mutex_unlock(&cf_sk->readlock);
128 } 128 }
129 129
130 static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) 130 static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
131 { 131 {
132 /* A quarter of full buffer is used a low water mark */ 132 /* A quarter of full buffer is used a low water mark */
133 return cf_sk->sk.sk_rcvbuf / 4; 133 return cf_sk->sk.sk_rcvbuf / 4;
134 } 134 }
135 135
136 static void caif_flow_ctrl(struct sock *sk, int mode) 136 static void caif_flow_ctrl(struct sock *sk, int mode)
137 { 137 {
138 struct caifsock *cf_sk; 138 struct caifsock *cf_sk;
139 cf_sk = container_of(sk, struct caifsock, sk); 139 cf_sk = container_of(sk, struct caifsock, sk);
140 if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) 140 if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
141 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); 141 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
142 } 142 }
143 143
/*
 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
 * not dropped, but CAIF is sending flow off instead.
 */
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	/*
	 * Receive buffer (nearly) full: ask the modem to stop sending
	 * rather than dropping this packet.
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
		(unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
		pr_debug("sending flow OFF (queue len = %d %d)\n",
			atomic_read(&cf_sk->sk.sk_rmem_alloc),
			sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
		dbfs_atomic_inc(&cnt.num_rx_flow_off);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;
	/* Receive memory accounting failed: also request flow off. */
	if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		pr_debug("sending flow OFF due to rmem_schedule\n");
		dbfs_atomic_inc(&cnt.num_rx_flow_off);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;
	spin_lock_irqsave(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	else
		/* Socket died before the skb was queued: drop it here. */
		kfree_skb(skb);
	return 0;
}
194 194
195 /* Packet Receive Callback function called from CAIF Stack */ 195 /* Packet Receive Callback function called from CAIF Stack */
196 static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt) 196 static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
197 { 197 {
198 struct caifsock *cf_sk; 198 struct caifsock *cf_sk;
199 struct sk_buff *skb; 199 struct sk_buff *skb;
200 200
201 cf_sk = container_of(layr, struct caifsock, layer); 201 cf_sk = container_of(layr, struct caifsock, layer);
202 skb = cfpkt_tonative(pkt); 202 skb = cfpkt_tonative(pkt);
203 203
204 if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) { 204 if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
205 cfpkt_destroy(pkt); 205 cfpkt_destroy(pkt);
206 return 0; 206 return 0;
207 } 207 }
208 caif_queue_rcv_skb(&cf_sk->sk, skb); 208 caif_queue_rcv_skb(&cf_sk->sk, skb);
209 return 0; 209 return 0;
210 } 210 }
211 211
212 static void cfsk_hold(struct cflayer *layr) 212 static void cfsk_hold(struct cflayer *layr)
213 { 213 {
214 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); 214 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
215 sock_hold(&cf_sk->sk); 215 sock_hold(&cf_sk->sk);
216 } 216 }
217 217
218 static void cfsk_put(struct cflayer *layr) 218 static void cfsk_put(struct cflayer *layr)
219 { 219 {
220 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); 220 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
221 sock_put(&cf_sk->sk); 221 sock_put(&cf_sk->sk);
222 } 222 }
223 223
/*
 * Packet Control Callback function called from CAIF.
 * Translates CAIF stack control commands into socket state changes,
 * flow-state bits and wakeups of sleeping callers.
 */
static void caif_ctrl_cb(struct cflayer *layr,
				enum caif_ctrlcmd flow,
				int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to shut up */
		dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		caif_client_register_refcnt(&cf_sk->layer,
						cfsk_hold, cfsk_put);
		dbfs_atomic_inc(&cnt.num_connect_resp);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed */
		dbfs_atomic_inc(&cnt.num_connect_fail_resp);
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		/*
		 * Socket "standards" seems to require POLLOUT to
		 * be set at connect failure.
		 */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		/* Allow readers to drain what is already queued. */
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;

	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}
288 288
289 static void caif_check_flow_release(struct sock *sk) 289 static void caif_check_flow_release(struct sock *sk)
290 { 290 {
291 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 291 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
292 292
293 if (rx_flow_is_on(cf_sk)) 293 if (rx_flow_is_on(cf_sk))
294 return; 294 return;
295 295
296 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { 296 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
297 dbfs_atomic_inc(&cnt.num_rx_flow_on); 297 dbfs_atomic_inc(&cnt.num_rx_flow_on);
298 set_rx_flow_on(cf_sk); 298 set_rx_flow_on(cf_sk);
299 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); 299 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
300 } 300 }
301 } 301 }
302 302
303 /* 303 /*
304 * Copied from unix_dgram_recvmsg, but removed credit checks, 304 * Copied from unix_dgram_recvmsg, but removed credit checks,
305 * changed locking, address handling and added MSG_TRUNC. 305 * changed locking, address handling and added MSG_TRUNC.
306 */ 306 */
307 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, 307 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
308 struct msghdr *m, size_t len, int flags) 308 struct msghdr *m, size_t len, int flags)
309 309
310 { 310 {
311 struct sock *sk = sock->sk; 311 struct sock *sk = sock->sk;
312 struct sk_buff *skb; 312 struct sk_buff *skb;
313 int ret; 313 int ret;
314 int copylen; 314 int copylen;
315 315
316 ret = -EOPNOTSUPP; 316 ret = -EOPNOTSUPP;
317 if (m->msg_flags&MSG_OOB) 317 if (m->msg_flags&MSG_OOB)
318 goto read_error; 318 goto read_error;
319 319
320 skb = skb_recv_datagram(sk, flags, 0 , &ret); 320 skb = skb_recv_datagram(sk, flags, 0 , &ret);
321 if (!skb) 321 if (!skb)
322 goto read_error; 322 goto read_error;
323 copylen = skb->len; 323 copylen = skb->len;
324 if (len < copylen) { 324 if (len < copylen) {
325 m->msg_flags |= MSG_TRUNC; 325 m->msg_flags |= MSG_TRUNC;
326 copylen = len; 326 copylen = len;
327 } 327 }
328 328
329 ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); 329 ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
330 if (ret) 330 if (ret)
331 goto out_free; 331 goto out_free;
332 332
333 ret = (flags & MSG_TRUNC) ? skb->len : copylen; 333 ret = (flags & MSG_TRUNC) ? skb->len : copylen;
334 out_free: 334 out_free:
335 skb_free_datagram(sk, skb); 335 skb_free_datagram(sk, skb);
336 caif_check_flow_release(sk); 336 caif_check_flow_release(sk);
337 return ret; 337 return ret;
338 338
339 read_error: 339 read_error:
340 return ret; 340 return ret;
341 } 341 }
342 342
343 343
/*
 * Copied from unix_stream_wait_data, identical except for lock call.
 * Sleeps until data arrives, the socket errors out, disconnects or the
 * timeout expires. Called with the socket unlocked; returns with it
 * unlocked as well. Returns the remaining timeout.
 */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	lock_sock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		/* Stop waiting on data, error, disconnect or timeout. */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
			sk->sk_err ||
			sk->sk_state != CAIF_CONNECTED ||
			sock_flag(sk, SOCK_DEAD) ||
			(sk->sk_shutdown & RCV_SHUTDOWN) ||
			signal_pending(current) ||
			!timeo)
			break;

		/* Drop the socket lock while actually sleeping. */
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	release_sock(sk);
	return timeo;
}
373 373
374 374
/*
 * Copied from unix_stream_recvmsg, but removed credit checks,
 * changed locking calls, changed address handling.
 * Reads up to @size bytes of stream data, blocking (subject to the
 * receive timeout) until at least the low-water-mark target is copied.
 */
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t size,
				int flags)
{
	struct sock *sk = sock->sk;
	int copied = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	/*
	 * Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */
	err = -EAGAIN;
	if (sk->sk_state == CAIF_CONNECTING)
		goto out;

	caif_read_lock(sk);
	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	do {
		int chunk;
		struct sk_buff *skb;

		lock_sock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		caif_check_flow_release(sk);

		if (skb == NULL) {
			/* Enough copied already: stop without sleeping. */
			if (copied >= target)
				goto unlock;
			/*
			 * POSIX 1003.1g mandates this order.
			 */
			err = sock_error(sk);
			if (err)
				goto unlock;
			err = -ECONNRESET;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			err = -EPIPE;
			if (sk->sk_state != CAIF_CONNECTED)
				goto unlock;
			if (sock_flag(sk, SOCK_DEAD))
				goto unlock;

			release_sock(sk);

			err = -EAGAIN;
			if (!timeo)
				break;

			/* Release the read lock while we sleep for data. */
			caif_read_unlock(sk);

			timeo = caif_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			caif_read_lock(sk);
			continue;
unlock:
			release_sock(sk);
			break;
		}
		release_sock(sk);
		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			/* Copy failed: requeue the skb for the next reader. */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			/* put the skb back if we didn't use it up. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/*
			 * It is questionable, see note in unix_dgram_recvmsg.
			 */
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);
	caif_read_unlock(sk);

out:
	return copied ? : err;
}
490 490
/*
 * Copied from sock.c:sock_wait_for_wmem, but change to wait for
 * CAIF flow-on and sock_writable.
 * Waits until TX flow is on (and, if @wait_writeable, the socket has
 * write space), or until timeout/signal/error. On exit *err holds 0 or
 * the reason the wait stopped; returns the remaining timeout.
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
				int wait_writeable, long timeo, int *err)
{
	struct sock *sk = &cf_sk->sk;
	DEFINE_WAIT(wait);
	for (;;) {
		*err = 0;
		/* Flow is on and (if requested) there is write space: done. */
		if (tx_flow_is_on(cf_sk) &&
			(!wait_writeable || sock_writeable(&cf_sk->sk)))
			break;
		*err = -ETIMEDOUT;
		if (!timeo)
			break;
		*err = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		*err = -ECONNRESET;
		if (sk->sk_shutdown & SHUTDOWN_MASK)
			break;
		*err = -sk->sk_err;
		if (sk->sk_err)
			break;
		*err = -EPIPE;
		if (cf_sk->sk.sk_state != CAIF_CONNECTED)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}
526 526
527 /* 527 /*
528 * Transmit a SKB. The device may temporarily request re-transmission 528 * Transmit a SKB. The device may temporarily request re-transmission
529 * by returning EAGAIN. 529 * by returning EAGAIN.
530 */ 530 */
531 static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, 531 static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
532 int noblock, long timeo) 532 int noblock, long timeo)
533 { 533 {
534 struct cfpkt *pkt; 534 struct cfpkt *pkt;
535 535
536 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); 536 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
537 memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info)); 537 memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
538 538
539 if (cf_sk->layer.dn == NULL) 539 if (cf_sk->layer.dn == NULL)
540 return -EINVAL; 540 return -EINVAL;
541 541
542 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); 542 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
543 } 543 }
544 544
/*
 * Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF.
 * Sends one datagram of at most cf_sk->maxframe bytes. On transmit
 * failure the skb has already been consumed by the lower layers, so it
 * must not be freed here.
 */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int buffer_size;
	int ret = 0;
	struct sk_buff *skb = NULL;
	int noblock;
	long timeo;
	caif_assert(cf_sk);
	ret = sock_error(sk);
	if (ret)
		goto err;

	/* Neither out-of-band data nor destination addresses supported. */
	ret = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto err;

	ret = -EOPNOTSUPP;
	if (msg->msg_namelen)
		goto err;

	ret = -EINVAL;
	if (unlikely(msg->msg_iov->iov_base == NULL))
		goto err;
	noblock = msg->msg_flags & MSG_DONTWAIT;

	/* Wait (unless non-blocking) until the modem signals flow-on. */
	timeo = sock_sndtimeo(sk, noblock);
	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
				1, timeo, &ret);

	if (ret)
		goto err;
	ret = -EPIPE;
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
		sock_flag(sk, SOCK_DEAD) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;

	/* Error if trying to write more than maximum frame size. */
	ret = -EMSGSIZE;
	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
		goto err;

	/* Reserve room for the CAIF protocol headers and trailers. */
	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;

	ret = -ENOMEM;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);

	if (!skb || skb_tailroom(skb) < buffer_size)
		goto err;

	skb_reserve(skb, cf_sk->headroom);

	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		/* skb is already freed */
		return ret;

	return len;
err:
	kfree_skb(skb);
	return ret;
}
613 615
614 /* 616 /*
615 * Copied from unix_stream_sendmsg and adapted to CAIF: 617 * Copied from unix_stream_sendmsg and adapted to CAIF:
616 * Changed removed permission handling and added waiting for flow on 618 * Changed removed permission handling and added waiting for flow on
617 * and other minor adaptations. 619 * and other minor adaptations.
618 */ 620 */
619 static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, 621 static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
620 struct msghdr *msg, size_t len) 622 struct msghdr *msg, size_t len)
621 { 623 {
622 struct sock *sk = sock->sk; 624 struct sock *sk = sock->sk;
623 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 625 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
624 int err, size; 626 int err, size;
625 struct sk_buff *skb; 627 struct sk_buff *skb;
626 int sent = 0; 628 int sent = 0;
627 long timeo; 629 long timeo;
628 630
629 err = -EOPNOTSUPP; 631 err = -EOPNOTSUPP;
630 if (unlikely(msg->msg_flags&MSG_OOB)) 632 if (unlikely(msg->msg_flags&MSG_OOB))
631 goto out_err; 633 goto out_err;
632 634
633 if (unlikely(msg->msg_namelen)) 635 if (unlikely(msg->msg_namelen))
634 goto out_err; 636 goto out_err;
635 637
636 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 638 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
637 timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err); 639 timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
638 640
639 if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN)) 641 if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
640 goto pipe_err; 642 goto pipe_err;
641 643
642 while (sent < len) { 644 while (sent < len) {
643 645
644 size = len-sent; 646 size = len-sent;
645 647
646 if (size > cf_sk->maxframe) 648 if (size > cf_sk->maxframe)
647 size = cf_sk->maxframe; 649 size = cf_sk->maxframe;
648 650
649 /* If size is more than half of sndbuf, chop up message */ 651 /* If size is more than half of sndbuf, chop up message */
650 if (size > ((sk->sk_sndbuf >> 1) - 64)) 652 if (size > ((sk->sk_sndbuf >> 1) - 64))
651 size = (sk->sk_sndbuf >> 1) - 64; 653 size = (sk->sk_sndbuf >> 1) - 64;
652 654
653 if (size > SKB_MAX_ALLOC) 655 if (size > SKB_MAX_ALLOC)
654 size = SKB_MAX_ALLOC; 656 size = SKB_MAX_ALLOC;
655 657
656 skb = sock_alloc_send_skb(sk, 658 skb = sock_alloc_send_skb(sk,
657 size + cf_sk->headroom + 659 size + cf_sk->headroom +
658 cf_sk->tailroom, 660 cf_sk->tailroom,
659 msg->msg_flags&MSG_DONTWAIT, 661 msg->msg_flags&MSG_DONTWAIT,
660 &err); 662 &err);
661 if (skb == NULL) 663 if (skb == NULL)
662 goto out_err; 664 goto out_err;
663 665
664 skb_reserve(skb, cf_sk->headroom); 666 skb_reserve(skb, cf_sk->headroom);
665 /* 667 /*
666 * If you pass two values to the sock_alloc_send_skb 668 * If you pass two values to the sock_alloc_send_skb
667 * it tries to grab the large buffer with GFP_NOFS 669 * it tries to grab the large buffer with GFP_NOFS
668 * (which can fail easily), and if it fails grab the 670 * (which can fail easily), and if it fails grab the
669 * fallback size buffer which is under a page and will 671 * fallback size buffer which is under a page and will
670 * succeed. [Alan] 672 * succeed. [Alan]
671 */ 673 */
672 size = min_t(int, size, skb_tailroom(skb)); 674 size = min_t(int, size, skb_tailroom(skb));
673 675
674 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 676 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
675 if (err) { 677 if (err) {
676 kfree_skb(skb); 678 kfree_skb(skb);
677 goto out_err; 679 goto out_err;
678 } 680 }
679 err = transmit_skb(skb, cf_sk, 681 err = transmit_skb(skb, cf_sk,
680 msg->msg_flags&MSG_DONTWAIT, timeo); 682 msg->msg_flags&MSG_DONTWAIT, timeo);
681 if (err < 0) { 683 if (err < 0) {
682 kfree_skb(skb); 684 kfree_skb(skb);
683 goto pipe_err; 685 goto pipe_err;
684 } 686 }
685 sent += size; 687 sent += size;
686 } 688 }
687 689
688 return sent; 690 return sent;
689 691
690 pipe_err: 692 pipe_err:
691 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL)) 693 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
692 send_sig(SIGPIPE, current, 0); 694 send_sig(SIGPIPE, current, 0);
693 err = -EPIPE; 695 err = -EPIPE;
694 out_err: 696 out_err:
695 return sent ? : err; 697 return sent ? : err;
696 } 698 }
697 699
698 static int setsockopt(struct socket *sock, 700 static int setsockopt(struct socket *sock,
699 int lvl, int opt, char __user *ov, unsigned int ol) 701 int lvl, int opt, char __user *ov, unsigned int ol)
700 { 702 {
701 struct sock *sk = sock->sk; 703 struct sock *sk = sock->sk;
702 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 704 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
703 int linksel; 705 int linksel;
704 706
705 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) 707 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
706 return -ENOPROTOOPT; 708 return -ENOPROTOOPT;
707 709
708 switch (opt) { 710 switch (opt) {
709 case CAIFSO_LINK_SELECT: 711 case CAIFSO_LINK_SELECT:
710 if (ol < sizeof(int)) 712 if (ol < sizeof(int))
711 return -EINVAL; 713 return -EINVAL;
712 if (lvl != SOL_CAIF) 714 if (lvl != SOL_CAIF)
713 goto bad_sol; 715 goto bad_sol;
714 if (copy_from_user(&linksel, ov, sizeof(int))) 716 if (copy_from_user(&linksel, ov, sizeof(int)))
715 return -EINVAL; 717 return -EINVAL;
716 lock_sock(&(cf_sk->sk)); 718 lock_sock(&(cf_sk->sk));
717 cf_sk->conn_req.link_selector = linksel; 719 cf_sk->conn_req.link_selector = linksel;
718 release_sock(&cf_sk->sk); 720 release_sock(&cf_sk->sk);
719 return 0; 721 return 0;
720 722
721 case CAIFSO_REQ_PARAM: 723 case CAIFSO_REQ_PARAM:
722 if (lvl != SOL_CAIF) 724 if (lvl != SOL_CAIF)
723 goto bad_sol; 725 goto bad_sol;
724 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) 726 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
725 return -ENOPROTOOPT; 727 return -ENOPROTOOPT;
726 lock_sock(&(cf_sk->sk)); 728 lock_sock(&(cf_sk->sk));
727 if (ol > sizeof(cf_sk->conn_req.param.data) || 729 if (ol > sizeof(cf_sk->conn_req.param.data) ||
728 copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { 730 copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
729 release_sock(&cf_sk->sk); 731 release_sock(&cf_sk->sk);
730 return -EINVAL; 732 return -EINVAL;
731 } 733 }
732 cf_sk->conn_req.param.size = ol; 734 cf_sk->conn_req.param.size = ol;
733 release_sock(&cf_sk->sk); 735 release_sock(&cf_sk->sk);
734 return 0; 736 return 0;
735 737
736 default: 738 default:
737 return -ENOPROTOOPT; 739 return -ENOPROTOOPT;
738 } 740 }
739 741
740 return 0; 742 return 0;
741 bad_sol: 743 bad_sol:
742 return -ENOPROTOOPT; 744 return -ENOPROTOOPT;
743 745
744 } 746 }
745 747
746 /* 748 /*
747 * caif_connect() - Connect a CAIF Socket 749 * caif_connect() - Connect a CAIF Socket
748 * Copied and modified af_irda.c:irda_connect(). 750 * Copied and modified af_irda.c:irda_connect().
749 * 751 *
750 * Note : by consulting "errno", the user space caller may learn the cause 752 * Note : by consulting "errno", the user space caller may learn the cause
751 * of the failure. Most of them are visible in the function, others may come 753 * of the failure. Most of them are visible in the function, others may come
752 * from subroutines called and are listed here : 754 * from subroutines called and are listed here :
753 * o -EAFNOSUPPORT: bad socket family or type. 755 * o -EAFNOSUPPORT: bad socket family or type.
754 * o -ESOCKTNOSUPPORT: bad socket type or protocol 756 * o -ESOCKTNOSUPPORT: bad socket type or protocol
755 * o -EINVAL: bad socket address, or CAIF link type 757 * o -EINVAL: bad socket address, or CAIF link type
756 * o -ECONNREFUSED: remote end refused the connection. 758 * o -ECONNREFUSED: remote end refused the connection.
757 * o -EINPROGRESS: connect request sent but timed out (or non-blocking) 759 * o -EINPROGRESS: connect request sent but timed out (or non-blocking)
758 * o -EISCONN: already connected. 760 * o -EISCONN: already connected.
759 * o -ETIMEDOUT: Connection timed out (send timeout) 761 * o -ETIMEDOUT: Connection timed out (send timeout)
760 * o -ENODEV: No link layer to send request 762 * o -ENODEV: No link layer to send request
761 * o -ECONNRESET: Received Shutdown indication or lost link layer 763 * o -ECONNRESET: Received Shutdown indication or lost link layer
762 * o -ENOMEM: Out of memory 764 * o -ENOMEM: Out of memory
763 * 765 *
764 * State Strategy: 766 * State Strategy:
765 * o sk_state: holds the CAIF_* protocol state, it's updated by 767 * o sk_state: holds the CAIF_* protocol state, it's updated by
766 * caif_ctrl_cb. 768 * caif_ctrl_cb.
767 * o sock->state: holds the SS_* socket state and is updated by connect and 769 * o sock->state: holds the SS_* socket state and is updated by connect and
768 * disconnect. 770 * disconnect.
769 */ 771 */
770 static int caif_connect(struct socket *sock, struct sockaddr *uaddr, 772 static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
771 int addr_len, int flags) 773 int addr_len, int flags)
772 { 774 {
773 struct sock *sk = sock->sk; 775 struct sock *sk = sock->sk;
774 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 776 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
775 long timeo; 777 long timeo;
776 int err; 778 int err;
777 int ifindex, headroom, tailroom; 779 int ifindex, headroom, tailroom;
778 unsigned int mtu; 780 unsigned int mtu;
779 struct net_device *dev; 781 struct net_device *dev;
780 782
781 lock_sock(sk); 783 lock_sock(sk);
782 784
783 err = -EAFNOSUPPORT; 785 err = -EAFNOSUPPORT;
784 if (uaddr->sa_family != AF_CAIF) 786 if (uaddr->sa_family != AF_CAIF)
785 goto out; 787 goto out;
786 788
787 switch (sock->state) { 789 switch (sock->state) {
788 case SS_UNCONNECTED: 790 case SS_UNCONNECTED:
789 /* Normal case, a fresh connect */ 791 /* Normal case, a fresh connect */
790 caif_assert(sk->sk_state == CAIF_DISCONNECTED); 792 caif_assert(sk->sk_state == CAIF_DISCONNECTED);
791 break; 793 break;
792 case SS_CONNECTING: 794 case SS_CONNECTING:
793 switch (sk->sk_state) { 795 switch (sk->sk_state) {
794 case CAIF_CONNECTED: 796 case CAIF_CONNECTED:
795 sock->state = SS_CONNECTED; 797 sock->state = SS_CONNECTED;
796 err = -EISCONN; 798 err = -EISCONN;
797 goto out; 799 goto out;
798 case CAIF_DISCONNECTED: 800 case CAIF_DISCONNECTED:
799 /* Reconnect allowed */ 801 /* Reconnect allowed */
800 break; 802 break;
801 case CAIF_CONNECTING: 803 case CAIF_CONNECTING:
802 err = -EALREADY; 804 err = -EALREADY;
803 if (flags & O_NONBLOCK) 805 if (flags & O_NONBLOCK)
804 goto out; 806 goto out;
805 goto wait_connect; 807 goto wait_connect;
806 } 808 }
807 break; 809 break;
808 case SS_CONNECTED: 810 case SS_CONNECTED:
809 caif_assert(sk->sk_state == CAIF_CONNECTED || 811 caif_assert(sk->sk_state == CAIF_CONNECTED ||
810 sk->sk_state == CAIF_DISCONNECTED); 812 sk->sk_state == CAIF_DISCONNECTED);
811 if (sk->sk_shutdown & SHUTDOWN_MASK) { 813 if (sk->sk_shutdown & SHUTDOWN_MASK) {
812 /* Allow re-connect after SHUTDOWN_IND */ 814 /* Allow re-connect after SHUTDOWN_IND */
813 caif_disconnect_client(sock_net(sk), &cf_sk->layer); 815 caif_disconnect_client(sock_net(sk), &cf_sk->layer);
814 break; 816 break;
815 } 817 }
816 /* No reconnect on a seqpacket socket */ 818 /* No reconnect on a seqpacket socket */
817 err = -EISCONN; 819 err = -EISCONN;
818 goto out; 820 goto out;
819 case SS_DISCONNECTING: 821 case SS_DISCONNECTING:
820 case SS_FREE: 822 case SS_FREE:
821 caif_assert(1); /*Should never happen */ 823 caif_assert(1); /*Should never happen */
822 break; 824 break;
823 } 825 }
824 sk->sk_state = CAIF_DISCONNECTED; 826 sk->sk_state = CAIF_DISCONNECTED;
825 sock->state = SS_UNCONNECTED; 827 sock->state = SS_UNCONNECTED;
826 sk_stream_kill_queues(&cf_sk->sk); 828 sk_stream_kill_queues(&cf_sk->sk);
827 829
828 err = -EINVAL; 830 err = -EINVAL;
829 if (addr_len != sizeof(struct sockaddr_caif)) 831 if (addr_len != sizeof(struct sockaddr_caif))
830 goto out; 832 goto out;
831 833
832 memcpy(&cf_sk->conn_req.sockaddr, uaddr, 834 memcpy(&cf_sk->conn_req.sockaddr, uaddr,
833 sizeof(struct sockaddr_caif)); 835 sizeof(struct sockaddr_caif));
834 836
835 /* Move to connecting socket, start sending Connect Requests */ 837 /* Move to connecting socket, start sending Connect Requests */
836 sock->state = SS_CONNECTING; 838 sock->state = SS_CONNECTING;
837 sk->sk_state = CAIF_CONNECTING; 839 sk->sk_state = CAIF_CONNECTING;
838 840
839 /* Check priority value coming from socket */ 841 /* Check priority value coming from socket */
840 /* if priority value is out of range it will be ajusted */ 842 /* if priority value is out of range it will be ajusted */
841 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) 843 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
842 cf_sk->conn_req.priority = CAIF_PRIO_MAX; 844 cf_sk->conn_req.priority = CAIF_PRIO_MAX;
843 else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) 845 else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
844 cf_sk->conn_req.priority = CAIF_PRIO_MIN; 846 cf_sk->conn_req.priority = CAIF_PRIO_MIN;
845 else 847 else
846 cf_sk->conn_req.priority = cf_sk->sk.sk_priority; 848 cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
847 849
848 /*ifindex = id of the interface.*/ 850 /*ifindex = id of the interface.*/
849 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; 851 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
850 852
851 dbfs_atomic_inc(&cnt.num_connect_req); 853 dbfs_atomic_inc(&cnt.num_connect_req);
852 cf_sk->layer.receive = caif_sktrecv_cb; 854 cf_sk->layer.receive = caif_sktrecv_cb;
853 855
854 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, 856 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
855 &cf_sk->layer, &ifindex, &headroom, &tailroom); 857 &cf_sk->layer, &ifindex, &headroom, &tailroom);
856 858
857 if (err < 0) { 859 if (err < 0) {
858 cf_sk->sk.sk_socket->state = SS_UNCONNECTED; 860 cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
859 cf_sk->sk.sk_state = CAIF_DISCONNECTED; 861 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
860 goto out; 862 goto out;
861 } 863 }
862 864
863 err = -ENODEV; 865 err = -ENODEV;
864 rcu_read_lock(); 866 rcu_read_lock();
865 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 867 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
866 if (!dev) { 868 if (!dev) {
867 rcu_read_unlock(); 869 rcu_read_unlock();
868 goto out; 870 goto out;
869 } 871 }
870 cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); 872 cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
871 mtu = dev->mtu; 873 mtu = dev->mtu;
872 rcu_read_unlock(); 874 rcu_read_unlock();
873 875
874 cf_sk->tailroom = tailroom; 876 cf_sk->tailroom = tailroom;
875 cf_sk->maxframe = mtu - (headroom + tailroom); 877 cf_sk->maxframe = mtu - (headroom + tailroom);
876 if (cf_sk->maxframe < 1) { 878 if (cf_sk->maxframe < 1) {
877 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu); 879 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
878 err = -ENODEV; 880 err = -ENODEV;
879 goto out; 881 goto out;
880 } 882 }
881 883
882 err = -EINPROGRESS; 884 err = -EINPROGRESS;
883 wait_connect: 885 wait_connect:
884 886
885 if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK)) 887 if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
886 goto out; 888 goto out;
887 889
888 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); 890 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
889 891
890 release_sock(sk); 892 release_sock(sk);
891 err = -ERESTARTSYS; 893 err = -ERESTARTSYS;
892 timeo = wait_event_interruptible_timeout(*sk_sleep(sk), 894 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
893 sk->sk_state != CAIF_CONNECTING, 895 sk->sk_state != CAIF_CONNECTING,
894 timeo); 896 timeo);
895 lock_sock(sk); 897 lock_sock(sk);
896 if (timeo < 0) 898 if (timeo < 0)
897 goto out; /* -ERESTARTSYS */ 899 goto out; /* -ERESTARTSYS */
898 900
899 err = -ETIMEDOUT; 901 err = -ETIMEDOUT;
900 if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) 902 if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
901 goto out; 903 goto out;
902 if (sk->sk_state != CAIF_CONNECTED) { 904 if (sk->sk_state != CAIF_CONNECTED) {
903 sock->state = SS_UNCONNECTED; 905 sock->state = SS_UNCONNECTED;
904 err = sock_error(sk); 906 err = sock_error(sk);
905 if (!err) 907 if (!err)
906 err = -ECONNREFUSED; 908 err = -ECONNREFUSED;
907 goto out; 909 goto out;
908 } 910 }
909 sock->state = SS_CONNECTED; 911 sock->state = SS_CONNECTED;
910 err = 0; 912 err = 0;
911 out: 913 out:
912 release_sock(sk); 914 release_sock(sk);
913 return err; 915 return err;
914 } 916 }
915 917
916 /* 918 /*
917 * caif_release() - Disconnect a CAIF Socket 919 * caif_release() - Disconnect a CAIF Socket
918 * Copied and modified af_irda.c:irda_release(). 920 * Copied and modified af_irda.c:irda_release().
919 */ 921 */
920 static int caif_release(struct socket *sock) 922 static int caif_release(struct socket *sock)
921 { 923 {
922 struct sock *sk = sock->sk; 924 struct sock *sk = sock->sk;
923 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 925 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
924 int res = 0; 926 int res = 0;
925 927
926 if (!sk) 928 if (!sk)
927 return 0; 929 return 0;
928 930
929 set_tx_flow_off(cf_sk); 931 set_tx_flow_off(cf_sk);
930 932
931 /* 933 /*
932 * Ensure that packets are not queued after this point in time. 934 * Ensure that packets are not queued after this point in time.
933 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock, 935 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
934 * this ensures no packets when sock is dead. 936 * this ensures no packets when sock is dead.
935 */ 937 */
936 spin_lock(&sk->sk_receive_queue.lock); 938 spin_lock_bh(&sk->sk_receive_queue.lock);
937 sock_set_flag(sk, SOCK_DEAD); 939 sock_set_flag(sk, SOCK_DEAD);
938 spin_unlock(&sk->sk_receive_queue.lock); 940 spin_unlock_bh(&sk->sk_receive_queue.lock);
939 sock->sk = NULL; 941 sock->sk = NULL;
940 942
941 dbfs_atomic_inc(&cnt.num_disconnect); 943 dbfs_atomic_inc(&cnt.num_disconnect);
942 944
943 if (cf_sk->debugfs_socket_dir != NULL) 945 if (cf_sk->debugfs_socket_dir != NULL)
944 debugfs_remove_recursive(cf_sk->debugfs_socket_dir); 946 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
945 947
946 lock_sock(&(cf_sk->sk)); 948 lock_sock(&(cf_sk->sk));
947 sk->sk_state = CAIF_DISCONNECTED; 949 sk->sk_state = CAIF_DISCONNECTED;
948 sk->sk_shutdown = SHUTDOWN_MASK; 950 sk->sk_shutdown = SHUTDOWN_MASK;
949 951
950 if (cf_sk->sk.sk_socket->state == SS_CONNECTED || 952 if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
951 cf_sk->sk.sk_socket->state == SS_CONNECTING) 953 cf_sk->sk.sk_socket->state == SS_CONNECTING)
952 res = caif_disconnect_client(sock_net(sk), &cf_sk->layer); 954 res = caif_disconnect_client(sock_net(sk), &cf_sk->layer);
953 955
954 cf_sk->sk.sk_socket->state = SS_DISCONNECTING; 956 cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
955 wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP); 957 wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
956 958
957 sock_orphan(sk); 959 sock_orphan(sk);
958 sk_stream_kill_queues(&cf_sk->sk); 960 sk_stream_kill_queues(&cf_sk->sk);
959 release_sock(sk); 961 release_sock(sk);
960 sock_put(sk); 962 sock_put(sk);
961 return res; 963 return res;
962 } 964 }
963 965
964 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ 966 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
965 static unsigned int caif_poll(struct file *file, 967 static unsigned int caif_poll(struct file *file,
966 struct socket *sock, poll_table *wait) 968 struct socket *sock, poll_table *wait)
967 { 969 {
968 struct sock *sk = sock->sk; 970 struct sock *sk = sock->sk;
969 unsigned int mask; 971 unsigned int mask;
970 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 972 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
971 973
972 sock_poll_wait(file, sk_sleep(sk), wait); 974 sock_poll_wait(file, sk_sleep(sk), wait);
973 mask = 0; 975 mask = 0;
974 976
975 /* exceptional events? */ 977 /* exceptional events? */
976 if (sk->sk_err) 978 if (sk->sk_err)
977 mask |= POLLERR; 979 mask |= POLLERR;
978 if (sk->sk_shutdown == SHUTDOWN_MASK) 980 if (sk->sk_shutdown == SHUTDOWN_MASK)
979 mask |= POLLHUP; 981 mask |= POLLHUP;
980 if (sk->sk_shutdown & RCV_SHUTDOWN) 982 if (sk->sk_shutdown & RCV_SHUTDOWN)
981 mask |= POLLRDHUP; 983 mask |= POLLRDHUP;
982 984
983 /* readable? */ 985 /* readable? */
984 if (!skb_queue_empty(&sk->sk_receive_queue) || 986 if (!skb_queue_empty(&sk->sk_receive_queue) ||
985 (sk->sk_shutdown & RCV_SHUTDOWN)) 987 (sk->sk_shutdown & RCV_SHUTDOWN))
986 mask |= POLLIN | POLLRDNORM; 988 mask |= POLLIN | POLLRDNORM;
987 989
988 /* 990 /*
989 * we set writable also when the other side has shut down the 991 * we set writable also when the other side has shut down the
990 * connection. This prevents stuck sockets. 992 * connection. This prevents stuck sockets.
991 */ 993 */
992 if (sock_writeable(sk) && tx_flow_is_on(cf_sk)) 994 if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
993 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 995 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
994 996
995 return mask; 997 return mask;
996 } 998 }
997 999
998 static const struct proto_ops caif_seqpacket_ops = { 1000 static const struct proto_ops caif_seqpacket_ops = {
999 .family = PF_CAIF, 1001 .family = PF_CAIF,
1000 .owner = THIS_MODULE, 1002 .owner = THIS_MODULE,
1001 .release = caif_release, 1003 .release = caif_release,
1002 .bind = sock_no_bind, 1004 .bind = sock_no_bind,
1003 .connect = caif_connect, 1005 .connect = caif_connect,
1004 .socketpair = sock_no_socketpair, 1006 .socketpair = sock_no_socketpair,
1005 .accept = sock_no_accept, 1007 .accept = sock_no_accept,
1006 .getname = sock_no_getname, 1008 .getname = sock_no_getname,
1007 .poll = caif_poll, 1009 .poll = caif_poll,
1008 .ioctl = sock_no_ioctl, 1010 .ioctl = sock_no_ioctl,
1009 .listen = sock_no_listen, 1011 .listen = sock_no_listen,
1010 .shutdown = sock_no_shutdown, 1012 .shutdown = sock_no_shutdown,
1011 .setsockopt = setsockopt, 1013 .setsockopt = setsockopt,
1012 .getsockopt = sock_no_getsockopt, 1014 .getsockopt = sock_no_getsockopt,
1013 .sendmsg = caif_seqpkt_sendmsg, 1015 .sendmsg = caif_seqpkt_sendmsg,
1014 .recvmsg = caif_seqpkt_recvmsg, 1016 .recvmsg = caif_seqpkt_recvmsg,
1015 .mmap = sock_no_mmap, 1017 .mmap = sock_no_mmap,
1016 .sendpage = sock_no_sendpage, 1018 .sendpage = sock_no_sendpage,
1017 }; 1019 };
1018 1020
1019 static const struct proto_ops caif_stream_ops = { 1021 static const struct proto_ops caif_stream_ops = {
1020 .family = PF_CAIF, 1022 .family = PF_CAIF,
1021 .owner = THIS_MODULE, 1023 .owner = THIS_MODULE,
1022 .release = caif_release, 1024 .release = caif_release,
1023 .bind = sock_no_bind, 1025 .bind = sock_no_bind,
1024 .connect = caif_connect, 1026 .connect = caif_connect,
1025 .socketpair = sock_no_socketpair, 1027 .socketpair = sock_no_socketpair,
1026 .accept = sock_no_accept, 1028 .accept = sock_no_accept,
1027 .getname = sock_no_getname, 1029 .getname = sock_no_getname,
1028 .poll = caif_poll, 1030 .poll = caif_poll,
1029 .ioctl = sock_no_ioctl, 1031 .ioctl = sock_no_ioctl,
1030 .listen = sock_no_listen, 1032 .listen = sock_no_listen,
1031 .shutdown = sock_no_shutdown, 1033 .shutdown = sock_no_shutdown,
1032 .setsockopt = setsockopt, 1034 .setsockopt = setsockopt,
1033 .getsockopt = sock_no_getsockopt, 1035 .getsockopt = sock_no_getsockopt,
1034 .sendmsg = caif_stream_sendmsg, 1036 .sendmsg = caif_stream_sendmsg,
1035 .recvmsg = caif_stream_recvmsg, 1037 .recvmsg = caif_stream_recvmsg,
1036 .mmap = sock_no_mmap, 1038 .mmap = sock_no_mmap,
1037 .sendpage = sock_no_sendpage, 1039 .sendpage = sock_no_sendpage,
1038 }; 1040 };
1039 1041
1040 /* This function is called when a socket is finally destroyed. */ 1042 /* This function is called when a socket is finally destroyed. */
1041 static void caif_sock_destructor(struct sock *sk) 1043 static void caif_sock_destructor(struct sock *sk)
1042 { 1044 {
1043 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 1045 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
1044 caif_assert(!atomic_read(&sk->sk_wmem_alloc)); 1046 caif_assert(!atomic_read(&sk->sk_wmem_alloc));
1045 caif_assert(sk_unhashed(sk)); 1047 caif_assert(sk_unhashed(sk));
1046 caif_assert(!sk->sk_socket); 1048 caif_assert(!sk->sk_socket);
1047 if (!sock_flag(sk, SOCK_DEAD)) { 1049 if (!sock_flag(sk, SOCK_DEAD)) {
1048 pr_info("Attempt to release alive CAIF socket: %p\n", sk); 1050 pr_info("Attempt to release alive CAIF socket: %p\n", sk);
1049 return; 1051 return;
1050 } 1052 }
1051 sk_stream_kill_queues(&cf_sk->sk); 1053 sk_stream_kill_queues(&cf_sk->sk);
1052 dbfs_atomic_dec(&cnt.caif_nr_socks); 1054 dbfs_atomic_dec(&cnt.caif_nr_socks);
1053 caif_free_client(&cf_sk->layer); 1055 caif_free_client(&cf_sk->layer);
1054 } 1056 }
1055 1057
1056 static int caif_create(struct net *net, struct socket *sock, int protocol, 1058 static int caif_create(struct net *net, struct socket *sock, int protocol,
1057 int kern) 1059 int kern)
1058 { 1060 {
1059 struct sock *sk = NULL; 1061 struct sock *sk = NULL;
1060 struct caifsock *cf_sk = NULL; 1062 struct caifsock *cf_sk = NULL;
1061 static struct proto prot = {.name = "PF_CAIF", 1063 static struct proto prot = {.name = "PF_CAIF",
1062 .owner = THIS_MODULE, 1064 .owner = THIS_MODULE,
1063 .obj_size = sizeof(struct caifsock), 1065 .obj_size = sizeof(struct caifsock),
1064 }; 1066 };
1065 1067
1066 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN)) 1068 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
1067 return -EPERM; 1069 return -EPERM;
1068 /* 1070 /*
1069 * The sock->type specifies the socket type to use. 1071 * The sock->type specifies the socket type to use.
1070 * The CAIF socket is a packet stream in the sense 1072 * The CAIF socket is a packet stream in the sense
1071 * that it is packet based. CAIF trusts the reliability 1073 * that it is packet based. CAIF trusts the reliability
1072 * of the link, no resending is implemented. 1074 * of the link, no resending is implemented.
1073 */ 1075 */
1074 if (sock->type == SOCK_SEQPACKET) 1076 if (sock->type == SOCK_SEQPACKET)
1075 sock->ops = &caif_seqpacket_ops; 1077 sock->ops = &caif_seqpacket_ops;
1076 else if (sock->type == SOCK_STREAM) 1078 else if (sock->type == SOCK_STREAM)
1077 sock->ops = &caif_stream_ops; 1079 sock->ops = &caif_stream_ops;
1078 else 1080 else
1079 return -ESOCKTNOSUPPORT; 1081 return -ESOCKTNOSUPPORT;
1080 1082
1081 if (protocol < 0 || protocol >= CAIFPROTO_MAX) 1083 if (protocol < 0 || protocol >= CAIFPROTO_MAX)
1082 return -EPROTONOSUPPORT; 1084 return -EPROTONOSUPPORT;
1083 /* 1085 /*
1084 * Set the socket state to unconnected. The socket state 1086 * Set the socket state to unconnected. The socket state
1085 * is really not used at all in the net/core or socket.c but the 1087 * is really not used at all in the net/core or socket.c but the
1086 * initialization makes sure that sock->state is not uninitialized. 1088 * initialization makes sure that sock->state is not uninitialized.
1087 */ 1089 */
1088 sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); 1090 sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
1089 if (!sk) 1091 if (!sk)
1090 return -ENOMEM; 1092 return -ENOMEM;
1091 1093
1092 cf_sk = container_of(sk, struct caifsock, sk); 1094 cf_sk = container_of(sk, struct caifsock, sk);
1093 1095
1094 /* Store the protocol */ 1096 /* Store the protocol */
1095 sk->sk_protocol = (unsigned char) protocol; 1097 sk->sk_protocol = (unsigned char) protocol;
1096 1098
1097 /* 1099 /*
1098 * Lock in order to try to stop someone from opening the socket 1100 * Lock in order to try to stop someone from opening the socket
1099 * too early. 1101 * too early.
1100 */ 1102 */
1101 lock_sock(&(cf_sk->sk)); 1103 lock_sock(&(cf_sk->sk));
1102 1104
1103 /* Initialize the nozero default sock structure data. */ 1105 /* Initialize the nozero default sock structure data. */
1104 sock_init_data(sock, sk); 1106 sock_init_data(sock, sk);
1105 sk->sk_destruct = caif_sock_destructor; 1107 sk->sk_destruct = caif_sock_destructor;
1106 1108
1107 mutex_init(&cf_sk->readlock); /* single task reading lock */ 1109 mutex_init(&cf_sk->readlock); /* single task reading lock */
1108 cf_sk->layer.ctrlcmd = caif_ctrl_cb; 1110 cf_sk->layer.ctrlcmd = caif_ctrl_cb;
1109 cf_sk->sk.sk_socket->state = SS_UNCONNECTED; 1111 cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
1110 cf_sk->sk.sk_state = CAIF_DISCONNECTED; 1112 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
1111 1113
1112 set_tx_flow_off(cf_sk); 1114 set_tx_flow_off(cf_sk);
1113 set_rx_flow_on(cf_sk); 1115 set_rx_flow_on(cf_sk);
1114 1116
1115 /* Set default options on configuration */ 1117 /* Set default options on configuration */
1116 cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL; 1118 cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
1117 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; 1119 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1118 cf_sk->conn_req.protocol = protocol; 1120 cf_sk->conn_req.protocol = protocol;
1119 /* Increase the number of sockets created. */ 1121 /* Increase the number of sockets created. */
1120 dbfs_atomic_inc(&cnt.caif_nr_socks); 1122 dbfs_atomic_inc(&cnt.caif_nr_socks);
1121 #ifdef CONFIG_DEBUG_FS 1123 #ifdef CONFIG_DEBUG_FS
1122 if (!IS_ERR(debugfsdir)) { 1124 if (!IS_ERR(debugfsdir)) {
1123 /* Fill in some information concerning the misc socket. */ 1125 /* Fill in some information concerning the misc socket. */
1124 snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", 1126 snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
1125 atomic_read(&cnt.caif_nr_socks)); 1127 atomic_read(&cnt.caif_nr_socks));
1126 1128
1127 cf_sk->debugfs_socket_dir = 1129 cf_sk->debugfs_socket_dir =
1128 debugfs_create_dir(cf_sk->name, debugfsdir); 1130 debugfs_create_dir(cf_sk->name, debugfsdir);
1129 debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR, 1131 debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
1130 cf_sk->debugfs_socket_dir, 1132 cf_sk->debugfs_socket_dir,
1131 (u32 *) &cf_sk->sk.sk_state); 1133 (u32 *) &cf_sk->sk.sk_state);
1132 debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR, 1134 debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
1133 cf_sk->debugfs_socket_dir, &cf_sk->flow_state); 1135 cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
1134 debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR, 1136 debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
1135 cf_sk->debugfs_socket_dir, 1137 cf_sk->debugfs_socket_dir,
1136 (u32 *) &cf_sk->sk.sk_rmem_alloc); 1138 (u32 *) &cf_sk->sk.sk_rmem_alloc);
1137 debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR, 1139 debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
1138 cf_sk->debugfs_socket_dir, 1140 cf_sk->debugfs_socket_dir,
1139 (u32 *) &cf_sk->sk.sk_wmem_alloc); 1141 (u32 *) &cf_sk->sk.sk_wmem_alloc);
1140 debugfs_create_u32("identity", S_IRUSR | S_IWUSR, 1142 debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
1141 cf_sk->debugfs_socket_dir, 1143 cf_sk->debugfs_socket_dir,
1142 (u32 *) &cf_sk->layer.id); 1144 (u32 *) &cf_sk->layer.id);
1143 } 1145 }
1144 #endif 1146 #endif
1145 release_sock(&cf_sk->sk); 1147 release_sock(&cf_sk->sk);
1146 return 0; 1148 return 0;
1147 } 1149 }
1148 1150
1149 1151
1150 static struct net_proto_family caif_family_ops = { 1152 static struct net_proto_family caif_family_ops = {
1151 .family = PF_CAIF, 1153 .family = PF_CAIF,
1152 .create = caif_create, 1154 .create = caif_create,
1153 .owner = THIS_MODULE, 1155 .owner = THIS_MODULE,
1154 }; 1156 };
1155 1157
1156 static int af_caif_init(void) 1158 static int af_caif_init(void)
1157 { 1159 {
1158 int err = sock_register(&caif_family_ops); 1160 int err = sock_register(&caif_family_ops);
1159 if (!err) 1161 if (!err)
1160 return err; 1162 return err;
1161 return 0; 1163 return 0;
1162 } 1164 }
1163 1165
1164 static int __init caif_sktinit_module(void) 1166 static int __init caif_sktinit_module(void)
1165 { 1167 {
1166 #ifdef CONFIG_DEBUG_FS 1168 #ifdef CONFIG_DEBUG_FS
1167 debugfsdir = debugfs_create_dir("caif_sk", NULL); 1169 debugfsdir = debugfs_create_dir("caif_sk", NULL);
1168 if (!IS_ERR(debugfsdir)) { 1170 if (!IS_ERR(debugfsdir)) {
1169 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR, 1171 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
1170 debugfsdir, 1172 debugfsdir,
1171 (u32 *) &cnt.caif_nr_socks); 1173 (u32 *) &cnt.caif_nr_socks);
1172 debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR, 1174 debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
1173 debugfsdir, 1175 debugfsdir,
1174 (u32 *) &cnt.num_connect_req); 1176 (u32 *) &cnt.num_connect_req);
1175 debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR, 1177 debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
1176 debugfsdir, 1178 debugfsdir,
1177 (u32 *) &cnt.num_connect_resp); 1179 (u32 *) &cnt.num_connect_resp);
1178 debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR, 1180 debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
1179 debugfsdir, 1181 debugfsdir,
1180 (u32 *) &cnt.num_connect_fail_resp); 1182 (u32 *) &cnt.num_connect_fail_resp);
1181 debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR, 1183 debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
1182 debugfsdir, 1184 debugfsdir,
1183 (u32 *) &cnt.num_disconnect); 1185 (u32 *) &cnt.num_disconnect);
1184 debugfs_create_u32("num_remote_shutdown_ind", 1186 debugfs_create_u32("num_remote_shutdown_ind",
1185 S_IRUSR | S_IWUSR, debugfsdir, 1187 S_IRUSR | S_IWUSR, debugfsdir,
1186 (u32 *) &cnt.num_remote_shutdown_ind); 1188 (u32 *) &cnt.num_remote_shutdown_ind);
1187 debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR, 1189 debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
1188 debugfsdir, 1190 debugfsdir,
1189 (u32 *) &cnt.num_tx_flow_off_ind); 1191 (u32 *) &cnt.num_tx_flow_off_ind);
1190 debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR, 1192 debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
1191 debugfsdir, 1193 debugfsdir,
1192 (u32 *) &cnt.num_tx_flow_on_ind); 1194 (u32 *) &cnt.num_tx_flow_on_ind);
1193 debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR, 1195 debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
1194 debugfsdir, 1196 debugfsdir,
1195 (u32 *) &cnt.num_rx_flow_off); 1197 (u32 *) &cnt.num_rx_flow_off);
1196 debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR, 1198 debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
1197 debugfsdir, 1199 debugfsdir,
1198 (u32 *) &cnt.num_rx_flow_on); 1200 (u32 *) &cnt.num_rx_flow_on);
1199 } 1201 }
1200 #endif 1202 #endif
1201 return af_caif_init(); 1203 return af_caif_init();
1202 } 1204 }
1203 1205
/*
 * Module unload hook: unregister the CAIF protocol family and remove
 * the debugfs directory created at init time (if it was created).
 */
static void __exit caif_sktexit_module(void)
{
	sock_unregister(PF_CAIF);
	if (debugfsdir != NULL)
		debugfs_remove_recursive(debugfsdir);
}
module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);
1212 1214
1 /* 1 /*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8 8
9 #include <linux/kernel.h> 9 #include <linux/kernel.h>
10 #include <linux/stddef.h> 10 #include <linux/stddef.h>
11 #include <linux/slab.h> 11 #include <linux/slab.h>
12 #include <linux/netdevice.h> 12 #include <linux/netdevice.h>
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <net/caif/caif_layer.h> 14 #include <net/caif/caif_layer.h>
15 #include <net/caif/cfpkt.h> 15 #include <net/caif/cfpkt.h>
16 #include <net/caif/cfcnfg.h> 16 #include <net/caif/cfcnfg.h>
17 #include <net/caif/cfctrl.h> 17 #include <net/caif/cfctrl.h>
18 #include <net/caif/cfmuxl.h> 18 #include <net/caif/cfmuxl.h>
19 #include <net/caif/cffrml.h> 19 #include <net/caif/cffrml.h>
20 #include <net/caif/cfserl.h> 20 #include <net/caif/cfserl.h>
21 #include <net/caif/cfsrvl.h> 21 #include <net/caif/cfsrvl.h>
22 #include <net/caif/caif_dev.h> 22 #include <net/caif/caif_dev.h>
23 23
24 #define container_obj(layr) container_of(layr, struct cfcnfg, layer) 24 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
25 25
26 /* Information about CAIF physical interfaces held by Config Module in order 26 /* Information about CAIF physical interfaces held by Config Module in order
27 * to manage physical interfaces 27 * to manage physical interfaces
28 */ 28 */
29 struct cfcnfg_phyinfo { 29 struct cfcnfg_phyinfo {
30 struct list_head node; 30 struct list_head node;
31 bool up; 31 bool up;
32 32
33 /* Pointer to the layer below the MUX (framing layer) */ 33 /* Pointer to the layer below the MUX (framing layer) */
34 struct cflayer *frm_layer; 34 struct cflayer *frm_layer;
35 /* Pointer to the lowest actual physical layer */ 35 /* Pointer to the lowest actual physical layer */
36 struct cflayer *phy_layer; 36 struct cflayer *phy_layer;
37 /* Unique identifier of the physical interface */ 37 /* Unique identifier of the physical interface */
38 unsigned int id; 38 unsigned int id;
39 /* Preference of the physical in interface */ 39 /* Preference of the physical in interface */
40 enum cfcnfg_phy_preference pref; 40 enum cfcnfg_phy_preference pref;
41 41
42 /* Information about the physical device */ 42 /* Information about the physical device */
43 struct dev_info dev_info; 43 struct dev_info dev_info;
44 44
45 /* Interface index */ 45 /* Interface index */
46 int ifindex; 46 int ifindex;
47 47
48 /* Use Start of frame extension */ 48 /* Use Start of frame extension */
49 bool use_stx; 49 bool use_stx;
50 50
51 /* Use Start of frame checksum */ 51 /* Use Start of frame checksum */
52 bool use_fcs; 52 bool use_fcs;
53 }; 53 };
54 54
55 struct cfcnfg { 55 struct cfcnfg {
56 struct cflayer layer; 56 struct cflayer layer;
57 struct cflayer *ctrl; 57 struct cflayer *ctrl;
58 struct cflayer *mux; 58 struct cflayer *mux;
59 struct list_head phys; 59 struct list_head phys;
60 struct mutex lock; 60 struct mutex lock;
61 }; 61 };
62 62
63 static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, 63 static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
64 enum cfctrl_srv serv, u8 phyid, 64 enum cfctrl_srv serv, u8 phyid,
65 struct cflayer *adapt_layer); 65 struct cflayer *adapt_layer);
66 static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id); 66 static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
67 static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, 67 static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
68 struct cflayer *adapt_layer); 68 struct cflayer *adapt_layer);
69 static void cfctrl_resp_func(void); 69 static void cfctrl_resp_func(void);
70 static void cfctrl_enum_resp(void); 70 static void cfctrl_enum_resp(void);
71 71
/*
 * Allocate and wire up the CAIF configuration layer: a mux layer for
 * channel (de)multiplexing and a control layer for link management.
 * Returns the new instance, or NULL on allocation failure.  May sleep.
 */
struct cfcnfg *cfcnfg_create(void)
{
	struct cfcnfg *this;
	struct cfctrl_rsp *resp;

	might_sleep();

	/* Initiate this layer */
	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
	if (!this) {
		pr_warn("Out of memory\n");
		return NULL;
	}
	this->mux = cfmuxl_create();
	if (!this->mux)
		goto out_of_mem;
	this->ctrl = cfctrl_create();
	if (!this->ctrl)
		goto out_of_mem;
	/* Initiate response functions */
	resp = cfctrl_get_respfuncs(this->ctrl);
	resp->enum_rsp = cfctrl_enum_resp;
	resp->linkerror_ind = cfctrl_resp_func;
	resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
	resp->sleep_rsp = cfctrl_resp_func;
	resp->wake_rsp = cfctrl_resp_func;
	resp->restart_rsp = cfctrl_resp_func;
	resp->radioset_rsp = cfctrl_resp_func;
	resp->linksetup_rsp = cfcnfg_linkup_rsp;
	resp->reject_rsp = cfcnfg_reject_rsp;
	INIT_LIST_HEAD(&this->phys);

	/* The control layer sits on top of the mux at channel id 0. */
	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
	layer_set_dn(this->ctrl, this->mux);
	layer_set_up(this->ctrl, this);
	mutex_init(&this->lock);

	return this;
out_of_mem:
	pr_warn("Out of memory\n");

	/* Wait out any concurrent RCU readers before freeing. */
	synchronize_rcu();

	kfree(this->mux);
	kfree(this->ctrl);
	kfree(this);
	return NULL;
}
EXPORT_SYMBOL(cfcnfg_create);
121 121
/*
 * Tear down the configuration layer.  cfctrl_remove() also disposes of
 * any pending control requests before freeing the control layer, so
 * outstanding requests are not leaked.  May sleep (synchronize_rcu).
 */
void cfcnfg_remove(struct cfcnfg *cfg)
{
	might_sleep();
	if (cfg) {
		synchronize_rcu();

		kfree(cfg->mux);
		cfctrl_remove(cfg->ctrl);
		kfree(cfg);
	}
}
133 133
/* No-op handler for control responses this module takes no action on. */
static void cfctrl_resp_func(void)
{
}
137 137
138 static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg, 138 static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
139 u8 phyid) 139 u8 phyid)
140 { 140 {
141 struct cfcnfg_phyinfo *phy; 141 struct cfcnfg_phyinfo *phy;
142 142
143 list_for_each_entry_rcu(phy, &cnfg->phys, node) 143 list_for_each_entry_rcu(phy, &cnfg->phys, node)
144 if (phy->id == phyid) 144 if (phy->id == phyid)
145 return phy; 145 return phy;
146 return NULL; 146 return NULL;
147 } 147 }
148 148
/* Enumeration responses are ignored; no action is taken on them. */
static void cfctrl_enum_resp(void)
{
}
152 152
153 static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, 153 static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
154 enum cfcnfg_phy_preference phy_pref) 154 enum cfcnfg_phy_preference phy_pref)
155 { 155 {
156 /* Try to match with specified preference */ 156 /* Try to match with specified preference */
157 struct cfcnfg_phyinfo *phy; 157 struct cfcnfg_phyinfo *phy;
158 158
159 list_for_each_entry_rcu(phy, &cnfg->phys, node) { 159 list_for_each_entry_rcu(phy, &cnfg->phys, node) {
160 if (phy->up && phy->pref == phy_pref && 160 if (phy->up && phy->pref == phy_pref &&
161 phy->frm_layer != NULL) 161 phy->frm_layer != NULL)
162 162
163 return &phy->dev_info; 163 return &phy->dev_info;
164 } 164 }
165 165
166 /* Otherwise just return something */ 166 /* Otherwise just return something */
167 list_for_each_entry_rcu(phy, &cnfg->phys, node) 167 list_for_each_entry_rcu(phy, &cnfg->phys, node)
168 if (phy->up) 168 if (phy->up)
169 return &phy->dev_info; 169 return &phy->dev_info;
170 170
171 return NULL; 171 return NULL;
172 } 172 }
173 173
174 static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) 174 static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
175 { 175 {
176 struct cfcnfg_phyinfo *phy; 176 struct cfcnfg_phyinfo *phy;
177 177
178 list_for_each_entry_rcu(phy, &cnfg->phys, node) 178 list_for_each_entry_rcu(phy, &cnfg->phys, node)
179 if (phy->ifindex == ifi && phy->up) 179 if (phy->ifindex == ifi && phy->up)
180 return phy->id; 180 return phy->id;
181 return -ENODEV; 181 return -ENODEV;
182 } 182 }
183 183
/*
 * Disconnect a client channel: remove its service layer from the mux,
 * issue a link-down request, and cancel any pending control request for
 * the adaptation layer.  Always ends by notifying the client with
 * CAIF_CTRLCMD_DEINIT_RSP after an RCU grace period, so no reader can
 * still be using the removed layer.
 */
int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
{
	u8 channel_id = 0;
	int ret = 0;
	struct cflayer *servl = NULL;
	struct cfcnfg *cfg = get_cfcnfg(net);

	caif_assert(adap_layer != NULL);

	channel_id = adap_layer->id;
	if (adap_layer->dn == NULL || channel_id == 0) {
		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
		ret = -ENOTCONN;
		goto end;
	}

	servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
	if (servl == NULL) {
		pr_err("PROTOCOL ERROR - "
		       "Error removing service_layer Channel_Id(%d)",
		       channel_id);
		ret = -EINVAL;
		goto end;
	}

	ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);

end:
	/* Drop any control request still queued for this client. */
	cfctrl_cancel_req(cfg->ctrl, adap_layer);

	/* Do RCU sync before initiating cleanup */
	synchronize_rcu();
	if (adap_layer->ctrlcmd != NULL)
		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
	return ret;

}
EXPORT_SYMBOL(caif_disconnect_client);
222 222
/* Link-destroy responses require no action here. */
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}
226 226
/*
 * Protocol header size (bytes) per service type, indexed by
 * enum cfctrl_srv.  Used to compute the head-room a client must
 * reserve (see caif_connect_client()).
 */
static const int protohead[CFCTRL_SRV_MASK] = {
	[CFCTRL_SRV_VEI] = 4,
	[CFCTRL_SRV_DATAGRAM] = 7,
	[CFCTRL_SRV_UTIL] = 4,
	[CFCTRL_SRV_RFM] = 3,
	[CFCTRL_SRV_DBG] = 3,
};
234 234
235 235
/*
 * Translate a socket-level connect request (@s) into control-channel
 * link parameters (@l).  The physical interface is selected either by
 * the explicit ifindex or by the bandwidth/latency link selector.
 * Returns 0 on success, -ENODEV if no usable interface is found, or
 * -EINVAL for an unsupported selector/protocol.
 */
static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
					  struct caif_connect_request *s,
					  struct cfctrl_link_param *l)
{
	struct dev_info *dev_info;
	enum cfcnfg_phy_preference pref;
	int res;

	memset(l, 0, sizeof(*l));
	/* In caif protocol low value is high priority */
	l->priority = CAIF_PRIO_MAX - s->priority + 1;

	if (s->ifindex != 0) {
		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
		if (res < 0)
			return res;
		l->phyid = res;
	} else {
		switch (s->link_selector) {
		case CAIF_LINK_HIGH_BANDW:
			pref = CFPHYPREF_HIGH_BW;
			break;
		case CAIF_LINK_LOW_LATENCY:
			pref = CFPHYPREF_LOW_LAT;
			break;
		default:
			return -EINVAL;
		}
		dev_info = cfcnfg_get_phyid(cnfg, pref);
		if (dev_info == NULL)
			return -ENODEV;
		l->phyid = dev_info->id;
	}
	/* Fill in the service-type specific link parameters. */
	switch (s->protocol) {
	case CAIFPROTO_AT:
		l->linktype = CFCTRL_SRV_VEI;
		l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3;
		l->chtype = s->sockaddr.u.at.type & 0x3;
		break;
	case CAIFPROTO_DATAGRAM:
		l->linktype = CFCTRL_SRV_DATAGRAM;
		l->chtype = 0x00;
		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
		break;
	case CAIFPROTO_DATAGRAM_LOOP:
		l->linktype = CFCTRL_SRV_DATAGRAM;
		l->chtype = 0x03;
		l->endpoint = 0x00;
		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
		break;
	case CAIFPROTO_RFM:
		l->linktype = CFCTRL_SRV_RFM;
		l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
		/* Bounded copy; explicit NUL-termination follows. */
		strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
			sizeof(l->u.rfm.volume)-1);
		l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
		break;
	case CAIFPROTO_UTIL:
		l->linktype = CFCTRL_SRV_UTIL;
		l->endpoint = 0x00;
		l->chtype = 0x00;
		strncpy(l->u.utility.name, s->sockaddr.u.util.service,
			sizeof(l->u.utility.name)-1);
		l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
		caif_assert(sizeof(l->u.utility.name) > 10);
		/* Clamp the parameter blob to the link-param capacity. */
		l->u.utility.paramlen = s->param.size;
		if (l->u.utility.paramlen > sizeof(l->u.utility.params))
			l->u.utility.paramlen = sizeof(l->u.utility.params);

		memcpy(l->u.utility.params, s->param.data,
		       l->u.utility.paramlen);

		break;
	case CAIFPROTO_DEBUG:
		l->linktype = CFCTRL_SRV_DBG;
		l->endpoint = s->sockaddr.u.dbg.service;
		l->chtype = s->sockaddr.u.dbg.type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
319 319
/*
 * Set up a new CAIF channel for @adap_layer.  Under rcu_read_lock(),
 * resolves the link parameters and validates the adaptation layer's
 * callbacks; on success reports the chosen interface index and the
 * protocol head/tail room the caller must reserve in its buffers, then
 * issues the link-setup request on the control channel.
 * Returns 0 or a negative errno.
 */
int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
			struct cflayer *adap_layer, int *ifindex,
			int *proto_head,
			int *proto_tail)
{
	struct cflayer *frml;
	struct cfcnfg_phyinfo *phy;
	int err;
	struct cfctrl_link_param param;
	struct cfcnfg *cfg = get_cfcnfg(net);
	caif_assert(cfg != NULL);

	rcu_read_lock();
	err = caif_connect_req_to_link_param(cfg, conn_req, &param);
	if (err)
		goto unlock;

	phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid);
	if (!phy) {
		err = -ENODEV;
		goto unlock;
	}
	err = -EINVAL;

	if (adap_layer == NULL) {
		pr_err("adap_layer is zero\n");
		goto unlock;
	}
	if (adap_layer->receive == NULL) {
		pr_err("adap_layer->receive is NULL\n");
		goto unlock;
	}
	if (adap_layer->ctrlcmd == NULL) {
		pr_err("adap_layer->ctrlcmd == NULL\n");
		goto unlock;
	}

	err = -ENODEV;
	frml = phy->frm_layer;
	if (frml == NULL) {
		pr_err("Specified PHY type does not exist!\n");
		goto unlock;
	}
	caif_assert(param.phyid == phy->id);
	caif_assert(phy->frm_layer->id ==
		     param.phyid);
	caif_assert(phy->phy_layer->id ==
		     param.phyid);

	*ifindex = phy->ifindex;
	*proto_tail = 2;
	/* Head room: protocol header plus one byte if STX framing is on. */
	*proto_head =

		protohead[param.linktype] + (phy->use_stx ? 1 : 0);

	rcu_read_unlock();

	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
	cfctrl_enum_req(cfg->ctrl, param.phyid);
	return cfctrl_linkup_request(cfg->ctrl, &param, adap_layer);

unlock:
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(caif_connect_client);
386 386
387 static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, 387 static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
388 struct cflayer *adapt_layer) 388 struct cflayer *adapt_layer)
389 { 389 {
390 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) 390 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
391 adapt_layer->ctrlcmd(adapt_layer, 391 adapt_layer->ctrlcmd(adapt_layer,
392 CAIF_CTRLCMD_INIT_FAIL_RSP, 0); 392 CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
393 } 393 }
394 394
/*
 * Control-channel response to a successful link setup: create the
 * service layer matching the negotiated channel type, splice it in
 * between the mux and the client's adaptation layer, then signal
 * CAIF_CTRLCMD_INIT_RSP up the stack.
 */
static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
		  u8 phyid, struct cflayer *adapt_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servicel = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	struct net_device *netdev;

	rcu_read_lock();

	if (adapt_layer == NULL) {
		/* Client went away while the request was in flight. */
		pr_debug("link setup response but no client exist,"
			 "send linkdown back\n");
		cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
		goto unlock;
	}

	caif_assert(cnfg != NULL);
	caif_assert(phyid != 0);

	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
	if (phyinfo == NULL) {
		pr_err("ERROR: Link Layer Device dissapeared"
		       "while connecting\n");
		goto unlock;
	}

	caif_assert(phyinfo != NULL);
	caif_assert(phyinfo->id == phyid);
	caif_assert(phyinfo->phy_layer != NULL);
	caif_assert(phyinfo->phy_layer->id == phyid);

	adapt_layer->id = channel_id;

	/* Instantiate the service layer for the negotiated type. */
	switch (serv) {
	case CFCTRL_SRV_VEI:
		servicel = cfvei_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DATAGRAM:
		servicel = cfdgml_create(channel_id,
					 &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_RFM:
		netdev = phyinfo->dev_info.dev;
		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
					 netdev->mtu);
		break;
	case CFCTRL_SRV_UTIL:
		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_VIDEO:
		servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DBG:
		servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
		break;
	default:
		pr_err("Protocol error. Link setup response "
		       "- unknown channel type\n");
		goto unlock;
	}
	if (!servicel) {
		pr_warn("Out of memory\n");
		goto unlock;
	}
	/* Wire the stack: mux <-> servicel <-> adapt_layer. */
	layer_set_dn(servicel, cnfg->mux);
	cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
	layer_set_up(servicel, adapt_layer);
	layer_set_dn(adapt_layer, servicel);

	rcu_read_unlock();

	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
	return;
unlock:
	rcu_read_unlock();
}
473 473
474 void 474 void
475 cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, 475 cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
476 struct net_device *dev, struct cflayer *phy_layer, 476 struct net_device *dev, struct cflayer *phy_layer,
477 enum cfcnfg_phy_preference pref, 477 enum cfcnfg_phy_preference pref,
478 bool fcs, bool stx) 478 bool fcs, bool stx)
479 { 479 {
480 struct cflayer *frml; 480 struct cflayer *frml;
481 struct cflayer *phy_driver = NULL; 481 struct cflayer *phy_driver = NULL;
482 struct cfcnfg_phyinfo *phyinfo; 482 struct cfcnfg_phyinfo *phyinfo;
483 int i; 483 int i;
484 u8 phyid; 484 u8 phyid;
485 485
486 mutex_lock(&cnfg->lock); 486 mutex_lock(&cnfg->lock);
487 487
488 /* CAIF protocol allow maximum 6 link-layers */ 488 /* CAIF protocol allow maximum 6 link-layers */
489 for (i = 0; i < 7; i++) { 489 for (i = 0; i < 7; i++) {
490 phyid = (dev->ifindex + i) & 0x7; 490 phyid = (dev->ifindex + i) & 0x7;
491 if (phyid == 0) 491 if (phyid == 0)
492 continue; 492 continue;
493 if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL) 493 if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
494 goto got_phyid; 494 goto got_phyid;
495 } 495 }
496 pr_warn("Too many CAIF Link Layers (max 6)\n"); 496 pr_warn("Too many CAIF Link Layers (max 6)\n");
497 goto out; 497 goto out;
498 498
499 got_phyid: 499 got_phyid:
500 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); 500 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
501 501
502 switch (phy_type) { 502 switch (phy_type) {
503 case CFPHYTYPE_FRAG: 503 case CFPHYTYPE_FRAG:
504 phy_driver = 504 phy_driver =
505 cfserl_create(CFPHYTYPE_FRAG, phyid, stx); 505 cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
506 if (!phy_driver) { 506 if (!phy_driver) {
507 pr_warn("Out of memory\n"); 507 pr_warn("Out of memory\n");
508 goto out; 508 goto out;
509 } 509 }
510 break; 510 break;
511 case CFPHYTYPE_CAIF: 511 case CFPHYTYPE_CAIF:
512 phy_driver = NULL; 512 phy_driver = NULL;
513 break; 513 break;
514 default: 514 default:
515 goto out; 515 goto out;
516 } 516 }
517 phy_layer->id = phyid; 517 phy_layer->id = phyid;
518 phyinfo->pref = pref; 518 phyinfo->pref = pref;
519 phyinfo->id = phyid; 519 phyinfo->id = phyid;
520 phyinfo->dev_info.id = phyid; 520 phyinfo->dev_info.id = phyid;
521 phyinfo->dev_info.dev = dev; 521 phyinfo->dev_info.dev = dev;
522 phyinfo->phy_layer = phy_layer; 522 phyinfo->phy_layer = phy_layer;
523 phyinfo->ifindex = dev->ifindex; 523 phyinfo->ifindex = dev->ifindex;
524 phyinfo->use_stx = stx; 524 phyinfo->use_stx = stx;
525 phyinfo->use_fcs = fcs; 525 phyinfo->use_fcs = fcs;
526 526
527 phy_layer->type = phy_type; 527 phy_layer->type = phy_type;
528 frml = cffrml_create(phyid, fcs); 528 frml = cffrml_create(phyid, fcs);
529 529
530 if (!frml) { 530 if (!frml) {
531 pr_warn("Out of memory\n"); 531 pr_warn("Out of memory\n");
532 kfree(phyinfo); 532 kfree(phyinfo);
533 goto out; 533 goto out;
534 } 534 }
535 phyinfo->frm_layer = frml; 535 phyinfo->frm_layer = frml;
536 layer_set_up(frml, cnfg->mux); 536 layer_set_up(frml, cnfg->mux);
537 537
538 if (phy_driver != NULL) { 538 if (phy_driver != NULL) {
539 phy_driver->id = phyid; 539 phy_driver->id = phyid;
540 layer_set_dn(frml, phy_driver); 540 layer_set_dn(frml, phy_driver);
541 layer_set_up(phy_driver, frml); 541 layer_set_up(phy_driver, frml);
542 layer_set_dn(phy_driver, phy_layer); 542 layer_set_dn(phy_driver, phy_layer);
543 layer_set_up(phy_layer, phy_driver); 543 layer_set_up(phy_layer, phy_driver);
544 } else { 544 } else {
545 layer_set_dn(frml, phy_layer); 545 layer_set_dn(frml, phy_layer);
546 layer_set_up(phy_layer, frml); 546 layer_set_up(phy_layer, frml);
547 } 547 }
548 548
549 list_add_rcu(&phyinfo->node, &cnfg->phys); 549 list_add_rcu(&phyinfo->node, &cnfg->phys);
550 out: 550 out:
551 mutex_unlock(&cnfg->lock); 551 mutex_unlock(&cnfg->lock);
552 } 552 }
553 EXPORT_SYMBOL(cfcnfg_add_phy_layer); 553 EXPORT_SYMBOL(cfcnfg_add_phy_layer);
554 554
/*
 * Propagate link-layer up/down state changes.  On up, take a reference
 * on the framing layer and register it with the mux; on down, remove it
 * from the mux and drop the reference.  Returns -ENODEV if the phy is
 * unknown, 0 otherwise (including a no-op repeat of the same state).
 */
int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
			 bool up)
{
	struct cfcnfg_phyinfo *phyinfo;

	rcu_read_lock();
	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
	if (phyinfo == NULL) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (phyinfo->up == up) {
		/* State unchanged; nothing to do. */
		rcu_read_unlock();
		return 0;
	}
	phyinfo->up = up;

	if (up) {
		cffrml_hold(phyinfo->frm_layer);
		cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
				   phy_layer->id);
	} else {
		cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
		cffrml_put(phyinfo->frm_layer);
	}

	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(cfcnfg_set_phy_state);
586 586
/*
 * Detach and free a physical interface.  The entry is first unlinked
 * from the RCU list and a grace period is waited out; if the framing
 * layer is still referenced afterwards, the entry is re-inserted and
 * -EAGAIN is returned so the caller can retry.  May sleep
 * (synchronize_rcu under cnfg->lock).
 */
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;
	struct cfcnfg_phyinfo *phyinfo;

	might_sleep();

	mutex_lock(&cnfg->lock);

	phyid = phy_layer->id;
	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);

	if (phyinfo == NULL) {
		/* Already removed; treat as success. */
		mutex_unlock(&cnfg->lock);
		return 0;
	}
	caif_assert(phyid == phyinfo->id);
	caif_assert(phy_layer == phyinfo->phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(phyinfo->frm_layer->id == phyid);

	list_del_rcu(&phyinfo->node);
	synchronize_rcu();

	/* Fail if reference count is not zero */
	if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) {
		pr_info("Wait for device inuse\n");
		/* Re-insert so the caller can retry the removal later. */
		list_add_rcu(&phyinfo->node, &cnfg->phys);
		mutex_unlock(&cnfg->lock);
		return -EAGAIN;
	}

	frml = phyinfo->frm_layer;
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);
	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
	}
	layer_set_up(phy_layer, NULL);

	/* frml_dn differs from the phy when a serializer sits between. */
	if (phyinfo->phy_layer != frml_dn)
		kfree(frml_dn);

	cffrml_free(frml);
	kfree(phyinfo);
	mutex_unlock(&cnfg->lock);

	return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);
640 640
1 /* 1 /*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8 8
9 #include <linux/stddef.h> 9 #include <linux/stddef.h>
10 #include <linux/spinlock.h> 10 #include <linux/spinlock.h>
11 #include <linux/slab.h> 11 #include <linux/slab.h>
12 #include <net/caif/caif_layer.h> 12 #include <net/caif/caif_layer.h>
13 #include <net/caif/cfpkt.h> 13 #include <net/caif/cfpkt.h>
14 #include <net/caif/cfctrl.h> 14 #include <net/caif/cfctrl.h>
15 15
16 #define container_obj(layr) container_of(layr, struct cfctrl, serv.layer) 16 #define container_obj(layr) container_of(layr, struct cfctrl, serv.layer)
17 #define UTILITY_NAME_LENGTH 16 17 #define UTILITY_NAME_LENGTH 16
18 #define CFPKT_CTRL_PKT_LEN 20 18 #define CFPKT_CTRL_PKT_LEN 20
19 19
20
21 #ifdef CAIF_NO_LOOP 20 #ifdef CAIF_NO_LOOP
22 static int handle_loop(struct cfctrl *ctrl, 21 static int handle_loop(struct cfctrl *ctrl,
23 int cmd, struct cfpkt *pkt){ 22 int cmd, struct cfpkt *pkt){
24 return -1; 23 return -1;
25 } 24 }
26 #else 25 #else
27 static int handle_loop(struct cfctrl *ctrl, 26 static int handle_loop(struct cfctrl *ctrl,
28 int cmd, struct cfpkt *pkt); 27 int cmd, struct cfpkt *pkt);
29 #endif 28 #endif
30 static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt); 29 static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
31 static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 30 static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
32 int phyid); 31 int phyid);
33 32
34 33
35 struct cflayer *cfctrl_create(void) 34 struct cflayer *cfctrl_create(void)
36 { 35 {
37 struct dev_info dev_info; 36 struct dev_info dev_info;
38 struct cfctrl *this = 37 struct cfctrl *this =
39 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); 38 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
40 if (!this) { 39 if (!this) {
41 pr_warn("Out of memory\n"); 40 pr_warn("Out of memory\n");
42 return NULL; 41 return NULL;
43 } 42 }
44 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 43 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
45 memset(&dev_info, 0, sizeof(dev_info)); 44 memset(&dev_info, 0, sizeof(dev_info));
46 dev_info.id = 0xff; 45 dev_info.id = 0xff;
47 memset(this, 0, sizeof(*this)); 46 memset(this, 0, sizeof(*this));
48 cfsrvl_init(&this->serv, 0, &dev_info, false); 47 cfsrvl_init(&this->serv, 0, &dev_info, false);
49 atomic_set(&this->req_seq_no, 1); 48 atomic_set(&this->req_seq_no, 1);
50 atomic_set(&this->rsp_seq_no, 1); 49 atomic_set(&this->rsp_seq_no, 1);
51 this->serv.layer.receive = cfctrl_recv; 50 this->serv.layer.receive = cfctrl_recv;
52 sprintf(this->serv.layer.name, "ctrl"); 51 sprintf(this->serv.layer.name, "ctrl");
53 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; 52 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
53 #ifndef CAIF_NO_LOOP
54 spin_lock_init(&this->loop_linkid_lock); 54 spin_lock_init(&this->loop_linkid_lock);
55 this->loop_linkid = 1;
56 #endif
55 spin_lock_init(&this->info_list_lock); 57 spin_lock_init(&this->info_list_lock);
56 INIT_LIST_HEAD(&this->list); 58 INIT_LIST_HEAD(&this->list);
57 this->loop_linkid = 1;
58 return &this->serv.layer; 59 return &this->serv.layer;
59 } 60 }
60 61
62 void cfctrl_remove(struct cflayer *layer)
63 {
64 struct cfctrl_request_info *p, *tmp;
65 struct cfctrl *ctrl = container_obj(layer);
66
67 spin_lock_bh(&ctrl->info_list_lock);
68 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
69 list_del(&p->list);
70 kfree(p);
71 }
72 spin_unlock_bh(&ctrl->info_list_lock);
73 kfree(layer);
74 }
75
61 static bool param_eq(const struct cfctrl_link_param *p1, 76 static bool param_eq(const struct cfctrl_link_param *p1,
62 const struct cfctrl_link_param *p2) 77 const struct cfctrl_link_param *p2)
63 { 78 {
64 bool eq = 79 bool eq =
65 p1->linktype == p2->linktype && 80 p1->linktype == p2->linktype &&
66 p1->priority == p2->priority && 81 p1->priority == p2->priority &&
67 p1->phyid == p2->phyid && 82 p1->phyid == p2->phyid &&
68 p1->endpoint == p2->endpoint && p1->chtype == p2->chtype; 83 p1->endpoint == p2->endpoint && p1->chtype == p2->chtype;
69 84
70 if (!eq) 85 if (!eq)
71 return false; 86 return false;
72 87
73 switch (p1->linktype) { 88 switch (p1->linktype) {
74 case CFCTRL_SRV_VEI: 89 case CFCTRL_SRV_VEI:
75 return true; 90 return true;
76 case CFCTRL_SRV_DATAGRAM: 91 case CFCTRL_SRV_DATAGRAM:
77 return p1->u.datagram.connid == p2->u.datagram.connid; 92 return p1->u.datagram.connid == p2->u.datagram.connid;
78 case CFCTRL_SRV_RFM: 93 case CFCTRL_SRV_RFM:
79 return 94 return
80 p1->u.rfm.connid == p2->u.rfm.connid && 95 p1->u.rfm.connid == p2->u.rfm.connid &&
81 strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0; 96 strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0;
82 case CFCTRL_SRV_UTIL: 97 case CFCTRL_SRV_UTIL:
83 return 98 return
84 p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb 99 p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb
85 && p1->u.utility.fifosize_bufs == 100 && p1->u.utility.fifosize_bufs ==
86 p2->u.utility.fifosize_bufs 101 p2->u.utility.fifosize_bufs
87 && strcmp(p1->u.utility.name, p2->u.utility.name) == 0 102 && strcmp(p1->u.utility.name, p2->u.utility.name) == 0
88 && p1->u.utility.paramlen == p2->u.utility.paramlen 103 && p1->u.utility.paramlen == p2->u.utility.paramlen
89 && memcmp(p1->u.utility.params, p2->u.utility.params, 104 && memcmp(p1->u.utility.params, p2->u.utility.params,
90 p1->u.utility.paramlen) == 0; 105 p1->u.utility.paramlen) == 0;
91 106
92 case CFCTRL_SRV_VIDEO: 107 case CFCTRL_SRV_VIDEO:
93 return p1->u.video.connid == p2->u.video.connid; 108 return p1->u.video.connid == p2->u.video.connid;
94 case CFCTRL_SRV_DBG: 109 case CFCTRL_SRV_DBG:
95 return true; 110 return true;
96 case CFCTRL_SRV_DECM: 111 case CFCTRL_SRV_DECM:
97 return false; 112 return false;
98 default: 113 default:
99 return false; 114 return false;
100 } 115 }
101 return false; 116 return false;
102 } 117 }
103 118
104 static bool cfctrl_req_eq(const struct cfctrl_request_info *r1, 119 static bool cfctrl_req_eq(const struct cfctrl_request_info *r1,
105 const struct cfctrl_request_info *r2) 120 const struct cfctrl_request_info *r2)
106 { 121 {
107 if (r1->cmd != r2->cmd) 122 if (r1->cmd != r2->cmd)
108 return false; 123 return false;
109 if (r1->cmd == CFCTRL_CMD_LINK_SETUP) 124 if (r1->cmd == CFCTRL_CMD_LINK_SETUP)
110 return param_eq(&r1->param, &r2->param); 125 return param_eq(&r1->param, &r2->param);
111 else 126 else
112 return r1->channel_id == r2->channel_id; 127 return r1->channel_id == r2->channel_id;
113 } 128 }
114 129
115 /* Insert request at the end */ 130 /* Insert request at the end */
116 static void cfctrl_insert_req(struct cfctrl *ctrl, 131 static void cfctrl_insert_req(struct cfctrl *ctrl,
117 struct cfctrl_request_info *req) 132 struct cfctrl_request_info *req)
118 { 133 {
119 spin_lock(&ctrl->info_list_lock); 134 spin_lock_bh(&ctrl->info_list_lock);
120 atomic_inc(&ctrl->req_seq_no); 135 atomic_inc(&ctrl->req_seq_no);
121 req->sequence_no = atomic_read(&ctrl->req_seq_no); 136 req->sequence_no = atomic_read(&ctrl->req_seq_no);
122 list_add_tail(&req->list, &ctrl->list); 137 list_add_tail(&req->list, &ctrl->list);
123 spin_unlock(&ctrl->info_list_lock); 138 spin_unlock_bh(&ctrl->info_list_lock);
124 } 139 }
125 140
126 /* Compare and remove request */ 141 /* Compare and remove request */
127 static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 142 static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
128 struct cfctrl_request_info *req) 143 struct cfctrl_request_info *req)
129 { 144 {
130 struct cfctrl_request_info *p, *tmp, *first; 145 struct cfctrl_request_info *p, *tmp, *first;
131 146
132 spin_lock(&ctrl->info_list_lock);
133 first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list); 147 first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
134 148
135 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 149 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
136 if (cfctrl_req_eq(req, p)) { 150 if (cfctrl_req_eq(req, p)) {
137 if (p != first) 151 if (p != first)
138 pr_warn("Requests are not received in order\n"); 152 pr_warn("Requests are not received in order\n");
139 153
140 atomic_set(&ctrl->rsp_seq_no, 154 atomic_set(&ctrl->rsp_seq_no,
141 p->sequence_no); 155 p->sequence_no);
142 list_del(&p->list); 156 list_del(&p->list);
143 goto out; 157 goto out;
144 } 158 }
145 } 159 }
146 p = NULL; 160 p = NULL;
147 out: 161 out:
148 spin_unlock(&ctrl->info_list_lock);
149 return p; 162 return p;
150 } 163 }
151 164
152 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) 165 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
153 { 166 {
154 struct cfctrl *this = container_obj(layer); 167 struct cfctrl *this = container_obj(layer);
155 return &this->res; 168 return &this->res;
156 } 169 }
157 170
158 static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) 171 static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
159 { 172 {
160 info->hdr_len = 0; 173 info->hdr_len = 0;
161 info->channel_id = cfctrl->serv.layer.id; 174 info->channel_id = cfctrl->serv.layer.id;
162 info->dev_info = &cfctrl->serv.dev_info; 175 info->dev_info = &cfctrl->serv.dev_info;
163 } 176 }
164 177
165 void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) 178 void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
166 { 179 {
167 struct cfctrl *cfctrl = container_obj(layer); 180 struct cfctrl *cfctrl = container_obj(layer);
168 int ret; 181 int ret;
169 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 182 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
170 if (!pkt) { 183 if (!pkt) {
171 pr_warn("Out of memory\n"); 184 pr_warn("Out of memory\n");
172 return; 185 return;
173 } 186 }
174 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 187 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
175 init_info(cfpkt_info(pkt), cfctrl); 188 init_info(cfpkt_info(pkt), cfctrl);
176 cfpkt_info(pkt)->dev_info->id = physlinkid; 189 cfpkt_info(pkt)->dev_info->id = physlinkid;
177 cfctrl->serv.dev_info.id = physlinkid; 190 cfctrl->serv.dev_info.id = physlinkid;
178 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); 191 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
179 cfpkt_addbdy(pkt, physlinkid); 192 cfpkt_addbdy(pkt, physlinkid);
180 ret = 193 ret =
181 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 194 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
182 if (ret < 0) {
183 pr_err("Could not transmit enum message\n");
184 cfpkt_destroy(pkt);
185 }
186 } 195 }
187 196
188 int cfctrl_linkup_request(struct cflayer *layer, 197 int cfctrl_linkup_request(struct cflayer *layer,
189 struct cfctrl_link_param *param, 198 struct cfctrl_link_param *param,
190 struct cflayer *user_layer) 199 struct cflayer *user_layer)
191 { 200 {
192 struct cfctrl *cfctrl = container_obj(layer); 201 struct cfctrl *cfctrl = container_obj(layer);
193 u32 tmp32; 202 u32 tmp32;
194 u16 tmp16; 203 u16 tmp16;
195 u8 tmp8; 204 u8 tmp8;
196 struct cfctrl_request_info *req; 205 struct cfctrl_request_info *req;
197 int ret; 206 int ret;
198 char utility_name[16]; 207 char utility_name[16];
199 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 208 struct cfpkt *pkt;
209
210 if (cfctrl_cancel_req(layer, user_layer) > 0) {
211 /* Slight Paranoia, check if already connecting */
212 pr_err("Duplicate connect request for same client\n");
213 WARN_ON(1);
214 return -EALREADY;
215 }
216
217 pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
200 if (!pkt) { 218 if (!pkt) {
201 pr_warn("Out of memory\n"); 219 pr_warn("Out of memory\n");
202 return -ENOMEM; 220 return -ENOMEM;
203 } 221 }
204 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); 222 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
205 cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype); 223 cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
206 cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid); 224 cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
207 cfpkt_addbdy(pkt, param->endpoint & 0x03); 225 cfpkt_addbdy(pkt, param->endpoint & 0x03);
208 226
209 switch (param->linktype) { 227 switch (param->linktype) {
210 case CFCTRL_SRV_VEI: 228 case CFCTRL_SRV_VEI:
211 break; 229 break;
212 case CFCTRL_SRV_VIDEO: 230 case CFCTRL_SRV_VIDEO:
213 cfpkt_addbdy(pkt, (u8) param->u.video.connid); 231 cfpkt_addbdy(pkt, (u8) param->u.video.connid);
214 break; 232 break;
215 case CFCTRL_SRV_DBG: 233 case CFCTRL_SRV_DBG:
216 break; 234 break;
217 case CFCTRL_SRV_DATAGRAM: 235 case CFCTRL_SRV_DATAGRAM:
218 tmp32 = cpu_to_le32(param->u.datagram.connid); 236 tmp32 = cpu_to_le32(param->u.datagram.connid);
219 cfpkt_add_body(pkt, &tmp32, 4); 237 cfpkt_add_body(pkt, &tmp32, 4);
220 break; 238 break;
221 case CFCTRL_SRV_RFM: 239 case CFCTRL_SRV_RFM:
222 /* Construct a frame, convert DatagramConnectionID to network 240 /* Construct a frame, convert DatagramConnectionID to network
223 * format long and copy it out... 241 * format long and copy it out...
224 */ 242 */
225 tmp32 = cpu_to_le32(param->u.rfm.connid); 243 tmp32 = cpu_to_le32(param->u.rfm.connid);
226 cfpkt_add_body(pkt, &tmp32, 4); 244 cfpkt_add_body(pkt, &tmp32, 4);
227 /* Add volume name, including zero termination... */ 245 /* Add volume name, including zero termination... */
228 cfpkt_add_body(pkt, param->u.rfm.volume, 246 cfpkt_add_body(pkt, param->u.rfm.volume,
229 strlen(param->u.rfm.volume) + 1); 247 strlen(param->u.rfm.volume) + 1);
230 break; 248 break;
231 case CFCTRL_SRV_UTIL: 249 case CFCTRL_SRV_UTIL:
232 tmp16 = cpu_to_le16(param->u.utility.fifosize_kb); 250 tmp16 = cpu_to_le16(param->u.utility.fifosize_kb);
233 cfpkt_add_body(pkt, &tmp16, 2); 251 cfpkt_add_body(pkt, &tmp16, 2);
234 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); 252 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
235 cfpkt_add_body(pkt, &tmp16, 2); 253 cfpkt_add_body(pkt, &tmp16, 2);
236 memset(utility_name, 0, sizeof(utility_name)); 254 memset(utility_name, 0, sizeof(utility_name));
237 strncpy(utility_name, param->u.utility.name, 255 strncpy(utility_name, param->u.utility.name,
238 UTILITY_NAME_LENGTH - 1); 256 UTILITY_NAME_LENGTH - 1);
239 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); 257 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
240 tmp8 = param->u.utility.paramlen; 258 tmp8 = param->u.utility.paramlen;
241 cfpkt_add_body(pkt, &tmp8, 1); 259 cfpkt_add_body(pkt, &tmp8, 1);
242 cfpkt_add_body(pkt, param->u.utility.params, 260 cfpkt_add_body(pkt, param->u.utility.params,
243 param->u.utility.paramlen); 261 param->u.utility.paramlen);
244 break; 262 break;
245 default: 263 default:
246 pr_warn("Request setup of bad link type = %d\n", 264 pr_warn("Request setup of bad link type = %d\n",
247 param->linktype); 265 param->linktype);
248 return -EINVAL; 266 return -EINVAL;
249 } 267 }
250 req = kzalloc(sizeof(*req), GFP_KERNEL); 268 req = kzalloc(sizeof(*req), GFP_KERNEL);
251 if (!req) { 269 if (!req) {
252 pr_warn("Out of memory\n"); 270 pr_warn("Out of memory\n");
253 return -ENOMEM; 271 return -ENOMEM;
254 } 272 }
255 req->client_layer = user_layer; 273 req->client_layer = user_layer;
256 req->cmd = CFCTRL_CMD_LINK_SETUP; 274 req->cmd = CFCTRL_CMD_LINK_SETUP;
257 req->param = *param; 275 req->param = *param;
258 cfctrl_insert_req(cfctrl, req); 276 cfctrl_insert_req(cfctrl, req);
259 init_info(cfpkt_info(pkt), cfctrl); 277 init_info(cfpkt_info(pkt), cfctrl);
260 /* 278 /*
261 * NOTE:Always send linkup and linkdown request on the same 279 * NOTE:Always send linkup and linkdown request on the same
262 * device as the payload. Otherwise old queued up payload 280 * device as the payload. Otherwise old queued up payload
263 * might arrive with the newly allocated channel ID. 281 * might arrive with the newly allocated channel ID.
264 */ 282 */
265 cfpkt_info(pkt)->dev_info->id = param->phyid; 283 cfpkt_info(pkt)->dev_info->id = param->phyid;
266 ret = 284 ret =
267 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 285 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
268 if (ret < 0) { 286 if (ret < 0) {
269 pr_err("Could not transmit linksetup request\n"); 287 int count;
270 cfpkt_destroy(pkt); 288
271 return -ENODEV; 289 count = cfctrl_cancel_req(&cfctrl->serv.layer,
290 user_layer);
291 if (count != 1)
292 pr_err("Could not remove request (%d)", count);
293 return -ENODEV;
272 } 294 }
273 return 0; 295 return 0;
274 } 296 }
275 297
276 int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, 298 int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
277 struct cflayer *client) 299 struct cflayer *client)
278 { 300 {
279 int ret; 301 int ret;
280 struct cfctrl *cfctrl = container_obj(layer); 302 struct cfctrl *cfctrl = container_obj(layer);
281 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 303 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
282 if (!pkt) { 304 if (!pkt) {
283 pr_warn("Out of memory\n"); 305 pr_warn("Out of memory\n");
284 return -ENOMEM; 306 return -ENOMEM;
285 } 307 }
286 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); 308 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
287 cfpkt_addbdy(pkt, channelid); 309 cfpkt_addbdy(pkt, channelid);
288 init_info(cfpkt_info(pkt), cfctrl); 310 init_info(cfpkt_info(pkt), cfctrl);
289 ret = 311 ret =
290 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 312 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
291 if (ret < 0) { 313 #ifndef CAIF_NO_LOOP
292 pr_err("Could not transmit link-down request\n"); 314 cfctrl->loop_linkused[channelid] = 0;
293 cfpkt_destroy(pkt); 315 #endif
294 }
295 return ret; 316 return ret;
296 } 317 }
297 318
298 void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) 319 int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
299 { 320 {
300 struct cfctrl_request_info *p, *tmp; 321 struct cfctrl_request_info *p, *tmp;
301 struct cfctrl *ctrl = container_obj(layr); 322 struct cfctrl *ctrl = container_obj(layr);
302 spin_lock(&ctrl->info_list_lock); 323 int found = 0;
324 spin_lock_bh(&ctrl->info_list_lock);
303 325
304 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 326 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
305 if (p->client_layer == adap_layer) { 327 if (p->client_layer == adap_layer) {
306 pr_debug("cancel req :%d\n", p->sequence_no);
307 list_del(&p->list); 328 list_del(&p->list);
308 kfree(p); 329 kfree(p);
330 found++;
309 } 331 }
310 } 332 }
311 333
312 spin_unlock(&ctrl->info_list_lock); 334 spin_unlock_bh(&ctrl->info_list_lock);
335 return found;
313 } 336 }
314 337
/*
 * Receive and dispatch one CAIF control frame.
 *
 * Parses the command byte, optionally loops the frame back via
 * handle_loop(), decodes link-setup responses per service type and
 * invokes the matching response callback from cfctrl->res.  The
 * packet is always destroyed before returning.  Returns 0 on success,
 * -1 on an unrecognised or malformed frame.
 */
static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
{
	u8 cmdrsp;
	u8 cmd;
	int ret = -1;
	u16 tmp16;
	u8 len;
	u8 param[255];
	u8 linkid;
	struct cfctrl *cfctrl = container_obj(layer);
	struct cfctrl_request_info rsp, *req;


	cfpkt_extr_head(pkt, &cmdrsp, 1);
	cmd = cmdrsp & CFCTRL_CMD_MASK;
	/* Requests (not responses, not errors) may be looped back locally;
	 * a loop failure is reported as an error response.
	 */
	if (cmd != CFCTRL_CMD_LINK_ERR
	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
		if (handle_loop(cfctrl, cmd, pkt) != 0)
			cmdrsp |= CFCTRL_ERR_BIT;
	}

	switch (cmd) {
	case CFCTRL_CMD_LINK_SETUP:
		{
			enum cfctrl_srv serv;
			enum cfctrl_srv servtype;
			u8 endpoint;
			u8 physlinkid;
			u8 prio;
			u8 tmp;
			u32 tmp32;
			u8 *cp;
			int i;
			struct cfctrl_link_param linkparam;
			memset(&linkparam, 0, sizeof(linkparam));

			/* First byte: service type (low nibble) and
			 * channel type (high nibble).
			 */
			cfpkt_extr_head(pkt, &tmp, 1);

			serv = tmp & CFCTRL_SRV_MASK;
			linkparam.linktype = serv;

			servtype = tmp >> 4;
			linkparam.chtype = servtype;

			/* Second byte: phy id (low 3 bits) and priority. */
			cfpkt_extr_head(pkt, &tmp, 1);
			physlinkid = tmp & 0x07;
			prio = tmp >> 3;

			linkparam.priority = prio;
			linkparam.phyid = physlinkid;
			cfpkt_extr_head(pkt, &endpoint, 1);
			linkparam.endpoint = endpoint & 0x03;

			/* Service-specific payload; the trailing link id is
			 * only present when the error bit is clear.
			 */
			switch (serv) {
			case CFCTRL_SRV_VEI:
			case CFCTRL_SRV_DBG:
				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;
			case CFCTRL_SRV_VIDEO:
				cfpkt_extr_head(pkt, &tmp, 1);
				linkparam.u.video.connid = tmp;
				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;

			case CFCTRL_SRV_DATAGRAM:
				cfpkt_extr_head(pkt, &tmp32, 4);
				linkparam.u.datagram.connid =
				    le32_to_cpu(tmp32);
				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;
			case CFCTRL_SRV_RFM:
				/* Construct a frame, convert
				 * DatagramConnectionID
				 * to network format long and copy it out...
				 */
				cfpkt_extr_head(pkt, &tmp32, 4);
				linkparam.u.rfm.connid =
				    le32_to_cpu(tmp32);
				/* NUL-terminated volume name follows. */
				cp = (u8 *) linkparam.u.rfm.volume;
				for (cfpkt_extr_head(pkt, &tmp, 1);
				     cfpkt_more(pkt) && tmp != '\0';
				     cfpkt_extr_head(pkt, &tmp, 1))
					*cp++ = tmp;
				*cp = '\0';

				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);

				break;
			case CFCTRL_SRV_UTIL:
				/* Construct a frame, convert
				 * DatagramConnectionID
				 * to network format long and copy it out...
				 */
				/* Fifosize KB */
				cfpkt_extr_head(pkt, &tmp16, 2);
				linkparam.u.utility.fifosize_kb =
				    le16_to_cpu(tmp16);
				/* Fifosize bufs */
				cfpkt_extr_head(pkt, &tmp16, 2);
				linkparam.u.utility.fifosize_bufs =
				    le16_to_cpu(tmp16);
				/* name */
				cp = (u8 *) linkparam.u.utility.name;
				caif_assert(sizeof(linkparam.u.utility.name)
					    >= UTILITY_NAME_LENGTH);
				for (i = 0;
				     i < UTILITY_NAME_LENGTH
				     && cfpkt_more(pkt); i++) {
					cfpkt_extr_head(pkt, &tmp, 1);
					*cp++ = tmp;
				}
				/* Length */
				cfpkt_extr_head(pkt, &len, 1);
				linkparam.u.utility.paramlen = len;
				/* Param Data */
				cp = linkparam.u.utility.params;
				while (cfpkt_more(pkt) && len--) {
					cfpkt_extr_head(pkt, &tmp, 1);
					*cp++ = tmp;
				}
				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				/* Length */
				cfpkt_extr_head(pkt, &len, 1);
				/* Param Data */
				cfpkt_extr_head(pkt, &param, len);
				break;
			default:
				pr_warn("Request setup - invalid link type (%d)\n",
					serv);
				goto error;
			}

			rsp.cmd = cmd;
			rsp.param = linkparam;
			/* Lock held across remove + callback + kfree so the
			 * request cannot be cancelled concurrently.
			 */
			spin_lock_bh(&cfctrl->info_list_lock);
			req = cfctrl_remove_req(cfctrl, &rsp);

			if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
			    cfpkt_erroneous(pkt)) {
				pr_err("Invalid O/E bit or parse error on CAIF control channel\n");
				cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
						       0,
						       req ? req->client_layer
						       : NULL);
			} else {
				cfctrl->res.linksetup_rsp(cfctrl->serv.
							  layer.up, linkid,
							  serv, physlinkid,
							  req ? req->
							  client_layer : NULL);
			}

			if (req != NULL)
				kfree(req);

			spin_unlock_bh(&cfctrl->info_list_lock);
		}
		break;
	case CFCTRL_CMD_LINK_DESTROY:
		cfpkt_extr_head(pkt, &linkid, 1);
		cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
		break;
	case CFCTRL_CMD_LINK_ERR:
		pr_err("Frame Error Indication received\n");
		cfctrl->res.linkerror_ind();
		break;
	case CFCTRL_CMD_ENUM:
		cfctrl->res.enum_rsp();
		break;
	case CFCTRL_CMD_SLEEP:
		cfctrl->res.sleep_rsp();
		break;
	case CFCTRL_CMD_WAKE:
		cfctrl->res.wake_rsp();
		break;
	case CFCTRL_CMD_LINK_RECONF:
		cfctrl->res.restart_rsp();
		break;
	case CFCTRL_CMD_RADIO_SET:
		cfctrl->res.radioset_rsp();
		break;
	default:
		pr_err("Unrecognized Control Frame\n");
		goto error;
		break;
	}
	ret = 0;
error:
	cfpkt_destroy(pkt);
	return ret;
}
518 544
519 static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 545 static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
520 int phyid) 546 int phyid)
521 { 547 {
522 struct cfctrl *this = container_obj(layr); 548 struct cfctrl *this = container_obj(layr);
523 switch (ctrl) { 549 switch (ctrl) {
524 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: 550 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
525 case CAIF_CTRLCMD_FLOW_OFF_IND: 551 case CAIF_CTRLCMD_FLOW_OFF_IND:
526 spin_lock(&this->info_list_lock); 552 spin_lock_bh(&this->info_list_lock);
527 if (!list_empty(&this->list)) { 553 if (!list_empty(&this->list)) {
528 pr_debug("Received flow off in control layer\n"); 554 pr_debug("Received flow off in control layer\n");
529 } 555 }
530 spin_unlock(&this->info_list_lock); 556 spin_unlock_bh(&this->info_list_lock);
531 break; 557 break;
558 case _CAIF_CTRLCMD_PHYIF_DOWN_IND: {
559 struct cfctrl_request_info *p, *tmp;
560
561 /* Find all connect request and report failure */
562 spin_lock_bh(&this->info_list_lock);
563 list_for_each_entry_safe(p, tmp, &this->list, list) {
564 if (p->param.phyid == phyid) {
565 list_del(&p->list);
566 p->client_layer->ctrlcmd(p->client_layer,
567 CAIF_CTRLCMD_INIT_FAIL_RSP,
568 phyid);
569 kfree(p);
570 }
571 }
572 spin_unlock_bh(&this->info_list_lock);
573 break;
574 }
532 default: 575 default:
533 break; 576 break;
534 } 577 }
535 } 578 }
536 579
537 #ifndef CAIF_NO_LOOP 580 #ifndef CAIF_NO_LOOP
538 static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt) 581 static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
539 { 582 {
540 static int last_linkid; 583 static int last_linkid;
584 static int dec;
541 u8 linkid, linktype, tmp; 585 u8 linkid, linktype, tmp;
542 switch (cmd) { 586 switch (cmd) {
543 case CFCTRL_CMD_LINK_SETUP: 587 case CFCTRL_CMD_LINK_SETUP:
544 spin_lock(&ctrl->loop_linkid_lock); 588 spin_lock_bh(&ctrl->loop_linkid_lock);
545 for (linkid = last_linkid + 1; linkid < 255; linkid++) 589 if (!dec) {
546 if (!ctrl->loop_linkused[linkid]) 590 for (linkid = last_linkid + 1; linkid < 255; linkid++)
547 goto found; 591 if (!ctrl->loop_linkused[linkid])
592 goto found;
593 }
594 dec = 1;
548 for (linkid = last_linkid - 1; linkid > 0; linkid--) 595 for (linkid = last_linkid - 1; linkid > 0; linkid--)
549 if (!ctrl->loop_linkused[linkid]) 596 if (!ctrl->loop_linkused[linkid])
550 goto found; 597 goto found;
551 spin_unlock(&ctrl->loop_linkid_lock); 598 spin_unlock_bh(&ctrl->loop_linkid_lock);
552 pr_err("Out of link-ids\n"); 599
553 return -EINVAL;
554 found: 600 found:
601 if (linkid < 10)
602 dec = 0;
603
555 if (!ctrl->loop_linkused[linkid]) 604 if (!ctrl->loop_linkused[linkid])
556 ctrl->loop_linkused[linkid] = 1; 605 ctrl->loop_linkused[linkid] = 1;
557 606
558 last_linkid = linkid; 607 last_linkid = linkid;
559 608
560 cfpkt_add_trail(pkt, &linkid, 1); 609 cfpkt_add_trail(pkt, &linkid, 1);
561 spin_unlock(&ctrl->loop_linkid_lock); 610 spin_unlock_bh(&ctrl->loop_linkid_lock);
562 cfpkt_peek_head(pkt, &linktype, 1); 611 cfpkt_peek_head(pkt, &linktype, 1);
563 if (linktype == CFCTRL_SRV_UTIL) { 612 if (linktype == CFCTRL_SRV_UTIL) {
564 tmp = 0x01; 613 tmp = 0x01;
565 cfpkt_add_trail(pkt, &tmp, 1); 614 cfpkt_add_trail(pkt, &tmp, 1);
566 cfpkt_add_trail(pkt, &tmp, 1); 615 cfpkt_add_trail(pkt, &tmp, 1);
567 } 616 }
568 break; 617 break;
569 618
570 case CFCTRL_CMD_LINK_DESTROY: 619 case CFCTRL_CMD_LINK_DESTROY:
571 spin_lock(&ctrl->loop_linkid_lock); 620 spin_lock_bh(&ctrl->loop_linkid_lock);
1 /* 1 /*
2 * CAIF Framing Layer. 2 * CAIF Framing Layer.
3 * 3 *
4 * Copyright (C) ST-Ericsson AB 2010 4 * Copyright (C) ST-Ericsson AB 2010
5 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 5 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2 6 * License terms: GNU General Public License (GPL) version 2
7 */ 7 */
8 8
9 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 9 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
10 10
11 #include <linux/stddef.h> 11 #include <linux/stddef.h>
12 #include <linux/spinlock.h> 12 #include <linux/spinlock.h>
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/crc-ccitt.h> 14 #include <linux/crc-ccitt.h>
15 #include <linux/netdevice.h> 15 #include <linux/netdevice.h>
16 #include <net/caif/caif_layer.h> 16 #include <net/caif/caif_layer.h>
17 #include <net/caif/cfpkt.h> 17 #include <net/caif/cfpkt.h>
18 #include <net/caif/cffrml.h> 18 #include <net/caif/cffrml.h>
19 19
20 #define container_obj(layr) container_of(layr, struct cffrml, layer) 20 #define container_obj(layr) container_of(layr, struct cffrml, layer)
21 21
22 struct cffrml { 22 struct cffrml {
23 struct cflayer layer; 23 struct cflayer layer;
24 bool dofcs; /* !< FCS active */ 24 bool dofcs; /* !< FCS active */
25 int __percpu *pcpu_refcnt; 25 int __percpu *pcpu_refcnt;
26 }; 26 };
27 27
28 static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); 28 static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
29 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt); 29 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
30 static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 30 static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
31 int phyid); 31 int phyid);
32 32
33 static u32 cffrml_rcv_error; 33 static u32 cffrml_rcv_error;
34 static u32 cffrml_rcv_checsum_error; 34 static u32 cffrml_rcv_checsum_error;
35 struct cflayer *cffrml_create(u16 phyid, bool use_fcs) 35 struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
36
37 { 36 {
38 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC); 37 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
39 if (!this) { 38 if (!this) {
40 pr_warn("Out of memory\n"); 39 pr_warn("Out of memory\n");
41 return NULL; 40 return NULL;
42 } 41 }
43 this->pcpu_refcnt = alloc_percpu(int); 42 this->pcpu_refcnt = alloc_percpu(int);
44 if (this->pcpu_refcnt == NULL) { 43 if (this->pcpu_refcnt == NULL) {
45 kfree(this); 44 kfree(this);
46 return NULL; 45 return NULL;
47 } 46 }
48 47
49 caif_assert(offsetof(struct cffrml, layer) == 0); 48 caif_assert(offsetof(struct cffrml, layer) == 0);
50 49
51 memset(this, 0, sizeof(struct cflayer)); 50 memset(this, 0, sizeof(struct cflayer));
52 this->layer.receive = cffrml_receive; 51 this->layer.receive = cffrml_receive;
53 this->layer.transmit = cffrml_transmit; 52 this->layer.transmit = cffrml_transmit;
54 this->layer.ctrlcmd = cffrml_ctrlcmd; 53 this->layer.ctrlcmd = cffrml_ctrlcmd;
55 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid); 54 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid);
56 this->dofcs = use_fcs; 55 this->dofcs = use_fcs;
57 this->layer.id = phyid; 56 this->layer.id = phyid;
58 return (struct cflayer *) this; 57 return (struct cflayer *) this;
59 } 58 }
60 59
61 void cffrml_free(struct cflayer *layer) 60 void cffrml_free(struct cflayer *layer)
62 { 61 {
63 struct cffrml *this = container_obj(layer); 62 struct cffrml *this = container_obj(layer);
64 free_percpu(this->pcpu_refcnt); 63 free_percpu(this->pcpu_refcnt);
65 kfree(layer); 64 kfree(layer);
66 } 65 }
67 66
68 void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up) 67 void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
69 { 68 {
70 this->up = up; 69 this->up = up;
71 } 70 }
72 71
73 void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn) 72 void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn)
74 { 73 {
75 this->dn = dn; 74 this->dn = dn;
76 } 75 }
77 76
78 static u16 cffrml_checksum(u16 chks, void *buf, u16 len) 77 static u16 cffrml_checksum(u16 chks, void *buf, u16 len)
79 { 78 {
80 /* FIXME: FCS should be moved to glue in order to use OS-Specific 79 /* FIXME: FCS should be moved to glue in order to use OS-Specific
81 * solutions 80 * solutions
82 */ 81 */
83 return crc_ccitt(chks, buf, len); 82 return crc_ccitt(chks, buf, len);
84 } 83 }
85 84
86 static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) 85 static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
87 { 86 {
88 u16 tmp; 87 u16 tmp;
89 u16 len; 88 u16 len;
90 u16 hdrchks; 89 u16 hdrchks;
91 u16 pktchks; 90 u16 pktchks;
92 struct cffrml *this; 91 struct cffrml *this;
93 this = container_obj(layr); 92 this = container_obj(layr);
94 93
95 cfpkt_extr_head(pkt, &tmp, 2); 94 cfpkt_extr_head(pkt, &tmp, 2);
96 len = le16_to_cpu(tmp); 95 len = le16_to_cpu(tmp);
97 96
98 /* Subtract for FCS on length if FCS is not used. */ 97 /* Subtract for FCS on length if FCS is not used. */
99 if (!this->dofcs) 98 if (!this->dofcs)
100 len -= 2; 99 len -= 2;
101 100
102 if (cfpkt_setlen(pkt, len) < 0) { 101 if (cfpkt_setlen(pkt, len) < 0) {
103 ++cffrml_rcv_error; 102 ++cffrml_rcv_error;
104 pr_err("Framing length error (%d)\n", len); 103 pr_err("Framing length error (%d)\n", len);
105 cfpkt_destroy(pkt); 104 cfpkt_destroy(pkt);
106 return -EPROTO; 105 return -EPROTO;
107 } 106 }
108 /* 107 /*
109 * Don't do extract if FCS is false, rather do setlen - then we don't 108 * Don't do extract if FCS is false, rather do setlen - then we don't
110 * get a cache-miss. 109 * get a cache-miss.
111 */ 110 */
112 if (this->dofcs) { 111 if (this->dofcs) {
113 cfpkt_extr_trail(pkt, &tmp, 2); 112 cfpkt_extr_trail(pkt, &tmp, 2);
114 hdrchks = le16_to_cpu(tmp); 113 hdrchks = le16_to_cpu(tmp);
115 pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); 114 pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
116 if (pktchks != hdrchks) { 115 if (pktchks != hdrchks) {
117 cfpkt_add_trail(pkt, &tmp, 2); 116 cfpkt_add_trail(pkt, &tmp, 2);
118 ++cffrml_rcv_error; 117 ++cffrml_rcv_error;
119 ++cffrml_rcv_checsum_error; 118 ++cffrml_rcv_checsum_error;
120 pr_info("Frame checksum error (0x%x != 0x%x)\n", 119 pr_info("Frame checksum error (0x%x != 0x%x)\n",
121 hdrchks, pktchks); 120 hdrchks, pktchks);
122 return -EILSEQ; 121 return -EILSEQ;
123 } 122 }
124 } 123 }
125 if (cfpkt_erroneous(pkt)) { 124 if (cfpkt_erroneous(pkt)) {
126 ++cffrml_rcv_error; 125 ++cffrml_rcv_error;
127 pr_err("Packet is erroneous!\n"); 126 pr_err("Packet is erroneous!\n");
128 cfpkt_destroy(pkt); 127 cfpkt_destroy(pkt);
129 return -EPROTO; 128 return -EPROTO;
130 } 129 }
130
131 if (layr->up == NULL) {
132 pr_err("Layr up is missing!\n");
133 cfpkt_destroy(pkt);
134 return -EINVAL;
135 }
136
131 return layr->up->receive(layr->up, pkt); 137 return layr->up->receive(layr->up, pkt);
132 } 138 }
133 139
134 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) 140 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
135 { 141 {
136 int tmp; 142 int tmp;
137 u16 chks; 143 u16 chks;
138 u16 len; 144 u16 len;
139 struct cffrml *this = container_obj(layr); 145 struct cffrml *this = container_obj(layr);
140 if (this->dofcs) { 146 if (this->dofcs) {
141 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); 147 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
142 tmp = cpu_to_le16(chks); 148 tmp = cpu_to_le16(chks);
143 cfpkt_add_trail(pkt, &tmp, 2); 149 cfpkt_add_trail(pkt, &tmp, 2);
144 } else { 150 } else {
145 cfpkt_pad_trail(pkt, 2); 151 cfpkt_pad_trail(pkt, 2);
146 } 152 }
147 len = cfpkt_getlen(pkt); 153 len = cfpkt_getlen(pkt);
148 tmp = cpu_to_le16(len); 154 tmp = cpu_to_le16(len);
149 cfpkt_add_head(pkt, &tmp, 2); 155 cfpkt_add_head(pkt, &tmp, 2);
150 cfpkt_info(pkt)->hdr_len += 2; 156 cfpkt_info(pkt)->hdr_len += 2;
151 if (cfpkt_erroneous(pkt)) { 157 if (cfpkt_erroneous(pkt)) {
152 pr_err("Packet is erroneous!\n"); 158 pr_err("Packet is erroneous!\n");
159 cfpkt_destroy(pkt);
153 return -EPROTO; 160 return -EPROTO;
154 } 161 }
162
163 if (layr->dn == NULL) {
164 cfpkt_destroy(pkt);
165 return -ENODEV;
166
167 }
155 return layr->dn->transmit(layr->dn, pkt); 168 return layr->dn->transmit(layr->dn, pkt);
156 } 169 }
157 170
158 static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 171 static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
159 int phyid) 172 int phyid)
160 { 173 {
161 if (layr->up->ctrlcmd) 174 if (layr->up && layr->up->ctrlcmd)
162 layr->up->ctrlcmd(layr->up, ctrl, layr->id); 175 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
163 } 176 }
164 177
165 void cffrml_put(struct cflayer *layr) 178 void cffrml_put(struct cflayer *layr)
166 { 179 {
167 struct cffrml *this = container_obj(layr); 180 struct cffrml *this = container_obj(layr);
168 if (layr != NULL && this->pcpu_refcnt != NULL) 181 if (layr != NULL && this->pcpu_refcnt != NULL)
169 irqsafe_cpu_dec(*this->pcpu_refcnt); 182 irqsafe_cpu_dec(*this->pcpu_refcnt);
170 } 183 }
171 184
172 void cffrml_hold(struct cflayer *layr) 185 void cffrml_hold(struct cflayer *layr)
173 { 186 {
174 struct cffrml *this = container_obj(layr); 187 struct cffrml *this = container_obj(layr);
175 if (layr != NULL && this->pcpu_refcnt != NULL) 188 if (layr != NULL && this->pcpu_refcnt != NULL)
176 irqsafe_cpu_inc(*this->pcpu_refcnt); 189 irqsafe_cpu_inc(*this->pcpu_refcnt);
177 } 190 }
178 191
179 int cffrml_refcnt_read(struct cflayer *layr) 192 int cffrml_refcnt_read(struct cflayer *layr)
180 { 193 {
181 int i, refcnt = 0; 194 int i, refcnt = 0;
182 struct cffrml *this = container_obj(layr); 195 struct cffrml *this = container_obj(layr);
183 for_each_possible_cpu(i) 196 for_each_possible_cpu(i)
184 refcnt += *per_cpu_ptr(this->pcpu_refcnt, i); 197 refcnt += *per_cpu_ptr(this->pcpu_refcnt, i);
185 return refcnt; 198 return refcnt;
186 } 199 }
1 /* 1 /*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8 8
9 #include <linux/stddef.h> 9 #include <linux/stddef.h>
10 #include <linux/slab.h> 10 #include <linux/slab.h>
11 #include <net/caif/caif_layer.h> 11 #include <net/caif/caif_layer.h>
12 #include <net/caif/cfsrvl.h> 12 #include <net/caif/cfsrvl.h>
13 #include <net/caif/cfpkt.h> 13 #include <net/caif/cfpkt.h>
14 14
/* VEI command byte values: bit 7 set marks a command frame, clear marks
 * plain payload. */
#define VEI_PAYLOAD  0x00
#define VEI_CMD_BIT  0x80
#define VEI_FLOW_OFF 0x81
#define VEI_FLOW_ON  0x80
#define VEI_SET_PIN  0x82

/* Recover the enclosing cfsrvl from its embedded cflayer (layer is at
 * offset 0, asserted in cfvei_create()). */
#define container_obj(layr) container_of(layr, struct cfsrvl, layer)

static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
25 25
26 struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info) 26 struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
27 { 27 {
28 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 28 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
29 if (!vei) { 29 if (!vei) {
30 pr_warn("Out of memory\n"); 30 pr_warn("Out of memory\n");
31 return NULL; 31 return NULL;
32 } 32 }
33 caif_assert(offsetof(struct cfsrvl, layer) == 0); 33 caif_assert(offsetof(struct cfsrvl, layer) == 0);
34 memset(vei, 0, sizeof(struct cfsrvl)); 34 memset(vei, 0, sizeof(struct cfsrvl));
35 cfsrvl_init(vei, channel_id, dev_info, true); 35 cfsrvl_init(vei, channel_id, dev_info, true);
36 vei->layer.receive = cfvei_receive; 36 vei->layer.receive = cfvei_receive;
37 vei->layer.transmit = cfvei_transmit; 37 vei->layer.transmit = cfvei_transmit;
38 snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id); 38 snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
39 return &vei->layer; 39 return &vei->layer;
40 } 40 }
41 41
42 static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt) 42 static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
43 { 43 {
44 u8 cmd; 44 u8 cmd;
45 int ret; 45 int ret;
46 caif_assert(layr->up != NULL); 46 caif_assert(layr->up != NULL);
47 caif_assert(layr->receive != NULL); 47 caif_assert(layr->receive != NULL);
48 caif_assert(layr->ctrlcmd != NULL); 48 caif_assert(layr->ctrlcmd != NULL);
49 49
50 50
51 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { 51 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
52 pr_err("Packet is erroneous!\n"); 52 pr_err("Packet is erroneous!\n");
53 cfpkt_destroy(pkt); 53 cfpkt_destroy(pkt);
54 return -EPROTO; 54 return -EPROTO;
55 } 55 }
56 switch (cmd) { 56 switch (cmd) {
57 case VEI_PAYLOAD: 57 case VEI_PAYLOAD:
58 ret = layr->up->receive(layr->up, pkt); 58 ret = layr->up->receive(layr->up, pkt);
59 return ret; 59 return ret;
60 case VEI_FLOW_OFF: 60 case VEI_FLOW_OFF:
61 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); 61 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
62 cfpkt_destroy(pkt); 62 cfpkt_destroy(pkt);
63 return 0; 63 return 0;
64 case VEI_FLOW_ON: 64 case VEI_FLOW_ON:
65 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); 65 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
66 cfpkt_destroy(pkt); 66 cfpkt_destroy(pkt);
67 return 0; 67 return 0;
68 case VEI_SET_PIN: /* SET RS232 PIN */ 68 case VEI_SET_PIN: /* SET RS232 PIN */
69 cfpkt_destroy(pkt); 69 cfpkt_destroy(pkt);
70 return 0; 70 return 0;
71 default: /* SET RS232 PIN */ 71 default: /* SET RS232 PIN */
72 pr_warn("Unknown VEI control packet %d (0x%x)!\n", cmd, cmd); 72 pr_warn("Unknown VEI control packet %d (0x%x)!\n", cmd, cmd);
73 cfpkt_destroy(pkt); 73 cfpkt_destroy(pkt);
74 return -EPROTO; 74 return -EPROTO;
75 } 75 }
76 } 76 }
77 77
78 static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) 78 static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
79 { 79 {
80 u8 tmp = 0; 80 u8 tmp = 0;
81 struct caif_payload_info *info; 81 struct caif_payload_info *info;
82 int ret; 82 int ret;
83 struct cfsrvl *service = container_obj(layr); 83 struct cfsrvl *service = container_obj(layr);
84 if (!cfsrvl_ready(service, &ret)) 84 if (!cfsrvl_ready(service, &ret))
85 return ret; 85 goto err;
86 caif_assert(layr->dn != NULL); 86 caif_assert(layr->dn != NULL);
87 caif_assert(layr->dn->transmit != NULL); 87 caif_assert(layr->dn->transmit != NULL);
88 88
89 if (cfpkt_add_head(pkt, &tmp, 1) < 0) { 89 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
90 pr_err("Packet is erroneous!\n"); 90 pr_err("Packet is erroneous!\n");
91 return -EPROTO; 91 ret = -EPROTO;
92 goto err;
92 } 93 }
93 94
94 /* Add info-> for MUX-layer to route the packet out. */ 95 /* Add info-> for MUX-layer to route the packet out. */
95 info = cfpkt_info(pkt); 96 info = cfpkt_info(pkt);
96 info->channel_id = service->layer.id; 97 info->channel_id = service->layer.id;
97 info->hdr_len = 1; 98 info->hdr_len = 1;
98 info->dev_info = &service->dev_info; 99 info->dev_info = &service->dev_info;
99 return layr->dn->transmit(layr->dn, pkt); 100 return layr->dn->transmit(layr->dn, pkt);
101 err:
102 cfpkt_destroy(pkt);
103 return ret;
100 } 104 }
101 105