Commit 943da25d95c7e8fd8c39dbf09e030f5da46f5d85

Authored by Marcel Holtmann
1 parent b914a250e7

Bluetooth: Add controller types for BR/EDR and 802.11 AMP

With the Bluetooth 3.0 specification and the introduction of alternate
MAC/PHY (AMP) support, it is required to differentiate between primary
BR/EDR controllers and 802.11 AMP controllers. So introduce a special
type inside HCI device for differentiation.

For now all AMP controllers will be treated as raw devices until an
AMP manager has been implemented.

Signed-off-by: Marcel Holtmann <marcel@holtmann.org>

Showing 4 changed files with 30 additions and 1 deletions Inline Diff

include/net/bluetooth/hci.h
1 /* 1 /*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 6
7 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as 8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation; 9 published by the Free Software Foundation;
10 10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED. 22 SOFTWARE IS DISCLAIMED.
23 */ 23 */
24 24
25 #ifndef __HCI_H 25 #ifndef __HCI_H
26 #define __HCI_H 26 #define __HCI_H
27 27
28 #define HCI_MAX_ACL_SIZE 1024 28 #define HCI_MAX_ACL_SIZE 1024
29 #define HCI_MAX_SCO_SIZE 255 29 #define HCI_MAX_SCO_SIZE 255
30 #define HCI_MAX_EVENT_SIZE 260 30 #define HCI_MAX_EVENT_SIZE 260
31 #define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4) 31 #define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
32 32
33 /* HCI dev events */ 33 /* HCI dev events */
34 #define HCI_DEV_REG 1 34 #define HCI_DEV_REG 1
35 #define HCI_DEV_UNREG 2 35 #define HCI_DEV_UNREG 2
36 #define HCI_DEV_UP 3 36 #define HCI_DEV_UP 3
37 #define HCI_DEV_DOWN 4 37 #define HCI_DEV_DOWN 4
38 #define HCI_DEV_SUSPEND 5 38 #define HCI_DEV_SUSPEND 5
39 #define HCI_DEV_RESUME 6 39 #define HCI_DEV_RESUME 6
40 40
41 /* HCI notify events */ 41 /* HCI notify events */
42 #define HCI_NOTIFY_CONN_ADD 1 42 #define HCI_NOTIFY_CONN_ADD 1
43 #define HCI_NOTIFY_CONN_DEL 2 43 #define HCI_NOTIFY_CONN_DEL 2
44 #define HCI_NOTIFY_VOICE_SETTING 3 44 #define HCI_NOTIFY_VOICE_SETTING 3
45 45
46 /* HCI bus types */ 46 /* HCI bus types */
47 #define HCI_VIRTUAL 0 47 #define HCI_VIRTUAL 0
48 #define HCI_USB 1 48 #define HCI_USB 1
49 #define HCI_PCCARD 2 49 #define HCI_PCCARD 2
50 #define HCI_UART 3 50 #define HCI_UART 3
51 #define HCI_RS232 4 51 #define HCI_RS232 4
52 #define HCI_PCI 5 52 #define HCI_PCI 5
53 #define HCI_SDIO 6 53 #define HCI_SDIO 6
54 54
55 /* HCI controller types */
56 #define HCI_BREDR 0x00
57 #define HCI_80211 0x01
58
55 /* HCI device quirks */ 59 /* HCI device quirks */
56 enum { 60 enum {
57 HCI_QUIRK_NO_RESET, 61 HCI_QUIRK_NO_RESET,
58 HCI_QUIRK_RAW_DEVICE, 62 HCI_QUIRK_RAW_DEVICE,
59 HCI_QUIRK_FIXUP_BUFFER_SIZE 63 HCI_QUIRK_FIXUP_BUFFER_SIZE
60 }; 64 };
61 65
62 /* HCI device flags */ 66 /* HCI device flags */
63 enum { 67 enum {
64 HCI_UP, 68 HCI_UP,
65 HCI_INIT, 69 HCI_INIT,
66 HCI_RUNNING, 70 HCI_RUNNING,
67 71
68 HCI_PSCAN, 72 HCI_PSCAN,
69 HCI_ISCAN, 73 HCI_ISCAN,
70 HCI_AUTH, 74 HCI_AUTH,
71 HCI_ENCRYPT, 75 HCI_ENCRYPT,
72 HCI_INQUIRY, 76 HCI_INQUIRY,
73 77
74 HCI_RAW, 78 HCI_RAW,
75 }; 79 };
76 80
77 /* HCI ioctl defines */ 81 /* HCI ioctl defines */
78 #define HCIDEVUP _IOW('H', 201, int) 82 #define HCIDEVUP _IOW('H', 201, int)
79 #define HCIDEVDOWN _IOW('H', 202, int) 83 #define HCIDEVDOWN _IOW('H', 202, int)
80 #define HCIDEVRESET _IOW('H', 203, int) 84 #define HCIDEVRESET _IOW('H', 203, int)
81 #define HCIDEVRESTAT _IOW('H', 204, int) 85 #define HCIDEVRESTAT _IOW('H', 204, int)
82 86
83 #define HCIGETDEVLIST _IOR('H', 210, int) 87 #define HCIGETDEVLIST _IOR('H', 210, int)
84 #define HCIGETDEVINFO _IOR('H', 211, int) 88 #define HCIGETDEVINFO _IOR('H', 211, int)
85 #define HCIGETCONNLIST _IOR('H', 212, int) 89 #define HCIGETCONNLIST _IOR('H', 212, int)
86 #define HCIGETCONNINFO _IOR('H', 213, int) 90 #define HCIGETCONNINFO _IOR('H', 213, int)
87 #define HCIGETAUTHINFO _IOR('H', 215, int) 91 #define HCIGETAUTHINFO _IOR('H', 215, int)
88 92
89 #define HCISETRAW _IOW('H', 220, int) 93 #define HCISETRAW _IOW('H', 220, int)
90 #define HCISETSCAN _IOW('H', 221, int) 94 #define HCISETSCAN _IOW('H', 221, int)
91 #define HCISETAUTH _IOW('H', 222, int) 95 #define HCISETAUTH _IOW('H', 222, int)
92 #define HCISETENCRYPT _IOW('H', 223, int) 96 #define HCISETENCRYPT _IOW('H', 223, int)
93 #define HCISETPTYPE _IOW('H', 224, int) 97 #define HCISETPTYPE _IOW('H', 224, int)
94 #define HCISETLINKPOL _IOW('H', 225, int) 98 #define HCISETLINKPOL _IOW('H', 225, int)
95 #define HCISETLINKMODE _IOW('H', 226, int) 99 #define HCISETLINKMODE _IOW('H', 226, int)
96 #define HCISETACLMTU _IOW('H', 227, int) 100 #define HCISETACLMTU _IOW('H', 227, int)
97 #define HCISETSCOMTU _IOW('H', 228, int) 101 #define HCISETSCOMTU _IOW('H', 228, int)
98 102
99 #define HCIINQUIRY _IOR('H', 240, int) 103 #define HCIINQUIRY _IOR('H', 240, int)
100 104
101 /* HCI timeouts */ 105 /* HCI timeouts */
102 #define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */ 106 #define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */
103 #define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ 107 #define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */
104 #define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */ 108 #define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */
105 #define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */ 109 #define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
106 #define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ 110 #define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
107 111
108 /* HCI data types */ 112 /* HCI data types */
109 #define HCI_COMMAND_PKT 0x01 113 #define HCI_COMMAND_PKT 0x01
110 #define HCI_ACLDATA_PKT 0x02 114 #define HCI_ACLDATA_PKT 0x02
111 #define HCI_SCODATA_PKT 0x03 115 #define HCI_SCODATA_PKT 0x03
112 #define HCI_EVENT_PKT 0x04 116 #define HCI_EVENT_PKT 0x04
113 #define HCI_VENDOR_PKT 0xff 117 #define HCI_VENDOR_PKT 0xff
114 118
115 /* HCI packet types */ 119 /* HCI packet types */
116 #define HCI_DM1 0x0008 120 #define HCI_DM1 0x0008
117 #define HCI_DM3 0x0400 121 #define HCI_DM3 0x0400
118 #define HCI_DM5 0x4000 122 #define HCI_DM5 0x4000
119 #define HCI_DH1 0x0010 123 #define HCI_DH1 0x0010
120 #define HCI_DH3 0x0800 124 #define HCI_DH3 0x0800
121 #define HCI_DH5 0x8000 125 #define HCI_DH5 0x8000
122 126
123 #define HCI_HV1 0x0020 127 #define HCI_HV1 0x0020
124 #define HCI_HV2 0x0040 128 #define HCI_HV2 0x0040
125 #define HCI_HV3 0x0080 129 #define HCI_HV3 0x0080
126 130
127 #define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3) 131 #define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3)
128 #define ACL_PTYPE_MASK (~SCO_PTYPE_MASK) 132 #define ACL_PTYPE_MASK (~SCO_PTYPE_MASK)
129 133
130 /* eSCO packet types */ 134 /* eSCO packet types */
131 #define ESCO_HV1 0x0001 135 #define ESCO_HV1 0x0001
132 #define ESCO_HV2 0x0002 136 #define ESCO_HV2 0x0002
133 #define ESCO_HV3 0x0004 137 #define ESCO_HV3 0x0004
134 #define ESCO_EV3 0x0008 138 #define ESCO_EV3 0x0008
135 #define ESCO_EV4 0x0010 139 #define ESCO_EV4 0x0010
136 #define ESCO_EV5 0x0020 140 #define ESCO_EV5 0x0020
137 #define ESCO_2EV3 0x0040 141 #define ESCO_2EV3 0x0040
138 #define ESCO_3EV3 0x0080 142 #define ESCO_3EV3 0x0080
139 #define ESCO_2EV5 0x0100 143 #define ESCO_2EV5 0x0100
140 #define ESCO_3EV5 0x0200 144 #define ESCO_3EV5 0x0200
141 145
142 #define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3) 146 #define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
143 #define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5) 147 #define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
144 148
145 /* ACL flags */ 149 /* ACL flags */
146 #define ACL_CONT 0x01 150 #define ACL_CONT 0x01
147 #define ACL_START 0x02 151 #define ACL_START 0x02
148 #define ACL_ACTIVE_BCAST 0x04 152 #define ACL_ACTIVE_BCAST 0x04
149 #define ACL_PICO_BCAST 0x08 153 #define ACL_PICO_BCAST 0x08
150 154
151 /* Baseband links */ 155 /* Baseband links */
152 #define SCO_LINK 0x00 156 #define SCO_LINK 0x00
153 #define ACL_LINK 0x01 157 #define ACL_LINK 0x01
154 #define ESCO_LINK 0x02 158 #define ESCO_LINK 0x02
155 159
156 /* LMP features */ 160 /* LMP features */
157 #define LMP_3SLOT 0x01 161 #define LMP_3SLOT 0x01
158 #define LMP_5SLOT 0x02 162 #define LMP_5SLOT 0x02
159 #define LMP_ENCRYPT 0x04 163 #define LMP_ENCRYPT 0x04
160 #define LMP_SOFFSET 0x08 164 #define LMP_SOFFSET 0x08
161 #define LMP_TACCURACY 0x10 165 #define LMP_TACCURACY 0x10
162 #define LMP_RSWITCH 0x20 166 #define LMP_RSWITCH 0x20
163 #define LMP_HOLD 0x40 167 #define LMP_HOLD 0x40
164 #define LMP_SNIFF 0x80 168 #define LMP_SNIFF 0x80
165 169
166 #define LMP_PARK 0x01 170 #define LMP_PARK 0x01
167 #define LMP_RSSI 0x02 171 #define LMP_RSSI 0x02
168 #define LMP_QUALITY 0x04 172 #define LMP_QUALITY 0x04
169 #define LMP_SCO 0x08 173 #define LMP_SCO 0x08
170 #define LMP_HV2 0x10 174 #define LMP_HV2 0x10
171 #define LMP_HV3 0x20 175 #define LMP_HV3 0x20
172 #define LMP_ULAW 0x40 176 #define LMP_ULAW 0x40
173 #define LMP_ALAW 0x80 177 #define LMP_ALAW 0x80
174 178
175 #define LMP_CVSD 0x01 179 #define LMP_CVSD 0x01
176 #define LMP_PSCHEME 0x02 180 #define LMP_PSCHEME 0x02
177 #define LMP_PCONTROL 0x04 181 #define LMP_PCONTROL 0x04
178 182
179 #define LMP_ESCO 0x80 183 #define LMP_ESCO 0x80
180 184
181 #define LMP_EV4 0x01 185 #define LMP_EV4 0x01
182 #define LMP_EV5 0x02 186 #define LMP_EV5 0x02
183 187
184 #define LMP_SNIFF_SUBR 0x02 188 #define LMP_SNIFF_SUBR 0x02
185 #define LMP_EDR_ESCO_2M 0x20 189 #define LMP_EDR_ESCO_2M 0x20
186 #define LMP_EDR_ESCO_3M 0x40 190 #define LMP_EDR_ESCO_3M 0x40
187 #define LMP_EDR_3S_ESCO 0x80 191 #define LMP_EDR_3S_ESCO 0x80
188 192
189 #define LMP_SIMPLE_PAIR 0x08 193 #define LMP_SIMPLE_PAIR 0x08
190 194
191 /* Connection modes */ 195 /* Connection modes */
192 #define HCI_CM_ACTIVE 0x0000 196 #define HCI_CM_ACTIVE 0x0000
193 #define HCI_CM_HOLD 0x0001 197 #define HCI_CM_HOLD 0x0001
194 #define HCI_CM_SNIFF 0x0002 198 #define HCI_CM_SNIFF 0x0002
195 #define HCI_CM_PARK 0x0003 199 #define HCI_CM_PARK 0x0003
196 200
197 /* Link policies */ 201 /* Link policies */
198 #define HCI_LP_RSWITCH 0x0001 202 #define HCI_LP_RSWITCH 0x0001
199 #define HCI_LP_HOLD 0x0002 203 #define HCI_LP_HOLD 0x0002
200 #define HCI_LP_SNIFF 0x0004 204 #define HCI_LP_SNIFF 0x0004
201 #define HCI_LP_PARK 0x0008 205 #define HCI_LP_PARK 0x0008
202 206
203 /* Link modes */ 207 /* Link modes */
204 #define HCI_LM_ACCEPT 0x8000 208 #define HCI_LM_ACCEPT 0x8000
205 #define HCI_LM_MASTER 0x0001 209 #define HCI_LM_MASTER 0x0001
206 #define HCI_LM_AUTH 0x0002 210 #define HCI_LM_AUTH 0x0002
207 #define HCI_LM_ENCRYPT 0x0004 211 #define HCI_LM_ENCRYPT 0x0004
208 #define HCI_LM_TRUSTED 0x0008 212 #define HCI_LM_TRUSTED 0x0008
209 #define HCI_LM_RELIABLE 0x0010 213 #define HCI_LM_RELIABLE 0x0010
210 #define HCI_LM_SECURE 0x0020 214 #define HCI_LM_SECURE 0x0020
211 215
212 /* Authentication types */ 216 /* Authentication types */
213 #define HCI_AT_NO_BONDING 0x00 217 #define HCI_AT_NO_BONDING 0x00
214 #define HCI_AT_NO_BONDING_MITM 0x01 218 #define HCI_AT_NO_BONDING_MITM 0x01
215 #define HCI_AT_DEDICATED_BONDING 0x02 219 #define HCI_AT_DEDICATED_BONDING 0x02
216 #define HCI_AT_DEDICATED_BONDING_MITM 0x03 220 #define HCI_AT_DEDICATED_BONDING_MITM 0x03
217 #define HCI_AT_GENERAL_BONDING 0x04 221 #define HCI_AT_GENERAL_BONDING 0x04
218 #define HCI_AT_GENERAL_BONDING_MITM 0x05 222 #define HCI_AT_GENERAL_BONDING_MITM 0x05
219 223
220 /* ----- HCI Commands ---- */ 224 /* ----- HCI Commands ---- */
221 #define HCI_OP_INQUIRY 0x0401 225 #define HCI_OP_INQUIRY 0x0401
222 struct hci_cp_inquiry { 226 struct hci_cp_inquiry {
223 __u8 lap[3]; 227 __u8 lap[3];
224 __u8 length; 228 __u8 length;
225 __u8 num_rsp; 229 __u8 num_rsp;
226 } __attribute__ ((packed)); 230 } __attribute__ ((packed));
227 231
228 #define HCI_OP_INQUIRY_CANCEL 0x0402 232 #define HCI_OP_INQUIRY_CANCEL 0x0402
229 233
230 #define HCI_OP_EXIT_PERIODIC_INQ 0x0404 234 #define HCI_OP_EXIT_PERIODIC_INQ 0x0404
231 235
232 #define HCI_OP_CREATE_CONN 0x0405 236 #define HCI_OP_CREATE_CONN 0x0405
233 struct hci_cp_create_conn { 237 struct hci_cp_create_conn {
234 bdaddr_t bdaddr; 238 bdaddr_t bdaddr;
235 __le16 pkt_type; 239 __le16 pkt_type;
236 __u8 pscan_rep_mode; 240 __u8 pscan_rep_mode;
237 __u8 pscan_mode; 241 __u8 pscan_mode;
238 __le16 clock_offset; 242 __le16 clock_offset;
239 __u8 role_switch; 243 __u8 role_switch;
240 } __attribute__ ((packed)); 244 } __attribute__ ((packed));
241 245
242 #define HCI_OP_DISCONNECT 0x0406 246 #define HCI_OP_DISCONNECT 0x0406
243 struct hci_cp_disconnect { 247 struct hci_cp_disconnect {
244 __le16 handle; 248 __le16 handle;
245 __u8 reason; 249 __u8 reason;
246 } __attribute__ ((packed)); 250 } __attribute__ ((packed));
247 251
248 #define HCI_OP_ADD_SCO 0x0407 252 #define HCI_OP_ADD_SCO 0x0407
249 struct hci_cp_add_sco { 253 struct hci_cp_add_sco {
250 __le16 handle; 254 __le16 handle;
251 __le16 pkt_type; 255 __le16 pkt_type;
252 } __attribute__ ((packed)); 256 } __attribute__ ((packed));
253 257
254 #define HCI_OP_CREATE_CONN_CANCEL 0x0408 258 #define HCI_OP_CREATE_CONN_CANCEL 0x0408
255 struct hci_cp_create_conn_cancel { 259 struct hci_cp_create_conn_cancel {
256 bdaddr_t bdaddr; 260 bdaddr_t bdaddr;
257 } __attribute__ ((packed)); 261 } __attribute__ ((packed));
258 262
259 #define HCI_OP_ACCEPT_CONN_REQ 0x0409 263 #define HCI_OP_ACCEPT_CONN_REQ 0x0409
260 struct hci_cp_accept_conn_req { 264 struct hci_cp_accept_conn_req {
261 bdaddr_t bdaddr; 265 bdaddr_t bdaddr;
262 __u8 role; 266 __u8 role;
263 } __attribute__ ((packed)); 267 } __attribute__ ((packed));
264 268
265 #define HCI_OP_REJECT_CONN_REQ 0x040a 269 #define HCI_OP_REJECT_CONN_REQ 0x040a
266 struct hci_cp_reject_conn_req { 270 struct hci_cp_reject_conn_req {
267 bdaddr_t bdaddr; 271 bdaddr_t bdaddr;
268 __u8 reason; 272 __u8 reason;
269 } __attribute__ ((packed)); 273 } __attribute__ ((packed));
270 274
271 #define HCI_OP_LINK_KEY_REPLY 0x040b 275 #define HCI_OP_LINK_KEY_REPLY 0x040b
272 struct hci_cp_link_key_reply { 276 struct hci_cp_link_key_reply {
273 bdaddr_t bdaddr; 277 bdaddr_t bdaddr;
274 __u8 link_key[16]; 278 __u8 link_key[16];
275 } __attribute__ ((packed)); 279 } __attribute__ ((packed));
276 280
277 #define HCI_OP_LINK_KEY_NEG_REPLY 0x040c 281 #define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
278 struct hci_cp_link_key_neg_reply { 282 struct hci_cp_link_key_neg_reply {
279 bdaddr_t bdaddr; 283 bdaddr_t bdaddr;
280 } __attribute__ ((packed)); 284 } __attribute__ ((packed));
281 285
282 #define HCI_OP_PIN_CODE_REPLY 0x040d 286 #define HCI_OP_PIN_CODE_REPLY 0x040d
283 struct hci_cp_pin_code_reply { 287 struct hci_cp_pin_code_reply {
284 bdaddr_t bdaddr; 288 bdaddr_t bdaddr;
285 __u8 pin_len; 289 __u8 pin_len;
286 __u8 pin_code[16]; 290 __u8 pin_code[16];
287 } __attribute__ ((packed)); 291 } __attribute__ ((packed));
288 292
289 #define HCI_OP_PIN_CODE_NEG_REPLY 0x040e 293 #define HCI_OP_PIN_CODE_NEG_REPLY 0x040e
290 struct hci_cp_pin_code_neg_reply { 294 struct hci_cp_pin_code_neg_reply {
291 bdaddr_t bdaddr; 295 bdaddr_t bdaddr;
292 } __attribute__ ((packed)); 296 } __attribute__ ((packed));
293 297
294 #define HCI_OP_CHANGE_CONN_PTYPE 0x040f 298 #define HCI_OP_CHANGE_CONN_PTYPE 0x040f
295 struct hci_cp_change_conn_ptype { 299 struct hci_cp_change_conn_ptype {
296 __le16 handle; 300 __le16 handle;
297 __le16 pkt_type; 301 __le16 pkt_type;
298 } __attribute__ ((packed)); 302 } __attribute__ ((packed));
299 303
300 #define HCI_OP_AUTH_REQUESTED 0x0411 304 #define HCI_OP_AUTH_REQUESTED 0x0411
301 struct hci_cp_auth_requested { 305 struct hci_cp_auth_requested {
302 __le16 handle; 306 __le16 handle;
303 } __attribute__ ((packed)); 307 } __attribute__ ((packed));
304 308
305 #define HCI_OP_SET_CONN_ENCRYPT 0x0413 309 #define HCI_OP_SET_CONN_ENCRYPT 0x0413
306 struct hci_cp_set_conn_encrypt { 310 struct hci_cp_set_conn_encrypt {
307 __le16 handle; 311 __le16 handle;
308 __u8 encrypt; 312 __u8 encrypt;
309 } __attribute__ ((packed)); 313 } __attribute__ ((packed));
310 314
311 #define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415 315 #define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415
312 struct hci_cp_change_conn_link_key { 316 struct hci_cp_change_conn_link_key {
313 __le16 handle; 317 __le16 handle;
314 } __attribute__ ((packed)); 318 } __attribute__ ((packed));
315 319
316 #define HCI_OP_REMOTE_NAME_REQ 0x0419 320 #define HCI_OP_REMOTE_NAME_REQ 0x0419
317 struct hci_cp_remote_name_req { 321 struct hci_cp_remote_name_req {
318 bdaddr_t bdaddr; 322 bdaddr_t bdaddr;
319 __u8 pscan_rep_mode; 323 __u8 pscan_rep_mode;
320 __u8 pscan_mode; 324 __u8 pscan_mode;
321 __le16 clock_offset; 325 __le16 clock_offset;
322 } __attribute__ ((packed)); 326 } __attribute__ ((packed));
323 327
324 #define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a 328 #define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a
325 struct hci_cp_remote_name_req_cancel { 329 struct hci_cp_remote_name_req_cancel {
326 bdaddr_t bdaddr; 330 bdaddr_t bdaddr;
327 } __attribute__ ((packed)); 331 } __attribute__ ((packed));
328 332
329 #define HCI_OP_READ_REMOTE_FEATURES 0x041b 333 #define HCI_OP_READ_REMOTE_FEATURES 0x041b
330 struct hci_cp_read_remote_features { 334 struct hci_cp_read_remote_features {
331 __le16 handle; 335 __le16 handle;
332 } __attribute__ ((packed)); 336 } __attribute__ ((packed));
333 337
334 #define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c 338 #define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c
335 struct hci_cp_read_remote_ext_features { 339 struct hci_cp_read_remote_ext_features {
336 __le16 handle; 340 __le16 handle;
337 __u8 page; 341 __u8 page;
338 } __attribute__ ((packed)); 342 } __attribute__ ((packed));
339 343
340 #define HCI_OP_READ_REMOTE_VERSION 0x041d 344 #define HCI_OP_READ_REMOTE_VERSION 0x041d
341 struct hci_cp_read_remote_version { 345 struct hci_cp_read_remote_version {
342 __le16 handle; 346 __le16 handle;
343 } __attribute__ ((packed)); 347 } __attribute__ ((packed));
344 348
345 #define HCI_OP_SETUP_SYNC_CONN 0x0428 349 #define HCI_OP_SETUP_SYNC_CONN 0x0428
346 struct hci_cp_setup_sync_conn { 350 struct hci_cp_setup_sync_conn {
347 __le16 handle; 351 __le16 handle;
348 __le32 tx_bandwidth; 352 __le32 tx_bandwidth;
349 __le32 rx_bandwidth; 353 __le32 rx_bandwidth;
350 __le16 max_latency; 354 __le16 max_latency;
351 __le16 voice_setting; 355 __le16 voice_setting;
352 __u8 retrans_effort; 356 __u8 retrans_effort;
353 __le16 pkt_type; 357 __le16 pkt_type;
354 } __attribute__ ((packed)); 358 } __attribute__ ((packed));
355 359
356 #define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429 360 #define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429
357 struct hci_cp_accept_sync_conn_req { 361 struct hci_cp_accept_sync_conn_req {
358 bdaddr_t bdaddr; 362 bdaddr_t bdaddr;
359 __le32 tx_bandwidth; 363 __le32 tx_bandwidth;
360 __le32 rx_bandwidth; 364 __le32 rx_bandwidth;
361 __le16 max_latency; 365 __le16 max_latency;
362 __le16 content_format; 366 __le16 content_format;
363 __u8 retrans_effort; 367 __u8 retrans_effort;
364 __le16 pkt_type; 368 __le16 pkt_type;
365 } __attribute__ ((packed)); 369 } __attribute__ ((packed));
366 370
367 #define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a 371 #define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a
368 struct hci_cp_reject_sync_conn_req { 372 struct hci_cp_reject_sync_conn_req {
369 bdaddr_t bdaddr; 373 bdaddr_t bdaddr;
370 __u8 reason; 374 __u8 reason;
371 } __attribute__ ((packed)); 375 } __attribute__ ((packed));
372 376
373 #define HCI_OP_SNIFF_MODE 0x0803 377 #define HCI_OP_SNIFF_MODE 0x0803
374 struct hci_cp_sniff_mode { 378 struct hci_cp_sniff_mode {
375 __le16 handle; 379 __le16 handle;
376 __le16 max_interval; 380 __le16 max_interval;
377 __le16 min_interval; 381 __le16 min_interval;
378 __le16 attempt; 382 __le16 attempt;
379 __le16 timeout; 383 __le16 timeout;
380 } __attribute__ ((packed)); 384 } __attribute__ ((packed));
381 385
382 #define HCI_OP_EXIT_SNIFF_MODE 0x0804 386 #define HCI_OP_EXIT_SNIFF_MODE 0x0804
383 struct hci_cp_exit_sniff_mode { 387 struct hci_cp_exit_sniff_mode {
384 __le16 handle; 388 __le16 handle;
385 } __attribute__ ((packed)); 389 } __attribute__ ((packed));
386 390
387 #define HCI_OP_ROLE_DISCOVERY 0x0809 391 #define HCI_OP_ROLE_DISCOVERY 0x0809
388 struct hci_cp_role_discovery { 392 struct hci_cp_role_discovery {
389 __le16 handle; 393 __le16 handle;
390 } __attribute__ ((packed)); 394 } __attribute__ ((packed));
391 struct hci_rp_role_discovery { 395 struct hci_rp_role_discovery {
392 __u8 status; 396 __u8 status;
393 __le16 handle; 397 __le16 handle;
394 __u8 role; 398 __u8 role;
395 } __attribute__ ((packed)); 399 } __attribute__ ((packed));
396 400
397 #define HCI_OP_SWITCH_ROLE 0x080b 401 #define HCI_OP_SWITCH_ROLE 0x080b
398 struct hci_cp_switch_role { 402 struct hci_cp_switch_role {
399 bdaddr_t bdaddr; 403 bdaddr_t bdaddr;
400 __u8 role; 404 __u8 role;
401 } __attribute__ ((packed)); 405 } __attribute__ ((packed));
402 406
403 #define HCI_OP_READ_LINK_POLICY 0x080c 407 #define HCI_OP_READ_LINK_POLICY 0x080c
404 struct hci_cp_read_link_policy { 408 struct hci_cp_read_link_policy {
405 __le16 handle; 409 __le16 handle;
406 } __attribute__ ((packed)); 410 } __attribute__ ((packed));
407 struct hci_rp_read_link_policy { 411 struct hci_rp_read_link_policy {
408 __u8 status; 412 __u8 status;
409 __le16 handle; 413 __le16 handle;
410 __le16 policy; 414 __le16 policy;
411 } __attribute__ ((packed)); 415 } __attribute__ ((packed));
412 416
413 #define HCI_OP_WRITE_LINK_POLICY 0x080d 417 #define HCI_OP_WRITE_LINK_POLICY 0x080d
414 struct hci_cp_write_link_policy { 418 struct hci_cp_write_link_policy {
415 __le16 handle; 419 __le16 handle;
416 __le16 policy; 420 __le16 policy;
417 } __attribute__ ((packed)); 421 } __attribute__ ((packed));
418 struct hci_rp_write_link_policy { 422 struct hci_rp_write_link_policy {
419 __u8 status; 423 __u8 status;
420 __le16 handle; 424 __le16 handle;
421 } __attribute__ ((packed)); 425 } __attribute__ ((packed));
422 426
423 #define HCI_OP_READ_DEF_LINK_POLICY 0x080e 427 #define HCI_OP_READ_DEF_LINK_POLICY 0x080e
424 struct hci_rp_read_def_link_policy { 428 struct hci_rp_read_def_link_policy {
425 __u8 status; 429 __u8 status;
426 __le16 policy; 430 __le16 policy;
427 } __attribute__ ((packed)); 431 } __attribute__ ((packed));
428 432
429 #define HCI_OP_WRITE_DEF_LINK_POLICY 0x080f 433 #define HCI_OP_WRITE_DEF_LINK_POLICY 0x080f
430 struct hci_cp_write_def_link_policy { 434 struct hci_cp_write_def_link_policy {
431 __le16 policy; 435 __le16 policy;
432 } __attribute__ ((packed)); 436 } __attribute__ ((packed));
433 437
434 #define HCI_OP_SNIFF_SUBRATE 0x0811 438 #define HCI_OP_SNIFF_SUBRATE 0x0811
435 struct hci_cp_sniff_subrate { 439 struct hci_cp_sniff_subrate {
436 __le16 handle; 440 __le16 handle;
437 __le16 max_latency; 441 __le16 max_latency;
438 __le16 min_remote_timeout; 442 __le16 min_remote_timeout;
439 __le16 min_local_timeout; 443 __le16 min_local_timeout;
440 } __attribute__ ((packed)); 444 } __attribute__ ((packed));
441 445
442 #define HCI_OP_SET_EVENT_MASK 0x0c01 446 #define HCI_OP_SET_EVENT_MASK 0x0c01
443 struct hci_cp_set_event_mask { 447 struct hci_cp_set_event_mask {
444 __u8 mask[8]; 448 __u8 mask[8];
445 } __attribute__ ((packed)); 449 } __attribute__ ((packed));
446 450
447 #define HCI_OP_RESET 0x0c03 451 #define HCI_OP_RESET 0x0c03
448 452
449 #define HCI_OP_SET_EVENT_FLT 0x0c05 453 #define HCI_OP_SET_EVENT_FLT 0x0c05
450 struct hci_cp_set_event_flt { 454 struct hci_cp_set_event_flt {
451 __u8 flt_type; 455 __u8 flt_type;
452 __u8 cond_type; 456 __u8 cond_type;
453 __u8 condition[0]; 457 __u8 condition[0];
454 } __attribute__ ((packed)); 458 } __attribute__ ((packed));
455 459
456 /* Filter types */ 460 /* Filter types */
457 #define HCI_FLT_CLEAR_ALL 0x00 461 #define HCI_FLT_CLEAR_ALL 0x00
458 #define HCI_FLT_INQ_RESULT 0x01 462 #define HCI_FLT_INQ_RESULT 0x01
459 #define HCI_FLT_CONN_SETUP 0x02 463 #define HCI_FLT_CONN_SETUP 0x02
460 464
461 /* CONN_SETUP Condition types */ 465 /* CONN_SETUP Condition types */
462 #define HCI_CONN_SETUP_ALLOW_ALL 0x00 466 #define HCI_CONN_SETUP_ALLOW_ALL 0x00
463 #define HCI_CONN_SETUP_ALLOW_CLASS 0x01 467 #define HCI_CONN_SETUP_ALLOW_CLASS 0x01
464 #define HCI_CONN_SETUP_ALLOW_BDADDR 0x02 468 #define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
465 469
466 /* CONN_SETUP Conditions */ 470 /* CONN_SETUP Conditions */
467 #define HCI_CONN_SETUP_AUTO_OFF 0x01 471 #define HCI_CONN_SETUP_AUTO_OFF 0x01
468 #define HCI_CONN_SETUP_AUTO_ON 0x02 472 #define HCI_CONN_SETUP_AUTO_ON 0x02
469 473
470 #define HCI_OP_WRITE_LOCAL_NAME 0x0c13 474 #define HCI_OP_WRITE_LOCAL_NAME 0x0c13
471 struct hci_cp_write_local_name { 475 struct hci_cp_write_local_name {
472 __u8 name[248]; 476 __u8 name[248];
473 } __attribute__ ((packed)); 477 } __attribute__ ((packed));
474 478
475 #define HCI_OP_READ_LOCAL_NAME 0x0c14 479 #define HCI_OP_READ_LOCAL_NAME 0x0c14
476 struct hci_rp_read_local_name { 480 struct hci_rp_read_local_name {
477 __u8 status; 481 __u8 status;
478 __u8 name[248]; 482 __u8 name[248];
479 } __attribute__ ((packed)); 483 } __attribute__ ((packed));
480 484
481 #define HCI_OP_WRITE_CA_TIMEOUT 0x0c16 485 #define HCI_OP_WRITE_CA_TIMEOUT 0x0c16
482 486
483 #define HCI_OP_WRITE_PG_TIMEOUT 0x0c18 487 #define HCI_OP_WRITE_PG_TIMEOUT 0x0c18
484 488
485 #define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a 489 #define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
486 #define SCAN_DISABLED 0x00 490 #define SCAN_DISABLED 0x00
487 #define SCAN_INQUIRY 0x01 491 #define SCAN_INQUIRY 0x01
488 #define SCAN_PAGE 0x02 492 #define SCAN_PAGE 0x02
489 493
490 #define HCI_OP_READ_AUTH_ENABLE 0x0c1f 494 #define HCI_OP_READ_AUTH_ENABLE 0x0c1f
491 495
492 #define HCI_OP_WRITE_AUTH_ENABLE 0x0c20 496 #define HCI_OP_WRITE_AUTH_ENABLE 0x0c20
493 #define AUTH_DISABLED 0x00 497 #define AUTH_DISABLED 0x00
494 #define AUTH_ENABLED 0x01 498 #define AUTH_ENABLED 0x01
495 499
496 #define HCI_OP_READ_ENCRYPT_MODE 0x0c21 500 #define HCI_OP_READ_ENCRYPT_MODE 0x0c21
497 501
498 #define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22 502 #define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22
499 #define ENCRYPT_DISABLED 0x00 503 #define ENCRYPT_DISABLED 0x00
500 #define ENCRYPT_P2P 0x01 504 #define ENCRYPT_P2P 0x01
501 #define ENCRYPT_BOTH 0x02 505 #define ENCRYPT_BOTH 0x02
502 506
503 #define HCI_OP_READ_CLASS_OF_DEV 0x0c23 507 #define HCI_OP_READ_CLASS_OF_DEV 0x0c23
504 struct hci_rp_read_class_of_dev { 508 struct hci_rp_read_class_of_dev {
505 __u8 status; 509 __u8 status;
506 __u8 dev_class[3]; 510 __u8 dev_class[3];
507 } __attribute__ ((packed)); 511 } __attribute__ ((packed));
508 512
509 #define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24 513 #define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24
510 struct hci_cp_write_class_of_dev { 514 struct hci_cp_write_class_of_dev {
511 __u8 dev_class[3]; 515 __u8 dev_class[3];
512 } __attribute__ ((packed)); 516 } __attribute__ ((packed));
513 517
514 #define HCI_OP_READ_VOICE_SETTING 0x0c25 518 #define HCI_OP_READ_VOICE_SETTING 0x0c25
515 struct hci_rp_read_voice_setting { 519 struct hci_rp_read_voice_setting {
516 __u8 status; 520 __u8 status;
517 __le16 voice_setting; 521 __le16 voice_setting;
518 } __attribute__ ((packed)); 522 } __attribute__ ((packed));
519 523
520 #define HCI_OP_WRITE_VOICE_SETTING 0x0c26 524 #define HCI_OP_WRITE_VOICE_SETTING 0x0c26
521 struct hci_cp_write_voice_setting { 525 struct hci_cp_write_voice_setting {
522 __le16 voice_setting; 526 __le16 voice_setting;
523 } __attribute__ ((packed)); 527 } __attribute__ ((packed));
524 528
525 #define HCI_OP_HOST_BUFFER_SIZE 0x0c33 529 #define HCI_OP_HOST_BUFFER_SIZE 0x0c33
526 struct hci_cp_host_buffer_size { 530 struct hci_cp_host_buffer_size {
527 __le16 acl_mtu; 531 __le16 acl_mtu;
528 __u8 sco_mtu; 532 __u8 sco_mtu;
529 __le16 acl_max_pkt; 533 __le16 acl_max_pkt;
530 __le16 sco_max_pkt; 534 __le16 sco_max_pkt;
531 } __attribute__ ((packed)); 535 } __attribute__ ((packed));
532 536
533 #define HCI_OP_READ_SSP_MODE 0x0c55 537 #define HCI_OP_READ_SSP_MODE 0x0c55
534 struct hci_rp_read_ssp_mode { 538 struct hci_rp_read_ssp_mode {
535 __u8 status; 539 __u8 status;
536 __u8 mode; 540 __u8 mode;
537 } __attribute__ ((packed)); 541 } __attribute__ ((packed));
538 542
539 #define HCI_OP_WRITE_SSP_MODE 0x0c56 543 #define HCI_OP_WRITE_SSP_MODE 0x0c56
540 struct hci_cp_write_ssp_mode { 544 struct hci_cp_write_ssp_mode {
541 __u8 mode; 545 __u8 mode;
542 } __attribute__ ((packed)); 546 } __attribute__ ((packed));
543 547
544 #define HCI_OP_READ_LOCAL_VERSION 0x1001 548 #define HCI_OP_READ_LOCAL_VERSION 0x1001
545 struct hci_rp_read_local_version { 549 struct hci_rp_read_local_version {
546 __u8 status; 550 __u8 status;
547 __u8 hci_ver; 551 __u8 hci_ver;
548 __le16 hci_rev; 552 __le16 hci_rev;
549 __u8 lmp_ver; 553 __u8 lmp_ver;
550 __le16 manufacturer; 554 __le16 manufacturer;
551 __le16 lmp_subver; 555 __le16 lmp_subver;
552 } __attribute__ ((packed)); 556 } __attribute__ ((packed));
553 557
554 #define HCI_OP_READ_LOCAL_COMMANDS 0x1002 558 #define HCI_OP_READ_LOCAL_COMMANDS 0x1002
555 struct hci_rp_read_local_commands { 559 struct hci_rp_read_local_commands {
556 __u8 status; 560 __u8 status;
557 __u8 commands[64]; 561 __u8 commands[64];
558 } __attribute__ ((packed)); 562 } __attribute__ ((packed));
559 563
560 #define HCI_OP_READ_LOCAL_FEATURES 0x1003 564 #define HCI_OP_READ_LOCAL_FEATURES 0x1003
561 struct hci_rp_read_local_features { 565 struct hci_rp_read_local_features {
562 __u8 status; 566 __u8 status;
563 __u8 features[8]; 567 __u8 features[8];
564 } __attribute__ ((packed)); 568 } __attribute__ ((packed));
565 569
566 #define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004 570 #define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004
567 struct hci_rp_read_local_ext_features { 571 struct hci_rp_read_local_ext_features {
568 __u8 status; 572 __u8 status;
569 __u8 page; 573 __u8 page;
570 __u8 max_page; 574 __u8 max_page;
571 __u8 features[8]; 575 __u8 features[8];
572 } __attribute__ ((packed)); 576 } __attribute__ ((packed));
573 577
574 #define HCI_OP_READ_BUFFER_SIZE 0x1005 578 #define HCI_OP_READ_BUFFER_SIZE 0x1005
575 struct hci_rp_read_buffer_size { 579 struct hci_rp_read_buffer_size {
576 __u8 status; 580 __u8 status;
577 __le16 acl_mtu; 581 __le16 acl_mtu;
578 __u8 sco_mtu; 582 __u8 sco_mtu;
579 __le16 acl_max_pkt; 583 __le16 acl_max_pkt;
580 __le16 sco_max_pkt; 584 __le16 sco_max_pkt;
581 } __attribute__ ((packed)); 585 } __attribute__ ((packed));
582 586
583 #define HCI_OP_READ_BD_ADDR 0x1009 587 #define HCI_OP_READ_BD_ADDR 0x1009
584 struct hci_rp_read_bd_addr { 588 struct hci_rp_read_bd_addr {
585 __u8 status; 589 __u8 status;
586 bdaddr_t bdaddr; 590 bdaddr_t bdaddr;
587 } __attribute__ ((packed)); 591 } __attribute__ ((packed));
588 592
589 /* ---- HCI Events ---- */ 593 /* ---- HCI Events ---- */
590 #define HCI_EV_INQUIRY_COMPLETE 0x01 594 #define HCI_EV_INQUIRY_COMPLETE 0x01
591 595
592 #define HCI_EV_INQUIRY_RESULT 0x02 596 #define HCI_EV_INQUIRY_RESULT 0x02
593 struct inquiry_info { 597 struct inquiry_info {
594 bdaddr_t bdaddr; 598 bdaddr_t bdaddr;
595 __u8 pscan_rep_mode; 599 __u8 pscan_rep_mode;
596 __u8 pscan_period_mode; 600 __u8 pscan_period_mode;
597 __u8 pscan_mode; 601 __u8 pscan_mode;
598 __u8 dev_class[3]; 602 __u8 dev_class[3];
599 __le16 clock_offset; 603 __le16 clock_offset;
600 } __attribute__ ((packed)); 604 } __attribute__ ((packed));
601 605
602 #define HCI_EV_CONN_COMPLETE 0x03 606 #define HCI_EV_CONN_COMPLETE 0x03
603 struct hci_ev_conn_complete { 607 struct hci_ev_conn_complete {
604 __u8 status; 608 __u8 status;
605 __le16 handle; 609 __le16 handle;
606 bdaddr_t bdaddr; 610 bdaddr_t bdaddr;
607 __u8 link_type; 611 __u8 link_type;
608 __u8 encr_mode; 612 __u8 encr_mode;
609 } __attribute__ ((packed)); 613 } __attribute__ ((packed));
610 614
611 #define HCI_EV_CONN_REQUEST 0x04 615 #define HCI_EV_CONN_REQUEST 0x04
612 struct hci_ev_conn_request { 616 struct hci_ev_conn_request {
613 bdaddr_t bdaddr; 617 bdaddr_t bdaddr;
614 __u8 dev_class[3]; 618 __u8 dev_class[3];
615 __u8 link_type; 619 __u8 link_type;
616 } __attribute__ ((packed)); 620 } __attribute__ ((packed));
617 621
618 #define HCI_EV_DISCONN_COMPLETE 0x05 622 #define HCI_EV_DISCONN_COMPLETE 0x05
619 struct hci_ev_disconn_complete { 623 struct hci_ev_disconn_complete {
620 __u8 status; 624 __u8 status;
621 __le16 handle; 625 __le16 handle;
622 __u8 reason; 626 __u8 reason;
623 } __attribute__ ((packed)); 627 } __attribute__ ((packed));
624 628
625 #define HCI_EV_AUTH_COMPLETE 0x06 629 #define HCI_EV_AUTH_COMPLETE 0x06
626 struct hci_ev_auth_complete { 630 struct hci_ev_auth_complete {
627 __u8 status; 631 __u8 status;
628 __le16 handle; 632 __le16 handle;
629 } __attribute__ ((packed)); 633 } __attribute__ ((packed));
630 634
631 #define HCI_EV_REMOTE_NAME 0x07 635 #define HCI_EV_REMOTE_NAME 0x07
632 struct hci_ev_remote_name { 636 struct hci_ev_remote_name {
633 __u8 status; 637 __u8 status;
634 bdaddr_t bdaddr; 638 bdaddr_t bdaddr;
635 __u8 name[248]; 639 __u8 name[248];
636 } __attribute__ ((packed)); 640 } __attribute__ ((packed));
637 641
638 #define HCI_EV_ENCRYPT_CHANGE 0x08 642 #define HCI_EV_ENCRYPT_CHANGE 0x08
639 struct hci_ev_encrypt_change { 643 struct hci_ev_encrypt_change {
640 __u8 status; 644 __u8 status;
641 __le16 handle; 645 __le16 handle;
642 __u8 encrypt; 646 __u8 encrypt;
643 } __attribute__ ((packed)); 647 } __attribute__ ((packed));
644 648
645 #define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09 649 #define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09
646 struct hci_ev_change_link_key_complete { 650 struct hci_ev_change_link_key_complete {
647 __u8 status; 651 __u8 status;
648 __le16 handle; 652 __le16 handle;
649 } __attribute__ ((packed)); 653 } __attribute__ ((packed));
650 654
651 #define HCI_EV_REMOTE_FEATURES 0x0b 655 #define HCI_EV_REMOTE_FEATURES 0x0b
652 struct hci_ev_remote_features { 656 struct hci_ev_remote_features {
653 __u8 status; 657 __u8 status;
654 __le16 handle; 658 __le16 handle;
655 __u8 features[8]; 659 __u8 features[8];
656 } __attribute__ ((packed)); 660 } __attribute__ ((packed));
657 661
658 #define HCI_EV_REMOTE_VERSION 0x0c 662 #define HCI_EV_REMOTE_VERSION 0x0c
659 struct hci_ev_remote_version { 663 struct hci_ev_remote_version {
660 __u8 status; 664 __u8 status;
661 __le16 handle; 665 __le16 handle;
662 __u8 lmp_ver; 666 __u8 lmp_ver;
663 __le16 manufacturer; 667 __le16 manufacturer;
664 __le16 lmp_subver; 668 __le16 lmp_subver;
665 } __attribute__ ((packed)); 669 } __attribute__ ((packed));
666 670
667 #define HCI_EV_QOS_SETUP_COMPLETE 0x0d 671 #define HCI_EV_QOS_SETUP_COMPLETE 0x0d
668 struct hci_qos { 672 struct hci_qos {
669 __u8 service_type; 673 __u8 service_type;
670 __u32 token_rate; 674 __u32 token_rate;
671 __u32 peak_bandwidth; 675 __u32 peak_bandwidth;
672 __u32 latency; 676 __u32 latency;
673 __u32 delay_variation; 677 __u32 delay_variation;
674 } __attribute__ ((packed)); 678 } __attribute__ ((packed));
675 struct hci_ev_qos_setup_complete { 679 struct hci_ev_qos_setup_complete {
676 __u8 status; 680 __u8 status;
677 __le16 handle; 681 __le16 handle;
678 struct hci_qos qos; 682 struct hci_qos qos;
679 } __attribute__ ((packed)); 683 } __attribute__ ((packed));
680 684
681 #define HCI_EV_CMD_COMPLETE 0x0e 685 #define HCI_EV_CMD_COMPLETE 0x0e
682 struct hci_ev_cmd_complete { 686 struct hci_ev_cmd_complete {
683 __u8 ncmd; 687 __u8 ncmd;
684 __le16 opcode; 688 __le16 opcode;
685 } __attribute__ ((packed)); 689 } __attribute__ ((packed));
686 690
687 #define HCI_EV_CMD_STATUS 0x0f 691 #define HCI_EV_CMD_STATUS 0x0f
688 struct hci_ev_cmd_status { 692 struct hci_ev_cmd_status {
689 __u8 status; 693 __u8 status;
690 __u8 ncmd; 694 __u8 ncmd;
691 __le16 opcode; 695 __le16 opcode;
692 } __attribute__ ((packed)); 696 } __attribute__ ((packed));
693 697
694 #define HCI_EV_ROLE_CHANGE 0x12 698 #define HCI_EV_ROLE_CHANGE 0x12
695 struct hci_ev_role_change { 699 struct hci_ev_role_change {
696 __u8 status; 700 __u8 status;
697 bdaddr_t bdaddr; 701 bdaddr_t bdaddr;
698 __u8 role; 702 __u8 role;
699 } __attribute__ ((packed)); 703 } __attribute__ ((packed));
700 704
701 #define HCI_EV_NUM_COMP_PKTS 0x13 705 #define HCI_EV_NUM_COMP_PKTS 0x13
702 struct hci_ev_num_comp_pkts { 706 struct hci_ev_num_comp_pkts {
703 __u8 num_hndl; 707 __u8 num_hndl;
704 /* variable length part */ 708 /* variable length part */
705 } __attribute__ ((packed)); 709 } __attribute__ ((packed));
706 710
707 #define HCI_EV_MODE_CHANGE 0x14 711 #define HCI_EV_MODE_CHANGE 0x14
708 struct hci_ev_mode_change { 712 struct hci_ev_mode_change {
709 __u8 status; 713 __u8 status;
710 __le16 handle; 714 __le16 handle;
711 __u8 mode; 715 __u8 mode;
712 __le16 interval; 716 __le16 interval;
713 } __attribute__ ((packed)); 717 } __attribute__ ((packed));
714 718
715 #define HCI_EV_PIN_CODE_REQ 0x16 719 #define HCI_EV_PIN_CODE_REQ 0x16
716 struct hci_ev_pin_code_req { 720 struct hci_ev_pin_code_req {
717 bdaddr_t bdaddr; 721 bdaddr_t bdaddr;
718 } __attribute__ ((packed)); 722 } __attribute__ ((packed));
719 723
720 #define HCI_EV_LINK_KEY_REQ 0x17 724 #define HCI_EV_LINK_KEY_REQ 0x17
721 struct hci_ev_link_key_req { 725 struct hci_ev_link_key_req {
722 bdaddr_t bdaddr; 726 bdaddr_t bdaddr;
723 } __attribute__ ((packed)); 727 } __attribute__ ((packed));
724 728
725 #define HCI_EV_LINK_KEY_NOTIFY 0x18 729 #define HCI_EV_LINK_KEY_NOTIFY 0x18
726 struct hci_ev_link_key_notify { 730 struct hci_ev_link_key_notify {
727 bdaddr_t bdaddr; 731 bdaddr_t bdaddr;
728 __u8 link_key[16]; 732 __u8 link_key[16];
729 __u8 key_type; 733 __u8 key_type;
730 } __attribute__ ((packed)); 734 } __attribute__ ((packed));
731 735
732 #define HCI_EV_CLOCK_OFFSET 0x1c 736 #define HCI_EV_CLOCK_OFFSET 0x1c
733 struct hci_ev_clock_offset { 737 struct hci_ev_clock_offset {
734 __u8 status; 738 __u8 status;
735 __le16 handle; 739 __le16 handle;
736 __le16 clock_offset; 740 __le16 clock_offset;
737 } __attribute__ ((packed)); 741 } __attribute__ ((packed));
738 742
739 #define HCI_EV_PKT_TYPE_CHANGE 0x1d 743 #define HCI_EV_PKT_TYPE_CHANGE 0x1d
740 struct hci_ev_pkt_type_change { 744 struct hci_ev_pkt_type_change {
741 __u8 status; 745 __u8 status;
742 __le16 handle; 746 __le16 handle;
743 __le16 pkt_type; 747 __le16 pkt_type;
744 } __attribute__ ((packed)); 748 } __attribute__ ((packed));
745 749
746 #define HCI_EV_PSCAN_REP_MODE 0x20 750 #define HCI_EV_PSCAN_REP_MODE 0x20
747 struct hci_ev_pscan_rep_mode { 751 struct hci_ev_pscan_rep_mode {
748 bdaddr_t bdaddr; 752 bdaddr_t bdaddr;
749 __u8 pscan_rep_mode; 753 __u8 pscan_rep_mode;
750 } __attribute__ ((packed)); 754 } __attribute__ ((packed));
751 755
752 #define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 756 #define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
753 struct inquiry_info_with_rssi { 757 struct inquiry_info_with_rssi {
754 bdaddr_t bdaddr; 758 bdaddr_t bdaddr;
755 __u8 pscan_rep_mode; 759 __u8 pscan_rep_mode;
756 __u8 pscan_period_mode; 760 __u8 pscan_period_mode;
757 __u8 dev_class[3]; 761 __u8 dev_class[3];
758 __le16 clock_offset; 762 __le16 clock_offset;
759 __s8 rssi; 763 __s8 rssi;
760 } __attribute__ ((packed)); 764 } __attribute__ ((packed));
761 struct inquiry_info_with_rssi_and_pscan_mode { 765 struct inquiry_info_with_rssi_and_pscan_mode {
762 bdaddr_t bdaddr; 766 bdaddr_t bdaddr;
763 __u8 pscan_rep_mode; 767 __u8 pscan_rep_mode;
764 __u8 pscan_period_mode; 768 __u8 pscan_period_mode;
765 __u8 pscan_mode; 769 __u8 pscan_mode;
766 __u8 dev_class[3]; 770 __u8 dev_class[3];
767 __le16 clock_offset; 771 __le16 clock_offset;
768 __s8 rssi; 772 __s8 rssi;
769 } __attribute__ ((packed)); 773 } __attribute__ ((packed));
770 774
771 #define HCI_EV_REMOTE_EXT_FEATURES 0x23 775 #define HCI_EV_REMOTE_EXT_FEATURES 0x23
772 struct hci_ev_remote_ext_features { 776 struct hci_ev_remote_ext_features {
773 __u8 status; 777 __u8 status;
774 __le16 handle; 778 __le16 handle;
775 __u8 page; 779 __u8 page;
776 __u8 max_page; 780 __u8 max_page;
777 __u8 features[8]; 781 __u8 features[8];
778 } __attribute__ ((packed)); 782 } __attribute__ ((packed));
779 783
780 #define HCI_EV_SYNC_CONN_COMPLETE 0x2c 784 #define HCI_EV_SYNC_CONN_COMPLETE 0x2c
781 struct hci_ev_sync_conn_complete { 785 struct hci_ev_sync_conn_complete {
782 __u8 status; 786 __u8 status;
783 __le16 handle; 787 __le16 handle;
784 bdaddr_t bdaddr; 788 bdaddr_t bdaddr;
785 __u8 link_type; 789 __u8 link_type;
786 __u8 tx_interval; 790 __u8 tx_interval;
787 __u8 retrans_window; 791 __u8 retrans_window;
788 __le16 rx_pkt_len; 792 __le16 rx_pkt_len;
789 __le16 tx_pkt_len; 793 __le16 tx_pkt_len;
790 __u8 air_mode; 794 __u8 air_mode;
791 } __attribute__ ((packed)); 795 } __attribute__ ((packed));
792 796
793 #define HCI_EV_SYNC_CONN_CHANGED 0x2d 797 #define HCI_EV_SYNC_CONN_CHANGED 0x2d
794 struct hci_ev_sync_conn_changed { 798 struct hci_ev_sync_conn_changed {
795 __u8 status; 799 __u8 status;
796 __le16 handle; 800 __le16 handle;
797 __u8 tx_interval; 801 __u8 tx_interval;
798 __u8 retrans_window; 802 __u8 retrans_window;
799 __le16 rx_pkt_len; 803 __le16 rx_pkt_len;
800 __le16 tx_pkt_len; 804 __le16 tx_pkt_len;
801 } __attribute__ ((packed)); 805 } __attribute__ ((packed));
802 806
803 #define HCI_EV_SNIFF_SUBRATE 0x2e 807 #define HCI_EV_SNIFF_SUBRATE 0x2e
804 struct hci_ev_sniff_subrate { 808 struct hci_ev_sniff_subrate {
805 __u8 status; 809 __u8 status;
806 __le16 handle; 810 __le16 handle;
807 __le16 max_tx_latency; 811 __le16 max_tx_latency;
808 __le16 max_rx_latency; 812 __le16 max_rx_latency;
809 __le16 max_remote_timeout; 813 __le16 max_remote_timeout;
810 __le16 max_local_timeout; 814 __le16 max_local_timeout;
811 } __attribute__ ((packed)); 815 } __attribute__ ((packed));
812 816
813 #define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f 817 #define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f
814 struct extended_inquiry_info { 818 struct extended_inquiry_info {
815 bdaddr_t bdaddr; 819 bdaddr_t bdaddr;
816 __u8 pscan_rep_mode; 820 __u8 pscan_rep_mode;
817 __u8 pscan_period_mode; 821 __u8 pscan_period_mode;
818 __u8 dev_class[3]; 822 __u8 dev_class[3];
819 __le16 clock_offset; 823 __le16 clock_offset;
820 __s8 rssi; 824 __s8 rssi;
821 __u8 data[240]; 825 __u8 data[240];
822 } __attribute__ ((packed)); 826 } __attribute__ ((packed));
823 827
824 #define HCI_EV_IO_CAPA_REQUEST 0x31 828 #define HCI_EV_IO_CAPA_REQUEST 0x31
825 struct hci_ev_io_capa_request { 829 struct hci_ev_io_capa_request {
826 bdaddr_t bdaddr; 830 bdaddr_t bdaddr;
827 } __attribute__ ((packed)); 831 } __attribute__ ((packed));
828 832
829 #define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36 833 #define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36
830 struct hci_ev_simple_pair_complete { 834 struct hci_ev_simple_pair_complete {
831 __u8 status; 835 __u8 status;
832 bdaddr_t bdaddr; 836 bdaddr_t bdaddr;
833 } __attribute__ ((packed)); 837 } __attribute__ ((packed));
834 838
835 #define HCI_EV_REMOTE_HOST_FEATURES 0x3d 839 #define HCI_EV_REMOTE_HOST_FEATURES 0x3d
836 struct hci_ev_remote_host_features { 840 struct hci_ev_remote_host_features {
837 bdaddr_t bdaddr; 841 bdaddr_t bdaddr;
838 __u8 features[8]; 842 __u8 features[8];
839 } __attribute__ ((packed)); 843 } __attribute__ ((packed));
840 844
841 /* Internal events generated by Bluetooth stack */ 845 /* Internal events generated by Bluetooth stack */
842 #define HCI_EV_STACK_INTERNAL 0xfd 846 #define HCI_EV_STACK_INTERNAL 0xfd
843 struct hci_ev_stack_internal { 847 struct hci_ev_stack_internal {
844 __u16 type; 848 __u16 type;
845 __u8 data[0]; 849 __u8 data[0];
846 } __attribute__ ((packed)); 850 } __attribute__ ((packed));
847 851
848 #define HCI_EV_SI_DEVICE 0x01 852 #define HCI_EV_SI_DEVICE 0x01
849 struct hci_ev_si_device { 853 struct hci_ev_si_device {
850 __u16 event; 854 __u16 event;
851 __u16 dev_id; 855 __u16 dev_id;
852 } __attribute__ ((packed)); 856 } __attribute__ ((packed));
853 857
854 #define HCI_EV_SI_SECURITY 0x02 858 #define HCI_EV_SI_SECURITY 0x02
855 struct hci_ev_si_security { 859 struct hci_ev_si_security {
856 __u16 event; 860 __u16 event;
857 __u16 proto; 861 __u16 proto;
858 __u16 subproto; 862 __u16 subproto;
859 __u8 incoming; 863 __u8 incoming;
860 } __attribute__ ((packed)); 864 } __attribute__ ((packed));
861 865
862 /* ---- HCI Packet structures ---- */ 866 /* ---- HCI Packet structures ---- */
863 #define HCI_COMMAND_HDR_SIZE 3 867 #define HCI_COMMAND_HDR_SIZE 3
864 #define HCI_EVENT_HDR_SIZE 2 868 #define HCI_EVENT_HDR_SIZE 2
865 #define HCI_ACL_HDR_SIZE 4 869 #define HCI_ACL_HDR_SIZE 4
866 #define HCI_SCO_HDR_SIZE 3 870 #define HCI_SCO_HDR_SIZE 3
867 871
868 struct hci_command_hdr { 872 struct hci_command_hdr {
869 __le16 opcode; /* OCF & OGF */ 873 __le16 opcode; /* OCF & OGF */
870 __u8 plen; 874 __u8 plen;
871 } __attribute__ ((packed)); 875 } __attribute__ ((packed));
872 876
873 struct hci_event_hdr { 877 struct hci_event_hdr {
874 __u8 evt; 878 __u8 evt;
875 __u8 plen; 879 __u8 plen;
876 } __attribute__ ((packed)); 880 } __attribute__ ((packed));
877 881
878 struct hci_acl_hdr { 882 struct hci_acl_hdr {
879 __le16 handle; /* Handle & Flags(PB, BC) */ 883 __le16 handle; /* Handle & Flags(PB, BC) */
880 __le16 dlen; 884 __le16 dlen;
881 } __attribute__ ((packed)); 885 } __attribute__ ((packed));
882 886
883 struct hci_sco_hdr { 887 struct hci_sco_hdr {
884 __le16 handle; 888 __le16 handle;
885 __u8 dlen; 889 __u8 dlen;
886 } __attribute__ ((packed)); 890 } __attribute__ ((packed));
887 891
888 #ifdef __KERNEL__ 892 #ifdef __KERNEL__
889 #include <linux/skbuff.h> 893 #include <linux/skbuff.h>
890 static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) 894 static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
891 { 895 {
892 return (struct hci_event_hdr *) skb->data; 896 return (struct hci_event_hdr *) skb->data;
893 } 897 }
894 898
895 static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb) 899 static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
896 { 900 {
897 return (struct hci_acl_hdr *) skb->data; 901 return (struct hci_acl_hdr *) skb->data;
898 } 902 }
899 903
900 static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) 904 static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
901 { 905 {
902 return (struct hci_sco_hdr *) skb->data; 906 return (struct hci_sco_hdr *) skb->data;
903 } 907 }
904 #endif 908 #endif
905 909
/* Command opcode pack/unpack
 *
 * An HCI opcode is 16 bits: the OpCode Group Field (OGF) in the upper
 * 6 bits and the OpCode Command Field (OCF) in the lower 10 bits.
 *
 * All macro arguments and expansions are fully parenthesized; the
 * previous definitions expanded e.g. hci_opcode_ocf(a | b) as
 * (a | b & 0x03ff), which binds `&` before `|` and yields the wrong
 * group bits. Existing call sites pass simple expressions, so the
 * results are unchanged for them.
 */
#define hci_opcode_pack(ogf, ocf)	((__u16) (((ocf) & 0x03ff) | ((ogf) << 10)))
#define hci_opcode_ogf(op)		((op) >> 10)
#define hci_opcode_ocf(op)		((op) & 0x03ff)

/* ACL handle and flags pack/unpack: 12-bit connection handle in the low
 * bits, PB/BC flags in the top 4 bits; same parenthesization fix. */
#define hci_handle_pack(h, f)	((__u16) (((h) & 0x0fff) | ((f) << 12)))
#define hci_handle(h)		((h) & 0x0fff)
#define hci_flags(h)		((h) >> 12)
915 919
916 /* ---- HCI Sockets ---- */ 920 /* ---- HCI Sockets ---- */
917 921
918 /* Socket options */ 922 /* Socket options */
919 #define HCI_DATA_DIR 1 923 #define HCI_DATA_DIR 1
920 #define HCI_FILTER 2 924 #define HCI_FILTER 2
921 #define HCI_TIME_STAMP 3 925 #define HCI_TIME_STAMP 3
922 926
923 /* CMSG flags */ 927 /* CMSG flags */
924 #define HCI_CMSG_DIR 0x0001 928 #define HCI_CMSG_DIR 0x0001
925 #define HCI_CMSG_TSTAMP 0x0002 929 #define HCI_CMSG_TSTAMP 0x0002
926 930
927 struct sockaddr_hci { 931 struct sockaddr_hci {
928 sa_family_t hci_family; 932 sa_family_t hci_family;
929 unsigned short hci_dev; 933 unsigned short hci_dev;
930 }; 934 };
931 #define HCI_DEV_NONE 0xffff 935 #define HCI_DEV_NONE 0xffff
932 936
933 struct hci_filter { 937 struct hci_filter {
934 unsigned long type_mask; 938 unsigned long type_mask;
935 unsigned long event_mask[2]; 939 unsigned long event_mask[2];
936 __le16 opcode; 940 __le16 opcode;
937 }; 941 };
938 942
939 struct hci_ufilter { 943 struct hci_ufilter {
940 __u32 type_mask; 944 __u32 type_mask;
941 __u32 event_mask[2]; 945 __u32 event_mask[2];
942 __le16 opcode; 946 __le16 opcode;
943 }; 947 };
944 948
945 #define HCI_FLT_TYPE_BITS 31 949 #define HCI_FLT_TYPE_BITS 31
946 #define HCI_FLT_EVENT_BITS 63 950 #define HCI_FLT_EVENT_BITS 63
947 #define HCI_FLT_OGF_BITS 63 951 #define HCI_FLT_OGF_BITS 63
948 #define HCI_FLT_OCF_BITS 127 952 #define HCI_FLT_OCF_BITS 127
949 953
950 /* ---- HCI Ioctl requests structures ---- */ 954 /* ---- HCI Ioctl requests structures ---- */
951 struct hci_dev_stats { 955 struct hci_dev_stats {
952 __u32 err_rx; 956 __u32 err_rx;
953 __u32 err_tx; 957 __u32 err_tx;
954 __u32 cmd_tx; 958 __u32 cmd_tx;
955 __u32 evt_rx; 959 __u32 evt_rx;
956 __u32 acl_tx; 960 __u32 acl_tx;
957 __u32 acl_rx; 961 __u32 acl_rx;
958 __u32 sco_tx; 962 __u32 sco_tx;
959 __u32 sco_rx; 963 __u32 sco_rx;
960 __u32 byte_rx; 964 __u32 byte_rx;
961 __u32 byte_tx; 965 __u32 byte_tx;
962 }; 966 };
963 967
964 struct hci_dev_info { 968 struct hci_dev_info {
965 __u16 dev_id; 969 __u16 dev_id;
966 char name[8]; 970 char name[8];
967 971
968 bdaddr_t bdaddr; 972 bdaddr_t bdaddr;
969 973
970 __u32 flags; 974 __u32 flags;
971 __u8 type; 975 __u8 type;
972 976
973 __u8 features[8]; 977 __u8 features[8];
974 978
975 __u32 pkt_type; 979 __u32 pkt_type;
976 __u32 link_policy; 980 __u32 link_policy;
977 __u32 link_mode; 981 __u32 link_mode;
978 982
979 __u16 acl_mtu; 983 __u16 acl_mtu;
980 __u16 acl_pkts; 984 __u16 acl_pkts;
981 __u16 sco_mtu; 985 __u16 sco_mtu;
982 __u16 sco_pkts; 986 __u16 sco_pkts;
983 987
984 struct hci_dev_stats stat; 988 struct hci_dev_stats stat;
985 }; 989 };
986 990
987 struct hci_conn_info { 991 struct hci_conn_info {
988 __u16 handle; 992 __u16 handle;
989 bdaddr_t bdaddr; 993 bdaddr_t bdaddr;
990 __u8 type; 994 __u8 type;
991 __u8 out; 995 __u8 out;
992 __u16 state; 996 __u16 state;
993 __u32 link_mode; 997 __u32 link_mode;
994 }; 998 };
995 999
996 struct hci_dev_req { 1000 struct hci_dev_req {
997 __u16 dev_id; 1001 __u16 dev_id;
998 __u32 dev_opt; 1002 __u32 dev_opt;
999 }; 1003 };
1000 1004
1001 struct hci_dev_list_req { 1005 struct hci_dev_list_req {
1002 __u16 dev_num; 1006 __u16 dev_num;
1003 struct hci_dev_req dev_req[0]; /* hci_dev_req structures */ 1007 struct hci_dev_req dev_req[0]; /* hci_dev_req structures */
1004 }; 1008 };
1005 1009
1006 struct hci_conn_list_req { 1010 struct hci_conn_list_req {
1007 __u16 dev_id; 1011 __u16 dev_id;
1008 __u16 conn_num; 1012 __u16 conn_num;
1009 struct hci_conn_info conn_info[0]; 1013 struct hci_conn_info conn_info[0];
1010 }; 1014 };
1011 1015
1012 struct hci_conn_info_req { 1016 struct hci_conn_info_req {
1013 bdaddr_t bdaddr; 1017 bdaddr_t bdaddr;
1014 __u8 type; 1018 __u8 type;
1015 struct hci_conn_info conn_info[0]; 1019 struct hci_conn_info conn_info[0];
1016 }; 1020 };
1017 1021
1018 struct hci_auth_info_req { 1022 struct hci_auth_info_req {
1019 bdaddr_t bdaddr; 1023 bdaddr_t bdaddr;
1020 __u8 type; 1024 __u8 type;
1021 }; 1025 };
1022 1026
1023 struct hci_inquiry_req { 1027 struct hci_inquiry_req {
1024 __u16 dev_id; 1028 __u16 dev_id;
1025 __u16 flags; 1029 __u16 flags;
1026 __u8 lap[3]; 1030 __u8 lap[3];
1027 __u8 length; 1031 __u8 length;
1028 __u8 num_rsp; 1032 __u8 num_rsp;
1029 }; 1033 };
1030 #define IREQ_CACHE_FLUSH 0x0001 1034 #define IREQ_CACHE_FLUSH 0x0001
1031 1035
1032 #endif /* __HCI_H */ 1036 #endif /* __HCI_H */
1033 1037
include/net/bluetooth/hci_core.h
1 /* 1 /*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 6
7 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as 8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation; 9 published by the Free Software Foundation;
10 10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED. 22 SOFTWARE IS DISCLAIMED.
23 */ 23 */
24 24
25 #ifndef __HCI_CORE_H 25 #ifndef __HCI_CORE_H
26 #define __HCI_CORE_H 26 #define __HCI_CORE_H
27 27
28 #include <net/bluetooth/hci.h> 28 #include <net/bluetooth/hci.h>
29 29
30 /* HCI upper protocols */ 30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP 0 31 #define HCI_PROTO_L2CAP 0
32 #define HCI_PROTO_SCO 1 32 #define HCI_PROTO_SCO 1
33 33
34 /* HCI Core structures */ 34 /* HCI Core structures */
35 struct inquiry_data { 35 struct inquiry_data {
36 bdaddr_t bdaddr; 36 bdaddr_t bdaddr;
37 __u8 pscan_rep_mode; 37 __u8 pscan_rep_mode;
38 __u8 pscan_period_mode; 38 __u8 pscan_period_mode;
39 __u8 pscan_mode; 39 __u8 pscan_mode;
40 __u8 dev_class[3]; 40 __u8 dev_class[3];
41 __le16 clock_offset; 41 __le16 clock_offset;
42 __s8 rssi; 42 __s8 rssi;
43 __u8 ssp_mode; 43 __u8 ssp_mode;
44 }; 44 };
45 45
46 struct inquiry_entry { 46 struct inquiry_entry {
47 struct inquiry_entry *next; 47 struct inquiry_entry *next;
48 __u32 timestamp; 48 __u32 timestamp;
49 struct inquiry_data data; 49 struct inquiry_data data;
50 }; 50 };
51 51
52 struct inquiry_cache { 52 struct inquiry_cache {
53 spinlock_t lock; 53 spinlock_t lock;
54 __u32 timestamp; 54 __u32 timestamp;
55 struct inquiry_entry *list; 55 struct inquiry_entry *list;
56 }; 56 };
57 57
58 struct hci_conn_hash { 58 struct hci_conn_hash {
59 struct list_head list; 59 struct list_head list;
60 spinlock_t lock; 60 spinlock_t lock;
61 unsigned int acl_num; 61 unsigned int acl_num;
62 unsigned int sco_num; 62 unsigned int sco_num;
63 }; 63 };
64 64
65 struct hci_dev { 65 struct hci_dev {
66 struct list_head list; 66 struct list_head list;
67 spinlock_t lock; 67 spinlock_t lock;
68 atomic_t refcnt; 68 atomic_t refcnt;
69 69
70 char name[8]; 70 char name[8];
71 unsigned long flags; 71 unsigned long flags;
72 __u16 id; 72 __u16 id;
73 __u8 bus; 73 __u8 bus;
74 __u8 dev_type;
74 bdaddr_t bdaddr; 75 bdaddr_t bdaddr;
75 __u8 dev_name[248]; 76 __u8 dev_name[248];
76 __u8 dev_class[3]; 77 __u8 dev_class[3];
77 __u8 features[8]; 78 __u8 features[8];
78 __u8 commands[64]; 79 __u8 commands[64];
79 __u8 ssp_mode; 80 __u8 ssp_mode;
80 __u8 hci_ver; 81 __u8 hci_ver;
81 __u16 hci_rev; 82 __u16 hci_rev;
82 __u16 manufacturer; 83 __u16 manufacturer;
83 __u16 voice_setting; 84 __u16 voice_setting;
84 85
85 __u16 pkt_type; 86 __u16 pkt_type;
86 __u16 esco_type; 87 __u16 esco_type;
87 __u16 link_policy; 88 __u16 link_policy;
88 __u16 link_mode; 89 __u16 link_mode;
89 90
90 __u32 idle_timeout; 91 __u32 idle_timeout;
91 __u16 sniff_min_interval; 92 __u16 sniff_min_interval;
92 __u16 sniff_max_interval; 93 __u16 sniff_max_interval;
93 94
94 unsigned long quirks; 95 unsigned long quirks;
95 96
96 atomic_t cmd_cnt; 97 atomic_t cmd_cnt;
97 unsigned int acl_cnt; 98 unsigned int acl_cnt;
98 unsigned int sco_cnt; 99 unsigned int sco_cnt;
99 100
100 unsigned int acl_mtu; 101 unsigned int acl_mtu;
101 unsigned int sco_mtu; 102 unsigned int sco_mtu;
102 unsigned int acl_pkts; 103 unsigned int acl_pkts;
103 unsigned int sco_pkts; 104 unsigned int sco_pkts;
104 105
105 unsigned long cmd_last_tx; 106 unsigned long cmd_last_tx;
106 unsigned long acl_last_tx; 107 unsigned long acl_last_tx;
107 unsigned long sco_last_tx; 108 unsigned long sco_last_tx;
108 109
109 struct tasklet_struct cmd_task; 110 struct tasklet_struct cmd_task;
110 struct tasklet_struct rx_task; 111 struct tasklet_struct rx_task;
111 struct tasklet_struct tx_task; 112 struct tasklet_struct tx_task;
112 113
113 struct sk_buff_head rx_q; 114 struct sk_buff_head rx_q;
114 struct sk_buff_head raw_q; 115 struct sk_buff_head raw_q;
115 struct sk_buff_head cmd_q; 116 struct sk_buff_head cmd_q;
116 117
117 struct sk_buff *sent_cmd; 118 struct sk_buff *sent_cmd;
118 struct sk_buff *reassembly[3]; 119 struct sk_buff *reassembly[3];
119 120
120 struct mutex req_lock; 121 struct mutex req_lock;
121 wait_queue_head_t req_wait_q; 122 wait_queue_head_t req_wait_q;
122 __u32 req_status; 123 __u32 req_status;
123 __u32 req_result; 124 __u32 req_result;
124 125
125 struct inquiry_cache inq_cache; 126 struct inquiry_cache inq_cache;
126 struct hci_conn_hash conn_hash; 127 struct hci_conn_hash conn_hash;
127 128
128 struct hci_dev_stats stat; 129 struct hci_dev_stats stat;
129 130
130 struct sk_buff_head driver_init; 131 struct sk_buff_head driver_init;
131 132
132 void *driver_data; 133 void *driver_data;
133 void *core_data; 134 void *core_data;
134 135
135 atomic_t promisc; 136 atomic_t promisc;
136 137
137 struct dentry *debugfs; 138 struct dentry *debugfs;
138 139
139 struct device *parent; 140 struct device *parent;
140 struct device dev; 141 struct device dev;
141 142
142 struct rfkill *rfkill; 143 struct rfkill *rfkill;
143 144
144 struct module *owner; 145 struct module *owner;
145 146
146 int (*open)(struct hci_dev *hdev); 147 int (*open)(struct hci_dev *hdev);
147 int (*close)(struct hci_dev *hdev); 148 int (*close)(struct hci_dev *hdev);
148 int (*flush)(struct hci_dev *hdev); 149 int (*flush)(struct hci_dev *hdev);
149 int (*send)(struct sk_buff *skb); 150 int (*send)(struct sk_buff *skb);
150 void (*destruct)(struct hci_dev *hdev); 151 void (*destruct)(struct hci_dev *hdev);
151 void (*notify)(struct hci_dev *hdev, unsigned int evt); 152 void (*notify)(struct hci_dev *hdev, unsigned int evt);
152 int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg); 153 int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
153 }; 154 };
154 155
155 struct hci_conn { 156 struct hci_conn {
156 struct list_head list; 157 struct list_head list;
157 158
158 atomic_t refcnt; 159 atomic_t refcnt;
159 spinlock_t lock; 160 spinlock_t lock;
160 161
161 bdaddr_t dst; 162 bdaddr_t dst;
162 __u16 handle; 163 __u16 handle;
163 __u16 state; 164 __u16 state;
164 __u8 mode; 165 __u8 mode;
165 __u8 type; 166 __u8 type;
166 __u8 out; 167 __u8 out;
167 __u8 attempt; 168 __u8 attempt;
168 __u8 dev_class[3]; 169 __u8 dev_class[3];
169 __u8 features[8]; 170 __u8 features[8];
170 __u8 ssp_mode; 171 __u8 ssp_mode;
171 __u16 interval; 172 __u16 interval;
172 __u16 pkt_type; 173 __u16 pkt_type;
173 __u16 link_policy; 174 __u16 link_policy;
174 __u32 link_mode; 175 __u32 link_mode;
175 __u8 auth_type; 176 __u8 auth_type;
176 __u8 sec_level; 177 __u8 sec_level;
177 __u8 power_save; 178 __u8 power_save;
178 __u16 disc_timeout; 179 __u16 disc_timeout;
179 unsigned long pend; 180 unsigned long pend;
180 181
181 unsigned int sent; 182 unsigned int sent;
182 183
183 struct sk_buff_head data_q; 184 struct sk_buff_head data_q;
184 185
185 struct timer_list disc_timer; 186 struct timer_list disc_timer;
186 struct timer_list idle_timer; 187 struct timer_list idle_timer;
187 188
188 struct work_struct work_add; 189 struct work_struct work_add;
189 struct work_struct work_del; 190 struct work_struct work_del;
190 191
191 struct device dev; 192 struct device dev;
192 atomic_t devref; 193 atomic_t devref;
193 194
194 struct hci_dev *hdev; 195 struct hci_dev *hdev;
195 void *l2cap_data; 196 void *l2cap_data;
196 void *sco_data; 197 void *sco_data;
197 void *priv; 198 void *priv;
198 199
199 struct hci_conn *link; 200 struct hci_conn *link;
200 }; 201 };
201 202
202 extern struct hci_proto *hci_proto[]; 203 extern struct hci_proto *hci_proto[];
203 extern struct list_head hci_dev_list; 204 extern struct list_head hci_dev_list;
204 extern struct list_head hci_cb_list; 205 extern struct list_head hci_cb_list;
205 extern rwlock_t hci_dev_list_lock; 206 extern rwlock_t hci_dev_list_lock;
206 extern rwlock_t hci_cb_list_lock; 207 extern rwlock_t hci_cb_list_lock;
207 208
208 /* ----- Inquiry cache ----- */ 209 /* ----- Inquiry cache ----- */
209 #define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds 210 #define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds
210 #define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds 211 #define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds
211 212
212 #define inquiry_cache_lock(c) spin_lock(&c->lock) 213 #define inquiry_cache_lock(c) spin_lock(&c->lock)
213 #define inquiry_cache_unlock(c) spin_unlock(&c->lock) 214 #define inquiry_cache_unlock(c) spin_unlock(&c->lock)
214 #define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock) 215 #define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
215 #define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock) 216 #define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
216 217
217 static inline void inquiry_cache_init(struct hci_dev *hdev) 218 static inline void inquiry_cache_init(struct hci_dev *hdev)
218 { 219 {
219 struct inquiry_cache *c = &hdev->inq_cache; 220 struct inquiry_cache *c = &hdev->inq_cache;
220 spin_lock_init(&c->lock); 221 spin_lock_init(&c->lock);
221 c->list = NULL; 222 c->list = NULL;
222 } 223 }
223 224
224 static inline int inquiry_cache_empty(struct hci_dev *hdev) 225 static inline int inquiry_cache_empty(struct hci_dev *hdev)
225 { 226 {
226 struct inquiry_cache *c = &hdev->inq_cache; 227 struct inquiry_cache *c = &hdev->inq_cache;
227 return (c->list == NULL); 228 return (c->list == NULL);
228 } 229 }
229 230
230 static inline long inquiry_cache_age(struct hci_dev *hdev) 231 static inline long inquiry_cache_age(struct hci_dev *hdev)
231 { 232 {
232 struct inquiry_cache *c = &hdev->inq_cache; 233 struct inquiry_cache *c = &hdev->inq_cache;
233 return jiffies - c->timestamp; 234 return jiffies - c->timestamp;
234 } 235 }
235 236
236 static inline long inquiry_entry_age(struct inquiry_entry *e) 237 static inline long inquiry_entry_age(struct inquiry_entry *e)
237 { 238 {
238 return jiffies - e->timestamp; 239 return jiffies - e->timestamp;
239 } 240 }
240 241
241 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); 242 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
242 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data); 243 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
243 244
244 /* ----- HCI Connections ----- */ 245 /* ----- HCI Connections ----- */
245 enum { 246 enum {
246 HCI_CONN_AUTH_PEND, 247 HCI_CONN_AUTH_PEND,
247 HCI_CONN_ENCRYPT_PEND, 248 HCI_CONN_ENCRYPT_PEND,
248 HCI_CONN_RSWITCH_PEND, 249 HCI_CONN_RSWITCH_PEND,
249 HCI_CONN_MODE_CHANGE_PEND, 250 HCI_CONN_MODE_CHANGE_PEND,
250 }; 251 };
251 252
252 static inline void hci_conn_hash_init(struct hci_dev *hdev) 253 static inline void hci_conn_hash_init(struct hci_dev *hdev)
253 { 254 {
254 struct hci_conn_hash *h = &hdev->conn_hash; 255 struct hci_conn_hash *h = &hdev->conn_hash;
255 INIT_LIST_HEAD(&h->list); 256 INIT_LIST_HEAD(&h->list);
256 spin_lock_init(&h->lock); 257 spin_lock_init(&h->lock);
257 h->acl_num = 0; 258 h->acl_num = 0;
258 h->sco_num = 0; 259 h->sco_num = 0;
259 } 260 }
260 261
261 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) 262 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
262 { 263 {
263 struct hci_conn_hash *h = &hdev->conn_hash; 264 struct hci_conn_hash *h = &hdev->conn_hash;
264 list_add(&c->list, &h->list); 265 list_add(&c->list, &h->list);
265 if (c->type == ACL_LINK) 266 if (c->type == ACL_LINK)
266 h->acl_num++; 267 h->acl_num++;
267 else 268 else
268 h->sco_num++; 269 h->sco_num++;
269 } 270 }
270 271
271 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) 272 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
272 { 273 {
273 struct hci_conn_hash *h = &hdev->conn_hash; 274 struct hci_conn_hash *h = &hdev->conn_hash;
274 list_del(&c->list); 275 list_del(&c->list);
275 if (c->type == ACL_LINK) 276 if (c->type == ACL_LINK)
276 h->acl_num--; 277 h->acl_num--;
277 else 278 else
278 h->sco_num--; 279 h->sco_num--;
279 } 280 }
280 281
281 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, 282 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
282 __u16 handle) 283 __u16 handle)
283 { 284 {
284 struct hci_conn_hash *h = &hdev->conn_hash; 285 struct hci_conn_hash *h = &hdev->conn_hash;
285 struct list_head *p; 286 struct list_head *p;
286 struct hci_conn *c; 287 struct hci_conn *c;
287 288
288 list_for_each(p, &h->list) { 289 list_for_each(p, &h->list) {
289 c = list_entry(p, struct hci_conn, list); 290 c = list_entry(p, struct hci_conn, list);
290 if (c->handle == handle) 291 if (c->handle == handle)
291 return c; 292 return c;
292 } 293 }
293 return NULL; 294 return NULL;
294 } 295 }
295 296
296 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, 297 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
297 __u8 type, bdaddr_t *ba) 298 __u8 type, bdaddr_t *ba)
298 { 299 {
299 struct hci_conn_hash *h = &hdev->conn_hash; 300 struct hci_conn_hash *h = &hdev->conn_hash;
300 struct list_head *p; 301 struct list_head *p;
301 struct hci_conn *c; 302 struct hci_conn *c;
302 303
303 list_for_each(p, &h->list) { 304 list_for_each(p, &h->list) {
304 c = list_entry(p, struct hci_conn, list); 305 c = list_entry(p, struct hci_conn, list);
305 if (c->type == type && !bacmp(&c->dst, ba)) 306 if (c->type == type && !bacmp(&c->dst, ba))
306 return c; 307 return c;
307 } 308 }
308 return NULL; 309 return NULL;
309 } 310 }
310 311
311 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, 312 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
312 __u8 type, __u16 state) 313 __u8 type, __u16 state)
313 { 314 {
314 struct hci_conn_hash *h = &hdev->conn_hash; 315 struct hci_conn_hash *h = &hdev->conn_hash;
315 struct list_head *p; 316 struct list_head *p;
316 struct hci_conn *c; 317 struct hci_conn *c;
317 318
318 list_for_each(p, &h->list) { 319 list_for_each(p, &h->list) {
319 c = list_entry(p, struct hci_conn, list); 320 c = list_entry(p, struct hci_conn, list);
320 if (c->type == type && c->state == state) 321 if (c->type == type && c->state == state)
321 return c; 322 return c;
322 } 323 }
323 return NULL; 324 return NULL;
324 } 325 }
325 326
326 void hci_acl_connect(struct hci_conn *conn); 327 void hci_acl_connect(struct hci_conn *conn);
327 void hci_acl_disconn(struct hci_conn *conn, __u8 reason); 328 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
328 void hci_add_sco(struct hci_conn *conn, __u16 handle); 329 void hci_add_sco(struct hci_conn *conn, __u16 handle);
329 void hci_setup_sync(struct hci_conn *conn, __u16 handle); 330 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
330 331
331 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); 332 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
332 int hci_conn_del(struct hci_conn *conn); 333 int hci_conn_del(struct hci_conn *conn);
333 void hci_conn_hash_flush(struct hci_dev *hdev); 334 void hci_conn_hash_flush(struct hci_dev *hdev);
334 void hci_conn_check_pending(struct hci_dev *hdev); 335 void hci_conn_check_pending(struct hci_dev *hdev);
335 336
336 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type); 337 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
337 int hci_conn_check_link_mode(struct hci_conn *conn); 338 int hci_conn_check_link_mode(struct hci_conn *conn);
338 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); 339 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
339 int hci_conn_change_link_key(struct hci_conn *conn); 340 int hci_conn_change_link_key(struct hci_conn *conn);
340 int hci_conn_switch_role(struct hci_conn *conn, __u8 role); 341 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
341 342
342 void hci_conn_enter_active_mode(struct hci_conn *conn); 343 void hci_conn_enter_active_mode(struct hci_conn *conn);
343 void hci_conn_enter_sniff_mode(struct hci_conn *conn); 344 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
344 345
345 void hci_conn_hold_device(struct hci_conn *conn); 346 void hci_conn_hold_device(struct hci_conn *conn);
346 void hci_conn_put_device(struct hci_conn *conn); 347 void hci_conn_put_device(struct hci_conn *conn);
347 348
348 static inline void hci_conn_hold(struct hci_conn *conn) 349 static inline void hci_conn_hold(struct hci_conn *conn)
349 { 350 {
350 atomic_inc(&conn->refcnt); 351 atomic_inc(&conn->refcnt);
351 del_timer(&conn->disc_timer); 352 del_timer(&conn->disc_timer);
352 } 353 }
353 354
354 static inline void hci_conn_put(struct hci_conn *conn) 355 static inline void hci_conn_put(struct hci_conn *conn)
355 { 356 {
356 if (atomic_dec_and_test(&conn->refcnt)) { 357 if (atomic_dec_and_test(&conn->refcnt)) {
357 unsigned long timeo; 358 unsigned long timeo;
358 if (conn->type == ACL_LINK) { 359 if (conn->type == ACL_LINK) {
359 del_timer(&conn->idle_timer); 360 del_timer(&conn->idle_timer);
360 if (conn->state == BT_CONNECTED) { 361 if (conn->state == BT_CONNECTED) {
361 timeo = msecs_to_jiffies(conn->disc_timeout); 362 timeo = msecs_to_jiffies(conn->disc_timeout);
362 if (!conn->out) 363 if (!conn->out)
363 timeo *= 2; 364 timeo *= 2;
364 } else 365 } else
365 timeo = msecs_to_jiffies(10); 366 timeo = msecs_to_jiffies(10);
366 } else 367 } else
367 timeo = msecs_to_jiffies(10); 368 timeo = msecs_to_jiffies(10);
368 mod_timer(&conn->disc_timer, jiffies + timeo); 369 mod_timer(&conn->disc_timer, jiffies + timeo);
369 } 370 }
370 } 371 }
371 372
372 /* ----- HCI Devices ----- */ 373 /* ----- HCI Devices ----- */
373 static inline void __hci_dev_put(struct hci_dev *d) 374 static inline void __hci_dev_put(struct hci_dev *d)
374 { 375 {
375 if (atomic_dec_and_test(&d->refcnt)) 376 if (atomic_dec_and_test(&d->refcnt))
376 d->destruct(d); 377 d->destruct(d);
377 } 378 }
378 379
379 static inline void hci_dev_put(struct hci_dev *d) 380 static inline void hci_dev_put(struct hci_dev *d)
380 { 381 {
381 __hci_dev_put(d); 382 __hci_dev_put(d);
382 module_put(d->owner); 383 module_put(d->owner);
383 } 384 }
384 385
385 static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d) 386 static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
386 { 387 {
387 atomic_inc(&d->refcnt); 388 atomic_inc(&d->refcnt);
388 return d; 389 return d;
389 } 390 }
390 391
391 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) 392 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
392 { 393 {
393 if (try_module_get(d->owner)) 394 if (try_module_get(d->owner))
394 return __hci_dev_hold(d); 395 return __hci_dev_hold(d);
395 return NULL; 396 return NULL;
396 } 397 }
397 398
398 #define hci_dev_lock(d) spin_lock(&d->lock) 399 #define hci_dev_lock(d) spin_lock(&d->lock)
399 #define hci_dev_unlock(d) spin_unlock(&d->lock) 400 #define hci_dev_unlock(d) spin_unlock(&d->lock)
400 #define hci_dev_lock_bh(d) spin_lock_bh(&d->lock) 401 #define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
401 #define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock) 402 #define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
402 403
403 struct hci_dev *hci_dev_get(int index); 404 struct hci_dev *hci_dev_get(int index);
404 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); 405 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
405 406
406 struct hci_dev *hci_alloc_dev(void); 407 struct hci_dev *hci_alloc_dev(void);
407 void hci_free_dev(struct hci_dev *hdev); 408 void hci_free_dev(struct hci_dev *hdev);
408 int hci_register_dev(struct hci_dev *hdev); 409 int hci_register_dev(struct hci_dev *hdev);
409 int hci_unregister_dev(struct hci_dev *hdev); 410 int hci_unregister_dev(struct hci_dev *hdev);
410 int hci_suspend_dev(struct hci_dev *hdev); 411 int hci_suspend_dev(struct hci_dev *hdev);
411 int hci_resume_dev(struct hci_dev *hdev); 412 int hci_resume_dev(struct hci_dev *hdev);
412 int hci_dev_open(__u16 dev); 413 int hci_dev_open(__u16 dev);
413 int hci_dev_close(__u16 dev); 414 int hci_dev_close(__u16 dev);
414 int hci_dev_reset(__u16 dev); 415 int hci_dev_reset(__u16 dev);
415 int hci_dev_reset_stat(__u16 dev); 416 int hci_dev_reset_stat(__u16 dev);
416 int hci_dev_cmd(unsigned int cmd, void __user *arg); 417 int hci_dev_cmd(unsigned int cmd, void __user *arg);
417 int hci_get_dev_list(void __user *arg); 418 int hci_get_dev_list(void __user *arg);
418 int hci_get_dev_info(void __user *arg); 419 int hci_get_dev_info(void __user *arg);
419 int hci_get_conn_list(void __user *arg); 420 int hci_get_conn_list(void __user *arg);
420 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); 421 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
421 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); 422 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
422 int hci_inquiry(void __user *arg); 423 int hci_inquiry(void __user *arg);
423 424
424 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 425 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
425 426
426 int hci_recv_frame(struct sk_buff *skb); 427 int hci_recv_frame(struct sk_buff *skb);
427 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); 428 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
428 429
429 int hci_register_sysfs(struct hci_dev *hdev); 430 int hci_register_sysfs(struct hci_dev *hdev);
430 void hci_unregister_sysfs(struct hci_dev *hdev); 431 void hci_unregister_sysfs(struct hci_dev *hdev);
431 void hci_conn_init_sysfs(struct hci_conn *conn); 432 void hci_conn_init_sysfs(struct hci_conn *conn);
432 void hci_conn_add_sysfs(struct hci_conn *conn); 433 void hci_conn_add_sysfs(struct hci_conn *conn);
433 void hci_conn_del_sysfs(struct hci_conn *conn); 434 void hci_conn_del_sysfs(struct hci_conn *conn);
434 435
435 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev)) 436 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
436 437
437 /* ----- LMP capabilities ----- */ 438 /* ----- LMP capabilities ----- */
438 #define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH) 439 #define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
439 #define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT) 440 #define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
440 #define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF) 441 #define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
441 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR) 442 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
442 #define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO) 443 #define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
443 #define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR) 444 #define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
444 445
445 /* ----- HCI protocols ----- */ 446 /* ----- HCI protocols ----- */
446 struct hci_proto { 447 struct hci_proto {
447 char *name; 448 char *name;
448 unsigned int id; 449 unsigned int id;
449 unsigned long flags; 450 unsigned long flags;
450 451
451 void *priv; 452 void *priv;
452 453
453 int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type); 454 int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
454 int (*connect_cfm) (struct hci_conn *conn, __u8 status); 455 int (*connect_cfm) (struct hci_conn *conn, __u8 status);
455 int (*disconn_ind) (struct hci_conn *conn); 456 int (*disconn_ind) (struct hci_conn *conn);
456 int (*disconn_cfm) (struct hci_conn *conn, __u8 reason); 457 int (*disconn_cfm) (struct hci_conn *conn, __u8 reason);
457 int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags); 458 int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
458 int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb); 459 int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
459 int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); 460 int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
460 }; 461 };
461 462
462 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) 463 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
463 { 464 {
464 register struct hci_proto *hp; 465 register struct hci_proto *hp;
465 int mask = 0; 466 int mask = 0;
466 467
467 hp = hci_proto[HCI_PROTO_L2CAP]; 468 hp = hci_proto[HCI_PROTO_L2CAP];
468 if (hp && hp->connect_ind) 469 if (hp && hp->connect_ind)
469 mask |= hp->connect_ind(hdev, bdaddr, type); 470 mask |= hp->connect_ind(hdev, bdaddr, type);
470 471
471 hp = hci_proto[HCI_PROTO_SCO]; 472 hp = hci_proto[HCI_PROTO_SCO];
472 if (hp && hp->connect_ind) 473 if (hp && hp->connect_ind)
473 mask |= hp->connect_ind(hdev, bdaddr, type); 474 mask |= hp->connect_ind(hdev, bdaddr, type);
474 475
475 return mask; 476 return mask;
476 } 477 }
477 478
478 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) 479 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
479 { 480 {
480 register struct hci_proto *hp; 481 register struct hci_proto *hp;
481 482
482 hp = hci_proto[HCI_PROTO_L2CAP]; 483 hp = hci_proto[HCI_PROTO_L2CAP];
483 if (hp && hp->connect_cfm) 484 if (hp && hp->connect_cfm)
484 hp->connect_cfm(conn, status); 485 hp->connect_cfm(conn, status);
485 486
486 hp = hci_proto[HCI_PROTO_SCO]; 487 hp = hci_proto[HCI_PROTO_SCO];
487 if (hp && hp->connect_cfm) 488 if (hp && hp->connect_cfm)
488 hp->connect_cfm(conn, status); 489 hp->connect_cfm(conn, status);
489 } 490 }
490 491
491 static inline int hci_proto_disconn_ind(struct hci_conn *conn) 492 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
492 { 493 {
493 register struct hci_proto *hp; 494 register struct hci_proto *hp;
494 int reason = 0x13; 495 int reason = 0x13;
495 496
496 hp = hci_proto[HCI_PROTO_L2CAP]; 497 hp = hci_proto[HCI_PROTO_L2CAP];
497 if (hp && hp->disconn_ind) 498 if (hp && hp->disconn_ind)
498 reason = hp->disconn_ind(conn); 499 reason = hp->disconn_ind(conn);
499 500
500 hp = hci_proto[HCI_PROTO_SCO]; 501 hp = hci_proto[HCI_PROTO_SCO];
501 if (hp && hp->disconn_ind) 502 if (hp && hp->disconn_ind)
502 reason = hp->disconn_ind(conn); 503 reason = hp->disconn_ind(conn);
503 504
504 return reason; 505 return reason;
505 } 506 }
506 507
507 static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) 508 static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
508 { 509 {
509 register struct hci_proto *hp; 510 register struct hci_proto *hp;
510 511
511 hp = hci_proto[HCI_PROTO_L2CAP]; 512 hp = hci_proto[HCI_PROTO_L2CAP];
512 if (hp && hp->disconn_cfm) 513 if (hp && hp->disconn_cfm)
513 hp->disconn_cfm(conn, reason); 514 hp->disconn_cfm(conn, reason);
514 515
515 hp = hci_proto[HCI_PROTO_SCO]; 516 hp = hci_proto[HCI_PROTO_SCO];
516 if (hp && hp->disconn_cfm) 517 if (hp && hp->disconn_cfm)
517 hp->disconn_cfm(conn, reason); 518 hp->disconn_cfm(conn, reason);
518 } 519 }
519 520
520 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) 521 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
521 { 522 {
522 register struct hci_proto *hp; 523 register struct hci_proto *hp;
523 __u8 encrypt; 524 __u8 encrypt;
524 525
525 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 526 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
526 return; 527 return;
527 528
528 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; 529 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
529 530
530 hp = hci_proto[HCI_PROTO_L2CAP]; 531 hp = hci_proto[HCI_PROTO_L2CAP];
531 if (hp && hp->security_cfm) 532 if (hp && hp->security_cfm)
532 hp->security_cfm(conn, status, encrypt); 533 hp->security_cfm(conn, status, encrypt);
533 534
534 hp = hci_proto[HCI_PROTO_SCO]; 535 hp = hci_proto[HCI_PROTO_SCO];
535 if (hp && hp->security_cfm) 536 if (hp && hp->security_cfm)
536 hp->security_cfm(conn, status, encrypt); 537 hp->security_cfm(conn, status, encrypt);
537 } 538 }
538 539
539 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) 540 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
540 { 541 {
541 register struct hci_proto *hp; 542 register struct hci_proto *hp;
542 543
543 hp = hci_proto[HCI_PROTO_L2CAP]; 544 hp = hci_proto[HCI_PROTO_L2CAP];
544 if (hp && hp->security_cfm) 545 if (hp && hp->security_cfm)
545 hp->security_cfm(conn, status, encrypt); 546 hp->security_cfm(conn, status, encrypt);
546 547
547 hp = hci_proto[HCI_PROTO_SCO]; 548 hp = hci_proto[HCI_PROTO_SCO];
548 if (hp && hp->security_cfm) 549 if (hp && hp->security_cfm)
549 hp->security_cfm(conn, status, encrypt); 550 hp->security_cfm(conn, status, encrypt);
550 } 551 }
551 552
552 int hci_register_proto(struct hci_proto *hproto); 553 int hci_register_proto(struct hci_proto *hproto);
553 int hci_unregister_proto(struct hci_proto *hproto); 554 int hci_unregister_proto(struct hci_proto *hproto);
554 555
555 /* ----- HCI callbacks ----- */ 556 /* ----- HCI callbacks ----- */
556 struct hci_cb { 557 struct hci_cb {
557 struct list_head list; 558 struct list_head list;
558 559
559 char *name; 560 char *name;
560 561
561 void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); 562 void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
562 void (*key_change_cfm) (struct hci_conn *conn, __u8 status); 563 void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
563 void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); 564 void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
564 }; 565 };
565 566
566 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) 567 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
567 { 568 {
568 struct list_head *p; 569 struct list_head *p;
569 __u8 encrypt; 570 __u8 encrypt;
570 571
571 hci_proto_auth_cfm(conn, status); 572 hci_proto_auth_cfm(conn, status);
572 573
573 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 574 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
574 return; 575 return;
575 576
576 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; 577 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
577 578
578 read_lock_bh(&hci_cb_list_lock); 579 read_lock_bh(&hci_cb_list_lock);
579 list_for_each(p, &hci_cb_list) { 580 list_for_each(p, &hci_cb_list) {
580 struct hci_cb *cb = list_entry(p, struct hci_cb, list); 581 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
581 if (cb->security_cfm) 582 if (cb->security_cfm)
582 cb->security_cfm(conn, status, encrypt); 583 cb->security_cfm(conn, status, encrypt);
583 } 584 }
584 read_unlock_bh(&hci_cb_list_lock); 585 read_unlock_bh(&hci_cb_list_lock);
585 } 586 }
586 587
587 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) 588 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
588 { 589 {
589 struct list_head *p; 590 struct list_head *p;
590 591
591 if (conn->sec_level == BT_SECURITY_SDP) 592 if (conn->sec_level == BT_SECURITY_SDP)
592 conn->sec_level = BT_SECURITY_LOW; 593 conn->sec_level = BT_SECURITY_LOW;
593 594
594 hci_proto_encrypt_cfm(conn, status, encrypt); 595 hci_proto_encrypt_cfm(conn, status, encrypt);
595 596
596 read_lock_bh(&hci_cb_list_lock); 597 read_lock_bh(&hci_cb_list_lock);
597 list_for_each(p, &hci_cb_list) { 598 list_for_each(p, &hci_cb_list) {
598 struct hci_cb *cb = list_entry(p, struct hci_cb, list); 599 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
599 if (cb->security_cfm) 600 if (cb->security_cfm)
600 cb->security_cfm(conn, status, encrypt); 601 cb->security_cfm(conn, status, encrypt);
601 } 602 }
602 read_unlock_bh(&hci_cb_list_lock); 603 read_unlock_bh(&hci_cb_list_lock);
603 } 604 }
604 605
605 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) 606 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
606 { 607 {
607 struct list_head *p; 608 struct list_head *p;
608 609
609 read_lock_bh(&hci_cb_list_lock); 610 read_lock_bh(&hci_cb_list_lock);
610 list_for_each(p, &hci_cb_list) { 611 list_for_each(p, &hci_cb_list) {
611 struct hci_cb *cb = list_entry(p, struct hci_cb, list); 612 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
612 if (cb->key_change_cfm) 613 if (cb->key_change_cfm)
613 cb->key_change_cfm(conn, status); 614 cb->key_change_cfm(conn, status);
614 } 615 }
615 read_unlock_bh(&hci_cb_list_lock); 616 read_unlock_bh(&hci_cb_list_lock);
616 } 617 }
617 618
618 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) 619 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
619 { 620 {
620 struct list_head *p; 621 struct list_head *p;
621 622
622 read_lock_bh(&hci_cb_list_lock); 623 read_lock_bh(&hci_cb_list_lock);
623 list_for_each(p, &hci_cb_list) { 624 list_for_each(p, &hci_cb_list) {
624 struct hci_cb *cb = list_entry(p, struct hci_cb, list); 625 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
625 if (cb->role_switch_cfm) 626 if (cb->role_switch_cfm)
626 cb->role_switch_cfm(conn, status, role); 627 cb->role_switch_cfm(conn, status, role);
627 } 628 }
628 read_unlock_bh(&hci_cb_list_lock); 629 read_unlock_bh(&hci_cb_list_lock);
629 } 630 }
630 631
631 int hci_register_cb(struct hci_cb *hcb); 632 int hci_register_cb(struct hci_cb *hcb);
632 int hci_unregister_cb(struct hci_cb *hcb); 633 int hci_unregister_cb(struct hci_cb *hcb);
633 634
634 int hci_register_notifier(struct notifier_block *nb); 635 int hci_register_notifier(struct notifier_block *nb);
635 int hci_unregister_notifier(struct notifier_block *nb); 636 int hci_unregister_notifier(struct notifier_block *nb);
636 637
637 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param); 638 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
638 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); 639 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
639 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); 640 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
640 641
641 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); 642 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
642 643
643 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data); 644 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
644 645
645 /* ----- HCI Sockets ----- */ 646 /* ----- HCI Sockets ----- */
646 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); 647 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
647 648
648 /* HCI info for socket */ 649 /* HCI info for socket */
649 #define hci_pi(sk) ((struct hci_pinfo *) sk) 650 #define hci_pi(sk) ((struct hci_pinfo *) sk)
650 651
651 struct hci_pinfo { 652 struct hci_pinfo {
652 struct bt_sock bt; 653 struct bt_sock bt;
653 struct hci_dev *hdev; 654 struct hci_dev *hdev;
654 struct hci_filter filter; 655 struct hci_filter filter;
655 __u32 cmsg_mask; 656 __u32 cmsg_mask;
656 }; 657 };
657 658
658 /* HCI security filter */ 659 /* HCI security filter */
659 #define HCI_SFLT_MAX_OGF 5 660 #define HCI_SFLT_MAX_OGF 5
660 661
661 struct hci_sec_filter { 662 struct hci_sec_filter {
662 __u32 type_mask; 663 __u32 type_mask;
663 __u32 event_mask[2]; 664 __u32 event_mask[2];
664 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; 665 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
665 }; 666 };
666 667
667 /* ----- HCI requests ----- */ 668 /* ----- HCI requests ----- */
668 #define HCI_REQ_DONE 0 669 #define HCI_REQ_DONE 0
669 #define HCI_REQ_PEND 1 670 #define HCI_REQ_PEND 1
670 #define HCI_REQ_CANCELED 2 671 #define HCI_REQ_CANCELED 2
671 672
672 #define hci_req_lock(d) mutex_lock(&d->req_lock) 673 #define hci_req_lock(d) mutex_lock(&d->req_lock)
673 #define hci_req_unlock(d) mutex_unlock(&d->req_lock) 674 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
674 675
675 void hci_req_complete(struct hci_dev *hdev, int result); 676 void hci_req_complete(struct hci_dev *hdev, int result);
676 677
677 #endif /* __HCI_CORE_H */ 678 #endif /* __HCI_CORE_H */
678 679
net/bluetooth/hci_core.c
1 /* 1 /*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 6
7 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as 8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation; 9 published by the Free Software Foundation;
10 10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED. 22 SOFTWARE IS DISCLAIMED.
23 */ 23 */
24 24
25 /* Bluetooth HCI core. */ 25 /* Bluetooth HCI core. */
26 26
27 #include <linux/jiffies.h> 27 #include <linux/jiffies.h>
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/kmod.h> 29 #include <linux/kmod.h>
30 30
31 #include <linux/types.h> 31 #include <linux/types.h>
32 #include <linux/errno.h> 32 #include <linux/errno.h>
33 #include <linux/kernel.h> 33 #include <linux/kernel.h>
34 #include <linux/sched.h> 34 #include <linux/sched.h>
35 #include <linux/slab.h> 35 #include <linux/slab.h>
36 #include <linux/poll.h> 36 #include <linux/poll.h>
37 #include <linux/fcntl.h> 37 #include <linux/fcntl.h>
38 #include <linux/init.h> 38 #include <linux/init.h>
39 #include <linux/skbuff.h> 39 #include <linux/skbuff.h>
40 #include <linux/interrupt.h> 40 #include <linux/interrupt.h>
41 #include <linux/notifier.h> 41 #include <linux/notifier.h>
42 #include <linux/rfkill.h> 42 #include <linux/rfkill.h>
43 #include <net/sock.h> 43 #include <net/sock.h>
44 44
45 #include <asm/system.h> 45 #include <asm/system.h>
46 #include <asm/uaccess.h> 46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h> 47 #include <asm/unaligned.h>
48 48
49 #include <net/bluetooth/bluetooth.h> 49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h> 50 #include <net/bluetooth/hci_core.h>
51 51
52 static void hci_cmd_task(unsigned long arg); 52 static void hci_cmd_task(unsigned long arg);
53 static void hci_rx_task(unsigned long arg); 53 static void hci_rx_task(unsigned long arg);
54 static void hci_tx_task(unsigned long arg); 54 static void hci_tx_task(unsigned long arg);
55 static void hci_notify(struct hci_dev *hdev, int event); 55 static void hci_notify(struct hci_dev *hdev, int event);
56 56
57 static DEFINE_RWLOCK(hci_task_lock); 57 static DEFINE_RWLOCK(hci_task_lock);
58 58
59 /* HCI device list */ 59 /* HCI device list */
60 LIST_HEAD(hci_dev_list); 60 LIST_HEAD(hci_dev_list);
61 DEFINE_RWLOCK(hci_dev_list_lock); 61 DEFINE_RWLOCK(hci_dev_list_lock);
62 62
63 /* HCI callback list */ 63 /* HCI callback list */
64 LIST_HEAD(hci_cb_list); 64 LIST_HEAD(hci_cb_list);
65 DEFINE_RWLOCK(hci_cb_list_lock); 65 DEFINE_RWLOCK(hci_cb_list_lock);
66 66
67 /* HCI protocols */ 67 /* HCI protocols */
68 #define HCI_MAX_PROTO 2 68 #define HCI_MAX_PROTO 2
69 struct hci_proto *hci_proto[HCI_MAX_PROTO]; 69 struct hci_proto *hci_proto[HCI_MAX_PROTO];
70 70
71 /* HCI notifiers list */ 71 /* HCI notifiers list */
72 static ATOMIC_NOTIFIER_HEAD(hci_notifier); 72 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
73 73
74 /* ---- HCI notifications ---- */ 74 /* ---- HCI notifications ---- */
75 75
/* Register a notifier block on the global HCI notifier chain so the
 * caller is told about HCI device events (see hci_notify()). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
80 80
/* Remove a previously registered notifier block from the global HCI
 * notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
85 85
/* Broadcast an HCI device event (@event) to every registered notifier,
 * passing the device itself as the notifier data. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
90 90
91 /* ---- HCI requests ---- */ 91 /* ---- HCI requests ---- */
92 92
93 void hci_req_complete(struct hci_dev *hdev, int result) 93 void hci_req_complete(struct hci_dev *hdev, int result)
94 { 94 {
95 BT_DBG("%s result 0x%2.2x", hdev->name, result); 95 BT_DBG("%s result 0x%2.2x", hdev->name, result);
96 96
97 if (hdev->req_status == HCI_REQ_PEND) { 97 if (hdev->req_status == HCI_REQ_PEND) {
98 hdev->req_result = result; 98 hdev->req_result = result;
99 hdev->req_status = HCI_REQ_DONE; 99 hdev->req_status = HCI_REQ_DONE;
100 wake_up_interruptible(&hdev->req_wait_q); 100 wake_up_interruptible(&hdev->req_wait_q);
101 } 101 }
102 } 102 }
103 103
104 static void hci_req_cancel(struct hci_dev *hdev, int err) 104 static void hci_req_cancel(struct hci_dev *hdev, int err)
105 { 105 {
106 BT_DBG("%s err 0x%2.2x", hdev->name, err); 106 BT_DBG("%s err 0x%2.2x", hdev->name, err);
107 107
108 if (hdev->req_status == HCI_REQ_PEND) { 108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = err; 109 hdev->req_result = err;
110 hdev->req_status = HCI_REQ_CANCELED; 110 hdev->req_status = HCI_REQ_CANCELED;
111 wake_up_interruptible(&hdev->req_wait_q); 111 wake_up_interruptible(&hdev->req_wait_q);
112 } 112 }
113 } 113 }
114 114
/* Execute request and wait for completion.
 *
 * Runs the @req callback (which queues HCI commands) and sleeps up to
 * @timeout jiffies until hci_req_complete()/hci_req_cancel() wakes us.
 * Returns 0 on success, a negative errno on failure, -EINTR if a signal
 * arrived, or -ETIMEDOUT if neither completion nor cancellation came.
 * Caller must hold the request lock (see hci_request()). */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue on the wait queue and change state BEFORE issuing the
	 * request, so a completion arriving immediately is not lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller answered: map the HCI status to an errno. */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* Canceled: req_result holds a positive errno. */
		err = -hdev->req_result;
		break;

	default:
		/* Woke up without completion or cancellation: timeout. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
157 157
158 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 158 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
159 unsigned long opt, __u32 timeout) 159 unsigned long opt, __u32 timeout)
160 { 160 {
161 int ret; 161 int ret;
162 162
163 if (!test_bit(HCI_UP, &hdev->flags)) 163 if (!test_bit(HCI_UP, &hdev->flags))
164 return -ENETDOWN; 164 return -ENETDOWN;
165 165
166 /* Serialize all requests */ 166 /* Serialize all requests */
167 hci_req_lock(hdev); 167 hci_req_lock(hdev);
168 ret = __hci_request(hdev, req, opt, timeout); 168 ret = __hci_request(hdev, req, opt, timeout);
169 hci_req_unlock(hdev); 169 hci_req_unlock(hdev);
170 170
171 return ret; 171 return ret;
172 } 172 }
173 173
/* Request callback: issue an HCI Reset command to the controller.
 * @opt is unused. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
181 181
/* Request callback: run the standard controller bring-up sequence.
 * Flushes any driver-supplied init commands first, then queues the
 * mandatory HCI setup commands (reset, feature/version/buffer reads,
 * address, class, name, voice setting) followed by optional defaults
 * (event filter, page and connection-accept timeouts). @opt is unused.
 * Commands are queued asynchronously; replies are handled in hci_event. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot survive a reset here) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs (0x8000 * 0.625ms) */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
255 255
/* Request callback: write the scan enable setting; @opt carries the
 * inquiry/page scan enable bits. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
265 265
/* Request callback: write the authentication enable setting; @opt
 * carries the desired auth enable value. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
275 275
/* Request callback: write the encryption mode setting; @opt carries
 * the desired encryption mode value. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
285 285
/* Request callback: write the default link policy; @opt carries the
 * link policy bits (converted to little endian on the wire). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
295 295
296 /* Get HCI device by index. 296 /* Get HCI device by index.
297 * Device is held on return. */ 297 * Device is held on return. */
298 struct hci_dev *hci_dev_get(int index) 298 struct hci_dev *hci_dev_get(int index)
299 { 299 {
300 struct hci_dev *hdev = NULL; 300 struct hci_dev *hdev = NULL;
301 struct list_head *p; 301 struct list_head *p;
302 302
303 BT_DBG("%d", index); 303 BT_DBG("%d", index);
304 304
305 if (index < 0) 305 if (index < 0)
306 return NULL; 306 return NULL;
307 307
308 read_lock(&hci_dev_list_lock); 308 read_lock(&hci_dev_list_lock);
309 list_for_each(p, &hci_dev_list) { 309 list_for_each(p, &hci_dev_list) {
310 struct hci_dev *d = list_entry(p, struct hci_dev, list); 310 struct hci_dev *d = list_entry(p, struct hci_dev, list);
311 if (d->id == index) { 311 if (d->id == index) {
312 hdev = hci_dev_hold(d); 312 hdev = hci_dev_hold(d);
313 break; 313 break;
314 } 314 }
315 } 315 }
316 read_unlock(&hci_dev_list_lock); 316 read_unlock(&hci_dev_list_lock);
317 return hdev; 317 return hdev;
318 } 318 }
319 319
320 /* ---- Inquiry support ---- */ 320 /* ---- Inquiry support ---- */
321 static void inquiry_cache_flush(struct hci_dev *hdev) 321 static void inquiry_cache_flush(struct hci_dev *hdev)
322 { 322 {
323 struct inquiry_cache *cache = &hdev->inq_cache; 323 struct inquiry_cache *cache = &hdev->inq_cache;
324 struct inquiry_entry *next = cache->list, *e; 324 struct inquiry_entry *next = cache->list, *e;
325 325
326 BT_DBG("cache %p", cache); 326 BT_DBG("cache %p", cache);
327 327
328 cache->list = NULL; 328 cache->list = NULL;
329 while ((e = next)) { 329 while ((e = next)) {
330 next = e->next; 330 next = e->next;
331 kfree(e); 331 kfree(e);
332 } 332 }
333 } 333 }
334 334
335 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 335 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
336 { 336 {
337 struct inquiry_cache *cache = &hdev->inq_cache; 337 struct inquiry_cache *cache = &hdev->inq_cache;
338 struct inquiry_entry *e; 338 struct inquiry_entry *e;
339 339
340 BT_DBG("cache %p, %s", cache, batostr(bdaddr)); 340 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
341 341
342 for (e = cache->list; e; e = e->next) 342 for (e = cache->list; e; e = e->next)
343 if (!bacmp(&e->data.bdaddr, bdaddr)) 343 if (!bacmp(&e->data.bdaddr, bdaddr))
344 break; 344 break;
345 return e; 345 return e;
346 } 346 }
347 347
348 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data) 348 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
349 { 349 {
350 struct inquiry_cache *cache = &hdev->inq_cache; 350 struct inquiry_cache *cache = &hdev->inq_cache;
351 struct inquiry_entry *e; 351 struct inquiry_entry *e;
352 352
353 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); 353 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
354 354
355 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) { 355 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
356 /* Entry not in the cache. Add new one. */ 356 /* Entry not in the cache. Add new one. */
357 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC))) 357 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
358 return; 358 return;
359 e->next = cache->list; 359 e->next = cache->list;
360 cache->list = e; 360 cache->list = e;
361 } 361 }
362 362
363 memcpy(&e->data, data, sizeof(*data)); 363 memcpy(&e->data, data, sizeof(*data));
364 e->timestamp = jiffies; 364 e->timestamp = jiffies;
365 cache->timestamp = jiffies; 365 cache->timestamp = jiffies;
366 } 366 }
367 367
368 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 368 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
369 { 369 {
370 struct inquiry_cache *cache = &hdev->inq_cache; 370 struct inquiry_cache *cache = &hdev->inq_cache;
371 struct inquiry_info *info = (struct inquiry_info *) buf; 371 struct inquiry_info *info = (struct inquiry_info *) buf;
372 struct inquiry_entry *e; 372 struct inquiry_entry *e;
373 int copied = 0; 373 int copied = 0;
374 374
375 for (e = cache->list; e && copied < num; e = e->next, copied++) { 375 for (e = cache->list; e && copied < num; e = e->next, copied++) {
376 struct inquiry_data *data = &e->data; 376 struct inquiry_data *data = &e->data;
377 bacpy(&info->bdaddr, &data->bdaddr); 377 bacpy(&info->bdaddr, &data->bdaddr);
378 info->pscan_rep_mode = data->pscan_rep_mode; 378 info->pscan_rep_mode = data->pscan_rep_mode;
379 info->pscan_period_mode = data->pscan_period_mode; 379 info->pscan_period_mode = data->pscan_period_mode;
380 info->pscan_mode = data->pscan_mode; 380 info->pscan_mode = data->pscan_mode;
381 memcpy(info->dev_class, data->dev_class, 3); 381 memcpy(info->dev_class, data->dev_class, 3);
382 info->clock_offset = data->clock_offset; 382 info->clock_offset = data->clock_offset;
383 info++; 383 info++;
384 } 384 }
385 385
386 BT_DBG("cache %p, copied %d", cache, copied); 386 BT_DBG("cache %p, copied %d", cache, copied);
387 return copied; 387 return copied;
388 } 388 }
389 389
/* Request callback: start an inquiry using the parameters carried in a
 * struct hci_inquiry_req (passed through @opt). Does nothing if an
 * inquiry is already in progress on this device. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
406 406
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry, then copy
 * the device's inquiry cache back to user space.
 * @arg points to a struct hci_inquiry_req followed by space for the
 * result array. Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	/* Re-run the inquiry if the cache is stale or empty, or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* Inquiry length is in units of 1.28s; ~2s of jiffies per unit. */
	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
466 466
467 /* ---- HCI ioctl helpers ---- */ 467 /* ---- HCI ioctl helpers ---- */
468 468
/* HCIDEVUP ioctl backend: bring an HCI device up.
 * Opens the driver, runs the controller init sequence (unless the
 * device is in raw mode) and marks the device HCI_UP. On init failure
 * all tasks and queues are torn down again. Returns 0 or a negative
 * errno (-ERFKILL if the device is rfkill-blocked, -EALREADY if it is
 * already up, -EIO if the driver open fails). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now
	 * (e.g. 802.11 AMP controllers, until an AMP manager exists). */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the HCI init sequence entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
540 544
/* Bring an HCI device down: cancel any pending request, kill the RX/TX
 * tasklets, flush caches and connections, reset the controller (unless
 * raw), drain all queues and call the driver close hook. Idempotent:
 * returns 0 immediately if the device was not up. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device (short 250ms timeout; best effort on the way down) */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
603 607
604 int hci_dev_close(__u16 dev) 608 int hci_dev_close(__u16 dev)
605 { 609 {
606 struct hci_dev *hdev; 610 struct hci_dev *hdev;
607 int err; 611 int err;
608 612
609 if (!(hdev = hci_dev_get(dev))) 613 if (!(hdev = hci_dev_get(dev)))
610 return -ENODEV; 614 return -ENODEV;
611 err = hci_dev_do_close(hdev); 615 err = hci_dev_do_close(hdev);
612 hci_dev_put(hdev); 616 hci_dev_put(hdev);
613 return err; 617 return err;
614 } 618 }
615 619
/* HCIDEVRESET ioctl backend: drop all queued traffic, flush caches and
 * connections, and issue an HCI Reset to the controller (unless raw).
 * Succeeds trivially if the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet from running while we purge its queues. */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again; zero flow-control counts. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
655 659
656 int hci_dev_reset_stat(__u16 dev) 660 int hci_dev_reset_stat(__u16 dev)
657 { 661 {
658 struct hci_dev *hdev; 662 struct hci_dev *hdev;
659 int ret = 0; 663 int ret = 0;
660 664
661 if (!(hdev = hci_dev_get(dev))) 665 if (!(hdev = hci_dev_get(dev)))
662 return -ENODEV; 666 return -ENODEV;
663 667
664 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 668 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
665 669
666 hci_dev_put(hdev); 670 hci_dev_put(hdev);
667 671
668 return ret; 672 return ret;
669 } 673 }
670 674
/* Backend for the HCISET* device ioctls: copies a struct hci_dev_req
 * from user space and applies the requested setting to the device,
 * either by running an HCI request (auth, encrypt, scan, link policy)
 * or by updating the local device fields directly (link mode, packet
 * type, ACL/SCO MTU). Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count in the low. */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
744 748
745 int hci_get_dev_list(void __user *arg) 749 int hci_get_dev_list(void __user *arg)
746 { 750 {
747 struct hci_dev_list_req *dl; 751 struct hci_dev_list_req *dl;
748 struct hci_dev_req *dr; 752 struct hci_dev_req *dr;
749 struct list_head *p; 753 struct list_head *p;
750 int n = 0, size, err; 754 int n = 0, size, err;
751 __u16 dev_num; 755 __u16 dev_num;
752 756
753 if (get_user(dev_num, (__u16 __user *) arg)) 757 if (get_user(dev_num, (__u16 __user *) arg))
754 return -EFAULT; 758 return -EFAULT;
755 759
756 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 760 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
757 return -EINVAL; 761 return -EINVAL;
758 762
759 size = sizeof(*dl) + dev_num * sizeof(*dr); 763 size = sizeof(*dl) + dev_num * sizeof(*dr);
760 764
761 if (!(dl = kzalloc(size, GFP_KERNEL))) 765 if (!(dl = kzalloc(size, GFP_KERNEL)))
762 return -ENOMEM; 766 return -ENOMEM;
763 767
764 dr = dl->dev_req; 768 dr = dl->dev_req;
765 769
766 read_lock_bh(&hci_dev_list_lock); 770 read_lock_bh(&hci_dev_list_lock);
767 list_for_each(p, &hci_dev_list) { 771 list_for_each(p, &hci_dev_list) {
768 struct hci_dev *hdev; 772 struct hci_dev *hdev;
769 hdev = list_entry(p, struct hci_dev, list); 773 hdev = list_entry(p, struct hci_dev, list);
770 (dr + n)->dev_id = hdev->id; 774 (dr + n)->dev_id = hdev->id;
771 (dr + n)->dev_opt = hdev->flags; 775 (dr + n)->dev_opt = hdev->flags;
772 if (++n >= dev_num) 776 if (++n >= dev_num)
773 break; 777 break;
774 } 778 }
775 read_unlock_bh(&hci_dev_list_lock); 779 read_unlock_bh(&hci_dev_list_lock);
776 780
777 dl->dev_num = n; 781 dl->dev_num = n;
778 size = sizeof(*dl) + n * sizeof(*dr); 782 size = sizeof(*dl) + n * sizeof(*dr);
779 783
780 err = copy_to_user(arg, dl, size); 784 err = copy_to_user(arg, dl, size);
781 kfree(dl); 785 kfree(dl);
782 786
783 return err ? -EFAULT : 0; 787 return err ? -EFAULT : 0;
784 } 788 }
785 789
786 int hci_get_dev_info(void __user *arg) 790 int hci_get_dev_info(void __user *arg)
787 { 791 {
788 struct hci_dev *hdev; 792 struct hci_dev *hdev;
789 struct hci_dev_info di; 793 struct hci_dev_info di;
790 int err = 0; 794 int err = 0;
791 795
792 if (copy_from_user(&di, arg, sizeof(di))) 796 if (copy_from_user(&di, arg, sizeof(di)))
793 return -EFAULT; 797 return -EFAULT;
794 798
795 if (!(hdev = hci_dev_get(di.dev_id))) 799 if (!(hdev = hci_dev_get(di.dev_id)))
796 return -ENODEV; 800 return -ENODEV;
797 801
798 strcpy(di.name, hdev->name); 802 strcpy(di.name, hdev->name);
799 di.bdaddr = hdev->bdaddr; 803 di.bdaddr = hdev->bdaddr;
800 di.type = hdev->bus; 804 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
801 di.flags = hdev->flags; 805 di.flags = hdev->flags;
802 di.pkt_type = hdev->pkt_type; 806 di.pkt_type = hdev->pkt_type;
803 di.acl_mtu = hdev->acl_mtu; 807 di.acl_mtu = hdev->acl_mtu;
804 di.acl_pkts = hdev->acl_pkts; 808 di.acl_pkts = hdev->acl_pkts;
805 di.sco_mtu = hdev->sco_mtu; 809 di.sco_mtu = hdev->sco_mtu;
806 di.sco_pkts = hdev->sco_pkts; 810 di.sco_pkts = hdev->sco_pkts;
807 di.link_policy = hdev->link_policy; 811 di.link_policy = hdev->link_policy;
808 di.link_mode = hdev->link_mode; 812 di.link_mode = hdev->link_mode;
809 813
810 memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); 814 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
811 memcpy(&di.features, &hdev->features, sizeof(di.features)); 815 memcpy(&di.features, &hdev->features, sizeof(di.features));
812 816
813 if (copy_to_user(arg, &di, sizeof(di))) 817 if (copy_to_user(arg, &di, sizeof(di)))
814 err = -EFAULT; 818 err = -EFAULT;
815 819
816 hci_dev_put(hdev); 820 hci_dev_put(hdev);
817 821
818 return err; 822 return err;
819 } 823 }
820 824
821 /* ---- Interface to HCI drivers ---- */ 825 /* ---- Interface to HCI drivers ---- */
822 826
823 static int hci_rfkill_set_block(void *data, bool blocked) 827 static int hci_rfkill_set_block(void *data, bool blocked)
824 { 828 {
825 struct hci_dev *hdev = data; 829 struct hci_dev *hdev = data;
826 830
827 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 831 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
828 832
829 if (!blocked) 833 if (!blocked)
830 return 0; 834 return 0;
831 835
832 hci_dev_do_close(hdev); 836 hci_dev_do_close(hdev);
833 837
834 return 0; 838 return 0;
835 } 839 }
836 840
/* rfkill callbacks: only blocking is handled; unblock is a no-op */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
840 844
841 /* Alloc HCI device */ 845 /* Alloc HCI device */
842 struct hci_dev *hci_alloc_dev(void) 846 struct hci_dev *hci_alloc_dev(void)
843 { 847 {
844 struct hci_dev *hdev; 848 struct hci_dev *hdev;
845 849
846 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); 850 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
847 if (!hdev) 851 if (!hdev)
848 return NULL; 852 return NULL;
849 853
850 skb_queue_head_init(&hdev->driver_init); 854 skb_queue_head_init(&hdev->driver_init);
851 855
852 return hdev; 856 return hdev;
853 } 857 }
854 EXPORT_SYMBOL(hci_alloc_dev); 858 EXPORT_SYMBOL(hci_alloc_dev);
855 859
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
865 869
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* A driver must supply at least open/close/destruct callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by id,
	 * so the first gap in the sequence is the id to use and 'head'
	 * ends up pointing at the insertion position. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Conservative defaults; real values come from the controller
	 * during initialization */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* One reassembly slot per packet type (ACL, SCO, event) */
	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	/* rfkill hookup is best effort; the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
943 947
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global list first so no new user can find it */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Drop the reference taken at registration time */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
974 978
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only broadcasts the HCI_DEV_SUSPEND notification */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
982 986
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only broadcasts the HCI_DEV_RESUME notification */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
990 994
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	/* Frames are only accepted while the device is up or initializing */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1014 1018
/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

/* Feed 'count' bytes of driver data into the per-type reassembly
 * buffers; complete frames are pushed to hci_recv_frame(). The packet
 * header of each frame must arrive in one piece so that the total
 * frame length can be determined up front. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame: compute the full frame length
			 * from the type-specific header */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			/* Track how many bytes are still missing in skb->cb */
			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		/* Copy at most what this fragment provides */
		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame: hand it to the core rx path */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
1098 1102
1099 /* ---- Interface to upper protocols ---- */ 1103 /* ---- Interface to upper protocols ---- */
1100 1104
1101 /* Register/Unregister protocols. 1105 /* Register/Unregister protocols.
1102 * hci_task_lock is used to ensure that no tasks are running. */ 1106 * hci_task_lock is used to ensure that no tasks are running. */
1103 int hci_register_proto(struct hci_proto *hp) 1107 int hci_register_proto(struct hci_proto *hp)
1104 { 1108 {
1105 int err = 0; 1109 int err = 0;
1106 1110
1107 BT_DBG("%p name %s id %d", hp, hp->name, hp->id); 1111 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1108 1112
1109 if (hp->id >= HCI_MAX_PROTO) 1113 if (hp->id >= HCI_MAX_PROTO)
1110 return -EINVAL; 1114 return -EINVAL;
1111 1115
1112 write_lock_bh(&hci_task_lock); 1116 write_lock_bh(&hci_task_lock);
1113 1117
1114 if (!hci_proto[hp->id]) 1118 if (!hci_proto[hp->id])
1115 hci_proto[hp->id] = hp; 1119 hci_proto[hp->id] = hp;
1116 else 1120 else
1117 err = -EEXIST; 1121 err = -EEXIST;
1118 1122
1119 write_unlock_bh(&hci_task_lock); 1123 write_unlock_bh(&hci_task_lock);
1120 1124
1121 return err; 1125 return err;
1122 } 1126 }
1123 EXPORT_SYMBOL(hci_register_proto); 1127 EXPORT_SYMBOL(hci_register_proto);
1124 1128
1125 int hci_unregister_proto(struct hci_proto *hp) 1129 int hci_unregister_proto(struct hci_proto *hp)
1126 { 1130 {
1127 int err = 0; 1131 int err = 0;
1128 1132
1129 BT_DBG("%p name %s id %d", hp, hp->name, hp->id); 1133 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1130 1134
1131 if (hp->id >= HCI_MAX_PROTO) 1135 if (hp->id >= HCI_MAX_PROTO)
1132 return -EINVAL; 1136 return -EINVAL;
1133 1137
1134 write_lock_bh(&hci_task_lock); 1138 write_lock_bh(&hci_task_lock);
1135 1139
1136 if (hci_proto[hp->id]) 1140 if (hci_proto[hp->id])
1137 hci_proto[hp->id] = NULL; 1141 hci_proto[hp->id] = NULL;
1138 else 1142 else
1139 err = -ENOENT; 1143 err = -ENOENT;
1140 1144
1141 write_unlock_bh(&hci_task_lock); 1145 write_unlock_bh(&hci_task_lock);
1142 1146
1143 return err; 1147 return err;
1144 } 1148 }
1145 EXPORT_SYMBOL(hci_unregister_proto); 1149 EXPORT_SYMBOL(hci_unregister_proto);
1146 1150
/* Add a callback structure to the global HCI callback list */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1158 1162
/* Remove a callback structure from the global HCI callback list */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1170 1174
/* Hand one outgoing frame to the driver's send callback, mirroring it
 * to raw HCI sockets first if anyone is listening in promiscuous mode. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1194 1198
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Build the command header followed by the optional parameters */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Queue on cmd_q; the cmd tasklet handles flow control */
	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1227 1231
1228 /* Get data from the previously sent command */ 1232 /* Get data from the previously sent command */
1229 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 1233 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1230 { 1234 {
1231 struct hci_command_hdr *hdr; 1235 struct hci_command_hdr *hdr;
1232 1236
1233 if (!hdev->sent_cmd) 1237 if (!hdev->sent_cmd)
1234 return NULL; 1238 return NULL;
1235 1239
1236 hdr = (void *) hdev->sent_cmd->data; 1240 hdr = (void *) hdev->sent_cmd->data;
1237 1241
1238 if (hdr->opcode != cpu_to_le16(opcode)) 1242 if (hdr->opcode != cpu_to_le16(opcode))
1239 return NULL; 1243 return NULL;
1240 1244
1241 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1245 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1242 1246
1243 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 1247 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1244 } 1248 }
1245 1249
1246 /* Send ACL data */ 1250 /* Send ACL data */
1247 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) 1251 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1248 { 1252 {
1249 struct hci_acl_hdr *hdr; 1253 struct hci_acl_hdr *hdr;
1250 int len = skb->len; 1254 int len = skb->len;
1251 1255
1252 skb_push(skb, HCI_ACL_HDR_SIZE); 1256 skb_push(skb, HCI_ACL_HDR_SIZE);
1253 skb_reset_transport_header(skb); 1257 skb_reset_transport_header(skb);
1254 hdr = (struct hci_acl_hdr *)skb_transport_header(skb); 1258 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1255 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 1259 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1256 hdr->dlen = cpu_to_le16(len); 1260 hdr->dlen = cpu_to_le16(len);
1257 } 1261 }
1258 1262
/* Queue ACL data on the connection's data queue, fragment list and all,
 * and kick the TX tasklet. The first skb gets an ACL_START header, each
 * fragment from frag_list an ACL_CONT header. */
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
1305 1309
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* SCO packets cannot be fragmented; reject oversized payloads */
	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
1335 1339
1336 /* ---- HCI TX task (outgoing data) ---- */ 1340 /* ---- HCI TX task (outgoing data) ---- */
1337 1341
/* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data that has
 * the fewest packets in flight (simple fairness), and compute its send
 * quota from the remaining controller buffer credits. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Split the available buffer credits evenly among the
		 * eligible connections; always grant at least one */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1376 1380
1377 static inline void hci_acl_tx_to(struct hci_dev *hdev) 1381 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1378 { 1382 {
1379 struct hci_conn_hash *h = &hdev->conn_hash; 1383 struct hci_conn_hash *h = &hdev->conn_hash;
1380 struct list_head *p; 1384 struct list_head *p;
1381 struct hci_conn *c; 1385 struct hci_conn *c;
1382 1386
1383 BT_ERR("%s ACL tx timeout", hdev->name); 1387 BT_ERR("%s ACL tx timeout", hdev->name);
1384 1388
1385 /* Kill stalled connections */ 1389 /* Kill stalled connections */
1386 list_for_each(p, &h->list) { 1390 list_for_each(p, &h->list) {
1387 c = list_entry(p, struct hci_conn, list); 1391 c = list_entry(p, struct hci_conn, list);
1388 if (c->type == ACL_LINK && c->sent) { 1392 if (c->type == ACL_LINK && c->sent) {
1389 BT_ERR("%s killing stalled ACL connection %s", 1393 BT_ERR("%s killing stalled ACL connection %s",
1390 hdev->name, batostr(&c->dst)); 1394 hdev->name, batostr(&c->dst));
1391 hci_acl_disconn(c, 0x13); 1395 hci_acl_disconn(c, 0x13);
1392 } 1396 }
1393 } 1397 }
1394 } 1398 }
1395 1399
1396 static inline void hci_sched_acl(struct hci_dev *hdev) 1400 static inline void hci_sched_acl(struct hci_dev *hdev)
1397 { 1401 {
1398 struct hci_conn *conn; 1402 struct hci_conn *conn;
1399 struct sk_buff *skb; 1403 struct sk_buff *skb;
1400 int quote; 1404 int quote;
1401 1405
1402 BT_DBG("%s", hdev->name); 1406 BT_DBG("%s", hdev->name);
1403 1407
1404 if (!test_bit(HCI_RAW, &hdev->flags)) { 1408 if (!test_bit(HCI_RAW, &hdev->flags)) {
1405 /* ACL tx timeout must be longer than maximum 1409 /* ACL tx timeout must be longer than maximum
1406 * link supervision timeout (40.9 seconds) */ 1410 * link supervision timeout (40.9 seconds) */
1407 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) 1411 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1408 hci_acl_tx_to(hdev); 1412 hci_acl_tx_to(hdev);
1409 } 1413 }
1410 1414
1411 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { 1415 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1412 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 1416 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1413 BT_DBG("skb %p len %d", skb, skb->len); 1417 BT_DBG("skb %p len %d", skb, skb->len);
1414 1418
1415 hci_conn_enter_active_mode(conn); 1419 hci_conn_enter_active_mode(conn);
1416 1420
1417 hci_send_frame(skb); 1421 hci_send_frame(skb);
1418 hdev->acl_last_tx = jiffies; 1422 hdev->acl_last_tx = jiffies;
1419 1423
1420 hdev->acl_cnt--; 1424 hdev->acl_cnt--;
1421 conn->sent++; 1425 conn->sent++;
1422 } 1426 }
1423 } 1427 }
1424 } 1428 }
1425 1429
1426 /* Schedule SCO */ 1430 /* Schedule SCO */
1427 static inline void hci_sched_sco(struct hci_dev *hdev) 1431 static inline void hci_sched_sco(struct hci_dev *hdev)
1428 { 1432 {
1429 struct hci_conn *conn; 1433 struct hci_conn *conn;
1430 struct sk_buff *skb; 1434 struct sk_buff *skb;
1431 int quote; 1435 int quote;
1432 1436
1433 BT_DBG("%s", hdev->name); 1437 BT_DBG("%s", hdev->name);
1434 1438
1435 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { 1439 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1436 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 1440 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1437 BT_DBG("skb %p len %d", skb, skb->len); 1441 BT_DBG("skb %p len %d", skb, skb->len);
1438 hci_send_frame(skb); 1442 hci_send_frame(skb);
1439 1443
1440 conn->sent++; 1444 conn->sent++;
1441 if (conn->sent == ~0) 1445 if (conn->sent == ~0)
1442 conn->sent = 0; 1446 conn->sent = 0;
1443 } 1447 }
1444 } 1448 }
1445 } 1449 }
1446 1450
1447 static inline void hci_sched_esco(struct hci_dev *hdev) 1451 static inline void hci_sched_esco(struct hci_dev *hdev)
1448 { 1452 {
1449 struct hci_conn *conn; 1453 struct hci_conn *conn;
1450 struct sk_buff *skb; 1454 struct sk_buff *skb;
1451 int quote; 1455 int quote;
1452 1456
1453 BT_DBG("%s", hdev->name); 1457 BT_DBG("%s", hdev->name);
1454 1458
1455 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { 1459 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1456 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 1460 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1457 BT_DBG("skb %p len %d", skb, skb->len); 1461 BT_DBG("skb %p len %d", skb, skb->len);
1458 hci_send_frame(skb); 1462 hci_send_frame(skb);
1459 1463
1460 conn->sent++; 1464 conn->sent++;
1461 if (conn->sent == ~0) 1465 if (conn->sent == ~0)
1462 conn->sent = 0; 1466 conn->sent = 0;
1463 } 1467 }
1464 } 1468 }
1465 } 1469 }
1466 1470
1467 static void hci_tx_task(unsigned long arg) 1471 static void hci_tx_task(unsigned long arg)
1468 { 1472 {
1469 struct hci_dev *hdev = (struct hci_dev *) arg; 1473 struct hci_dev *hdev = (struct hci_dev *) arg;
1470 struct sk_buff *skb; 1474 struct sk_buff *skb;
1471 1475
1472 read_lock(&hci_task_lock); 1476 read_lock(&hci_task_lock);
1473 1477
1474 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt); 1478 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1475 1479
1476 /* Schedule queues and send stuff to HCI driver */ 1480 /* Schedule queues and send stuff to HCI driver */
1477 1481
1478 hci_sched_acl(hdev); 1482 hci_sched_acl(hdev);
1479 1483
1480 hci_sched_sco(hdev); 1484 hci_sched_sco(hdev);
1481 1485
1482 hci_sched_esco(hdev); 1486 hci_sched_esco(hdev);
1483 1487
1484 /* Send next queued raw (unknown type) packet */ 1488 /* Send next queued raw (unknown type) packet */
1485 while ((skb = skb_dequeue(&hdev->raw_q))) 1489 while ((skb = skb_dequeue(&hdev->raw_q)))
1486 hci_send_frame(skb); 1490 hci_send_frame(skb);
1487 1491
1488 read_unlock(&hci_task_lock); 1492 read_unlock(&hci_task_lock);
1489 } 1493 }
1490 1494
1491 /* ----- HCI RX task (incoming data proccessing) ----- */ 1495 /* ----- HCI RX task (incoming data proccessing) ----- */
1492 1496
1493 /* ACL data packet */ 1497 /* ACL data packet */
1494 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 1498 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1495 { 1499 {
1496 struct hci_acl_hdr *hdr = (void *) skb->data; 1500 struct hci_acl_hdr *hdr = (void *) skb->data;
1497 struct hci_conn *conn; 1501 struct hci_conn *conn;
1498 __u16 handle, flags; 1502 __u16 handle, flags;
1499 1503
1500 skb_pull(skb, HCI_ACL_HDR_SIZE); 1504 skb_pull(skb, HCI_ACL_HDR_SIZE);
1501 1505
1502 handle = __le16_to_cpu(hdr->handle); 1506 handle = __le16_to_cpu(hdr->handle);
1503 flags = hci_flags(handle); 1507 flags = hci_flags(handle);
1504 handle = hci_handle(handle); 1508 handle = hci_handle(handle);
1505 1509
1506 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); 1510 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1507 1511
1508 hdev->stat.acl_rx++; 1512 hdev->stat.acl_rx++;
1509 1513
1510 hci_dev_lock(hdev); 1514 hci_dev_lock(hdev);
1511 conn = hci_conn_hash_lookup_handle(hdev, handle); 1515 conn = hci_conn_hash_lookup_handle(hdev, handle);
1512 hci_dev_unlock(hdev); 1516 hci_dev_unlock(hdev);
1513 1517
1514 if (conn) { 1518 if (conn) {
1515 register struct hci_proto *hp; 1519 register struct hci_proto *hp;
1516 1520
1517 hci_conn_enter_active_mode(conn); 1521 hci_conn_enter_active_mode(conn);
1518 1522
1519 /* Send to upper protocol */ 1523 /* Send to upper protocol */
1520 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) { 1524 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1521 hp->recv_acldata(conn, skb, flags); 1525 hp->recv_acldata(conn, skb, flags);
1522 return; 1526 return;
1523 } 1527 }
1524 } else { 1528 } else {
1525 BT_ERR("%s ACL packet for unknown connection handle %d", 1529 BT_ERR("%s ACL packet for unknown connection handle %d",
1526 hdev->name, handle); 1530 hdev->name, handle);
1527 } 1531 }
1528 1532
1529 kfree_skb(skb); 1533 kfree_skb(skb);
1530 } 1534 }
1531 1535
1532 /* SCO data packet */ 1536 /* SCO data packet */
1533 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 1537 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1534 { 1538 {
1535 struct hci_sco_hdr *hdr = (void *) skb->data; 1539 struct hci_sco_hdr *hdr = (void *) skb->data;
1536 struct hci_conn *conn; 1540 struct hci_conn *conn;
1537 __u16 handle; 1541 __u16 handle;
1538 1542
1539 skb_pull(skb, HCI_SCO_HDR_SIZE); 1543 skb_pull(skb, HCI_SCO_HDR_SIZE);
1540 1544
1541 handle = __le16_to_cpu(hdr->handle); 1545 handle = __le16_to_cpu(hdr->handle);
1542 1546
1543 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle); 1547 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1544 1548
1545 hdev->stat.sco_rx++; 1549 hdev->stat.sco_rx++;
1546 1550
1547 hci_dev_lock(hdev); 1551 hci_dev_lock(hdev);
1548 conn = hci_conn_hash_lookup_handle(hdev, handle); 1552 conn = hci_conn_hash_lookup_handle(hdev, handle);
1549 hci_dev_unlock(hdev); 1553 hci_dev_unlock(hdev);
1550 1554
1551 if (conn) { 1555 if (conn) {
1552 register struct hci_proto *hp; 1556 register struct hci_proto *hp;
1553 1557
1554 /* Send to upper protocol */ 1558 /* Send to upper protocol */
1555 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) { 1559 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1556 hp->recv_scodata(conn, skb); 1560 hp->recv_scodata(conn, skb);
1557 return; 1561 return;
1558 } 1562 }
1559 } else { 1563 } else {
1560 BT_ERR("%s SCO packet for unknown connection handle %d", 1564 BT_ERR("%s SCO packet for unknown connection handle %d",
1561 hdev->name, handle); 1565 hdev->name, handle);
1562 } 1566 }
1563 1567
1564 kfree_skb(skb); 1568 kfree_skb(skb);
1565 } 1569 }
1566 1570
1567 static void hci_rx_task(unsigned long arg) 1571 static void hci_rx_task(unsigned long arg)
1568 { 1572 {
1569 struct hci_dev *hdev = (struct hci_dev *) arg; 1573 struct hci_dev *hdev = (struct hci_dev *) arg;
1570 struct sk_buff *skb; 1574 struct sk_buff *skb;
1571 1575
1572 BT_DBG("%s", hdev->name); 1576 BT_DBG("%s", hdev->name);
1573 1577
1574 read_lock(&hci_task_lock); 1578 read_lock(&hci_task_lock);
1575 1579
1576 while ((skb = skb_dequeue(&hdev->rx_q))) { 1580 while ((skb = skb_dequeue(&hdev->rx_q))) {
1577 if (atomic_read(&hdev->promisc)) { 1581 if (atomic_read(&hdev->promisc)) {
1578 /* Send copy to the sockets */ 1582 /* Send copy to the sockets */
1579 hci_send_to_sock(hdev, skb); 1583 hci_send_to_sock(hdev, skb);
1580 } 1584 }
1581 1585
1582 if (test_bit(HCI_RAW, &hdev->flags)) { 1586 if (test_bit(HCI_RAW, &hdev->flags)) {
1583 kfree_skb(skb); 1587 kfree_skb(skb);
1584 continue; 1588 continue;
1585 } 1589 }
1586 1590
1587 if (test_bit(HCI_INIT, &hdev->flags)) { 1591 if (test_bit(HCI_INIT, &hdev->flags)) {
1588 /* Don't process data packets in this states. */ 1592 /* Don't process data packets in this states. */
1589 switch (bt_cb(skb)->pkt_type) { 1593 switch (bt_cb(skb)->pkt_type) {
1590 case HCI_ACLDATA_PKT: 1594 case HCI_ACLDATA_PKT:
1591 case HCI_SCODATA_PKT: 1595 case HCI_SCODATA_PKT:
1592 kfree_skb(skb); 1596 kfree_skb(skb);
1593 continue; 1597 continue;
1594 } 1598 }
1595 } 1599 }
1596 1600
1597 /* Process frame */ 1601 /* Process frame */
1598 switch (bt_cb(skb)->pkt_type) { 1602 switch (bt_cb(skb)->pkt_type) {
1599 case HCI_EVENT_PKT: 1603 case HCI_EVENT_PKT:
1600 hci_event_packet(hdev, skb); 1604 hci_event_packet(hdev, skb);
1601 break; 1605 break;
1602 1606
1603 case HCI_ACLDATA_PKT: 1607 case HCI_ACLDATA_PKT:
1604 BT_DBG("%s ACL data packet", hdev->name); 1608 BT_DBG("%s ACL data packet", hdev->name);
1605 hci_acldata_packet(hdev, skb); 1609 hci_acldata_packet(hdev, skb);
1606 break; 1610 break;
1607 1611
1608 case HCI_SCODATA_PKT: 1612 case HCI_SCODATA_PKT:
1609 BT_DBG("%s SCO data packet", hdev->name); 1613 BT_DBG("%s SCO data packet", hdev->name);
1610 hci_scodata_packet(hdev, skb); 1614 hci_scodata_packet(hdev, skb);
1611 break; 1615 break;
1612 1616
1613 default: 1617 default:
1614 kfree_skb(skb); 1618 kfree_skb(skb);
1615 break; 1619 break;
1616 } 1620 }
1617 } 1621 }
1618 1622
1619 read_unlock(&hci_task_lock); 1623 read_unlock(&hci_task_lock);
1620 } 1624 }
1621 1625
1622 static void hci_cmd_task(unsigned long arg) 1626 static void hci_cmd_task(unsigned long arg)
1623 { 1627 {
1624 struct hci_dev *hdev = (struct hci_dev *) arg; 1628 struct hci_dev *hdev = (struct hci_dev *) arg;
1625 struct sk_buff *skb; 1629 struct sk_buff *skb;
1626 1630
1627 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); 1631 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1628 1632
1629 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) { 1633 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1630 BT_ERR("%s command tx timeout", hdev->name); 1634 BT_ERR("%s command tx timeout", hdev->name);
1631 atomic_set(&hdev->cmd_cnt, 1); 1635 atomic_set(&hdev->cmd_cnt, 1);
1632 } 1636 }
1633 1637
1634 /* Send queued commands */ 1638 /* Send queued commands */
1635 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) { 1639 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1636 kfree_skb(hdev->sent_cmd); 1640 kfree_skb(hdev->sent_cmd);
1637 1641
1638 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) { 1642 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1639 atomic_dec(&hdev->cmd_cnt); 1643 atomic_dec(&hdev->cmd_cnt);
1640 hci_send_frame(skb); 1644 hci_send_frame(skb);
1641 hdev->cmd_last_tx = jiffies; 1645 hdev->cmd_last_tx = jiffies;
1642 } else { 1646 } else {
1643 skb_queue_head(&hdev->cmd_q, skb); 1647 skb_queue_head(&hdev->cmd_q, skb);
1644 tasklet_schedule(&hdev->cmd_task); 1648 tasklet_schedule(&hdev->cmd_task);
1645 } 1649 }
1646 } 1650 }
1647 } 1651 }
1648 1652
net/bluetooth/hci_sysfs.c
1 /* Bluetooth HCI driver model support. */ 1 /* Bluetooth HCI driver model support. */
2 2
3 #include <linux/kernel.h> 3 #include <linux/kernel.h>
4 #include <linux/init.h> 4 #include <linux/init.h>
5 #include <linux/debugfs.h> 5 #include <linux/debugfs.h>
6 6
7 #include <net/bluetooth/bluetooth.h> 7 #include <net/bluetooth/bluetooth.h>
8 #include <net/bluetooth/hci_core.h> 8 #include <net/bluetooth/hci_core.h>
9 9
10 struct class *bt_class = NULL; 10 struct class *bt_class = NULL;
11 EXPORT_SYMBOL_GPL(bt_class); 11 EXPORT_SYMBOL_GPL(bt_class);
12 12
13 struct dentry *bt_debugfs = NULL; 13 struct dentry *bt_debugfs = NULL;
14 EXPORT_SYMBOL_GPL(bt_debugfs); 14 EXPORT_SYMBOL_GPL(bt_debugfs);
15 15
16 static struct workqueue_struct *bt_workq; 16 static struct workqueue_struct *bt_workq;
17 17
18 static inline char *link_typetostr(int type) 18 static inline char *link_typetostr(int type)
19 { 19 {
20 switch (type) { 20 switch (type) {
21 case ACL_LINK: 21 case ACL_LINK:
22 return "ACL"; 22 return "ACL";
23 case SCO_LINK: 23 case SCO_LINK:
24 return "SCO"; 24 return "SCO";
25 case ESCO_LINK: 25 case ESCO_LINK:
26 return "eSCO"; 26 return "eSCO";
27 default: 27 default:
28 return "UNKNOWN"; 28 return "UNKNOWN";
29 } 29 }
30 } 30 }
31 31
32 static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) 32 static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
33 { 33 {
34 struct hci_conn *conn = dev_get_drvdata(dev); 34 struct hci_conn *conn = dev_get_drvdata(dev);
35 return sprintf(buf, "%s\n", link_typetostr(conn->type)); 35 return sprintf(buf, "%s\n", link_typetostr(conn->type));
36 } 36 }
37 37
38 static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) 38 static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
39 { 39 {
40 struct hci_conn *conn = dev_get_drvdata(dev); 40 struct hci_conn *conn = dev_get_drvdata(dev);
41 bdaddr_t bdaddr; 41 bdaddr_t bdaddr;
42 baswap(&bdaddr, &conn->dst); 42 baswap(&bdaddr, &conn->dst);
43 return sprintf(buf, "%s\n", batostr(&bdaddr)); 43 return sprintf(buf, "%s\n", batostr(&bdaddr));
44 } 44 }
45 45
46 static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) 46 static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
47 { 47 {
48 struct hci_conn *conn = dev_get_drvdata(dev); 48 struct hci_conn *conn = dev_get_drvdata(dev);
49 49
50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
51 conn->features[0], conn->features[1], 51 conn->features[0], conn->features[1],
52 conn->features[2], conn->features[3], 52 conn->features[2], conn->features[3],
53 conn->features[4], conn->features[5], 53 conn->features[4], conn->features[5],
54 conn->features[6], conn->features[7]); 54 conn->features[6], conn->features[7]);
55 } 55 }
56 56
57 #define LINK_ATTR(_name,_mode,_show,_store) \ 57 #define LINK_ATTR(_name,_mode,_show,_store) \
58 struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store) 58 struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store)
59 59
60 static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); 60 static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
61 static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); 61 static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
62 static LINK_ATTR(features, S_IRUGO, show_link_features, NULL); 62 static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);
63 63
64 static struct attribute *bt_link_attrs[] = { 64 static struct attribute *bt_link_attrs[] = {
65 &link_attr_type.attr, 65 &link_attr_type.attr,
66 &link_attr_address.attr, 66 &link_attr_address.attr,
67 &link_attr_features.attr, 67 &link_attr_features.attr,
68 NULL 68 NULL
69 }; 69 };
70 70
71 static struct attribute_group bt_link_group = { 71 static struct attribute_group bt_link_group = {
72 .attrs = bt_link_attrs, 72 .attrs = bt_link_attrs,
73 }; 73 };
74 74
75 static const struct attribute_group *bt_link_groups[] = { 75 static const struct attribute_group *bt_link_groups[] = {
76 &bt_link_group, 76 &bt_link_group,
77 NULL 77 NULL
78 }; 78 };
79 79
80 static void bt_link_release(struct device *dev) 80 static void bt_link_release(struct device *dev)
81 { 81 {
82 void *data = dev_get_drvdata(dev); 82 void *data = dev_get_drvdata(dev);
83 kfree(data); 83 kfree(data);
84 } 84 }
85 85
86 static struct device_type bt_link = { 86 static struct device_type bt_link = {
87 .name = "link", 87 .name = "link",
88 .groups = bt_link_groups, 88 .groups = bt_link_groups,
89 .release = bt_link_release, 89 .release = bt_link_release,
90 }; 90 };
91 91
92 static void add_conn(struct work_struct *work) 92 static void add_conn(struct work_struct *work)
93 { 93 {
94 struct hci_conn *conn = container_of(work, struct hci_conn, work_add); 94 struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
95 struct hci_dev *hdev = conn->hdev; 95 struct hci_dev *hdev = conn->hdev;
96 96
97 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 97 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
98 98
99 dev_set_drvdata(&conn->dev, conn); 99 dev_set_drvdata(&conn->dev, conn);
100 100
101 if (device_add(&conn->dev) < 0) { 101 if (device_add(&conn->dev) < 0) {
102 BT_ERR("Failed to register connection device"); 102 BT_ERR("Failed to register connection device");
103 return; 103 return;
104 } 104 }
105 105
106 hci_dev_hold(hdev); 106 hci_dev_hold(hdev);
107 } 107 }
108 108
/*
 * The rfcomm tty device will possibly retain even when conn
 * is down, and sysfs doesn't support move zombie device,
 * so we should move the device before conn device is destroyed.
 */
static int __match_tty(struct device *dev, void *data)
{
	/* Match any child device whose name starts with "rfcomm" */
	return !strncmp(dev_name(dev), "rfcomm", 6);
}
118 118
119 static void del_conn(struct work_struct *work) 119 static void del_conn(struct work_struct *work)
120 { 120 {
121 struct hci_conn *conn = container_of(work, struct hci_conn, work_del); 121 struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
122 struct hci_dev *hdev = conn->hdev; 122 struct hci_dev *hdev = conn->hdev;
123 123
124 if (!device_is_registered(&conn->dev)) 124 if (!device_is_registered(&conn->dev))
125 return; 125 return;
126 126
127 while (1) { 127 while (1) {
128 struct device *dev; 128 struct device *dev;
129 129
130 dev = device_find_child(&conn->dev, NULL, __match_tty); 130 dev = device_find_child(&conn->dev, NULL, __match_tty);
131 if (!dev) 131 if (!dev)
132 break; 132 break;
133 device_move(dev, NULL, DPM_ORDER_DEV_LAST); 133 device_move(dev, NULL, DPM_ORDER_DEV_LAST);
134 put_device(dev); 134 put_device(dev);
135 } 135 }
136 136
137 device_del(&conn->dev); 137 device_del(&conn->dev);
138 put_device(&conn->dev); 138 put_device(&conn->dev);
139 139
140 hci_dev_put(hdev); 140 hci_dev_put(hdev);
141 } 141 }
142 142
143 void hci_conn_init_sysfs(struct hci_conn *conn) 143 void hci_conn_init_sysfs(struct hci_conn *conn)
144 { 144 {
145 struct hci_dev *hdev = conn->hdev; 145 struct hci_dev *hdev = conn->hdev;
146 146
147 BT_DBG("conn %p", conn); 147 BT_DBG("conn %p", conn);
148 148
149 conn->dev.type = &bt_link; 149 conn->dev.type = &bt_link;
150 conn->dev.class = bt_class; 150 conn->dev.class = bt_class;
151 conn->dev.parent = &hdev->dev; 151 conn->dev.parent = &hdev->dev;
152 152
153 device_initialize(&conn->dev); 153 device_initialize(&conn->dev);
154 154
155 INIT_WORK(&conn->work_add, add_conn); 155 INIT_WORK(&conn->work_add, add_conn);
156 INIT_WORK(&conn->work_del, del_conn); 156 INIT_WORK(&conn->work_del, del_conn);
157 } 157 }
158 158
159 void hci_conn_add_sysfs(struct hci_conn *conn) 159 void hci_conn_add_sysfs(struct hci_conn *conn)
160 { 160 {
161 BT_DBG("conn %p", conn); 161 BT_DBG("conn %p", conn);
162 162
163 queue_work(bt_workq, &conn->work_add); 163 queue_work(bt_workq, &conn->work_add);
164 } 164 }
165 165
166 void hci_conn_del_sysfs(struct hci_conn *conn) 166 void hci_conn_del_sysfs(struct hci_conn *conn)
167 { 167 {
168 BT_DBG("conn %p", conn); 168 BT_DBG("conn %p", conn);
169 169
170 queue_work(bt_workq, &conn->work_del); 170 queue_work(bt_workq, &conn->work_del);
171 } 171 }
172 172
173 static inline char *host_bustostr(int bus) 173 static inline char *host_bustostr(int bus)
174 { 174 {
175 switch (bus) { 175 switch (bus) {
176 case HCI_VIRTUAL: 176 case HCI_VIRTUAL:
177 return "VIRTUAL"; 177 return "VIRTUAL";
178 case HCI_USB: 178 case HCI_USB:
179 return "USB"; 179 return "USB";
180 case HCI_PCCARD: 180 case HCI_PCCARD:
181 return "PCCARD"; 181 return "PCCARD";
182 case HCI_UART: 182 case HCI_UART:
183 return "UART"; 183 return "UART";
184 case HCI_RS232: 184 case HCI_RS232:
185 return "RS232"; 185 return "RS232";
186 case HCI_PCI: 186 case HCI_PCI:
187 return "PCI"; 187 return "PCI";
188 case HCI_SDIO: 188 case HCI_SDIO:
189 return "SDIO"; 189 return "SDIO";
190 default: 190 default:
191 return "UNKNOWN"; 191 return "UNKNOWN";
192 } 192 }
193 } 193 }
194 194
195 static inline char *host_typetostr(int type)
196 {
197 switch (type) {
198 case HCI_BREDR:
199 return "BR/EDR";
200 case HCI_80211:
201 return "802.11";
202 default:
203 return "UNKNOWN";
204 }
205 }
206
195 static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) 207 static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
196 { 208 {
197 struct hci_dev *hdev = dev_get_drvdata(dev); 209 struct hci_dev *hdev = dev_get_drvdata(dev);
198 return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); 210 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
199 } 211 }
200 212
213 static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
214 {
215 struct hci_dev *hdev = dev_get_drvdata(dev);
216 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
217 }
218
201 static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 219 static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
202 { 220 {
203 struct hci_dev *hdev = dev_get_drvdata(dev); 221 struct hci_dev *hdev = dev_get_drvdata(dev);
204 char name[249]; 222 char name[249];
205 int i; 223 int i;
206 224
207 for (i = 0; i < 248; i++) 225 for (i = 0; i < 248; i++)
208 name[i] = hdev->dev_name[i]; 226 name[i] = hdev->dev_name[i];
209 227
210 name[248] = '\0'; 228 name[248] = '\0';
211 return sprintf(buf, "%s\n", name); 229 return sprintf(buf, "%s\n", name);
212 } 230 }
213 231
214 static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) 232 static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
215 { 233 {
216 struct hci_dev *hdev = dev_get_drvdata(dev); 234 struct hci_dev *hdev = dev_get_drvdata(dev);
217 return sprintf(buf, "0x%.2x%.2x%.2x\n", 235 return sprintf(buf, "0x%.2x%.2x%.2x\n",
218 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 236 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
219 } 237 }
220 238
221 static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 239 static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
222 { 240 {
223 struct hci_dev *hdev = dev_get_drvdata(dev); 241 struct hci_dev *hdev = dev_get_drvdata(dev);
224 bdaddr_t bdaddr; 242 bdaddr_t bdaddr;
225 baswap(&bdaddr, &hdev->bdaddr); 243 baswap(&bdaddr, &hdev->bdaddr);
226 return sprintf(buf, "%s\n", batostr(&bdaddr)); 244 return sprintf(buf, "%s\n", batostr(&bdaddr));
227 } 245 }
228 246
229 static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) 247 static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
230 { 248 {
231 struct hci_dev *hdev = dev_get_drvdata(dev); 249 struct hci_dev *hdev = dev_get_drvdata(dev);
232 250
233 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 251 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
234 hdev->features[0], hdev->features[1], 252 hdev->features[0], hdev->features[1],
235 hdev->features[2], hdev->features[3], 253 hdev->features[2], hdev->features[3],
236 hdev->features[4], hdev->features[5], 254 hdev->features[4], hdev->features[5],
237 hdev->features[6], hdev->features[7]); 255 hdev->features[6], hdev->features[7]);
238 } 256 }
239 257
240 static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) 258 static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
241 { 259 {
242 struct hci_dev *hdev = dev_get_drvdata(dev); 260 struct hci_dev *hdev = dev_get_drvdata(dev);
243 return sprintf(buf, "%d\n", hdev->manufacturer); 261 return sprintf(buf, "%d\n", hdev->manufacturer);
244 } 262 }
245 263
246 static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) 264 static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
247 { 265 {
248 struct hci_dev *hdev = dev_get_drvdata(dev); 266 struct hci_dev *hdev = dev_get_drvdata(dev);
249 return sprintf(buf, "%d\n", hdev->hci_ver); 267 return sprintf(buf, "%d\n", hdev->hci_ver);
250 } 268 }
251 269
252 static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) 270 static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
253 { 271 {
254 struct hci_dev *hdev = dev_get_drvdata(dev); 272 struct hci_dev *hdev = dev_get_drvdata(dev);
255 return sprintf(buf, "%d\n", hdev->hci_rev); 273 return sprintf(buf, "%d\n", hdev->hci_rev);
256 } 274 }
257 275
258 static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 276 static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
259 { 277 {
260 struct hci_dev *hdev = dev_get_drvdata(dev); 278 struct hci_dev *hdev = dev_get_drvdata(dev);
261 return sprintf(buf, "%d\n", hdev->idle_timeout); 279 return sprintf(buf, "%d\n", hdev->idle_timeout);
262 } 280 }
263 281
264 static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 282 static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
265 { 283 {
266 struct hci_dev *hdev = dev_get_drvdata(dev); 284 struct hci_dev *hdev = dev_get_drvdata(dev);
267 char *ptr; 285 char *ptr;
268 __u32 val; 286 __u32 val;
269 287
270 val = simple_strtoul(buf, &ptr, 10); 288 val = simple_strtoul(buf, &ptr, 10);
271 if (ptr == buf) 289 if (ptr == buf)
272 return -EINVAL; 290 return -EINVAL;
273 291
274 if (val != 0 && (val < 500 || val > 3600000)) 292 if (val != 0 && (val < 500 || val > 3600000))
275 return -EINVAL; 293 return -EINVAL;
276 294
277 hdev->idle_timeout = val; 295 hdev->idle_timeout = val;
278 296
279 return count; 297 return count;
280 } 298 }
281 299
282 static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) 300 static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
283 { 301 {
284 struct hci_dev *hdev = dev_get_drvdata(dev); 302 struct hci_dev *hdev = dev_get_drvdata(dev);
285 return sprintf(buf, "%d\n", hdev->sniff_max_interval); 303 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
286 } 304 }
287 305
288 static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 306 static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
289 { 307 {
290 struct hci_dev *hdev = dev_get_drvdata(dev); 308 struct hci_dev *hdev = dev_get_drvdata(dev);
291 char *ptr; 309 char *ptr;
292 __u16 val; 310 __u16 val;
293 311
294 val = simple_strtoul(buf, &ptr, 10); 312 val = simple_strtoul(buf, &ptr, 10);
295 if (ptr == buf) 313 if (ptr == buf)
296 return -EINVAL; 314 return -EINVAL;
297 315
298 if (val < 0x0002 || val > 0xFFFE || val % 2) 316 if (val < 0x0002 || val > 0xFFFE || val % 2)
299 return -EINVAL; 317 return -EINVAL;
300 318
301 if (val < hdev->sniff_min_interval) 319 if (val < hdev->sniff_min_interval)
302 return -EINVAL; 320 return -EINVAL;
303 321
304 hdev->sniff_max_interval = val; 322 hdev->sniff_max_interval = val;
305 323
306 return count; 324 return count;
307 } 325 }
308 326
309 static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) 327 static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
310 { 328 {
311 struct hci_dev *hdev = dev_get_drvdata(dev); 329 struct hci_dev *hdev = dev_get_drvdata(dev);
312 return sprintf(buf, "%d\n", hdev->sniff_min_interval); 330 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
313 } 331 }
314 332
315 static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 333 static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
316 { 334 {
317 struct hci_dev *hdev = dev_get_drvdata(dev); 335 struct hci_dev *hdev = dev_get_drvdata(dev);
318 char *ptr; 336 char *ptr;
319 __u16 val; 337 __u16 val;
320 338
321 val = simple_strtoul(buf, &ptr, 10); 339 val = simple_strtoul(buf, &ptr, 10);
322 if (ptr == buf) 340 if (ptr == buf)
323 return -EINVAL; 341 return -EINVAL;
324 342
325 if (val < 0x0002 || val > 0xFFFE || val % 2) 343 if (val < 0x0002 || val > 0xFFFE || val % 2)
326 return -EINVAL; 344 return -EINVAL;
327 345
328 if (val > hdev->sniff_max_interval) 346 if (val > hdev->sniff_max_interval)
329 return -EINVAL; 347 return -EINVAL;
330 348
331 hdev->sniff_min_interval = val; 349 hdev->sniff_min_interval = val;
332 350
333 return count; 351 return count;
334 } 352 }
335 353
/* Read-only identification and capability attributes exported via sysfs.
 * "type" distinguishes primary BR/EDR controllers from 802.11 AMP
 * controllers (Bluetooth 3.0 alternate MAC/PHY support). */
static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);

/* Tunable sniff mode parameters: readable by all, writable by owner. */
static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
				show_idle_timeout, store_idle_timeout);
static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
				show_sniff_max_interval, store_sniff_max_interval);
static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
				show_sniff_min_interval, store_sniff_min_interval);
351 370
/* All attributes published for a Bluetooth host controller device. */
static struct attribute *bt_host_attrs[] = {
	&dev_attr_bus.attr,
	&dev_attr_type.attr,
	&dev_attr_name.attr,
	&dev_attr_class.attr,
	&dev_attr_address.attr,
	&dev_attr_features.attr,
	&dev_attr_manufacturer.attr,
	&dev_attr_hci_version.attr,
	&dev_attr_hci_revision.attr,
	&dev_attr_idle_timeout.attr,
	&dev_attr_sniff_max_interval.attr,
	&dev_attr_sniff_min_interval.attr,
	NULL
};

/* Attribute group wrapping the list above. */
static struct attribute_group bt_host_group = {
	.attrs = bt_host_attrs,
};

/* NULL-terminated group array attached to the bt_host device type. */
static const struct attribute_group *bt_host_groups[] = {
	&bt_host_group,
	NULL
};
375 395
/* Device release callback: frees the hci_dev that was stashed as the
 * device's driver data when the sysfs entry was registered. */
static void bt_host_release(struct device *dev)
{
	kfree(dev_get_drvdata(dev));
}
381 401
/* Device type for Bluetooth host controllers: supplies the sysfs
 * attribute groups and the release hook that frees the hci_dev. */
static struct device_type bt_host = {
	.name    = "host",
	.groups  = bt_host_groups,
	.release = bt_host_release,
};
387 407
388 static int inquiry_cache_open(struct inode *inode, struct file *file) 408 static int inquiry_cache_open(struct inode *inode, struct file *file)
389 { 409 {
390 file->private_data = inode->i_private; 410 file->private_data = inode->i_private;
391 return 0; 411 return 0;
392 } 412 }
393 413
394 static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf, 414 static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
395 size_t count, loff_t *ppos) 415 size_t count, loff_t *ppos)
396 { 416 {
397 struct hci_dev *hdev = file->private_data; 417 struct hci_dev *hdev = file->private_data;
398 struct inquiry_cache *cache = &hdev->inq_cache; 418 struct inquiry_cache *cache = &hdev->inq_cache;
399 struct inquiry_entry *e; 419 struct inquiry_entry *e;
400 char buf[4096]; 420 char buf[4096];
401 int n = 0; 421 int n = 0;
402 422
403 hci_dev_lock_bh(hdev); 423 hci_dev_lock_bh(hdev);
404 424
405 for (e = cache->list; e; e = e->next) { 425 for (e = cache->list; e; e = e->next) {
406 struct inquiry_data *data = &e->data; 426 struct inquiry_data *data = &e->data;
407 bdaddr_t bdaddr; 427 bdaddr_t bdaddr;
408 baswap(&bdaddr, &data->bdaddr); 428 baswap(&bdaddr, &data->bdaddr);
409 n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", 429 n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
410 batostr(&bdaddr), 430 batostr(&bdaddr),
411 data->pscan_rep_mode, data->pscan_period_mode, 431 data->pscan_rep_mode, data->pscan_period_mode,
412 data->pscan_mode, data->dev_class[2], 432 data->pscan_mode, data->dev_class[2],
413 data->dev_class[1], data->dev_class[0], 433 data->dev_class[1], data->dev_class[0],
414 __le16_to_cpu(data->clock_offset), 434 __le16_to_cpu(data->clock_offset),
415 data->rssi, data->ssp_mode, e->timestamp); 435 data->rssi, data->ssp_mode, e->timestamp);
416 } 436 }
417 437
418 hci_dev_unlock_bh(hdev); 438 hci_dev_unlock_bh(hdev);
419 439
420 return simple_read_from_buffer(userbuf, count, ppos, buf, n); 440 return simple_read_from_buffer(userbuf, count, ppos, buf, n);
421 } 441 }
422 442
/* File operations for the "inquiry_cache" debugfs entry. */
static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= inquiry_cache_read,
};
427 447
/*
 * Register the sysfs device and debugfs entries for an HCI device.
 *
 * Sets up the embedded struct device (type, class, parent, name,
 * drvdata) and registers it, then creates a per-device debugfs
 * directory with an "inquiry_cache" dump file.
 *
 * Returns 0 on success or the device_register() error. A missing or
 * failed debugfs setup is not treated as an error: debugfs is
 * optional, so 0 is still returned.
 */
int hci_register_sysfs(struct hci_dev *hdev)
{
	struct device *dev = &hdev->dev;
	int err;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	dev->type = &bt_host;
	dev->class = bt_class;
	dev->parent = hdev->parent;

	dev_set_name(dev, "%s", hdev->name);

	/* drvdata is freed in bt_host_release() when the device goes away */
	dev_set_drvdata(dev, hdev);

	err = device_register(dev);
	if (err < 0)
		return err;

	/* debugfs may be disabled or unmounted; that is not fatal */
	if (!bt_debugfs)
		return 0;

	hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
	if (!hdev->debugfs)
		return 0;

	debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
						hdev, &inquiry_cache_fops);

	return 0;
}
459 479
/*
 * Tear down the sysfs device and debugfs entries for an HCI device.
 *
 * Removes the whole per-device debugfs subtree, then deletes the
 * device from the driver model. The hci_dev itself is freed later
 * by bt_host_release() when the last reference is dropped.
 */
void hci_unregister_sysfs(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	debugfs_remove_recursive(hdev->debugfs);

	device_del(&hdev->dev);
}
468 488
469 int __init bt_sysfs_init(void) 489 int __init bt_sysfs_init(void)
470 { 490 {
471 bt_workq = create_singlethread_workqueue("bluetooth"); 491 bt_workq = create_singlethread_workqueue("bluetooth");
472 if (!bt_workq) 492 if (!bt_workq)
473 return -ENOMEM; 493 return -ENOMEM;
474 494
475 bt_debugfs = debugfs_create_dir("bluetooth", NULL); 495 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
476 496
477 bt_class = class_create(THIS_MODULE, "bluetooth"); 497 bt_class = class_create(THIS_MODULE, "bluetooth");
478 if (IS_ERR(bt_class)) { 498 if (IS_ERR(bt_class)) {
479 destroy_workqueue(bt_workq); 499 destroy_workqueue(bt_workq);
480 return PTR_ERR(bt_class); 500 return PTR_ERR(bt_class);
481 } 501 }
482 502
483 return 0; 503 return 0;
484 } 504 }
485 505
/*
 * Subsystem exit: undo bt_sysfs_init() in reverse order — destroy
 * the device class, remove the debugfs tree and flush/destroy the
 * Bluetooth workqueue.
 */
void bt_sysfs_cleanup(void)
{
	class_destroy(bt_class);

	debugfs_remove_recursive(bt_debugfs);

	destroy_workqueue(bt_workq);
}
494 514