2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
56 static int enable_ertm
= 0;
57 static int max_transmit
= L2CAP_DEFAULT_MAX_TX
;
59 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
60 static u8 l2cap_fixed_chan
[8] = { 0x02, };
62 static const struct proto_ops l2cap_sock_ops
;
64 static struct bt_sock_list l2cap_sk_list
= {
65 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
68 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
69 static void l2cap_sock_close(struct sock
*sk
);
70 static void l2cap_sock_kill(struct sock
*sk
);
72 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
73 u8 code
, u8 ident
, u16 dlen
, void *data
);
75 /* ---- L2CAP timers ---- */
/* Socket timer expiry handler (runs in timer/BH context; arg is the sock).
 * Picks a close reason from the current state and tears the channel down. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* A connection that was up (or configuring) and timed out is
	 * reported as refused; likewise a pending connect that already
	 * required more than SDP-level security. Everything else is a
	 * plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Reap the socket if it is now a zapped orphan. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
101 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
103 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
104 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
107 static void l2cap_sock_clear_timer(struct sock
*sk
)
109 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
110 sk_stop_timer(sk
, &sk
->sk_timer
);
113 /* ---- L2CAP channels ---- */
114 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
117 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
118 if (l2cap_pi(s
)->dcid
== cid
)
124 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
127 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
128 if (l2cap_pi(s
)->scid
== cid
)
134 /* Find channel with given SCID.
135 * Returns locked socket */
136 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
140 s
= __l2cap_get_chan_by_scid(l
, cid
);
143 read_unlock(&l
->lock
);
147 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
150 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
151 if (l2cap_pi(s
)->ident
== ident
)
157 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
161 s
= __l2cap_get_chan_by_ident(l
, ident
);
164 read_unlock(&l
->lock
);
168 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
170 u16 cid
= L2CAP_CID_DYN_START
;
172 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
173 if (!__l2cap_get_chan_by_scid(l
, cid
))
180 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
185 l2cap_pi(l
->head
)->prev_c
= sk
;
187 l2cap_pi(sk
)->next_c
= l
->head
;
188 l2cap_pi(sk
)->prev_c
= NULL
;
192 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
194 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
196 write_lock_bh(&l
->lock
);
201 l2cap_pi(next
)->prev_c
= prev
;
203 l2cap_pi(prev
)->next_c
= next
;
204 write_unlock_bh(&l
->lock
);
209 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
211 struct l2cap_chan_list
*l
= &conn
->chan_list
;
213 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
214 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
216 conn
->disc_reason
= 0x13;
218 l2cap_pi(sk
)->conn
= conn
;
220 if (sk
->sk_type
== SOCK_SEQPACKET
) {
221 /* Alloc CID for connection-oriented socket */
222 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
223 } else if (sk
->sk_type
== SOCK_DGRAM
) {
224 /* Connectionless socket */
225 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
226 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
227 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
229 /* Raw socket can send/recv signalling messages only */
230 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
231 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
232 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
235 __l2cap_chan_link(l
, sk
);
238 bt_accept_enqueue(parent
, sk
);
242 * Must be called on the locked socket. */
243 static void l2cap_chan_del(struct sock
*sk
, int err
)
245 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
246 struct sock
*parent
= bt_sk(sk
)->parent
;
248 l2cap_sock_clear_timer(sk
);
250 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
253 /* Unlink from channel list */
254 l2cap_chan_unlink(&conn
->chan_list
, sk
);
255 l2cap_pi(sk
)->conn
= NULL
;
256 hci_conn_put(conn
->hcon
);
259 sk
->sk_state
= BT_CLOSED
;
260 sock_set_flag(sk
, SOCK_ZAPPED
);
266 bt_accept_unlink(sk
);
267 parent
->sk_data_ready(parent
, 0);
269 sk
->sk_state_change(sk
);
272 /* Service level security */
273 static inline int l2cap_check_security(struct sock
*sk
)
275 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
278 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
279 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
280 auth_type
= HCI_AT_NO_BONDING_MITM
;
282 auth_type
= HCI_AT_NO_BONDING
;
284 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
285 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
287 switch (l2cap_pi(sk
)->sec_level
) {
288 case BT_SECURITY_HIGH
:
289 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
291 case BT_SECURITY_MEDIUM
:
292 auth_type
= HCI_AT_GENERAL_BONDING
;
295 auth_type
= HCI_AT_NO_BONDING
;
300 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
304 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
314 spin_lock_bh(&conn
->lock
);
316 if (++conn
->tx_ident
> 128)
321 spin_unlock_bh(&conn
->lock
);
326 static inline int l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
328 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
330 BT_DBG("code 0x%2.2x", code
);
335 return hci_send_acl(conn
->hcon
, skb
, 0);
338 static inline int l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
341 struct l2cap_hdr
*lh
;
342 struct l2cap_conn
*conn
= pi
->conn
;
343 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
345 if (pi
->fcs
== L2CAP_FCS_CRC16
)
348 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
350 count
= min_t(unsigned int, conn
->mtu
, hlen
);
351 control
|= L2CAP_CTRL_FRAME_TYPE
;
353 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
357 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
358 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
359 lh
->cid
= cpu_to_le16(pi
->dcid
);
360 put_unaligned_le16(control
, skb_put(skb
, 2));
362 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
363 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
364 put_unaligned_le16(fcs
, skb_put(skb
, 2));
367 return hci_send_acl(pi
->conn
->hcon
, skb
, 0);
370 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
372 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
)
373 control
|= L2CAP_SUPER_RCV_NOT_READY
;
375 control
|= L2CAP_SUPER_RCV_READY
;
377 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
379 return l2cap_send_sframe(pi
, control
);
382 static void l2cap_do_start(struct sock
*sk
)
384 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
386 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
387 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
390 if (l2cap_check_security(sk
)) {
391 struct l2cap_conn_req req
;
392 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
393 req
.psm
= l2cap_pi(sk
)->psm
;
395 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
397 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
398 L2CAP_CONN_REQ
, sizeof(req
), &req
);
401 struct l2cap_info_req req
;
402 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
404 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
405 conn
->info_ident
= l2cap_get_ident(conn
);
407 mod_timer(&conn
->info_timer
, jiffies
+
408 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
410 l2cap_send_cmd(conn
, conn
->info_ident
,
411 L2CAP_INFO_REQ
, sizeof(req
), &req
);
415 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
)
417 struct l2cap_disconn_req req
;
419 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
420 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
421 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
422 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
425 /* ---- L2CAP connections ---- */
426 static void l2cap_conn_start(struct l2cap_conn
*conn
)
428 struct l2cap_chan_list
*l
= &conn
->chan_list
;
431 BT_DBG("conn %p", conn
);
435 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
438 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
443 if (sk
->sk_state
== BT_CONNECT
) {
444 if (l2cap_check_security(sk
)) {
445 struct l2cap_conn_req req
;
446 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
447 req
.psm
= l2cap_pi(sk
)->psm
;
449 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
451 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
452 L2CAP_CONN_REQ
, sizeof(req
), &req
);
454 } else if (sk
->sk_state
== BT_CONNECT2
) {
455 struct l2cap_conn_rsp rsp
;
456 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
457 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
459 if (l2cap_check_security(sk
)) {
460 if (bt_sk(sk
)->defer_setup
) {
461 struct sock
*parent
= bt_sk(sk
)->parent
;
462 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
463 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
464 parent
->sk_data_ready(parent
, 0);
467 sk
->sk_state
= BT_CONFIG
;
468 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
469 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
472 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
473 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
476 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
477 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
483 read_unlock(&l
->lock
);
486 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
488 struct l2cap_chan_list
*l
= &conn
->chan_list
;
491 BT_DBG("conn %p", conn
);
495 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
498 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
499 l2cap_sock_clear_timer(sk
);
500 sk
->sk_state
= BT_CONNECTED
;
501 sk
->sk_state_change(sk
);
502 } else if (sk
->sk_state
== BT_CONNECT
)
508 read_unlock(&l
->lock
);
511 /* Notify sockets that we cannot guaranty reliability anymore */
512 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
514 struct l2cap_chan_list
*l
= &conn
->chan_list
;
517 BT_DBG("conn %p", conn
);
521 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
522 if (l2cap_pi(sk
)->force_reliable
)
526 read_unlock(&l
->lock
);
529 static void l2cap_info_timeout(unsigned long arg
)
531 struct l2cap_conn
*conn
= (void *) arg
;
533 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
534 conn
->info_ident
= 0;
536 l2cap_conn_start(conn
);
539 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
541 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
546 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
550 hcon
->l2cap_data
= conn
;
553 BT_DBG("hcon %p conn %p", hcon
, conn
);
555 conn
->mtu
= hcon
->hdev
->acl_mtu
;
556 conn
->src
= &hcon
->hdev
->bdaddr
;
557 conn
->dst
= &hcon
->dst
;
561 spin_lock_init(&conn
->lock
);
562 rwlock_init(&conn
->chan_list
.lock
);
564 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
565 (unsigned long) conn
);
567 conn
->disc_reason
= 0x13;
572 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
574 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
580 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
582 kfree_skb(conn
->rx_skb
);
585 while ((sk
= conn
->chan_list
.head
)) {
587 l2cap_chan_del(sk
, err
);
592 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
593 del_timer_sync(&conn
->info_timer
);
595 hcon
->l2cap_data
= NULL
;
599 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
601 struct l2cap_chan_list
*l
= &conn
->chan_list
;
602 write_lock_bh(&l
->lock
);
603 __l2cap_chan_add(conn
, sk
, parent
);
604 write_unlock_bh(&l
->lock
);
607 /* ---- Socket interface ---- */
608 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
611 struct hlist_node
*node
;
612 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
613 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
620 /* Find socket with psm and source bdaddr.
621 * Returns closest match.
623 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
625 struct sock
*sk
= NULL
, *sk1
= NULL
;
626 struct hlist_node
*node
;
628 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
629 if (state
&& sk
->sk_state
!= state
)
632 if (l2cap_pi(sk
)->psm
== psm
) {
634 if (!bacmp(&bt_sk(sk
)->src
, src
))
638 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
642 return node
? sk
: sk1
;
645 /* Find socket with given address (psm, src).
646 * Returns locked socket */
647 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
650 read_lock(&l2cap_sk_list
.lock
);
651 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
654 read_unlock(&l2cap_sk_list
.lock
);
658 static void l2cap_sock_destruct(struct sock
*sk
)
662 skb_queue_purge(&sk
->sk_receive_queue
);
663 skb_queue_purge(&sk
->sk_write_queue
);
666 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
670 BT_DBG("parent %p", parent
);
672 /* Close not yet accepted channels */
673 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
674 l2cap_sock_close(sk
);
676 parent
->sk_state
= BT_CLOSED
;
677 sock_set_flag(parent
, SOCK_ZAPPED
);
680 /* Kill socket (only if zapped and orphan)
681 * Must be called on unlocked socket.
683 static void l2cap_sock_kill(struct sock
*sk
)
685 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
688 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
690 /* Kill poor orphan */
691 bt_sock_unlink(&l2cap_sk_list
, sk
);
692 sock_set_flag(sk
, SOCK_DEAD
);
696 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
698 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
700 switch (sk
->sk_state
) {
702 l2cap_sock_cleanup_listen(sk
);
707 if (sk
->sk_type
== SOCK_SEQPACKET
) {
708 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
710 sk
->sk_state
= BT_DISCONN
;
711 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
712 l2cap_send_disconn_req(conn
, sk
);
714 l2cap_chan_del(sk
, reason
);
718 if (sk
->sk_type
== SOCK_SEQPACKET
) {
719 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
720 struct l2cap_conn_rsp rsp
;
723 if (bt_sk(sk
)->defer_setup
)
724 result
= L2CAP_CR_SEC_BLOCK
;
726 result
= L2CAP_CR_BAD_PSM
;
728 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
729 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
730 rsp
.result
= cpu_to_le16(result
);
731 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
732 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
733 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
735 l2cap_chan_del(sk
, reason
);
740 l2cap_chan_del(sk
, reason
);
744 sock_set_flag(sk
, SOCK_ZAPPED
);
749 /* Must be called on unlocked socket. */
750 static void l2cap_sock_close(struct sock
*sk
)
752 l2cap_sock_clear_timer(sk
);
754 __l2cap_sock_close(sk
, ECONNRESET
);
759 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
761 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
766 sk
->sk_type
= parent
->sk_type
;
767 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
769 pi
->imtu
= l2cap_pi(parent
)->imtu
;
770 pi
->omtu
= l2cap_pi(parent
)->omtu
;
771 pi
->mode
= l2cap_pi(parent
)->mode
;
772 pi
->fcs
= l2cap_pi(parent
)->fcs
;
773 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
774 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
775 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
777 pi
->imtu
= L2CAP_DEFAULT_MTU
;
779 pi
->mode
= L2CAP_MODE_BASIC
;
780 pi
->fcs
= L2CAP_FCS_CRC16
;
781 pi
->sec_level
= BT_SECURITY_LOW
;
783 pi
->force_reliable
= 0;
786 /* Default config options */
788 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
789 skb_queue_head_init(TX_QUEUE(sk
));
790 skb_queue_head_init(SREJ_QUEUE(sk
));
791 INIT_LIST_HEAD(SREJ_LIST(sk
));
794 static struct proto l2cap_proto
= {
796 .owner
= THIS_MODULE
,
797 .obj_size
= sizeof(struct l2cap_pinfo
)
800 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
804 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
808 sock_init_data(sock
, sk
);
809 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
811 sk
->sk_destruct
= l2cap_sock_destruct
;
812 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
814 sock_reset_flag(sk
, SOCK_ZAPPED
);
816 sk
->sk_protocol
= proto
;
817 sk
->sk_state
= BT_OPEN
;
819 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
821 bt_sock_link(&l2cap_sk_list
, sk
);
825 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
830 BT_DBG("sock %p", sock
);
832 sock
->state
= SS_UNCONNECTED
;
834 if (sock
->type
!= SOCK_SEQPACKET
&&
835 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
836 return -ESOCKTNOSUPPORT
;
838 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
841 sock
->ops
= &l2cap_sock_ops
;
843 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
847 l2cap_sock_init(sk
, NULL
);
851 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
853 struct sock
*sk
= sock
->sk
;
854 struct sockaddr_l2 la
;
859 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
862 memset(&la
, 0, sizeof(la
));
863 len
= min_t(unsigned int, sizeof(la
), alen
);
864 memcpy(&la
, addr
, len
);
871 if (sk
->sk_state
!= BT_OPEN
) {
876 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
877 !capable(CAP_NET_BIND_SERVICE
)) {
882 write_lock_bh(&l2cap_sk_list
.lock
);
884 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
887 /* Save source address */
888 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
889 l2cap_pi(sk
)->psm
= la
.l2_psm
;
890 l2cap_pi(sk
)->sport
= la
.l2_psm
;
891 sk
->sk_state
= BT_BOUND
;
893 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
894 __le16_to_cpu(la
.l2_psm
) == 0x0003)
895 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
898 write_unlock_bh(&l2cap_sk_list
.lock
);
905 static int l2cap_do_connect(struct sock
*sk
)
907 bdaddr_t
*src
= &bt_sk(sk
)->src
;
908 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
909 struct l2cap_conn
*conn
;
910 struct hci_conn
*hcon
;
911 struct hci_dev
*hdev
;
915 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
918 hdev
= hci_get_route(dst
, src
);
920 return -EHOSTUNREACH
;
922 hci_dev_lock_bh(hdev
);
926 if (sk
->sk_type
== SOCK_RAW
) {
927 switch (l2cap_pi(sk
)->sec_level
) {
928 case BT_SECURITY_HIGH
:
929 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
931 case BT_SECURITY_MEDIUM
:
932 auth_type
= HCI_AT_DEDICATED_BONDING
;
935 auth_type
= HCI_AT_NO_BONDING
;
938 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
939 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
940 auth_type
= HCI_AT_NO_BONDING_MITM
;
942 auth_type
= HCI_AT_NO_BONDING
;
944 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
945 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
947 switch (l2cap_pi(sk
)->sec_level
) {
948 case BT_SECURITY_HIGH
:
949 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
951 case BT_SECURITY_MEDIUM
:
952 auth_type
= HCI_AT_GENERAL_BONDING
;
955 auth_type
= HCI_AT_NO_BONDING
;
960 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
961 l2cap_pi(sk
)->sec_level
, auth_type
);
965 conn
= l2cap_conn_add(hcon
, 0);
973 /* Update source addr of the socket */
974 bacpy(src
, conn
->src
);
976 l2cap_chan_add(conn
, sk
, NULL
);
978 sk
->sk_state
= BT_CONNECT
;
979 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
981 if (hcon
->state
== BT_CONNECTED
) {
982 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
983 l2cap_sock_clear_timer(sk
);
984 sk
->sk_state
= BT_CONNECTED
;
990 hci_dev_unlock_bh(hdev
);
995 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
997 struct sock
*sk
= sock
->sk
;
998 struct sockaddr_l2 la
;
1001 BT_DBG("sk %p", sk
);
1003 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
1006 memset(&la
, 0, sizeof(la
));
1007 len
= min_t(unsigned int, sizeof(la
), alen
);
1008 memcpy(&la
, addr
, len
);
1015 if (sk
->sk_type
== SOCK_SEQPACKET
&& !la
.l2_psm
) {
1020 switch (l2cap_pi(sk
)->mode
) {
1021 case L2CAP_MODE_BASIC
:
1023 case L2CAP_MODE_ERTM
:
1024 case L2CAP_MODE_STREAMING
:
1033 switch (sk
->sk_state
) {
1037 /* Already connecting */
1041 /* Already connected */
1054 /* Set destination address and psm */
1055 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1056 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1058 err
= l2cap_do_connect(sk
);
1063 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1064 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1070 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1072 struct sock
*sk
= sock
->sk
;
1075 BT_DBG("sk %p backlog %d", sk
, backlog
);
1079 if (sk
->sk_state
!= BT_BOUND
|| sock
->type
!= SOCK_SEQPACKET
) {
1084 switch (l2cap_pi(sk
)->mode
) {
1085 case L2CAP_MODE_BASIC
:
1087 case L2CAP_MODE_ERTM
:
1088 case L2CAP_MODE_STREAMING
:
1097 if (!l2cap_pi(sk
)->psm
) {
1098 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1103 write_lock_bh(&l2cap_sk_list
.lock
);
1105 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1106 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1107 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1108 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1113 write_unlock_bh(&l2cap_sk_list
.lock
);
1119 sk
->sk_max_ack_backlog
= backlog
;
1120 sk
->sk_ack_backlog
= 0;
1121 sk
->sk_state
= BT_LISTEN
;
1128 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1130 DECLARE_WAITQUEUE(wait
, current
);
1131 struct sock
*sk
= sock
->sk
, *nsk
;
1135 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1137 if (sk
->sk_state
!= BT_LISTEN
) {
1142 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1144 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1146 /* Wait for an incoming connection. (wake-one). */
1147 add_wait_queue_exclusive(sk
->sk_sleep
, &wait
);
1148 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1149 set_current_state(TASK_INTERRUPTIBLE
);
1156 timeo
= schedule_timeout(timeo
);
1157 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1159 if (sk
->sk_state
!= BT_LISTEN
) {
1164 if (signal_pending(current
)) {
1165 err
= sock_intr_errno(timeo
);
1169 set_current_state(TASK_RUNNING
);
1170 remove_wait_queue(sk
->sk_sleep
, &wait
);
1175 newsock
->state
= SS_CONNECTED
;
1177 BT_DBG("new socket %p", nsk
);
1184 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1186 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1187 struct sock
*sk
= sock
->sk
;
1189 BT_DBG("sock %p, sk %p", sock
, sk
);
1191 addr
->sa_family
= AF_BLUETOOTH
;
1192 *len
= sizeof(struct sockaddr_l2
);
1195 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1196 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1197 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1199 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1200 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1201 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1207 static void l2cap_monitor_timeout(unsigned long arg
)
1209 struct sock
*sk
= (void *) arg
;
1213 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1214 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
);
1219 l2cap_pi(sk
)->retry_count
++;
1220 __mod_monitor_timer();
1222 control
= L2CAP_CTRL_POLL
;
1223 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1227 static void l2cap_retrans_timeout(unsigned long arg
)
1229 struct sock
*sk
= (void *) arg
;
1233 l2cap_pi(sk
)->retry_count
= 1;
1234 __mod_monitor_timer();
1236 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1238 control
= L2CAP_CTRL_POLL
;
1239 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1243 static void l2cap_drop_acked_frames(struct sock
*sk
)
1245 struct sk_buff
*skb
;
1247 while ((skb
= skb_peek(TX_QUEUE(sk
)))) {
1248 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1251 skb
= skb_dequeue(TX_QUEUE(sk
));
1254 l2cap_pi(sk
)->unacked_frames
--;
1257 if (!l2cap_pi(sk
)->unacked_frames
)
1258 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1263 static inline int l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1265 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1268 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1270 err
= hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1277 static int l2cap_streaming_send(struct sock
*sk
)
1279 struct sk_buff
*skb
, *tx_skb
;
1280 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1284 while ((skb
= sk
->sk_send_head
)) {
1285 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1287 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1288 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1289 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1291 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1292 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1293 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1296 err
= l2cap_do_send(sk
, tx_skb
);
1298 l2cap_send_disconn_req(pi
->conn
, sk
);
1302 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1304 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1305 sk
->sk_send_head
= NULL
;
1307 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1309 skb
= skb_dequeue(TX_QUEUE(sk
));
1315 static int l2cap_retransmit_frame(struct sock
*sk
, u8 tx_seq
)
1317 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1318 struct sk_buff
*skb
, *tx_skb
;
1322 skb
= skb_peek(TX_QUEUE(sk
));
1324 if (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1325 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1327 skb
= skb_queue_next(TX_QUEUE(sk
), skb
);
1331 if (pi
->remote_max_tx
&&
1332 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1333 l2cap_send_disconn_req(pi
->conn
, sk
);
1337 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1338 bt_cb(skb
)->retries
++;
1339 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1340 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1341 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1342 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1344 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1345 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1346 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1349 err
= l2cap_do_send(sk
, tx_skb
);
1351 l2cap_send_disconn_req(pi
->conn
, sk
);
1359 static int l2cap_ertm_send(struct sock
*sk
)
1361 struct sk_buff
*skb
, *tx_skb
;
1362 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1366 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
1369 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
)) &&
1370 !(pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)) {
1371 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1373 if (pi
->remote_max_tx
&&
1374 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1375 l2cap_send_disconn_req(pi
->conn
, sk
);
1379 bt_cb(skb
)->retries
++;
1381 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1382 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1383 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1384 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1387 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1388 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1389 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1392 err
= l2cap_do_send(sk
, tx_skb
);
1394 l2cap_send_disconn_req(pi
->conn
, sk
);
1397 __mod_retrans_timer();
1399 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1400 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1402 pi
->unacked_frames
++;
1404 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1405 sk
->sk_send_head
= NULL
;
1407 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1413 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1415 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1416 struct sk_buff
**frag
;
1419 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
)) {
1426 /* Continuation fragments (no L2CAP header) */
1427 frag
= &skb_shinfo(skb
)->frag_list
;
1429 count
= min_t(unsigned int, conn
->mtu
, len
);
1431 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1434 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1440 frag
= &(*frag
)->next
;
1446 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1448 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1449 struct sk_buff
*skb
;
1450 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1451 struct l2cap_hdr
*lh
;
1453 BT_DBG("sk %p len %d", sk
, (int)len
);
1455 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1456 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1457 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1459 return ERR_PTR(-ENOMEM
);
1461 /* Create L2CAP header */
1462 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1463 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1464 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1465 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1467 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1468 if (unlikely(err
< 0)) {
1470 return ERR_PTR(err
);
1475 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1477 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1478 struct sk_buff
*skb
;
1479 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1480 struct l2cap_hdr
*lh
;
1482 BT_DBG("sk %p len %d", sk
, (int)len
);
1484 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1485 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1486 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1488 return ERR_PTR(-ENOMEM
);
1490 /* Create L2CAP header */
1491 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1492 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1493 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1495 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1496 if (unlikely(err
< 0)) {
1498 return ERR_PTR(err
);
1503 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1505 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1506 struct sk_buff
*skb
;
1507 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1508 struct l2cap_hdr
*lh
;
1510 BT_DBG("sk %p len %d", sk
, (int)len
);
1515 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1518 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1519 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1520 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1522 return ERR_PTR(-ENOMEM
);
1524 /* Create L2CAP header */
1525 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1526 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1527 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1528 put_unaligned_le16(control
, skb_put(skb
, 2));
1530 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1532 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1533 if (unlikely(err
< 0)) {
1535 return ERR_PTR(err
);
1538 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1539 put_unaligned_le16(0, skb_put(skb
, 2));
1541 bt_cb(skb
)->retries
= 0;
1545 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1547 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1548 struct sk_buff
*skb
;
1549 struct sk_buff_head sar_queue
;
1553 __skb_queue_head_init(&sar_queue
);
1554 control
= L2CAP_SDU_START
;
1555 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->max_pdu_size
, control
, len
);
1557 return PTR_ERR(skb
);
1559 __skb_queue_tail(&sar_queue
, skb
);
1560 len
-= pi
->max_pdu_size
;
1561 size
+=pi
->max_pdu_size
;
1567 if (len
> pi
->max_pdu_size
) {
1568 control
|= L2CAP_SDU_CONTINUE
;
1569 buflen
= pi
->max_pdu_size
;
1571 control
|= L2CAP_SDU_END
;
1575 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1577 skb_queue_purge(&sar_queue
);
1578 return PTR_ERR(skb
);
1581 __skb_queue_tail(&sar_queue
, skb
);
1586 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1587 if (sk
->sk_send_head
== NULL
)
1588 sk
->sk_send_head
= sar_queue
.next
;
1593 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1595 struct sock
*sk
= sock
->sk
;
1596 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1597 struct sk_buff
*skb
;
1601 BT_DBG("sock %p, sk %p", sock
, sk
);
1603 err
= sock_error(sk
);
1607 if (msg
->msg_flags
& MSG_OOB
)
1610 /* Check outgoing MTU */
1611 if (sk
->sk_type
== SOCK_SEQPACKET
&& pi
->mode
== L2CAP_MODE_BASIC
&&
1617 if (sk
->sk_state
!= BT_CONNECTED
) {
1622 /* Connectionless channel */
1623 if (sk
->sk_type
== SOCK_DGRAM
) {
1624 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1625 err
= l2cap_do_send(sk
, skb
);
1630 case L2CAP_MODE_BASIC
:
1631 /* Create a basic PDU */
1632 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1638 err
= l2cap_do_send(sk
, skb
);
1643 case L2CAP_MODE_ERTM
:
1644 case L2CAP_MODE_STREAMING
:
1645 /* Entire SDU fits into one PDU */
1646 if (len
<= pi
->max_pdu_size
) {
1647 control
= L2CAP_SDU_UNSEGMENTED
;
1648 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1653 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1654 if (sk
->sk_send_head
== NULL
)
1655 sk
->sk_send_head
= skb
;
1657 /* Segment SDU into multiples PDUs */
1658 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1663 if (pi
->mode
== L2CAP_MODE_STREAMING
)
1664 err
= l2cap_streaming_send(sk
);
1666 err
= l2cap_ertm_send(sk
);
1673 BT_DBG("bad state %1.1x", pi
->mode
);
1682 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1684 struct sock
*sk
= sock
->sk
;
1688 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1689 struct l2cap_conn_rsp rsp
;
1691 sk
->sk_state
= BT_CONFIG
;
1693 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1694 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1695 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1696 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1697 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1698 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1706 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1709 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1711 struct sock
*sk
= sock
->sk
;
1712 struct l2cap_options opts
;
1716 BT_DBG("sk %p", sk
);
1722 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1723 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1724 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1725 opts
.mode
= l2cap_pi(sk
)->mode
;
1726 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1728 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1729 if (copy_from_user((char *) &opts
, optval
, len
)) {
1734 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1735 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1736 l2cap_pi(sk
)->mode
= opts
.mode
;
1737 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1741 if (get_user(opt
, (u32 __user
*) optval
)) {
1746 if (opt
& L2CAP_LM_AUTH
)
1747 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1748 if (opt
& L2CAP_LM_ENCRYPT
)
1749 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1750 if (opt
& L2CAP_LM_SECURE
)
1751 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1753 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1754 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1766 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1768 struct sock
*sk
= sock
->sk
;
1769 struct bt_security sec
;
1773 BT_DBG("sk %p", sk
);
1775 if (level
== SOL_L2CAP
)
1776 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1778 if (level
!= SOL_BLUETOOTH
)
1779 return -ENOPROTOOPT
;
1785 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1790 sec
.level
= BT_SECURITY_LOW
;
1792 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1793 if (copy_from_user((char *) &sec
, optval
, len
)) {
1798 if (sec
.level
< BT_SECURITY_LOW
||
1799 sec
.level
> BT_SECURITY_HIGH
) {
1804 l2cap_pi(sk
)->sec_level
= sec
.level
;
1807 case BT_DEFER_SETUP
:
1808 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1813 if (get_user(opt
, (u32 __user
*) optval
)) {
1818 bt_sk(sk
)->defer_setup
= opt
;
1830 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1832 struct sock
*sk
= sock
->sk
;
1833 struct l2cap_options opts
;
1834 struct l2cap_conninfo cinfo
;
1838 BT_DBG("sk %p", sk
);
1840 if (get_user(len
, optlen
))
1847 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1848 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1849 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1850 opts
.mode
= l2cap_pi(sk
)->mode
;
1851 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1853 len
= min_t(unsigned int, len
, sizeof(opts
));
1854 if (copy_to_user(optval
, (char *) &opts
, len
))
1860 switch (l2cap_pi(sk
)->sec_level
) {
1861 case BT_SECURITY_LOW
:
1862 opt
= L2CAP_LM_AUTH
;
1864 case BT_SECURITY_MEDIUM
:
1865 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
1867 case BT_SECURITY_HIGH
:
1868 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
1876 if (l2cap_pi(sk
)->role_switch
)
1877 opt
|= L2CAP_LM_MASTER
;
1879 if (l2cap_pi(sk
)->force_reliable
)
1880 opt
|= L2CAP_LM_RELIABLE
;
1882 if (put_user(opt
, (u32 __user
*) optval
))
1886 case L2CAP_CONNINFO
:
1887 if (sk
->sk_state
!= BT_CONNECTED
&&
1888 !(sk
->sk_state
== BT_CONNECT2
&&
1889 bt_sk(sk
)->defer_setup
)) {
1894 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
1895 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
1897 len
= min_t(unsigned int, len
, sizeof(cinfo
));
1898 if (copy_to_user(optval
, (char *) &cinfo
, len
))
1912 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1914 struct sock
*sk
= sock
->sk
;
1915 struct bt_security sec
;
1918 BT_DBG("sk %p", sk
);
1920 if (level
== SOL_L2CAP
)
1921 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
1923 if (level
!= SOL_BLUETOOTH
)
1924 return -ENOPROTOOPT
;
1926 if (get_user(len
, optlen
))
1933 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1938 sec
.level
= l2cap_pi(sk
)->sec_level
;
1940 len
= min_t(unsigned int, len
, sizeof(sec
));
1941 if (copy_to_user(optval
, (char *) &sec
, len
))
1946 case BT_DEFER_SETUP
:
1947 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1952 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
1966 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
1968 struct sock
*sk
= sock
->sk
;
1971 BT_DBG("sock %p, sk %p", sock
, sk
);
1977 if (!sk
->sk_shutdown
) {
1978 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1979 l2cap_sock_clear_timer(sk
);
1980 __l2cap_sock_close(sk
, 0);
1982 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
1983 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
1990 static int l2cap_sock_release(struct socket
*sock
)
1992 struct sock
*sk
= sock
->sk
;
1995 BT_DBG("sock %p, sk %p", sock
, sk
);
2000 err
= l2cap_sock_shutdown(sock
, 2);
2003 l2cap_sock_kill(sk
);
2007 static void l2cap_chan_ready(struct sock
*sk
)
2009 struct sock
*parent
= bt_sk(sk
)->parent
;
2011 BT_DBG("sk %p, parent %p", sk
, parent
);
2013 l2cap_pi(sk
)->conf_state
= 0;
2014 l2cap_sock_clear_timer(sk
);
2017 /* Outgoing channel.
2018 * Wake up socket sleeping on connect.
2020 sk
->sk_state
= BT_CONNECTED
;
2021 sk
->sk_state_change(sk
);
2023 /* Incoming channel.
2024 * Wake up socket sleeping on accept.
2026 parent
->sk_data_ready(parent
, 0);
2030 /* Copy frame to all raw sockets on that connection */
2031 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2033 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2034 struct sk_buff
*nskb
;
2037 BT_DBG("conn %p", conn
);
2039 read_lock(&l
->lock
);
2040 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2041 if (sk
->sk_type
!= SOCK_RAW
)
2044 /* Don't send frame to the socket it came from */
2047 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2051 if (sock_queue_rcv_skb(sk
, nskb
))
2054 read_unlock(&l
->lock
);
2057 /* ---- L2CAP signalling commands ---- */
2058 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2059 u8 code
, u8 ident
, u16 dlen
, void *data
)
2061 struct sk_buff
*skb
, **frag
;
2062 struct l2cap_cmd_hdr
*cmd
;
2063 struct l2cap_hdr
*lh
;
2066 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2067 conn
, code
, ident
, dlen
);
2069 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2070 count
= min_t(unsigned int, conn
->mtu
, len
);
2072 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2076 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2077 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2078 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2080 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2083 cmd
->len
= cpu_to_le16(dlen
);
2086 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2087 memcpy(skb_put(skb
, count
), data
, count
);
2093 /* Continuation fragments (no L2CAP header) */
2094 frag
= &skb_shinfo(skb
)->frag_list
;
2096 count
= min_t(unsigned int, conn
->mtu
, len
);
2098 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2102 memcpy(skb_put(*frag
, count
), data
, count
);
2107 frag
= &(*frag
)->next
;
2117 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2119 struct l2cap_conf_opt
*opt
= *ptr
;
2122 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2130 *val
= *((u8
*) opt
->val
);
2134 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2138 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2142 *val
= (unsigned long) opt
->val
;
2146 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2150 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2152 struct l2cap_conf_opt
*opt
= *ptr
;
2154 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2161 *((u8
*) opt
->val
) = val
;
2165 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2169 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2173 memcpy(opt
->val
, (void *) val
, len
);
2177 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2180 static inline void l2cap_ertm_init(struct sock
*sk
)
2182 l2cap_pi(sk
)->expected_ack_seq
= 0;
2183 l2cap_pi(sk
)->unacked_frames
= 0;
2184 l2cap_pi(sk
)->buffer_seq
= 0;
2185 l2cap_pi(sk
)->num_to_ack
= 0;
2187 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2188 l2cap_retrans_timeout
, (unsigned long) sk
);
2189 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2190 l2cap_monitor_timeout
, (unsigned long) sk
);
2192 __skb_queue_head_init(SREJ_QUEUE(sk
));
2195 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2197 u32 local_feat_mask
= l2cap_feat_mask
;
2199 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2202 case L2CAP_MODE_ERTM
:
2203 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2204 case L2CAP_MODE_STREAMING
:
2205 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2211 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2214 case L2CAP_MODE_STREAMING
:
2215 case L2CAP_MODE_ERTM
:
2216 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2220 return L2CAP_MODE_BASIC
;
2224 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2226 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2227 struct l2cap_conf_req
*req
= data
;
2228 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2229 void *ptr
= req
->data
;
2231 BT_DBG("sk %p", sk
);
2233 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2237 case L2CAP_MODE_STREAMING
:
2238 case L2CAP_MODE_ERTM
:
2239 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2240 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2241 l2cap_send_disconn_req(pi
->conn
, sk
);
2244 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2250 case L2CAP_MODE_BASIC
:
2251 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2252 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2255 case L2CAP_MODE_ERTM
:
2256 rfc
.mode
= L2CAP_MODE_ERTM
;
2257 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2258 rfc
.max_transmit
= max_transmit
;
2259 rfc
.retrans_timeout
= 0;
2260 rfc
.monitor_timeout
= 0;
2261 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2263 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2264 sizeof(rfc
), (unsigned long) &rfc
);
2266 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2269 if (pi
->fcs
== L2CAP_FCS_NONE
||
2270 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2271 pi
->fcs
= L2CAP_FCS_NONE
;
2272 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2276 case L2CAP_MODE_STREAMING
:
2277 rfc
.mode
= L2CAP_MODE_STREAMING
;
2279 rfc
.max_transmit
= 0;
2280 rfc
.retrans_timeout
= 0;
2281 rfc
.monitor_timeout
= 0;
2282 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2284 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2285 sizeof(rfc
), (unsigned long) &rfc
);
2287 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2290 if (pi
->fcs
== L2CAP_FCS_NONE
||
2291 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2292 pi
->fcs
= L2CAP_FCS_NONE
;
2293 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2298 /* FIXME: Need actual value of the flush timeout */
2299 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2300 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2302 req
->dcid
= cpu_to_le16(pi
->dcid
);
2303 req
->flags
= cpu_to_le16(0);
2308 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2310 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2311 struct l2cap_conf_rsp
*rsp
= data
;
2312 void *ptr
= rsp
->data
;
2313 void *req
= pi
->conf_req
;
2314 int len
= pi
->conf_len
;
2315 int type
, hint
, olen
;
2317 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2318 u16 mtu
= L2CAP_DEFAULT_MTU
;
2319 u16 result
= L2CAP_CONF_SUCCESS
;
2321 BT_DBG("sk %p", sk
);
2323 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2324 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2326 hint
= type
& L2CAP_CONF_HINT
;
2327 type
&= L2CAP_CONF_MASK
;
2330 case L2CAP_CONF_MTU
:
2334 case L2CAP_CONF_FLUSH_TO
:
2338 case L2CAP_CONF_QOS
:
2341 case L2CAP_CONF_RFC
:
2342 if (olen
== sizeof(rfc
))
2343 memcpy(&rfc
, (void *) val
, olen
);
2346 case L2CAP_CONF_FCS
:
2347 if (val
== L2CAP_FCS_NONE
)
2348 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2356 result
= L2CAP_CONF_UNKNOWN
;
2357 *((u8
*) ptr
++) = type
;
2362 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2366 case L2CAP_MODE_STREAMING
:
2367 case L2CAP_MODE_ERTM
:
2368 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2369 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2370 return -ECONNREFUSED
;
2373 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2378 if (pi
->mode
!= rfc
.mode
) {
2379 result
= L2CAP_CONF_UNACCEPT
;
2380 rfc
.mode
= pi
->mode
;
2382 if (pi
->num_conf_rsp
== 1)
2383 return -ECONNREFUSED
;
2385 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2386 sizeof(rfc
), (unsigned long) &rfc
);
2390 if (result
== L2CAP_CONF_SUCCESS
) {
2391 /* Configure output options and let the other side know
2392 * which ones we don't like. */
2394 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2395 result
= L2CAP_CONF_UNACCEPT
;
2398 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2400 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2403 case L2CAP_MODE_BASIC
:
2404 pi
->fcs
= L2CAP_FCS_NONE
;
2405 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2408 case L2CAP_MODE_ERTM
:
2409 pi
->remote_tx_win
= rfc
.txwin_size
;
2410 pi
->remote_max_tx
= rfc
.max_transmit
;
2411 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2413 rfc
.retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
2414 rfc
.monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
2416 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2418 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2419 sizeof(rfc
), (unsigned long) &rfc
);
2423 case L2CAP_MODE_STREAMING
:
2424 pi
->remote_tx_win
= rfc
.txwin_size
;
2425 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2427 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2429 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2430 sizeof(rfc
), (unsigned long) &rfc
);
2435 result
= L2CAP_CONF_UNACCEPT
;
2437 memset(&rfc
, 0, sizeof(rfc
));
2438 rfc
.mode
= pi
->mode
;
2441 if (result
== L2CAP_CONF_SUCCESS
)
2442 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2444 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2445 rsp
->result
= cpu_to_le16(result
);
2446 rsp
->flags
= cpu_to_le16(0x0000);
2451 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2453 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2454 struct l2cap_conf_req
*req
= data
;
2455 void *ptr
= req
->data
;
2458 struct l2cap_conf_rfc rfc
;
2460 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2462 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2463 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2466 case L2CAP_CONF_MTU
:
2467 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2468 *result
= L2CAP_CONF_UNACCEPT
;
2469 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2472 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2475 case L2CAP_CONF_FLUSH_TO
:
2477 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2481 case L2CAP_CONF_RFC
:
2482 if (olen
== sizeof(rfc
))
2483 memcpy(&rfc
, (void *)val
, olen
);
2485 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2486 rfc
.mode
!= pi
->mode
)
2487 return -ECONNREFUSED
;
2489 pi
->mode
= rfc
.mode
;
2492 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2493 sizeof(rfc
), (unsigned long) &rfc
);
2498 if (*result
== L2CAP_CONF_SUCCESS
) {
2500 case L2CAP_MODE_ERTM
:
2501 pi
->remote_tx_win
= rfc
.txwin_size
;
2502 pi
->retrans_timeout
= rfc
.retrans_timeout
;
2503 pi
->monitor_timeout
= rfc
.monitor_timeout
;
2504 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2506 case L2CAP_MODE_STREAMING
:
2507 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2512 req
->dcid
= cpu_to_le16(pi
->dcid
);
2513 req
->flags
= cpu_to_le16(0x0000);
2518 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2520 struct l2cap_conf_rsp
*rsp
= data
;
2521 void *ptr
= rsp
->data
;
2523 BT_DBG("sk %p", sk
);
2525 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2526 rsp
->result
= cpu_to_le16(result
);
2527 rsp
->flags
= cpu_to_le16(flags
);
2532 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2534 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2536 if (rej
->reason
!= 0x0000)
2539 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2540 cmd
->ident
== conn
->info_ident
) {
2541 del_timer(&conn
->info_timer
);
2543 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2544 conn
->info_ident
= 0;
2546 l2cap_conn_start(conn
);
2552 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2554 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2555 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2556 struct l2cap_conn_rsp rsp
;
2557 struct sock
*sk
, *parent
;
2558 int result
, status
= L2CAP_CS_NO_INFO
;
2560 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2561 __le16 psm
= req
->psm
;
2563 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2565 /* Check if we have socket listening on psm */
2566 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2568 result
= L2CAP_CR_BAD_PSM
;
2572 /* Check if the ACL is secure enough (if not SDP) */
2573 if (psm
!= cpu_to_le16(0x0001) &&
2574 !hci_conn_check_link_mode(conn
->hcon
)) {
2575 conn
->disc_reason
= 0x05;
2576 result
= L2CAP_CR_SEC_BLOCK
;
2580 result
= L2CAP_CR_NO_MEM
;
2582 /* Check for backlog size */
2583 if (sk_acceptq_is_full(parent
)) {
2584 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2588 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2592 write_lock_bh(&list
->lock
);
2594 /* Check if we already have channel with that dcid */
2595 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2596 write_unlock_bh(&list
->lock
);
2597 sock_set_flag(sk
, SOCK_ZAPPED
);
2598 l2cap_sock_kill(sk
);
2602 hci_conn_hold(conn
->hcon
);
2604 l2cap_sock_init(sk
, parent
);
2605 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2606 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2607 l2cap_pi(sk
)->psm
= psm
;
2608 l2cap_pi(sk
)->dcid
= scid
;
2610 __l2cap_chan_add(conn
, sk
, parent
);
2611 dcid
= l2cap_pi(sk
)->scid
;
2613 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2615 l2cap_pi(sk
)->ident
= cmd
->ident
;
2617 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2618 if (l2cap_check_security(sk
)) {
2619 if (bt_sk(sk
)->defer_setup
) {
2620 sk
->sk_state
= BT_CONNECT2
;
2621 result
= L2CAP_CR_PEND
;
2622 status
= L2CAP_CS_AUTHOR_PEND
;
2623 parent
->sk_data_ready(parent
, 0);
2625 sk
->sk_state
= BT_CONFIG
;
2626 result
= L2CAP_CR_SUCCESS
;
2627 status
= L2CAP_CS_NO_INFO
;
2630 sk
->sk_state
= BT_CONNECT2
;
2631 result
= L2CAP_CR_PEND
;
2632 status
= L2CAP_CS_AUTHEN_PEND
;
2635 sk
->sk_state
= BT_CONNECT2
;
2636 result
= L2CAP_CR_PEND
;
2637 status
= L2CAP_CS_NO_INFO
;
2640 write_unlock_bh(&list
->lock
);
2643 bh_unlock_sock(parent
);
2646 rsp
.scid
= cpu_to_le16(scid
);
2647 rsp
.dcid
= cpu_to_le16(dcid
);
2648 rsp
.result
= cpu_to_le16(result
);
2649 rsp
.status
= cpu_to_le16(status
);
2650 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2652 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2653 struct l2cap_info_req info
;
2654 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2656 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2657 conn
->info_ident
= l2cap_get_ident(conn
);
2659 mod_timer(&conn
->info_timer
, jiffies
+
2660 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2662 l2cap_send_cmd(conn
, conn
->info_ident
,
2663 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2669 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2671 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2672 u16 scid
, dcid
, result
, status
;
2676 scid
= __le16_to_cpu(rsp
->scid
);
2677 dcid
= __le16_to_cpu(rsp
->dcid
);
2678 result
= __le16_to_cpu(rsp
->result
);
2679 status
= __le16_to_cpu(rsp
->status
);
2681 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2684 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2688 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2694 case L2CAP_CR_SUCCESS
:
2695 sk
->sk_state
= BT_CONFIG
;
2696 l2cap_pi(sk
)->ident
= 0;
2697 l2cap_pi(sk
)->dcid
= dcid
;
2698 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2700 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2702 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2703 l2cap_build_conf_req(sk
, req
), req
);
2704 l2cap_pi(sk
)->num_conf_req
++;
2708 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2712 l2cap_chan_del(sk
, ECONNREFUSED
);
2720 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2722 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2728 dcid
= __le16_to_cpu(req
->dcid
);
2729 flags
= __le16_to_cpu(req
->flags
);
2731 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2733 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2737 if (sk
->sk_state
== BT_DISCONN
)
2740 /* Reject if config buffer is too small. */
2741 len
= cmd_len
- sizeof(*req
);
2742 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2743 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2744 l2cap_build_conf_rsp(sk
, rsp
,
2745 L2CAP_CONF_REJECT
, flags
), rsp
);
2750 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2751 l2cap_pi(sk
)->conf_len
+= len
;
2753 if (flags
& 0x0001) {
2754 /* Incomplete config. Send empty response. */
2755 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2756 l2cap_build_conf_rsp(sk
, rsp
,
2757 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2761 /* Complete config. */
2762 len
= l2cap_parse_conf_req(sk
, rsp
);
2764 l2cap_send_disconn_req(conn
, sk
);
2768 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2769 l2cap_pi(sk
)->num_conf_rsp
++;
2771 /* Reset config buffer. */
2772 l2cap_pi(sk
)->conf_len
= 0;
2774 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2777 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2778 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2779 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2780 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2782 sk
->sk_state
= BT_CONNECTED
;
2784 l2cap_pi(sk
)->next_tx_seq
= 0;
2785 l2cap_pi(sk
)->expected_tx_seq
= 0;
2786 __skb_queue_head_init(TX_QUEUE(sk
));
2787 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2788 l2cap_ertm_init(sk
);
2790 l2cap_chan_ready(sk
);
2794 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2796 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2797 l2cap_build_conf_req(sk
, buf
), buf
);
2798 l2cap_pi(sk
)->num_conf_req
++;
2806 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2808 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2809 u16 scid
, flags
, result
;
2812 scid
= __le16_to_cpu(rsp
->scid
);
2813 flags
= __le16_to_cpu(rsp
->flags
);
2814 result
= __le16_to_cpu(rsp
->result
);
2816 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2817 scid
, flags
, result
);
2819 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2824 case L2CAP_CONF_SUCCESS
:
2827 case L2CAP_CONF_UNACCEPT
:
2828 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2829 int len
= cmd
->len
- sizeof(*rsp
);
2832 /* throw out any old stored conf requests */
2833 result
= L2CAP_CONF_SUCCESS
;
2834 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2837 l2cap_send_disconn_req(conn
, sk
);
2841 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2842 L2CAP_CONF_REQ
, len
, req
);
2843 l2cap_pi(sk
)->num_conf_req
++;
2844 if (result
!= L2CAP_CONF_SUCCESS
)
2850 sk
->sk_state
= BT_DISCONN
;
2851 sk
->sk_err
= ECONNRESET
;
2852 l2cap_sock_set_timer(sk
, HZ
* 5);
2853 l2cap_send_disconn_req(conn
, sk
);
2860 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2862 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2863 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2864 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2865 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2867 sk
->sk_state
= BT_CONNECTED
;
2868 l2cap_pi(sk
)->next_tx_seq
= 0;
2869 l2cap_pi(sk
)->expected_tx_seq
= 0;
2870 __skb_queue_head_init(TX_QUEUE(sk
));
2871 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2872 l2cap_ertm_init(sk
);
2874 l2cap_chan_ready(sk
);
2882 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2884 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2885 struct l2cap_disconn_rsp rsp
;
2889 scid
= __le16_to_cpu(req
->scid
);
2890 dcid
= __le16_to_cpu(req
->dcid
);
2892 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2894 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2898 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2899 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2900 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2902 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2904 skb_queue_purge(TX_QUEUE(sk
));
2906 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
2907 skb_queue_purge(SREJ_QUEUE(sk
));
2908 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2909 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2912 l2cap_chan_del(sk
, ECONNRESET
);
2915 l2cap_sock_kill(sk
);
2919 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2921 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2925 scid
= __le16_to_cpu(rsp
->scid
);
2926 dcid
= __le16_to_cpu(rsp
->dcid
);
2928 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2930 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2934 skb_queue_purge(TX_QUEUE(sk
));
2936 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
2937 skb_queue_purge(SREJ_QUEUE(sk
));
2938 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2939 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2942 l2cap_chan_del(sk
, 0);
2945 l2cap_sock_kill(sk
);
2949 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2951 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2954 type
= __le16_to_cpu(req
->type
);
2956 BT_DBG("type 0x%4.4x", type
);
2958 if (type
== L2CAP_IT_FEAT_MASK
) {
2960 u32 feat_mask
= l2cap_feat_mask
;
2961 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2962 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2963 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2965 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2967 put_unaligned_le32(feat_mask
, rsp
->data
);
2968 l2cap_send_cmd(conn
, cmd
->ident
,
2969 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2970 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2972 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2973 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2974 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2975 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2976 l2cap_send_cmd(conn
, cmd
->ident
,
2977 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2979 struct l2cap_info_rsp rsp
;
2980 rsp
.type
= cpu_to_le16(type
);
2981 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2982 l2cap_send_cmd(conn
, cmd
->ident
,
2983 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2989 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2991 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2994 type
= __le16_to_cpu(rsp
->type
);
2995 result
= __le16_to_cpu(rsp
->result
);
2997 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2999 del_timer(&conn
->info_timer
);
3001 if (type
== L2CAP_IT_FEAT_MASK
) {
3002 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3004 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3005 struct l2cap_info_req req
;
3006 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3008 conn
->info_ident
= l2cap_get_ident(conn
);
3010 l2cap_send_cmd(conn
, conn
->info_ident
,
3011 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3013 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3014 conn
->info_ident
= 0;
3016 l2cap_conn_start(conn
);
3018 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3019 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3020 conn
->info_ident
= 0;
3022 l2cap_conn_start(conn
);
3028 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3030 u8
*data
= skb
->data
;
3032 struct l2cap_cmd_hdr cmd
;
3035 l2cap_raw_recv(conn
, skb
);
3037 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3039 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3040 data
+= L2CAP_CMD_HDR_SIZE
;
3041 len
-= L2CAP_CMD_HDR_SIZE
;
3043 cmd_len
= le16_to_cpu(cmd
.len
);
3045 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3047 if (cmd_len
> len
|| !cmd
.ident
) {
3048 BT_DBG("corrupted command");
3053 case L2CAP_COMMAND_REJ
:
3054 l2cap_command_rej(conn
, &cmd
, data
);
3057 case L2CAP_CONN_REQ
:
3058 err
= l2cap_connect_req(conn
, &cmd
, data
);
3061 case L2CAP_CONN_RSP
:
3062 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3065 case L2CAP_CONF_REQ
:
3066 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3069 case L2CAP_CONF_RSP
:
3070 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3073 case L2CAP_DISCONN_REQ
:
3074 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3077 case L2CAP_DISCONN_RSP
:
3078 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3081 case L2CAP_ECHO_REQ
:
3082 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3085 case L2CAP_ECHO_RSP
:
3088 case L2CAP_INFO_REQ
:
3089 err
= l2cap_information_req(conn
, &cmd
, data
);
3092 case L2CAP_INFO_RSP
:
3093 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3097 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3103 struct l2cap_cmd_rej rej
;
3104 BT_DBG("error %d", err
);
3106 /* FIXME: Map err to a valid reason */
3107 rej
.reason
= cpu_to_le16(0);
3108 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3118 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3120 u16 our_fcs
, rcv_fcs
;
3121 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3123 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3124 skb_trim(skb
, skb
->len
- 2);
3125 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3126 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3128 if (our_fcs
!= rcv_fcs
)
3134 static void l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3136 struct sk_buff
*next_skb
;
3138 bt_cb(skb
)->tx_seq
= tx_seq
;
3139 bt_cb(skb
)->sar
= sar
;
3141 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3143 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3148 if (bt_cb(next_skb
)->tx_seq
> tx_seq
) {
3149 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3153 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3156 } while((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3158 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3161 static int l2cap_sar_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3163 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3164 struct sk_buff
*_skb
;
3167 switch (control
& L2CAP_CTRL_SAR
) {
3168 case L2CAP_SDU_UNSEGMENTED
:
3169 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3174 err
= sock_queue_rcv_skb(sk
, skb
);
3180 case L2CAP_SDU_START
:
3181 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3186 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3189 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3195 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3197 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3198 pi
->partial_sdu_len
= skb
->len
;
3202 case L2CAP_SDU_CONTINUE
:
3203 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3206 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3208 pi
->partial_sdu_len
+= skb
->len
;
3209 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3217 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3220 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3222 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3223 pi
->partial_sdu_len
+= skb
->len
;
3225 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3226 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3227 err
= sock_queue_rcv_skb(sk
, _skb
);
3241 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3243 struct sk_buff
*skb
;
3246 while((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3247 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3250 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3251 control
|= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3252 l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3253 l2cap_pi(sk
)->buffer_seq_srej
=
3254 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3259 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3261 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3262 struct srej_list
*l
, *tmp
;
3265 list_for_each_entry_safe(l
,tmp
, SREJ_LIST(sk
), list
) {
3266 if (l
->tx_seq
== tx_seq
) {
3271 control
= L2CAP_SUPER_SELECT_REJECT
;
3272 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3273 l2cap_send_sframe(pi
, control
);
3275 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3279 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3281 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3282 struct srej_list
*new;
3285 while (tx_seq
!= pi
->expected_tx_seq
) {
3286 control
= L2CAP_SUPER_SELECT_REJECT
;
3287 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3288 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
3289 control
|= L2CAP_CTRL_POLL
;
3290 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
3292 l2cap_send_sframe(pi
, control
);
3294 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3295 new->tx_seq
= pi
->expected_tx_seq
++;
3296 list_add_tail(&new->list
, SREJ_LIST(sk
));
3298 pi
->expected_tx_seq
++;
3301 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3303 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3304 u8 tx_seq
= __get_txseq(rx_control
);
3305 u8 req_seq
= __get_reqseq(rx_control
);
3307 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3310 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3312 pi
->expected_ack_seq
= req_seq
;
3313 l2cap_drop_acked_frames(sk
);
3315 if (tx_seq
== pi
->expected_tx_seq
)
3318 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3319 struct srej_list
*first
;
3321 first
= list_first_entry(SREJ_LIST(sk
),
3322 struct srej_list
, list
);
3323 if (tx_seq
== first
->tx_seq
) {
3324 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3325 l2cap_check_srej_gap(sk
, tx_seq
);
3327 list_del(&first
->list
);
3330 if (list_empty(SREJ_LIST(sk
))) {
3331 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3332 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3335 struct srej_list
*l
;
3336 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3338 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3339 if (l
->tx_seq
== tx_seq
) {
3340 l2cap_resend_srejframe(sk
, tx_seq
);
3344 l2cap_send_srejframe(sk
, tx_seq
);
3347 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3349 INIT_LIST_HEAD(SREJ_LIST(sk
));
3350 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3352 __skb_queue_head_init(SREJ_QUEUE(sk
));
3353 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3355 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3357 l2cap_send_srejframe(sk
, tx_seq
);
3362 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3364 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3365 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3369 if (rx_control
& L2CAP_CTRL_FINAL
) {
3370 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3371 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3373 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3374 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3375 l2cap_ertm_send(sk
);
3379 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3381 err
= l2cap_sar_reassembly_sdu(sk
, skb
, rx_control
);
3385 pi
->num_to_ack
= (pi
->num_to_ack
+ 1) % L2CAP_DEFAULT_NUM_TO_ACK
;
3386 if (pi
->num_to_ack
== L2CAP_DEFAULT_NUM_TO_ACK
- 1) {
3387 tx_control
|= L2CAP_SUPER_RCV_READY
;
3388 tx_control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3389 l2cap_send_sframe(pi
, tx_control
);
3394 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3396 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3397 u8 tx_seq
= __get_reqseq(rx_control
);
3399 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3401 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3402 case L2CAP_SUPER_RCV_READY
:
3403 if (rx_control
& L2CAP_CTRL_POLL
) {
3404 u16 control
= L2CAP_CTRL_FINAL
;
3405 control
|= L2CAP_SUPER_RCV_READY
|
3406 (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
);
3407 l2cap_send_sframe(l2cap_pi(sk
), control
);
3408 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3410 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3411 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3412 pi
->expected_ack_seq
= tx_seq
;
3413 l2cap_drop_acked_frames(sk
);
3415 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3416 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3418 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3419 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3420 l2cap_ertm_send(sk
);
3423 if (!(pi
->conn_state
& L2CAP_CONN_WAIT_F
))
3426 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3427 del_timer(&pi
->monitor_timer
);
3429 if (pi
->unacked_frames
> 0)
3430 __mod_retrans_timer();
3432 pi
->expected_ack_seq
= tx_seq
;
3433 l2cap_drop_acked_frames(sk
);
3435 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3436 (pi
->unacked_frames
> 0))
3437 __mod_retrans_timer();
3439 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3440 l2cap_ertm_send(sk
);
3444 case L2CAP_SUPER_REJECT
:
3445 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3447 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3448 l2cap_drop_acked_frames(sk
);
3450 if (rx_control
& L2CAP_CTRL_FINAL
) {
3451 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3452 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3454 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3455 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3456 l2cap_ertm_send(sk
);
3459 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3460 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3461 l2cap_ertm_send(sk
);
3463 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3464 pi
->srej_save_reqseq
= tx_seq
;
3465 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3471 case L2CAP_SUPER_SELECT_REJECT
:
3472 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3474 if (rx_control
& L2CAP_CTRL_POLL
) {
3475 pi
->expected_ack_seq
= tx_seq
;
3476 l2cap_drop_acked_frames(sk
);
3477 l2cap_retransmit_frame(sk
, tx_seq
);
3478 l2cap_ertm_send(sk
);
3479 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3480 pi
->srej_save_reqseq
= tx_seq
;
3481 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3483 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3484 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3485 pi
->srej_save_reqseq
== tx_seq
)
3486 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3488 l2cap_retransmit_frame(sk
, tx_seq
);
3491 l2cap_retransmit_frame(sk
, tx_seq
);
3492 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3493 pi
->srej_save_reqseq
= tx_seq
;
3494 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3499 case L2CAP_SUPER_RCV_NOT_READY
:
3500 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3501 pi
->expected_ack_seq
= tx_seq
;
3502 l2cap_drop_acked_frames(sk
);
3504 del_timer(&l2cap_pi(sk
)->retrans_timer
);
3505 if (rx_control
& L2CAP_CTRL_POLL
) {
3506 u16 control
= L2CAP_CTRL_FINAL
;
3507 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
3515 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3518 struct l2cap_pinfo
*pi
;
3523 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3525 BT_DBG("unknown cid 0x%4.4x", cid
);
3531 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3533 if (sk
->sk_state
!= BT_CONNECTED
)
3537 case L2CAP_MODE_BASIC
:
3538 /* If socket recv buffers overflows we drop data here
3539 * which is *bad* because L2CAP has to be reliable.
3540 * But we don't have any other choice. L2CAP doesn't
3541 * provide flow control mechanism. */
3543 if (pi
->imtu
< skb
->len
)
3546 if (!sock_queue_rcv_skb(sk
, skb
))
3550 case L2CAP_MODE_ERTM
:
3551 control
= get_unaligned_le16(skb
->data
);
3555 if (__is_sar_start(control
))
3558 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3562 * We can just drop the corrupted I-frame here.
3563 * Receiver will miss it and start proper recovery
3564 * procedures and ask retransmission.
3566 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
)
3569 if (l2cap_check_fcs(pi
, skb
))
3572 if (__is_iframe(control
))
3573 err
= l2cap_data_channel_iframe(sk
, control
, skb
);
3575 err
= l2cap_data_channel_sframe(sk
, control
, skb
);
3581 case L2CAP_MODE_STREAMING
:
3582 control
= get_unaligned_le16(skb
->data
);
3586 if (__is_sar_start(control
))
3589 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3592 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
|| __is_sframe(control
))
3595 if (l2cap_check_fcs(pi
, skb
))
3598 tx_seq
= __get_txseq(control
);
3600 if (pi
->expected_tx_seq
== tx_seq
)
3601 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3603 pi
->expected_tx_seq
= tx_seq
+ 1;
3605 err
= l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3610 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, l2cap_pi(sk
)->mode
);
3624 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3628 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3632 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3634 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3637 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3640 if (!sock_queue_rcv_skb(sk
, skb
))
3652 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3654 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3658 skb_pull(skb
, L2CAP_HDR_SIZE
);
3659 cid
= __le16_to_cpu(lh
->cid
);
3660 len
= __le16_to_cpu(lh
->len
);
3662 if (len
!= skb
->len
) {
3667 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3670 case L2CAP_CID_SIGNALING
:
3671 l2cap_sig_channel(conn
, skb
);
3674 case L2CAP_CID_CONN_LESS
:
3675 psm
= get_unaligned_le16(skb
->data
);
3677 l2cap_conless_channel(conn
, psm
, skb
);
3681 l2cap_data_channel(conn
, cid
, skb
);
3686 /* ---- L2CAP interface with lower layer (HCI) ---- */
3688 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3690 int exact
= 0, lm1
= 0, lm2
= 0;
3691 register struct sock
*sk
;
3692 struct hlist_node
*node
;
3694 if (type
!= ACL_LINK
)
3697 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3699 /* Find listening sockets and check their link_mode */
3700 read_lock(&l2cap_sk_list
.lock
);
3701 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3702 if (sk
->sk_state
!= BT_LISTEN
)
3705 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3706 lm1
|= HCI_LM_ACCEPT
;
3707 if (l2cap_pi(sk
)->role_switch
)
3708 lm1
|= HCI_LM_MASTER
;
3710 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3711 lm2
|= HCI_LM_ACCEPT
;
3712 if (l2cap_pi(sk
)->role_switch
)
3713 lm2
|= HCI_LM_MASTER
;
3716 read_unlock(&l2cap_sk_list
.lock
);
3718 return exact
? lm1
: lm2
;
3721 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3723 struct l2cap_conn
*conn
;
3725 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3727 if (hcon
->type
!= ACL_LINK
)
3731 conn
= l2cap_conn_add(hcon
, status
);
3733 l2cap_conn_ready(conn
);
3735 l2cap_conn_del(hcon
, bt_err(status
));
3740 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3742 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3744 BT_DBG("hcon %p", hcon
);
3746 if (hcon
->type
!= ACL_LINK
|| !conn
)
3749 return conn
->disc_reason
;
3752 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3754 BT_DBG("hcon %p reason %d", hcon
, reason
);
3756 if (hcon
->type
!= ACL_LINK
)
3759 l2cap_conn_del(hcon
, bt_err(reason
));
3764 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3766 if (sk
->sk_type
!= SOCK_SEQPACKET
)
3769 if (encrypt
== 0x00) {
3770 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3771 l2cap_sock_clear_timer(sk
);
3772 l2cap_sock_set_timer(sk
, HZ
* 5);
3773 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3774 __l2cap_sock_close(sk
, ECONNREFUSED
);
3776 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3777 l2cap_sock_clear_timer(sk
);
3781 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3783 struct l2cap_chan_list
*l
;
3784 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3790 l
= &conn
->chan_list
;
3792 BT_DBG("conn %p", conn
);
3794 read_lock(&l
->lock
);
3796 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
3799 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3804 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3805 sk
->sk_state
== BT_CONFIG
)) {
3806 l2cap_check_encryption(sk
, encrypt
);
3811 if (sk
->sk_state
== BT_CONNECT
) {
3813 struct l2cap_conn_req req
;
3814 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3815 req
.psm
= l2cap_pi(sk
)->psm
;
3817 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
3819 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3820 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3822 l2cap_sock_clear_timer(sk
);
3823 l2cap_sock_set_timer(sk
, HZ
/ 10);
3825 } else if (sk
->sk_state
== BT_CONNECT2
) {
3826 struct l2cap_conn_rsp rsp
;
3830 sk
->sk_state
= BT_CONFIG
;
3831 result
= L2CAP_CR_SUCCESS
;
3833 sk
->sk_state
= BT_DISCONN
;
3834 l2cap_sock_set_timer(sk
, HZ
/ 10);
3835 result
= L2CAP_CR_SEC_BLOCK
;
3838 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3839 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3840 rsp
.result
= cpu_to_le16(result
);
3841 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3842 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3843 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3849 read_unlock(&l
->lock
);
3854 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3856 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3858 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
3861 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3863 if (flags
& ACL_START
) {
3864 struct l2cap_hdr
*hdr
;
3868 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3869 kfree_skb(conn
->rx_skb
);
3870 conn
->rx_skb
= NULL
;
3872 l2cap_conn_unreliable(conn
, ECOMM
);
3876 BT_ERR("Frame is too short (len %d)", skb
->len
);
3877 l2cap_conn_unreliable(conn
, ECOMM
);
3881 hdr
= (struct l2cap_hdr
*) skb
->data
;
3882 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3884 if (len
== skb
->len
) {
3885 /* Complete frame received */
3886 l2cap_recv_frame(conn
, skb
);
3890 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
3892 if (skb
->len
> len
) {
3893 BT_ERR("Frame is too long (len %d, expected len %d)",
3895 l2cap_conn_unreliable(conn
, ECOMM
);
3899 /* Allocate skb for the complete frame (with header) */
3900 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3904 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3906 conn
->rx_len
= len
- skb
->len
;
3908 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
3910 if (!conn
->rx_len
) {
3911 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
3912 l2cap_conn_unreliable(conn
, ECOMM
);
3916 if (skb
->len
> conn
->rx_len
) {
3917 BT_ERR("Fragment is too long (len %d, expected %d)",
3918 skb
->len
, conn
->rx_len
);
3919 kfree_skb(conn
->rx_skb
);
3920 conn
->rx_skb
= NULL
;
3922 l2cap_conn_unreliable(conn
, ECOMM
);
3926 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3928 conn
->rx_len
-= skb
->len
;
3930 if (!conn
->rx_len
) {
3931 /* Complete frame received */
3932 l2cap_recv_frame(conn
, conn
->rx_skb
);
3933 conn
->rx_skb
= NULL
;
3942 static ssize_t
l2cap_sysfs_show(struct class *dev
, char *buf
)
3945 struct hlist_node
*node
;
3948 read_lock_bh(&l2cap_sk_list
.lock
);
3950 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3951 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3953 str
+= sprintf(str
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3954 batostr(&bt_sk(sk
)->src
), batostr(&bt_sk(sk
)->dst
),
3955 sk
->sk_state
, __le16_to_cpu(pi
->psm
), pi
->scid
,
3956 pi
->dcid
, pi
->imtu
, pi
->omtu
, pi
->sec_level
);
3959 read_unlock_bh(&l2cap_sk_list
.lock
);
/* Read-only class attribute backing /sys/class/bluetooth/l2cap. */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3966 static const struct proto_ops l2cap_sock_ops
= {
3967 .family
= PF_BLUETOOTH
,
3968 .owner
= THIS_MODULE
,
3969 .release
= l2cap_sock_release
,
3970 .bind
= l2cap_sock_bind
,
3971 .connect
= l2cap_sock_connect
,
3972 .listen
= l2cap_sock_listen
,
3973 .accept
= l2cap_sock_accept
,
3974 .getname
= l2cap_sock_getname
,
3975 .sendmsg
= l2cap_sock_sendmsg
,
3976 .recvmsg
= l2cap_sock_recvmsg
,
3977 .poll
= bt_sock_poll
,
3978 .ioctl
= bt_sock_ioctl
,
3979 .mmap
= sock_no_mmap
,
3980 .socketpair
= sock_no_socketpair
,
3981 .shutdown
= l2cap_sock_shutdown
,
3982 .setsockopt
= l2cap_sock_setsockopt
,
3983 .getsockopt
= l2cap_sock_getsockopt
3986 static const struct net_proto_family l2cap_sock_family_ops
= {
3987 .family
= PF_BLUETOOTH
,
3988 .owner
= THIS_MODULE
,
3989 .create
= l2cap_sock_create
,
3992 static struct hci_proto l2cap_hci_proto
= {
3994 .id
= HCI_PROTO_L2CAP
,
3995 .connect_ind
= l2cap_connect_ind
,
3996 .connect_cfm
= l2cap_connect_cfm
,
3997 .disconn_ind
= l2cap_disconn_ind
,
3998 .disconn_cfm
= l2cap_disconn_cfm
,
3999 .security_cfm
= l2cap_security_cfm
,
4000 .recv_acldata
= l2cap_recv_acldata
4003 static int __init
l2cap_init(void)
4007 err
= proto_register(&l2cap_proto
, 0);
4011 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4013 BT_ERR("L2CAP socket registration failed");
4017 err
= hci_register_proto(&l2cap_hci_proto
);
4019 BT_ERR("L2CAP protocol registration failed");
4020 bt_sock_unregister(BTPROTO_L2CAP
);
4024 if (class_create_file(bt_class
, &class_attr_l2cap
) < 0)
4025 BT_ERR("Failed to create L2CAP info file");
4027 BT_INFO("L2CAP ver %s", VERSION
);
4028 BT_INFO("L2CAP socket layer initialized");
4033 proto_unregister(&l2cap_proto
);
4037 static void __exit
l2cap_exit(void)
4039 class_remove_file(bt_class
, &class_attr_l2cap
);
4041 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4042 BT_ERR("L2CAP socket unregistration failed");
4044 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4045 BT_ERR("L2CAP protocol unregistration failed");
4047 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4059 module_init(l2cap_init
);
4060 module_exit(l2cap_exit
);
4062 module_param(enable_ertm
, bool, 0644);
4063 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4065 module_param(max_transmit
, uint
, 0644);
4066 MODULE_PARM_DESC(max_transmit
, "Max transmit value (default = 3)");
4068 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4069 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4070 MODULE_VERSION(VERSION
);
4071 MODULE_LICENSE("GPL");
4072 MODULE_ALIAS("bt-proto-0");