2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { 0x02, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
78 /* ---- L2CAP channels ---- */
80 static inline void chan_hold(struct l2cap_chan
*c
)
82 atomic_inc(&c
->refcnt
);
85 static inline void chan_put(struct l2cap_chan
*c
)
87 if (atomic_dec_and_test(&c
->refcnt
))
91 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
95 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
105 struct l2cap_chan
*c
;
107 list_for_each_entry(c
, &conn
->chan_l
, list
) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
118 struct l2cap_chan
*c
;
120 read_lock(&conn
->chan_lock
);
121 c
= __l2cap_get_chan_by_scid(conn
, cid
);
124 read_unlock(&conn
->chan_lock
);
128 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
130 struct l2cap_chan
*c
;
132 list_for_each_entry(c
, &conn
->chan_l
, list
) {
133 if (c
->ident
== ident
)
139 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
141 struct l2cap_chan
*c
;
143 read_lock(&conn
->chan_lock
);
144 c
= __l2cap_get_chan_by_ident(conn
, ident
);
147 read_unlock(&conn
->chan_lock
);
151 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
153 struct l2cap_chan
*c
;
155 list_for_each_entry(c
, &chan_list
, global_l
) {
156 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
165 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
169 write_lock_bh(&chan_list_lock
);
171 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
184 for (p
= 0x1001; p
< 0x1100; p
+= 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
186 chan
->psm
= cpu_to_le16(p
);
187 chan
->sport
= cpu_to_le16(p
);
194 write_unlock_bh(&chan_list_lock
);
198 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
200 write_lock_bh(&chan_list_lock
);
204 write_unlock_bh(&chan_list_lock
);
209 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
211 u16 cid
= L2CAP_CID_DYN_START
;
213 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
214 if (!__l2cap_get_chan_by_scid(conn
, cid
))
221 static void l2cap_set_timer(struct l2cap_chan
*chan
, struct timer_list
*timer
, long timeout
)
223 BT_DBG("chan %p state %d timeout %ld", chan
->sk
, chan
->state
, timeout
);
225 if (!mod_timer(timer
, jiffies
+ msecs_to_jiffies(timeout
)))
229 static void l2cap_clear_timer(struct l2cap_chan
*chan
, struct timer_list
*timer
)
231 BT_DBG("chan %p state %d", chan
, chan
->state
);
233 if (timer_pending(timer
) && del_timer(timer
))
237 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
240 chan
->ops
->state_change(chan
->data
, state
);
/* Channel watchdog, run in timer (softirq) context; @arg is the channel.
 * NOTE(review): locking and the default timeout reason were restored from
 * context — verify against upstream l2cap_core.c. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	chan->ops->close(chan->data);

	/* Drop the reference taken when the timer was armed. */
	chan_put(chan);
}
277 struct l2cap_chan
*l2cap_chan_create(struct sock
*sk
)
279 struct l2cap_chan
*chan
;
281 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
287 write_lock_bh(&chan_list_lock
);
288 list_add(&chan
->global_l
, &chan_list
);
289 write_unlock_bh(&chan_list_lock
);
291 setup_timer(&chan
->chan_timer
, l2cap_chan_timeout
, (unsigned long) chan
);
293 chan
->state
= BT_OPEN
;
295 atomic_set(&chan
->refcnt
, 1);
300 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
302 write_lock_bh(&chan_list_lock
);
303 list_del(&chan
->global_l
);
304 write_unlock_bh(&chan_list_lock
);
309 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
312 chan
->psm
, chan
->dcid
);
314 conn
->disc_reason
= 0x13;
318 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
319 if (conn
->hcon
->type
== LE_LINK
) {
321 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
322 chan
->scid
= L2CAP_CID_LE_DATA
;
323 chan
->dcid
= L2CAP_CID_LE_DATA
;
325 /* Alloc CID for connection-oriented socket */
326 chan
->scid
= l2cap_alloc_cid(conn
);
327 chan
->omtu
= L2CAP_DEFAULT_MTU
;
329 } else if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
330 /* Connectionless socket */
331 chan
->scid
= L2CAP_CID_CONN_LESS
;
332 chan
->dcid
= L2CAP_CID_CONN_LESS
;
333 chan
->omtu
= L2CAP_DEFAULT_MTU
;
335 /* Raw socket can send/recv signalling messages only */
336 chan
->scid
= L2CAP_CID_SIGNALING
;
337 chan
->dcid
= L2CAP_CID_SIGNALING
;
338 chan
->omtu
= L2CAP_DEFAULT_MTU
;
343 list_add(&chan
->list
, &conn
->chan_l
);
347 * Must be called on the locked socket. */
348 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
350 struct sock
*sk
= chan
->sk
;
351 struct l2cap_conn
*conn
= chan
->conn
;
352 struct sock
*parent
= bt_sk(sk
)->parent
;
354 __clear_chan_timer(chan
);
356 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
359 /* Delete from channel list */
360 write_lock_bh(&conn
->chan_lock
);
361 list_del(&chan
->list
);
362 write_unlock_bh(&conn
->chan_lock
);
366 hci_conn_put(conn
->hcon
);
369 l2cap_state_change(chan
, BT_CLOSED
);
370 sock_set_flag(sk
, SOCK_ZAPPED
);
376 bt_accept_unlink(sk
);
377 parent
->sk_data_ready(parent
, 0);
379 sk
->sk_state_change(sk
);
381 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
382 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
385 skb_queue_purge(&chan
->tx_q
);
387 if (chan
->mode
== L2CAP_MODE_ERTM
) {
388 struct srej_list
*l
, *tmp
;
390 __clear_retrans_timer(chan
);
391 __clear_monitor_timer(chan
);
392 __clear_ack_timer(chan
);
394 skb_queue_purge(&chan
->srej_q
);
396 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
403 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
407 BT_DBG("parent %p", parent
);
409 /* Close not yet accepted channels */
410 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
411 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
412 __clear_chan_timer(chan
);
414 l2cap_chan_close(chan
, ECONNRESET
);
416 chan
->ops
->close(chan
->data
);
420 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
422 struct l2cap_conn
*conn
= chan
->conn
;
423 struct sock
*sk
= chan
->sk
;
425 BT_DBG("chan %p state %d socket %p", chan
, chan
->state
, sk
->sk_socket
);
427 switch (chan
->state
) {
429 l2cap_chan_cleanup_listen(sk
);
431 l2cap_state_change(chan
, BT_CLOSED
);
432 sock_set_flag(sk
, SOCK_ZAPPED
);
437 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
438 conn
->hcon
->type
== ACL_LINK
) {
439 __clear_chan_timer(chan
);
440 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
441 l2cap_send_disconn_req(conn
, chan
, reason
);
443 l2cap_chan_del(chan
, reason
);
447 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
448 conn
->hcon
->type
== ACL_LINK
) {
449 struct l2cap_conn_rsp rsp
;
452 if (bt_sk(sk
)->defer_setup
)
453 result
= L2CAP_CR_SEC_BLOCK
;
455 result
= L2CAP_CR_BAD_PSM
;
456 l2cap_state_change(chan
, BT_DISCONN
);
458 rsp
.scid
= cpu_to_le16(chan
->dcid
);
459 rsp
.dcid
= cpu_to_le16(chan
->scid
);
460 rsp
.result
= cpu_to_le16(result
);
461 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
462 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
466 l2cap_chan_del(chan
, reason
);
471 l2cap_chan_del(chan
, reason
);
475 sock_set_flag(sk
, SOCK_ZAPPED
);
480 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
482 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
483 switch (chan
->sec_level
) {
484 case BT_SECURITY_HIGH
:
485 return HCI_AT_DEDICATED_BONDING_MITM
;
486 case BT_SECURITY_MEDIUM
:
487 return HCI_AT_DEDICATED_BONDING
;
489 return HCI_AT_NO_BONDING
;
491 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
492 if (chan
->sec_level
== BT_SECURITY_LOW
)
493 chan
->sec_level
= BT_SECURITY_SDP
;
495 if (chan
->sec_level
== BT_SECURITY_HIGH
)
496 return HCI_AT_NO_BONDING_MITM
;
498 return HCI_AT_NO_BONDING
;
500 switch (chan
->sec_level
) {
501 case BT_SECURITY_HIGH
:
502 return HCI_AT_GENERAL_BONDING_MITM
;
503 case BT_SECURITY_MEDIUM
:
504 return HCI_AT_GENERAL_BONDING
;
506 return HCI_AT_NO_BONDING
;
511 /* Service level security */
512 static inline int l2cap_check_security(struct l2cap_chan
*chan
)
514 struct l2cap_conn
*conn
= chan
->conn
;
517 auth_type
= l2cap_get_auth_type(chan
);
519 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
522 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
526 /* Get next available identificator.
527 * 1 - 128 are used by kernel.
528 * 129 - 199 are reserved.
529 * 200 - 254 are used by utilities like l2ping, etc.
532 spin_lock_bh(&conn
->lock
);
534 if (++conn
->tx_ident
> 128)
539 spin_unlock_bh(&conn
->lock
);
544 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
546 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
549 BT_DBG("code 0x%2.2x", code
);
554 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
555 flags
= ACL_START_NO_FLUSH
;
559 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
561 hci_send_acl(conn
->hcon
, skb
, flags
);
564 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u16 control
)
567 struct l2cap_hdr
*lh
;
568 struct l2cap_conn
*conn
= chan
->conn
;
572 if (chan
->state
!= BT_CONNECTED
)
575 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
576 hlen
= L2CAP_EXT_HDR_SIZE
;
578 hlen
= L2CAP_ENH_HDR_SIZE
;
580 if (chan
->fcs
== L2CAP_FCS_CRC16
)
583 BT_DBG("chan %p, control 0x%2.2x", chan
, control
);
585 count
= min_t(unsigned int, conn
->mtu
, hlen
);
587 control
|= __set_sframe(chan
);
589 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
590 control
|= __set_ctrl_final(chan
);
592 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
593 control
|= __set_ctrl_poll(chan
);
595 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
599 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
600 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
601 lh
->cid
= cpu_to_le16(chan
->dcid
);
602 put_unaligned_le16(control
, skb_put(skb
, 2));
604 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
605 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
606 put_unaligned_le16(fcs
, skb_put(skb
, 2));
609 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
610 flags
= ACL_START_NO_FLUSH
;
614 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
616 hci_send_acl(chan
->conn
->hcon
, skb
, flags
);
619 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u16 control
)
621 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
622 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
623 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
625 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
627 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
629 l2cap_send_sframe(chan
, control
);
632 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
634 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
637 static void l2cap_do_start(struct l2cap_chan
*chan
)
639 struct l2cap_conn
*conn
= chan
->conn
;
641 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
642 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
645 if (l2cap_check_security(chan
) &&
646 __l2cap_no_conn_pending(chan
)) {
647 struct l2cap_conn_req req
;
648 req
.scid
= cpu_to_le16(chan
->scid
);
651 chan
->ident
= l2cap_get_ident(conn
);
652 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
654 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
658 struct l2cap_info_req req
;
659 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
661 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
662 conn
->info_ident
= l2cap_get_ident(conn
);
664 mod_timer(&conn
->info_timer
, jiffies
+
665 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
667 l2cap_send_cmd(conn
, conn
->info_ident
,
668 L2CAP_INFO_REQ
, sizeof(req
), &req
);
672 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
674 u32 local_feat_mask
= l2cap_feat_mask
;
676 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
679 case L2CAP_MODE_ERTM
:
680 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
681 case L2CAP_MODE_STREAMING
:
682 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
688 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
691 struct l2cap_disconn_req req
;
698 if (chan
->mode
== L2CAP_MODE_ERTM
) {
699 __clear_retrans_timer(chan
);
700 __clear_monitor_timer(chan
);
701 __clear_ack_timer(chan
);
704 req
.dcid
= cpu_to_le16(chan
->dcid
);
705 req
.scid
= cpu_to_le16(chan
->scid
);
706 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
707 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
709 l2cap_state_change(chan
, BT_DISCONN
);
713 /* ---- L2CAP connections ---- */
714 static void l2cap_conn_start(struct l2cap_conn
*conn
)
716 struct l2cap_chan
*chan
, *tmp
;
718 BT_DBG("conn %p", conn
);
720 read_lock(&conn
->chan_lock
);
722 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
723 struct sock
*sk
= chan
->sk
;
727 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
732 if (chan
->state
== BT_CONNECT
) {
733 struct l2cap_conn_req req
;
735 if (!l2cap_check_security(chan
) ||
736 !__l2cap_no_conn_pending(chan
)) {
741 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
742 && test_bit(CONF_STATE2_DEVICE
,
743 &chan
->conf_state
)) {
744 /* l2cap_chan_close() calls list_del(chan)
745 * so release the lock */
746 read_unlock(&conn
->chan_lock
);
747 l2cap_chan_close(chan
, ECONNRESET
);
748 read_lock(&conn
->chan_lock
);
753 req
.scid
= cpu_to_le16(chan
->scid
);
756 chan
->ident
= l2cap_get_ident(conn
);
757 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
759 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
762 } else if (chan
->state
== BT_CONNECT2
) {
763 struct l2cap_conn_rsp rsp
;
765 rsp
.scid
= cpu_to_le16(chan
->dcid
);
766 rsp
.dcid
= cpu_to_le16(chan
->scid
);
768 if (l2cap_check_security(chan
)) {
769 if (bt_sk(sk
)->defer_setup
) {
770 struct sock
*parent
= bt_sk(sk
)->parent
;
771 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
772 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
774 parent
->sk_data_ready(parent
, 0);
777 l2cap_state_change(chan
, BT_CONFIG
);
778 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
779 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
782 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
783 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
786 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
789 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
790 rsp
.result
!= L2CAP_CR_SUCCESS
) {
795 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
796 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
797 l2cap_build_conf_req(chan
, buf
), buf
);
798 chan
->num_conf_req
++;
804 read_unlock(&conn
->chan_lock
);
807 /* Find socket with cid and source bdaddr.
808 * Returns closest match, locked.
810 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
812 struct l2cap_chan
*c
, *c1
= NULL
;
814 read_lock(&chan_list_lock
);
816 list_for_each_entry(c
, &chan_list
, global_l
) {
817 struct sock
*sk
= c
->sk
;
819 if (state
&& c
->state
!= state
)
822 if (c
->scid
== cid
) {
824 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
825 read_unlock(&chan_list_lock
);
830 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
835 read_unlock(&chan_list_lock
);
840 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
842 struct sock
*parent
, *sk
;
843 struct l2cap_chan
*chan
, *pchan
;
847 /* Check if we have socket listening on cid */
848 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
855 bh_lock_sock(parent
);
857 /* Check for backlog size */
858 if (sk_acceptq_is_full(parent
)) {
859 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
863 chan
= pchan
->ops
->new_connection(pchan
->data
);
869 write_lock_bh(&conn
->chan_lock
);
871 hci_conn_hold(conn
->hcon
);
873 bacpy(&bt_sk(sk
)->src
, conn
->src
);
874 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
876 bt_accept_enqueue(parent
, sk
);
878 __l2cap_chan_add(conn
, chan
);
880 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
882 l2cap_state_change(chan
, BT_CONNECTED
);
883 parent
->sk_data_ready(parent
, 0);
885 write_unlock_bh(&conn
->chan_lock
);
888 bh_unlock_sock(parent
);
891 static void l2cap_chan_ready(struct sock
*sk
)
893 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
894 struct sock
*parent
= bt_sk(sk
)->parent
;
896 BT_DBG("sk %p, parent %p", sk
, parent
);
898 chan
->conf_state
= 0;
899 __clear_chan_timer(chan
);
901 l2cap_state_change(chan
, BT_CONNECTED
);
902 sk
->sk_state_change(sk
);
905 parent
->sk_data_ready(parent
, 0);
908 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
910 struct l2cap_chan
*chan
;
912 BT_DBG("conn %p", conn
);
914 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
915 l2cap_le_conn_ready(conn
);
917 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
918 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
920 read_lock(&conn
->chan_lock
);
922 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
923 struct sock
*sk
= chan
->sk
;
927 if (conn
->hcon
->type
== LE_LINK
) {
928 if (smp_conn_security(conn
, chan
->sec_level
))
929 l2cap_chan_ready(sk
);
931 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
932 __clear_chan_timer(chan
);
933 l2cap_state_change(chan
, BT_CONNECTED
);
934 sk
->sk_state_change(sk
);
936 } else if (chan
->state
== BT_CONNECT
)
937 l2cap_do_start(chan
);
942 read_unlock(&conn
->chan_lock
);
945 /* Notify sockets that we cannot guaranty reliability anymore */
946 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
948 struct l2cap_chan
*chan
;
950 BT_DBG("conn %p", conn
);
952 read_lock(&conn
->chan_lock
);
954 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
955 struct sock
*sk
= chan
->sk
;
957 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
961 read_unlock(&conn
->chan_lock
);
964 static void l2cap_info_timeout(unsigned long arg
)
966 struct l2cap_conn
*conn
= (void *) arg
;
968 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
969 conn
->info_ident
= 0;
971 l2cap_conn_start(conn
);
974 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
976 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
977 struct l2cap_chan
*chan
, *l
;
983 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
985 kfree_skb(conn
->rx_skb
);
988 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
991 l2cap_chan_del(chan
, err
);
993 chan
->ops
->close(chan
->data
);
996 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
997 del_timer_sync(&conn
->info_timer
);
999 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->pend
)) {
1000 del_timer(&conn
->security_timer
);
1001 smp_chan_destroy(conn
);
1004 hcon
->l2cap_data
= NULL
;
1008 static void security_timeout(unsigned long arg
)
1010 struct l2cap_conn
*conn
= (void *) arg
;
1012 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1015 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1017 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1022 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1026 hcon
->l2cap_data
= conn
;
1029 BT_DBG("hcon %p conn %p", hcon
, conn
);
1031 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1032 conn
->mtu
= hcon
->hdev
->le_mtu
;
1034 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1036 conn
->src
= &hcon
->hdev
->bdaddr
;
1037 conn
->dst
= &hcon
->dst
;
1039 conn
->feat_mask
= 0;
1041 spin_lock_init(&conn
->lock
);
1042 rwlock_init(&conn
->chan_lock
);
1044 INIT_LIST_HEAD(&conn
->chan_l
);
1046 if (hcon
->type
== LE_LINK
)
1047 setup_timer(&conn
->security_timer
, security_timeout
,
1048 (unsigned long) conn
);
1050 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
1051 (unsigned long) conn
);
1053 conn
->disc_reason
= 0x13;
1058 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
1060 write_lock_bh(&conn
->chan_lock
);
1061 __l2cap_chan_add(conn
, chan
);
1062 write_unlock_bh(&conn
->chan_lock
);
1065 /* ---- Socket interface ---- */
1067 /* Find socket with psm and source bdaddr.
1068 * Returns closest match.
1070 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
1072 struct l2cap_chan
*c
, *c1
= NULL
;
1074 read_lock(&chan_list_lock
);
1076 list_for_each_entry(c
, &chan_list
, global_l
) {
1077 struct sock
*sk
= c
->sk
;
1079 if (state
&& c
->state
!= state
)
1082 if (c
->psm
== psm
) {
1084 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
1085 read_unlock(&chan_list_lock
);
1090 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
1095 read_unlock(&chan_list_lock
);
1100 int l2cap_chan_connect(struct l2cap_chan
*chan
)
1102 struct sock
*sk
= chan
->sk
;
1103 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1104 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1105 struct l2cap_conn
*conn
;
1106 struct hci_conn
*hcon
;
1107 struct hci_dev
*hdev
;
1111 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1114 hdev
= hci_get_route(dst
, src
);
1116 return -EHOSTUNREACH
;
1118 hci_dev_lock_bh(hdev
);
1120 auth_type
= l2cap_get_auth_type(chan
);
1122 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1123 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
1124 chan
->sec_level
, auth_type
);
1126 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1127 chan
->sec_level
, auth_type
);
1130 err
= PTR_ERR(hcon
);
1134 conn
= l2cap_conn_add(hcon
, 0);
1141 /* Update source addr of the socket */
1142 bacpy(src
, conn
->src
);
1144 l2cap_chan_add(conn
, chan
);
1146 l2cap_state_change(chan
, BT_CONNECT
);
1147 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1149 if (hcon
->state
== BT_CONNECTED
) {
1150 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1151 __clear_chan_timer(chan
);
1152 if (l2cap_check_security(chan
))
1153 l2cap_state_change(chan
, BT_CONNECTED
);
1155 l2cap_do_start(chan
);
1161 hci_dev_unlock_bh(hdev
);
1166 int __l2cap_wait_ack(struct sock
*sk
)
1168 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1169 DECLARE_WAITQUEUE(wait
, current
);
1173 add_wait_queue(sk_sleep(sk
), &wait
);
1174 set_current_state(TASK_INTERRUPTIBLE
);
1175 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1179 if (signal_pending(current
)) {
1180 err
= sock_intr_errno(timeo
);
1185 timeo
= schedule_timeout(timeo
);
1187 set_current_state(TASK_INTERRUPTIBLE
);
1189 err
= sock_error(sk
);
1193 set_current_state(TASK_RUNNING
);
1194 remove_wait_queue(sk_sleep(sk
), &wait
);
1198 static void l2cap_monitor_timeout(unsigned long arg
)
1200 struct l2cap_chan
*chan
= (void *) arg
;
1201 struct sock
*sk
= chan
->sk
;
1203 BT_DBG("chan %p", chan
);
1206 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1207 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1212 chan
->retry_count
++;
1213 __set_monitor_timer(chan
);
1215 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1219 static void l2cap_retrans_timeout(unsigned long arg
)
1221 struct l2cap_chan
*chan
= (void *) arg
;
1222 struct sock
*sk
= chan
->sk
;
1224 BT_DBG("chan %p", chan
);
1227 chan
->retry_count
= 1;
1228 __set_monitor_timer(chan
);
1230 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1232 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1236 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1238 struct sk_buff
*skb
;
1240 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1241 chan
->unacked_frames
) {
1242 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1245 skb
= skb_dequeue(&chan
->tx_q
);
1248 chan
->unacked_frames
--;
1251 if (!chan
->unacked_frames
)
1252 __clear_retrans_timer(chan
);
1255 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
1257 struct hci_conn
*hcon
= chan
->conn
->hcon
;
1260 BT_DBG("chan %p, skb %p len %d", chan
, skb
, skb
->len
);
1262 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
1263 lmp_no_flush_capable(hcon
->hdev
))
1264 flags
= ACL_START_NO_FLUSH
;
1268 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
1269 hci_send_acl(hcon
, skb
, flags
);
1272 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1274 struct sk_buff
*skb
;
1277 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1278 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1279 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1280 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1282 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1283 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1284 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1287 l2cap_do_send(chan
, skb
);
1289 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1293 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1295 struct sk_buff
*skb
, *tx_skb
;
1298 skb
= skb_peek(&chan
->tx_q
);
1303 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1306 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1309 } while ((skb
= skb_queue_next(&chan
->tx_q
, skb
)));
1311 if (chan
->remote_max_tx
&&
1312 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1313 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1317 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1318 bt_cb(skb
)->retries
++;
1319 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1320 control
&= __get_sar_mask(chan
);
1322 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1323 control
|= __set_ctrl_final(chan
);
1325 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1326 control
|= __set_txseq(chan
, tx_seq
);
1328 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1330 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1331 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1332 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1335 l2cap_do_send(chan
, tx_skb
);
1338 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1340 struct sk_buff
*skb
, *tx_skb
;
1344 if (chan
->state
!= BT_CONNECTED
)
1347 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1349 if (chan
->remote_max_tx
&&
1350 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1351 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1355 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1357 bt_cb(skb
)->retries
++;
1359 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1360 control
&= __get_sar_mask(chan
);
1362 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1363 control
|= __set_ctrl_final(chan
);
1365 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1366 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1367 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1370 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1371 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1372 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1375 l2cap_do_send(chan
, tx_skb
);
1377 __set_retrans_timer(chan
);
1379 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1380 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1382 if (bt_cb(skb
)->retries
== 1)
1383 chan
->unacked_frames
++;
1385 chan
->frames_sent
++;
1387 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1388 chan
->tx_send_head
= NULL
;
1390 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1398 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1402 if (!skb_queue_empty(&chan
->tx_q
))
1403 chan
->tx_send_head
= chan
->tx_q
.next
;
1405 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1406 ret
= l2cap_ertm_send(chan
);
1410 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1414 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1416 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1417 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1418 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1419 l2cap_send_sframe(chan
, control
);
1423 if (l2cap_ertm_send(chan
) > 0)
1426 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1427 l2cap_send_sframe(chan
, control
);
1430 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1432 struct srej_list
*tail
;
1435 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1436 control
|= __set_ctrl_final(chan
);
1438 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1439 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1441 l2cap_send_sframe(chan
, control
);
1444 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1446 struct l2cap_conn
*conn
= l2cap_pi(sk
)->chan
->conn
;
1447 struct sk_buff
**frag
;
1450 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1456 /* Continuation fragments (no L2CAP header) */
1457 frag
= &skb_shinfo(skb
)->frag_list
;
1459 count
= min_t(unsigned int, conn
->mtu
, len
);
1461 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1464 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1470 frag
= &(*frag
)->next
;
1476 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1478 struct sock
*sk
= chan
->sk
;
1479 struct l2cap_conn
*conn
= chan
->conn
;
1480 struct sk_buff
*skb
;
1481 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1482 struct l2cap_hdr
*lh
;
1484 BT_DBG("sk %p len %d", sk
, (int)len
);
1486 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1487 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1488 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1490 return ERR_PTR(err
);
1492 /* Create L2CAP header */
1493 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1494 lh
->cid
= cpu_to_le16(chan
->dcid
);
1495 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1496 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1498 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1499 if (unlikely(err
< 0)) {
1501 return ERR_PTR(err
);
1506 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1508 struct sock
*sk
= chan
->sk
;
1509 struct l2cap_conn
*conn
= chan
->conn
;
1510 struct sk_buff
*skb
;
1511 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1512 struct l2cap_hdr
*lh
;
1514 BT_DBG("sk %p len %d", sk
, (int)len
);
1516 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1517 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1518 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1520 return ERR_PTR(err
);
1522 /* Create L2CAP header */
1523 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1524 lh
->cid
= cpu_to_le16(chan
->dcid
);
1525 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1527 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1528 if (unlikely(err
< 0)) {
1530 return ERR_PTR(err
);
1535 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1536 struct msghdr
*msg
, size_t len
,
1537 u16 control
, u16 sdulen
)
1539 struct sock
*sk
= chan
->sk
;
1540 struct l2cap_conn
*conn
= chan
->conn
;
1541 struct sk_buff
*skb
;
1542 int err
, count
, hlen
;
1543 struct l2cap_hdr
*lh
;
1545 BT_DBG("sk %p len %d", sk
, (int)len
);
1548 return ERR_PTR(-ENOTCONN
);
1550 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1551 hlen
= L2CAP_EXT_HDR_SIZE
;
1553 hlen
= L2CAP_ENH_HDR_SIZE
;
1558 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1561 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1562 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1563 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1565 return ERR_PTR(err
);
1567 /* Create L2CAP header */
1568 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1569 lh
->cid
= cpu_to_le16(chan
->dcid
);
1570 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1571 put_unaligned_le16(control
, skb_put(skb
, 2));
1573 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1575 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1576 if (unlikely(err
< 0)) {
1578 return ERR_PTR(err
);
1581 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1582 put_unaligned_le16(0, skb_put(skb
, 2));
1584 bt_cb(skb
)->retries
= 0;
1588 static int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1590 struct sk_buff
*skb
;
1591 struct sk_buff_head sar_queue
;
1595 skb_queue_head_init(&sar_queue
);
1596 control
= __set_ctrl_sar(chan
, L2CAP_SAR_START
);
1597 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1599 return PTR_ERR(skb
);
1601 __skb_queue_tail(&sar_queue
, skb
);
1602 len
-= chan
->remote_mps
;
1603 size
+= chan
->remote_mps
;
1608 if (len
> chan
->remote_mps
) {
1609 control
= __set_ctrl_sar(chan
, L2CAP_SAR_CONTINUE
);
1610 buflen
= chan
->remote_mps
;
1612 control
= __set_ctrl_sar(chan
, L2CAP_SAR_END
);
1616 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1618 skb_queue_purge(&sar_queue
);
1619 return PTR_ERR(skb
);
1622 __skb_queue_tail(&sar_queue
, skb
);
1626 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1627 if (chan
->tx_send_head
== NULL
)
1628 chan
->tx_send_head
= sar_queue
.next
;
1633 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1635 struct sk_buff
*skb
;
1639 /* Connectionless channel */
1640 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
1641 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
1643 return PTR_ERR(skb
);
1645 l2cap_do_send(chan
, skb
);
1649 switch (chan
->mode
) {
1650 case L2CAP_MODE_BASIC
:
1651 /* Check outgoing MTU */
1652 if (len
> chan
->omtu
)
1655 /* Create a basic PDU */
1656 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
1658 return PTR_ERR(skb
);
1660 l2cap_do_send(chan
, skb
);
1664 case L2CAP_MODE_ERTM
:
1665 case L2CAP_MODE_STREAMING
:
1666 /* Entire SDU fits into one PDU */
1667 if (len
<= chan
->remote_mps
) {
1668 control
= __set_ctrl_sar(chan
, L2CAP_SAR_UNSEGMENTED
);
1669 skb
= l2cap_create_iframe_pdu(chan
, msg
, len
, control
,
1672 return PTR_ERR(skb
);
1674 __skb_queue_tail(&chan
->tx_q
, skb
);
1676 if (chan
->tx_send_head
== NULL
)
1677 chan
->tx_send_head
= skb
;
1680 /* Segment SDU into multiples PDUs */
1681 err
= l2cap_sar_segment_sdu(chan
, msg
, len
);
1686 if (chan
->mode
== L2CAP_MODE_STREAMING
) {
1687 l2cap_streaming_send(chan
);
1692 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
1693 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
1698 err
= l2cap_ertm_send(chan
);
1705 BT_DBG("bad state %1.1x", chan
->mode
);
1712 /* Copy frame to all raw sockets on that connection */
1713 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1715 struct sk_buff
*nskb
;
1716 struct l2cap_chan
*chan
;
1718 BT_DBG("conn %p", conn
);
1720 read_lock(&conn
->chan_lock
);
1721 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1722 struct sock
*sk
= chan
->sk
;
1723 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
1726 /* Don't send frame to the socket it came from */
1729 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1733 if (chan
->ops
->recv(chan
->data
, nskb
))
1736 read_unlock(&conn
->chan_lock
);
1739 /* ---- L2CAP signalling commands ---- */
1740 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1741 u8 code
, u8 ident
, u16 dlen
, void *data
)
1743 struct sk_buff
*skb
, **frag
;
1744 struct l2cap_cmd_hdr
*cmd
;
1745 struct l2cap_hdr
*lh
;
1748 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1749 conn
, code
, ident
, dlen
);
1751 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1752 count
= min_t(unsigned int, conn
->mtu
, len
);
1754 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1758 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1759 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1761 if (conn
->hcon
->type
== LE_LINK
)
1762 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1764 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1766 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1769 cmd
->len
= cpu_to_le16(dlen
);
1772 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1773 memcpy(skb_put(skb
, count
), data
, count
);
1779 /* Continuation fragments (no L2CAP header) */
1780 frag
= &skb_shinfo(skb
)->frag_list
;
1782 count
= min_t(unsigned int, conn
->mtu
, len
);
1784 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1788 memcpy(skb_put(*frag
, count
), data
, count
);
1793 frag
= &(*frag
)->next
;
1803 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1805 struct l2cap_conf_opt
*opt
= *ptr
;
1808 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1816 *val
= *((u8
*) opt
->val
);
1820 *val
= get_unaligned_le16(opt
->val
);
1824 *val
= get_unaligned_le32(opt
->val
);
1828 *val
= (unsigned long) opt
->val
;
1832 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1836 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1838 struct l2cap_conf_opt
*opt
= *ptr
;
1840 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1847 *((u8
*) opt
->val
) = val
;
1851 put_unaligned_le16(val
, opt
->val
);
1855 put_unaligned_le32(val
, opt
->val
);
1859 memcpy(opt
->val
, (void *) val
, len
);
1863 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1866 static void l2cap_ack_timeout(unsigned long arg
)
1868 struct l2cap_chan
*chan
= (void *) arg
;
1870 bh_lock_sock(chan
->sk
);
1871 l2cap_send_ack(chan
);
1872 bh_unlock_sock(chan
->sk
);
1875 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
1877 struct sock
*sk
= chan
->sk
;
1879 chan
->expected_ack_seq
= 0;
1880 chan
->unacked_frames
= 0;
1881 chan
->buffer_seq
= 0;
1882 chan
->num_acked
= 0;
1883 chan
->frames_sent
= 0;
1885 setup_timer(&chan
->retrans_timer
, l2cap_retrans_timeout
,
1886 (unsigned long) chan
);
1887 setup_timer(&chan
->monitor_timer
, l2cap_monitor_timeout
,
1888 (unsigned long) chan
);
1889 setup_timer(&chan
->ack_timer
, l2cap_ack_timeout
, (unsigned long) chan
);
1891 skb_queue_head_init(&chan
->srej_q
);
1893 INIT_LIST_HEAD(&chan
->srej_l
);
1896 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1899 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1902 case L2CAP_MODE_STREAMING
:
1903 case L2CAP_MODE_ERTM
:
1904 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1908 return L2CAP_MODE_BASIC
;
1912 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
1914 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
1917 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
1919 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
1920 __l2cap_ews_supported(chan
))
1921 /* use extended control field */
1922 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
1924 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
1925 L2CAP_DEFAULT_TX_WINDOW
);
1928 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
1930 struct l2cap_conf_req
*req
= data
;
1931 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
1932 void *ptr
= req
->data
;
1934 BT_DBG("chan %p", chan
);
1936 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
1939 switch (chan
->mode
) {
1940 case L2CAP_MODE_STREAMING
:
1941 case L2CAP_MODE_ERTM
:
1942 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
1947 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
1952 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
1953 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
1955 switch (chan
->mode
) {
1956 case L2CAP_MODE_BASIC
:
1957 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
1958 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
1961 rfc
.mode
= L2CAP_MODE_BASIC
;
1963 rfc
.max_transmit
= 0;
1964 rfc
.retrans_timeout
= 0;
1965 rfc
.monitor_timeout
= 0;
1966 rfc
.max_pdu_size
= 0;
1968 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1969 (unsigned long) &rfc
);
1972 case L2CAP_MODE_ERTM
:
1973 rfc
.mode
= L2CAP_MODE_ERTM
;
1974 rfc
.max_transmit
= chan
->max_tx
;
1975 rfc
.retrans_timeout
= 0;
1976 rfc
.monitor_timeout
= 0;
1977 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1978 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
1979 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1981 l2cap_txwin_setup(chan
);
1983 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
1984 L2CAP_DEFAULT_TX_WINDOW
);
1986 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1987 (unsigned long) &rfc
);
1989 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1992 if (chan
->fcs
== L2CAP_FCS_NONE
||
1993 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
1994 chan
->fcs
= L2CAP_FCS_NONE
;
1995 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
1998 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1999 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2003 case L2CAP_MODE_STREAMING
:
2004 rfc
.mode
= L2CAP_MODE_STREAMING
;
2006 rfc
.max_transmit
= 0;
2007 rfc
.retrans_timeout
= 0;
2008 rfc
.monitor_timeout
= 0;
2009 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2010 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
2011 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2013 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2014 (unsigned long) &rfc
);
2016 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2019 if (chan
->fcs
== L2CAP_FCS_NONE
||
2020 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2021 chan
->fcs
= L2CAP_FCS_NONE
;
2022 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2027 req
->dcid
= cpu_to_le16(chan
->dcid
);
2028 req
->flags
= cpu_to_le16(0);
2033 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2035 struct l2cap_conf_rsp
*rsp
= data
;
2036 void *ptr
= rsp
->data
;
2037 void *req
= chan
->conf_req
;
2038 int len
= chan
->conf_len
;
2039 int type
, hint
, olen
;
2041 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2042 u16 mtu
= L2CAP_DEFAULT_MTU
;
2043 u16 result
= L2CAP_CONF_SUCCESS
;
2045 BT_DBG("chan %p", chan
);
2047 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2048 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2050 hint
= type
& L2CAP_CONF_HINT
;
2051 type
&= L2CAP_CONF_MASK
;
2054 case L2CAP_CONF_MTU
:
2058 case L2CAP_CONF_FLUSH_TO
:
2059 chan
->flush_to
= val
;
2062 case L2CAP_CONF_QOS
:
2065 case L2CAP_CONF_RFC
:
2066 if (olen
== sizeof(rfc
))
2067 memcpy(&rfc
, (void *) val
, olen
);
2070 case L2CAP_CONF_FCS
:
2071 if (val
== L2CAP_FCS_NONE
)
2072 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2076 case L2CAP_CONF_EWS
:
2078 return -ECONNREFUSED
;
2080 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2081 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2082 chan
->remote_tx_win
= val
;
2089 result
= L2CAP_CONF_UNKNOWN
;
2090 *((u8
*) ptr
++) = type
;
2095 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2098 switch (chan
->mode
) {
2099 case L2CAP_MODE_STREAMING
:
2100 case L2CAP_MODE_ERTM
:
2101 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2102 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2103 chan
->conn
->feat_mask
);
2107 if (chan
->mode
!= rfc
.mode
)
2108 return -ECONNREFUSED
;
2114 if (chan
->mode
!= rfc
.mode
) {
2115 result
= L2CAP_CONF_UNACCEPT
;
2116 rfc
.mode
= chan
->mode
;
2118 if (chan
->num_conf_rsp
== 1)
2119 return -ECONNREFUSED
;
2121 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2122 sizeof(rfc
), (unsigned long) &rfc
);
2126 if (result
== L2CAP_CONF_SUCCESS
) {
2127 /* Configure output options and let the other side know
2128 * which ones we don't like. */
2130 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2131 result
= L2CAP_CONF_UNACCEPT
;
2134 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
2136 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2139 case L2CAP_MODE_BASIC
:
2140 chan
->fcs
= L2CAP_FCS_NONE
;
2141 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2144 case L2CAP_MODE_ERTM
:
2145 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
2146 chan
->remote_tx_win
= rfc
.txwin_size
;
2148 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2150 chan
->remote_max_tx
= rfc
.max_transmit
;
2152 if (le16_to_cpu(rfc
.max_pdu_size
) > chan
->conn
->mtu
- 10)
2153 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2155 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2157 rfc
.retrans_timeout
=
2158 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2159 rfc
.monitor_timeout
=
2160 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2162 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2164 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2165 sizeof(rfc
), (unsigned long) &rfc
);
2169 case L2CAP_MODE_STREAMING
:
2170 if (le16_to_cpu(rfc
.max_pdu_size
) > chan
->conn
->mtu
- 10)
2171 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2173 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2175 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2177 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2178 sizeof(rfc
), (unsigned long) &rfc
);
2183 result
= L2CAP_CONF_UNACCEPT
;
2185 memset(&rfc
, 0, sizeof(rfc
));
2186 rfc
.mode
= chan
->mode
;
2189 if (result
== L2CAP_CONF_SUCCESS
)
2190 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2192 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2193 rsp
->result
= cpu_to_le16(result
);
2194 rsp
->flags
= cpu_to_le16(0x0000);
2199 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2201 struct l2cap_conf_req
*req
= data
;
2202 void *ptr
= req
->data
;
2205 struct l2cap_conf_rfc rfc
;
2207 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2209 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2210 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2213 case L2CAP_CONF_MTU
:
2214 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2215 *result
= L2CAP_CONF_UNACCEPT
;
2216 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2219 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2222 case L2CAP_CONF_FLUSH_TO
:
2223 chan
->flush_to
= val
;
2224 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2228 case L2CAP_CONF_RFC
:
2229 if (olen
== sizeof(rfc
))
2230 memcpy(&rfc
, (void *)val
, olen
);
2232 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2233 rfc
.mode
!= chan
->mode
)
2234 return -ECONNREFUSED
;
2238 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2239 sizeof(rfc
), (unsigned long) &rfc
);
2242 case L2CAP_CONF_EWS
:
2243 chan
->tx_win
= min_t(u16
, val
,
2244 L2CAP_DEFAULT_EXT_WINDOW
);
2245 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
,
2251 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2252 return -ECONNREFUSED
;
2254 chan
->mode
= rfc
.mode
;
2256 if (*result
== L2CAP_CONF_SUCCESS
) {
2258 case L2CAP_MODE_ERTM
:
2259 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2260 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2261 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2263 case L2CAP_MODE_STREAMING
:
2264 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2268 req
->dcid
= cpu_to_le16(chan
->dcid
);
2269 req
->flags
= cpu_to_le16(0x0000);
2274 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2276 struct l2cap_conf_rsp
*rsp
= data
;
2277 void *ptr
= rsp
->data
;
2279 BT_DBG("chan %p", chan
);
2281 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2282 rsp
->result
= cpu_to_le16(result
);
2283 rsp
->flags
= cpu_to_le16(flags
);
2288 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2290 struct l2cap_conn_rsp rsp
;
2291 struct l2cap_conn
*conn
= chan
->conn
;
2294 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2295 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2296 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2297 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2298 l2cap_send_cmd(conn
, chan
->ident
,
2299 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2301 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2304 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2305 l2cap_build_conf_req(chan
, buf
), buf
);
2306 chan
->num_conf_req
++;
2309 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2313 struct l2cap_conf_rfc rfc
;
2315 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2317 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2320 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2321 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2324 case L2CAP_CONF_RFC
:
2325 if (olen
== sizeof(rfc
))
2326 memcpy(&rfc
, (void *)val
, olen
);
2333 case L2CAP_MODE_ERTM
:
2334 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2335 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2336 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2338 case L2CAP_MODE_STREAMING
:
2339 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2343 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2345 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2347 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2350 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2351 cmd
->ident
== conn
->info_ident
) {
2352 del_timer(&conn
->info_timer
);
2354 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2355 conn
->info_ident
= 0;
2357 l2cap_conn_start(conn
);
2363 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2365 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2366 struct l2cap_conn_rsp rsp
;
2367 struct l2cap_chan
*chan
= NULL
, *pchan
;
2368 struct sock
*parent
, *sk
= NULL
;
2369 int result
, status
= L2CAP_CS_NO_INFO
;
2371 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2372 __le16 psm
= req
->psm
;
2374 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2376 /* Check if we have socket listening on psm */
2377 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
);
2379 result
= L2CAP_CR_BAD_PSM
;
2385 bh_lock_sock(parent
);
2387 /* Check if the ACL is secure enough (if not SDP) */
2388 if (psm
!= cpu_to_le16(0x0001) &&
2389 !hci_conn_check_link_mode(conn
->hcon
)) {
2390 conn
->disc_reason
= 0x05;
2391 result
= L2CAP_CR_SEC_BLOCK
;
2395 result
= L2CAP_CR_NO_MEM
;
2397 /* Check for backlog size */
2398 if (sk_acceptq_is_full(parent
)) {
2399 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2403 chan
= pchan
->ops
->new_connection(pchan
->data
);
2409 write_lock_bh(&conn
->chan_lock
);
2411 /* Check if we already have channel with that dcid */
2412 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2413 write_unlock_bh(&conn
->chan_lock
);
2414 sock_set_flag(sk
, SOCK_ZAPPED
);
2415 chan
->ops
->close(chan
->data
);
2419 hci_conn_hold(conn
->hcon
);
2421 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2422 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2426 bt_accept_enqueue(parent
, sk
);
2428 __l2cap_chan_add(conn
, chan
);
2432 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
2434 chan
->ident
= cmd
->ident
;
2436 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2437 if (l2cap_check_security(chan
)) {
2438 if (bt_sk(sk
)->defer_setup
) {
2439 l2cap_state_change(chan
, BT_CONNECT2
);
2440 result
= L2CAP_CR_PEND
;
2441 status
= L2CAP_CS_AUTHOR_PEND
;
2442 parent
->sk_data_ready(parent
, 0);
2444 l2cap_state_change(chan
, BT_CONFIG
);
2445 result
= L2CAP_CR_SUCCESS
;
2446 status
= L2CAP_CS_NO_INFO
;
2449 l2cap_state_change(chan
, BT_CONNECT2
);
2450 result
= L2CAP_CR_PEND
;
2451 status
= L2CAP_CS_AUTHEN_PEND
;
2454 l2cap_state_change(chan
, BT_CONNECT2
);
2455 result
= L2CAP_CR_PEND
;
2456 status
= L2CAP_CS_NO_INFO
;
2459 write_unlock_bh(&conn
->chan_lock
);
2462 bh_unlock_sock(parent
);
2465 rsp
.scid
= cpu_to_le16(scid
);
2466 rsp
.dcid
= cpu_to_le16(dcid
);
2467 rsp
.result
= cpu_to_le16(result
);
2468 rsp
.status
= cpu_to_le16(status
);
2469 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2471 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2472 struct l2cap_info_req info
;
2473 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2475 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2476 conn
->info_ident
= l2cap_get_ident(conn
);
2478 mod_timer(&conn
->info_timer
, jiffies
+
2479 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2481 l2cap_send_cmd(conn
, conn
->info_ident
,
2482 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2485 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
2486 result
== L2CAP_CR_SUCCESS
) {
2488 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
2489 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2490 l2cap_build_conf_req(chan
, buf
), buf
);
2491 chan
->num_conf_req
++;
2497 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2499 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2500 u16 scid
, dcid
, result
, status
;
2501 struct l2cap_chan
*chan
;
2505 scid
= __le16_to_cpu(rsp
->scid
);
2506 dcid
= __le16_to_cpu(rsp
->dcid
);
2507 result
= __le16_to_cpu(rsp
->result
);
2508 status
= __le16_to_cpu(rsp
->status
);
2510 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2513 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2517 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2525 case L2CAP_CR_SUCCESS
:
2526 l2cap_state_change(chan
, BT_CONFIG
);
2529 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2531 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2534 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2535 l2cap_build_conf_req(chan
, req
), req
);
2536 chan
->num_conf_req
++;
2540 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2544 /* don't delete l2cap channel if sk is owned by user */
2545 if (sock_owned_by_user(sk
)) {
2546 l2cap_state_change(chan
, BT_DISCONN
);
2547 __clear_chan_timer(chan
);
2548 __set_chan_timer(chan
, HZ
/ 5);
2552 l2cap_chan_del(chan
, ECONNREFUSED
);
2560 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2562 /* FCS is enabled only in ERTM or streaming mode, if one or both
2565 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2566 chan
->fcs
= L2CAP_FCS_NONE
;
2567 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
2568 chan
->fcs
= L2CAP_FCS_CRC16
;
2571 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2573 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2576 struct l2cap_chan
*chan
;
2580 dcid
= __le16_to_cpu(req
->dcid
);
2581 flags
= __le16_to_cpu(req
->flags
);
2583 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2585 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2591 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
2592 struct l2cap_cmd_rej_cid rej
;
2594 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
2595 rej
.scid
= cpu_to_le16(chan
->scid
);
2596 rej
.dcid
= cpu_to_le16(chan
->dcid
);
2598 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2603 /* Reject if config buffer is too small. */
2604 len
= cmd_len
- sizeof(*req
);
2605 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2606 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2607 l2cap_build_conf_rsp(chan
, rsp
,
2608 L2CAP_CONF_REJECT
, flags
), rsp
);
2613 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2614 chan
->conf_len
+= len
;
2616 if (flags
& 0x0001) {
2617 /* Incomplete config. Send empty response. */
2618 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2619 l2cap_build_conf_rsp(chan
, rsp
,
2620 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2624 /* Complete config. */
2625 len
= l2cap_parse_conf_req(chan
, rsp
);
2627 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2631 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2632 chan
->num_conf_rsp
++;
2634 /* Reset config buffer. */
2637 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
2640 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
2641 set_default_fcs(chan
);
2643 l2cap_state_change(chan
, BT_CONNECTED
);
2645 chan
->next_tx_seq
= 0;
2646 chan
->expected_tx_seq
= 0;
2647 skb_queue_head_init(&chan
->tx_q
);
2648 if (chan
->mode
== L2CAP_MODE_ERTM
)
2649 l2cap_ertm_init(chan
);
2651 l2cap_chan_ready(sk
);
2655 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
2657 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2658 l2cap_build_conf_req(chan
, buf
), buf
);
2659 chan
->num_conf_req
++;
2667 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2669 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2670 u16 scid
, flags
, result
;
2671 struct l2cap_chan
*chan
;
2673 int len
= cmd
->len
- sizeof(*rsp
);
2675 scid
= __le16_to_cpu(rsp
->scid
);
2676 flags
= __le16_to_cpu(rsp
->flags
);
2677 result
= __le16_to_cpu(rsp
->result
);
2679 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2680 scid
, flags
, result
);
2682 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2689 case L2CAP_CONF_SUCCESS
:
2690 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2693 case L2CAP_CONF_UNACCEPT
:
2694 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2697 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2698 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2702 /* throw out any old stored conf requests */
2703 result
= L2CAP_CONF_SUCCESS
;
2704 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2707 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2711 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2712 L2CAP_CONF_REQ
, len
, req
);
2713 chan
->num_conf_req
++;
2714 if (result
!= L2CAP_CONF_SUCCESS
)
2720 sk
->sk_err
= ECONNRESET
;
2721 __set_chan_timer(chan
, HZ
* 5);
2722 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2729 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
2731 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
2732 set_default_fcs(chan
);
2734 l2cap_state_change(chan
, BT_CONNECTED
);
2735 chan
->next_tx_seq
= 0;
2736 chan
->expected_tx_seq
= 0;
2737 skb_queue_head_init(&chan
->tx_q
);
2738 if (chan
->mode
== L2CAP_MODE_ERTM
)
2739 l2cap_ertm_init(chan
);
2741 l2cap_chan_ready(sk
);
2749 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2751 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2752 struct l2cap_disconn_rsp rsp
;
2754 struct l2cap_chan
*chan
;
2757 scid
= __le16_to_cpu(req
->scid
);
2758 dcid
= __le16_to_cpu(req
->dcid
);
2760 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2762 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2768 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2769 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2770 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2772 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2774 /* don't delete l2cap channel if sk is owned by user */
2775 if (sock_owned_by_user(sk
)) {
2776 l2cap_state_change(chan
, BT_DISCONN
);
2777 __clear_chan_timer(chan
);
2778 __set_chan_timer(chan
, HZ
/ 5);
2783 l2cap_chan_del(chan
, ECONNRESET
);
2786 chan
->ops
->close(chan
->data
);
2790 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2792 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2794 struct l2cap_chan
*chan
;
2797 scid
= __le16_to_cpu(rsp
->scid
);
2798 dcid
= __le16_to_cpu(rsp
->dcid
);
2800 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2802 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2808 /* don't delete l2cap channel if sk is owned by user */
2809 if (sock_owned_by_user(sk
)) {
2810 l2cap_state_change(chan
,BT_DISCONN
);
2811 __clear_chan_timer(chan
);
2812 __set_chan_timer(chan
, HZ
/ 5);
2817 l2cap_chan_del(chan
, 0);
2820 chan
->ops
->close(chan
->data
);
2824 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2826 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2829 type
= __le16_to_cpu(req
->type
);
2831 BT_DBG("type 0x%4.4x", type
);
2833 if (type
== L2CAP_IT_FEAT_MASK
) {
2835 u32 feat_mask
= l2cap_feat_mask
;
2836 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2837 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2838 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2840 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2843 feat_mask
|= L2CAP_FEAT_EXT_FLOW
2844 | L2CAP_FEAT_EXT_WINDOW
;
2846 put_unaligned_le32(feat_mask
, rsp
->data
);
2847 l2cap_send_cmd(conn
, cmd
->ident
,
2848 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2849 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2851 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2852 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2853 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2854 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2855 l2cap_send_cmd(conn
, cmd
->ident
,
2856 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2858 struct l2cap_info_rsp rsp
;
2859 rsp
.type
= cpu_to_le16(type
);
2860 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2861 l2cap_send_cmd(conn
, cmd
->ident
,
2862 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2868 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2870 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2873 type
= __le16_to_cpu(rsp
->type
);
2874 result
= __le16_to_cpu(rsp
->result
);
2876 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2878 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2879 if (cmd
->ident
!= conn
->info_ident
||
2880 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
2883 del_timer(&conn
->info_timer
);
2885 if (result
!= L2CAP_IR_SUCCESS
) {
2886 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2887 conn
->info_ident
= 0;
2889 l2cap_conn_start(conn
);
2894 if (type
== L2CAP_IT_FEAT_MASK
) {
2895 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2897 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2898 struct l2cap_info_req req
;
2899 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2901 conn
->info_ident
= l2cap_get_ident(conn
);
2903 l2cap_send_cmd(conn
, conn
->info_ident
,
2904 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2906 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2907 conn
->info_ident
= 0;
2909 l2cap_conn_start(conn
);
2911 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2912 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2913 conn
->info_ident
= 0;
2915 l2cap_conn_start(conn
);
2921 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
2926 if (min
> max
|| min
< 6 || max
> 3200)
2929 if (to_multiplier
< 10 || to_multiplier
> 3200)
2932 if (max
>= to_multiplier
* 8)
2935 max_latency
= (to_multiplier
* 8 / max
) - 1;
2936 if (latency
> 499 || latency
> max_latency
)
2942 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
2943 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2945 struct hci_conn
*hcon
= conn
->hcon
;
2946 struct l2cap_conn_param_update_req
*req
;
2947 struct l2cap_conn_param_update_rsp rsp
;
2948 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
2951 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
2954 cmd_len
= __le16_to_cpu(cmd
->len
);
2955 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
2958 req
= (struct l2cap_conn_param_update_req
*) data
;
2959 min
= __le16_to_cpu(req
->min
);
2960 max
= __le16_to_cpu(req
->max
);
2961 latency
= __le16_to_cpu(req
->latency
);
2962 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
2964 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2965 min
, max
, latency
, to_multiplier
);
2967 memset(&rsp
, 0, sizeof(rsp
));
2969 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
2971 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
2973 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
2975 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
2979 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
2984 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
2985 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2989 switch (cmd
->code
) {
2990 case L2CAP_COMMAND_REJ
:
2991 l2cap_command_rej(conn
, cmd
, data
);
2994 case L2CAP_CONN_REQ
:
2995 err
= l2cap_connect_req(conn
, cmd
, data
);
2998 case L2CAP_CONN_RSP
:
2999 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3002 case L2CAP_CONF_REQ
:
3003 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3006 case L2CAP_CONF_RSP
:
3007 err
= l2cap_config_rsp(conn
, cmd
, data
);
3010 case L2CAP_DISCONN_REQ
:
3011 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3014 case L2CAP_DISCONN_RSP
:
3015 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3018 case L2CAP_ECHO_REQ
:
3019 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3022 case L2CAP_ECHO_RSP
:
3025 case L2CAP_INFO_REQ
:
3026 err
= l2cap_information_req(conn
, cmd
, data
);
3029 case L2CAP_INFO_RSP
:
3030 err
= l2cap_information_rsp(conn
, cmd
, data
);
3034 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3042 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3043 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3045 switch (cmd
->code
) {
3046 case L2CAP_COMMAND_REJ
:
3049 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3050 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3052 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3056 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3061 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3062 struct sk_buff
*skb
)
3064 u8
*data
= skb
->data
;
3066 struct l2cap_cmd_hdr cmd
;
3069 l2cap_raw_recv(conn
, skb
);
3071 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3073 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3074 data
+= L2CAP_CMD_HDR_SIZE
;
3075 len
-= L2CAP_CMD_HDR_SIZE
;
3077 cmd_len
= le16_to_cpu(cmd
.len
);
3079 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3081 if (cmd_len
> len
|| !cmd
.ident
) {
3082 BT_DBG("corrupted command");
3086 if (conn
->hcon
->type
== LE_LINK
)
3087 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3089 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3092 struct l2cap_cmd_rej_unk rej
;
3094 BT_ERR("Wrong link type (%d)", err
);
3096 /* FIXME: Map err to a valid reason */
3097 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3098 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3108 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3110 u16 our_fcs
, rcv_fcs
;
3113 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3114 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3116 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3118 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3119 skb_trim(skb
, skb
->len
- 2);
3120 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3121 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3123 if (our_fcs
!= rcv_fcs
)
3129 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3133 chan
->frames_sent
= 0;
3135 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3137 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3138 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3139 l2cap_send_sframe(chan
, control
);
3140 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3143 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3144 l2cap_retransmit_frames(chan
);
3146 l2cap_ertm_send(chan
);
3148 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3149 chan
->frames_sent
== 0) {
3150 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3151 l2cap_send_sframe(chan
, control
);
3155 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3157 struct sk_buff
*next_skb
;
3158 int tx_seq_offset
, next_tx_seq_offset
;
3160 bt_cb(skb
)->tx_seq
= tx_seq
;
3161 bt_cb(skb
)->sar
= sar
;
3163 next_skb
= skb_peek(&chan
->srej_q
);
3165 __skb_queue_tail(&chan
->srej_q
, skb
);
3169 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3170 if (tx_seq_offset
< 0)
3171 tx_seq_offset
+= 64;
3174 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3177 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3178 chan
->buffer_seq
) % 64;
3179 if (next_tx_seq_offset
< 0)
3180 next_tx_seq_offset
+= 64;
3182 if (next_tx_seq_offset
> tx_seq_offset
) {
3183 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3187 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3190 } while ((next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
)));
3192 __skb_queue_tail(&chan
->srej_q
, skb
);
3197 static void append_skb_frag(struct sk_buff
*skb
,
3198 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
3200 /* skb->len reflects data in skb as well as all fragments
3201 * skb->data_len reflects only data in fragments
3203 if (!skb_has_frag_list(skb
))
3204 skb_shinfo(skb
)->frag_list
= new_frag
;
3206 new_frag
->next
= NULL
;
3208 (*last_frag
)->next
= new_frag
;
3209 *last_frag
= new_frag
;
3211 skb
->len
+= new_frag
->len
;
3212 skb
->data_len
+= new_frag
->len
;
3213 skb
->truesize
+= new_frag
->truesize
;
3216 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3220 switch (__get_ctrl_sar(chan
, control
)) {
3221 case L2CAP_SAR_UNSEGMENTED
:
3225 err
= chan
->ops
->recv(chan
->data
, skb
);
3228 case L2CAP_SAR_START
:
3232 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3235 if (chan
->sdu_len
> chan
->imtu
) {
3240 if (skb
->len
>= chan
->sdu_len
)
3244 chan
->sdu_last_frag
= skb
;
3250 case L2CAP_SAR_CONTINUE
:
3254 append_skb_frag(chan
->sdu
, skb
,
3255 &chan
->sdu_last_frag
);
3258 if (chan
->sdu
->len
>= chan
->sdu_len
)
3268 append_skb_frag(chan
->sdu
, skb
,
3269 &chan
->sdu_last_frag
);
3272 if (chan
->sdu
->len
!= chan
->sdu_len
)
3275 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
3278 /* Reassembly complete */
3280 chan
->sdu_last_frag
= NULL
;
3288 kfree_skb(chan
->sdu
);
3290 chan
->sdu_last_frag
= NULL
;
3297 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
3301 BT_DBG("chan %p, Enter local busy", chan
);
3303 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3305 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3306 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3307 l2cap_send_sframe(chan
, control
);
3309 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3311 __clear_ack_timer(chan
);
3314 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
3318 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
3321 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3322 control
|= __set_ctrl_poll(chan
);
3323 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3324 l2cap_send_sframe(chan
, control
);
3325 chan
->retry_count
= 1;
3327 __clear_retrans_timer(chan
);
3328 __set_monitor_timer(chan
);
3330 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
3333 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3334 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3336 BT_DBG("chan %p, Exit local busy", chan
);
3339 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
3341 if (chan
->mode
== L2CAP_MODE_ERTM
) {
3343 l2cap_ertm_enter_local_busy(chan
);
3345 l2cap_ertm_exit_local_busy(chan
);
3349 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
3351 struct sk_buff
*skb
;
3354 while ((skb
= skb_peek(&chan
->srej_q
)) &&
3355 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3358 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3361 skb
= skb_dequeue(&chan
->srej_q
);
3362 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->sar
);
3363 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
3366 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3370 chan
->buffer_seq_srej
=
3371 (chan
->buffer_seq_srej
+ 1) % 64;
3372 tx_seq
= (tx_seq
+ 1) % 64;
3376 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3378 struct srej_list
*l
, *tmp
;
3381 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3382 if (l
->tx_seq
== tx_seq
) {
3387 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3388 control
|= __set_reqseq(chan
, l
->tx_seq
);
3389 l2cap_send_sframe(chan
, control
);
3391 list_add_tail(&l
->list
, &chan
->srej_l
);
3395 static void l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3397 struct srej_list
*new;
3400 while (tx_seq
!= chan
->expected_tx_seq
) {
3401 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3402 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
3403 l2cap_send_sframe(chan
, control
);
3405 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3406 new->tx_seq
= chan
->expected_tx_seq
;
3407 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3408 list_add_tail(&new->list
, &chan
->srej_l
);
3410 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3413 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3415 u16 tx_seq
= __get_txseq(chan
, rx_control
);
3416 u16 req_seq
= __get_reqseq(chan
, rx_control
);
3417 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
3418 int tx_seq_offset
, expected_tx_seq_offset
;
3419 int num_to_ack
= (chan
->tx_win
/6) + 1;
3422 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan
, skb
->len
,
3423 tx_seq
, rx_control
);
3425 if (__is_ctrl_final(chan
, rx_control
) &&
3426 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3427 __clear_monitor_timer(chan
);
3428 if (chan
->unacked_frames
> 0)
3429 __set_retrans_timer(chan
);
3430 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
3433 chan
->expected_ack_seq
= req_seq
;
3434 l2cap_drop_acked_frames(chan
);
3436 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3437 if (tx_seq_offset
< 0)
3438 tx_seq_offset
+= 64;
3440 /* invalid tx_seq */
3441 if (tx_seq_offset
>= chan
->tx_win
) {
3442 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3446 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
3449 if (tx_seq
== chan
->expected_tx_seq
)
3452 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3453 struct srej_list
*first
;
3455 first
= list_first_entry(&chan
->srej_l
,
3456 struct srej_list
, list
);
3457 if (tx_seq
== first
->tx_seq
) {
3458 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3459 l2cap_check_srej_gap(chan
, tx_seq
);
3461 list_del(&first
->list
);
3464 if (list_empty(&chan
->srej_l
)) {
3465 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3466 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3467 l2cap_send_ack(chan
);
3468 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3471 struct srej_list
*l
;
3473 /* duplicated tx_seq */
3474 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3477 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3478 if (l
->tx_seq
== tx_seq
) {
3479 l2cap_resend_srejframe(chan
, tx_seq
);
3483 l2cap_send_srejframe(chan
, tx_seq
);
3486 expected_tx_seq_offset
=
3487 (chan
->expected_tx_seq
- chan
->buffer_seq
) % 64;
3488 if (expected_tx_seq_offset
< 0)
3489 expected_tx_seq_offset
+= 64;
3491 /* duplicated tx_seq */
3492 if (tx_seq_offset
< expected_tx_seq_offset
)
3495 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3497 BT_DBG("chan %p, Enter SREJ", chan
);
3499 INIT_LIST_HEAD(&chan
->srej_l
);
3500 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3502 __skb_queue_head_init(&chan
->srej_q
);
3503 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3505 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
3507 l2cap_send_srejframe(chan
, tx_seq
);
3509 __clear_ack_timer(chan
);
3514 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3516 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3517 bt_cb(skb
)->tx_seq
= tx_seq
;
3518 bt_cb(skb
)->sar
= sar
;
3519 __skb_queue_tail(&chan
->srej_q
, skb
);
3523 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
3524 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
3526 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3530 if (__is_ctrl_final(chan
, rx_control
)) {
3531 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3532 l2cap_retransmit_frames(chan
);
3535 __set_ack_timer(chan
);
3537 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3538 if (chan
->num_acked
== num_to_ack
- 1)
3539 l2cap_send_ack(chan
);
3548 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3550 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
,
3551 __get_reqseq(chan
, rx_control
), rx_control
);
3553 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
3554 l2cap_drop_acked_frames(chan
);
3556 if (__is_ctrl_poll(chan
, rx_control
)) {
3557 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
3558 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3559 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
3560 (chan
->unacked_frames
> 0))
3561 __set_retrans_timer(chan
);
3563 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3564 l2cap_send_srejtail(chan
);
3566 l2cap_send_i_or_rr_or_rnr(chan
);
3569 } else if (__is_ctrl_final(chan
, rx_control
)) {
3570 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3572 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3573 l2cap_retransmit_frames(chan
);
3576 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
3577 (chan
->unacked_frames
> 0))
3578 __set_retrans_timer(chan
);
3580 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3581 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
3582 l2cap_send_ack(chan
);
3584 l2cap_ertm_send(chan
);
3588 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3590 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
3592 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3594 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3596 chan
->expected_ack_seq
= tx_seq
;
3597 l2cap_drop_acked_frames(chan
);
3599 if (__is_ctrl_final(chan
, rx_control
)) {
3600 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3601 l2cap_retransmit_frames(chan
);
3603 l2cap_retransmit_frames(chan
);
3605 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
3606 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
3609 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3611 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
3613 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3615 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3617 if (__is_ctrl_poll(chan
, rx_control
)) {
3618 chan
->expected_ack_seq
= tx_seq
;
3619 l2cap_drop_acked_frames(chan
);
3621 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
3622 l2cap_retransmit_one_frame(chan
, tx_seq
);
3624 l2cap_ertm_send(chan
);
3626 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3627 chan
->srej_save_reqseq
= tx_seq
;
3628 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
3630 } else if (__is_ctrl_final(chan
, rx_control
)) {
3631 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
3632 chan
->srej_save_reqseq
== tx_seq
)
3633 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
3635 l2cap_retransmit_one_frame(chan
, tx_seq
);
3637 l2cap_retransmit_one_frame(chan
, tx_seq
);
3638 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3639 chan
->srej_save_reqseq
= tx_seq
;
3640 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
3645 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3647 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
3649 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3651 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3652 chan
->expected_ack_seq
= tx_seq
;
3653 l2cap_drop_acked_frames(chan
);
3655 if (__is_ctrl_poll(chan
, rx_control
))
3656 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
3658 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3659 __clear_retrans_timer(chan
);
3660 if (__is_ctrl_poll(chan
, rx_control
))
3661 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
3665 if (__is_ctrl_poll(chan
, rx_control
)) {
3666 l2cap_send_srejtail(chan
);
3668 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3669 l2cap_send_sframe(chan
, rx_control
);
3673 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3675 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan
, rx_control
, skb
->len
);
3677 if (__is_ctrl_final(chan
, rx_control
) &&
3678 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3679 __clear_monitor_timer(chan
);
3680 if (chan
->unacked_frames
> 0)
3681 __set_retrans_timer(chan
);
3682 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
3685 switch (__get_ctrl_super(chan
, rx_control
)) {
3686 case L2CAP_SUPER_RR
:
3687 l2cap_data_channel_rrframe(chan
, rx_control
);
3690 case L2CAP_SUPER_REJ
:
3691 l2cap_data_channel_rejframe(chan
, rx_control
);
3694 case L2CAP_SUPER_SREJ
:
3695 l2cap_data_channel_srejframe(chan
, rx_control
);
3698 case L2CAP_SUPER_RNR
:
3699 l2cap_data_channel_rnrframe(chan
, rx_control
);
3707 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3709 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
3712 int len
, next_tx_seq_offset
, req_seq_offset
;
3714 control
= get_unaligned_le16(skb
->data
);
3719 * We can just drop the corrupted I-frame here.
3720 * Receiver will miss it and start proper recovery
3721 * procedures and ask retransmission.
3723 if (l2cap_check_fcs(chan
, skb
))
3726 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
3729 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3732 if (len
> chan
->mps
) {
3733 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3737 req_seq
= __get_reqseq(chan
, control
);
3738 req_seq_offset
= (req_seq
- chan
->expected_ack_seq
) % 64;
3739 if (req_seq_offset
< 0)
3740 req_seq_offset
+= 64;
3742 next_tx_seq_offset
=
3743 (chan
->next_tx_seq
- chan
->expected_ack_seq
) % 64;
3744 if (next_tx_seq_offset
< 0)
3745 next_tx_seq_offset
+= 64;
3747 /* check for invalid req-seq */
3748 if (req_seq_offset
> next_tx_seq_offset
) {
3749 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3753 if (!__is_sframe(chan
, control
)) {
3755 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3759 l2cap_data_channel_iframe(chan
, control
, skb
);
3763 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3767 l2cap_data_channel_sframe(chan
, control
, skb
);
3777 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3779 struct l2cap_chan
*chan
;
3780 struct sock
*sk
= NULL
;
3785 chan
= l2cap_get_chan_by_scid(conn
, cid
);
3787 BT_DBG("unknown cid 0x%4.4x", cid
);
3793 BT_DBG("chan %p, len %d", chan
, skb
->len
);
3795 if (chan
->state
!= BT_CONNECTED
)
3798 switch (chan
->mode
) {
3799 case L2CAP_MODE_BASIC
:
3800 /* If socket recv buffers overflows we drop data here
3801 * which is *bad* because L2CAP has to be reliable.
3802 * But we don't have any other choice. L2CAP doesn't
3803 * provide flow control mechanism. */
3805 if (chan
->imtu
< skb
->len
)
3808 if (!chan
->ops
->recv(chan
->data
, skb
))
3812 case L2CAP_MODE_ERTM
:
3813 if (!sock_owned_by_user(sk
)) {
3814 l2cap_ertm_data_rcv(sk
, skb
);
3816 if (sk_add_backlog(sk
, skb
))
3822 case L2CAP_MODE_STREAMING
:
3823 control
= get_unaligned_le16(skb
->data
);
3827 if (l2cap_check_fcs(chan
, skb
))
3830 if (__is_sar_start(chan
, control
))
3833 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3836 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
3839 tx_seq
= __get_txseq(chan
, control
);
3841 if (chan
->expected_tx_seq
!= tx_seq
) {
3842 /* Frame(s) missing - must discard partial SDU */
3843 kfree_skb(chan
->sdu
);
3845 chan
->sdu_last_frag
= NULL
;
3848 /* TODO: Notify userland of missing data */
3851 chan
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3853 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
3854 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3859 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
3873 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3875 struct sock
*sk
= NULL
;
3876 struct l2cap_chan
*chan
;
3878 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
);
3886 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3888 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
3891 if (chan
->imtu
< skb
->len
)
3894 if (!chan
->ops
->recv(chan
->data
, skb
))
3906 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
3908 struct sock
*sk
= NULL
;
3909 struct l2cap_chan
*chan
;
3911 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
);
3919 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3921 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
3924 if (chan
->imtu
< skb
->len
)
3927 if (!chan
->ops
->recv(chan
->data
, skb
))
3939 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3941 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3945 skb_pull(skb
, L2CAP_HDR_SIZE
);
3946 cid
= __le16_to_cpu(lh
->cid
);
3947 len
= __le16_to_cpu(lh
->len
);
3949 if (len
!= skb
->len
) {
3954 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3957 case L2CAP_CID_LE_SIGNALING
:
3958 case L2CAP_CID_SIGNALING
:
3959 l2cap_sig_channel(conn
, skb
);
3962 case L2CAP_CID_CONN_LESS
:
3963 psm
= get_unaligned_le16(skb
->data
);
3965 l2cap_conless_channel(conn
, psm
, skb
);
3968 case L2CAP_CID_LE_DATA
:
3969 l2cap_att_channel(conn
, cid
, skb
);
3973 if (smp_sig_channel(conn
, skb
))
3974 l2cap_conn_del(conn
->hcon
, EACCES
);
3978 l2cap_data_channel(conn
, cid
, skb
);
3983 /* ---- L2CAP interface with lower layer (HCI) ---- */
3985 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3987 int exact
= 0, lm1
= 0, lm2
= 0;
3988 struct l2cap_chan
*c
;
3990 if (type
!= ACL_LINK
)
3993 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3995 /* Find listening sockets and check their link_mode */
3996 read_lock(&chan_list_lock
);
3997 list_for_each_entry(c
, &chan_list
, global_l
) {
3998 struct sock
*sk
= c
->sk
;
4000 if (c
->state
!= BT_LISTEN
)
4003 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4004 lm1
|= HCI_LM_ACCEPT
;
4005 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4006 lm1
|= HCI_LM_MASTER
;
4008 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4009 lm2
|= HCI_LM_ACCEPT
;
4010 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4011 lm2
|= HCI_LM_MASTER
;
4014 read_unlock(&chan_list_lock
);
4016 return exact
? lm1
: lm2
;
4019 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4021 struct l2cap_conn
*conn
;
4023 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4025 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4029 conn
= l2cap_conn_add(hcon
, status
);
4031 l2cap_conn_ready(conn
);
4033 l2cap_conn_del(hcon
, bt_to_errno(status
));
4038 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4040 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4042 BT_DBG("hcon %p", hcon
);
4044 if ((hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
) || !conn
)
4047 return conn
->disc_reason
;
4050 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4052 BT_DBG("hcon %p reason %d", hcon
, reason
);
4054 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4057 l2cap_conn_del(hcon
, bt_to_errno(reason
));
4062 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4064 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4067 if (encrypt
== 0x00) {
4068 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4069 __clear_chan_timer(chan
);
4070 __set_chan_timer(chan
, HZ
* 5);
4071 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4072 l2cap_chan_close(chan
, ECONNREFUSED
);
4074 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4075 __clear_chan_timer(chan
);
4079 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4081 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4082 struct l2cap_chan
*chan
;
4087 BT_DBG("conn %p", conn
);
4089 if (hcon
->type
== LE_LINK
) {
4090 smp_distribute_keys(conn
, 0);
4091 del_timer(&conn
->security_timer
);
4094 read_lock(&conn
->chan_lock
);
4096 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4097 struct sock
*sk
= chan
->sk
;
4101 BT_DBG("chan->scid %d", chan
->scid
);
4103 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4104 if (!status
&& encrypt
) {
4105 chan
->sec_level
= hcon
->sec_level
;
4106 l2cap_chan_ready(sk
);
4113 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4118 if (!status
&& (chan
->state
== BT_CONNECTED
||
4119 chan
->state
== BT_CONFIG
)) {
4120 l2cap_check_encryption(chan
, encrypt
);
4125 if (chan
->state
== BT_CONNECT
) {
4127 struct l2cap_conn_req req
;
4128 req
.scid
= cpu_to_le16(chan
->scid
);
4129 req
.psm
= chan
->psm
;
4131 chan
->ident
= l2cap_get_ident(conn
);
4132 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4134 l2cap_send_cmd(conn
, chan
->ident
,
4135 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4137 __clear_chan_timer(chan
);
4138 __set_chan_timer(chan
, HZ
/ 10);
4140 } else if (chan
->state
== BT_CONNECT2
) {
4141 struct l2cap_conn_rsp rsp
;
4145 if (bt_sk(sk
)->defer_setup
) {
4146 struct sock
*parent
= bt_sk(sk
)->parent
;
4147 res
= L2CAP_CR_PEND
;
4148 stat
= L2CAP_CS_AUTHOR_PEND
;
4150 parent
->sk_data_ready(parent
, 0);
4152 l2cap_state_change(chan
, BT_CONFIG
);
4153 res
= L2CAP_CR_SUCCESS
;
4154 stat
= L2CAP_CS_NO_INFO
;
4157 l2cap_state_change(chan
, BT_DISCONN
);
4158 __set_chan_timer(chan
, HZ
/ 10);
4159 res
= L2CAP_CR_SEC_BLOCK
;
4160 stat
= L2CAP_CS_NO_INFO
;
4163 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4164 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4165 rsp
.result
= cpu_to_le16(res
);
4166 rsp
.status
= cpu_to_le16(stat
);
4167 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4174 read_unlock(&conn
->chan_lock
);
4179 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4181 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4184 conn
= l2cap_conn_add(hcon
, 0);
4189 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4191 if (!(flags
& ACL_CONT
)) {
4192 struct l2cap_hdr
*hdr
;
4193 struct l2cap_chan
*chan
;
4198 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4199 kfree_skb(conn
->rx_skb
);
4200 conn
->rx_skb
= NULL
;
4202 l2cap_conn_unreliable(conn
, ECOMM
);
4205 /* Start fragment always begin with Basic L2CAP header */
4206 if (skb
->len
< L2CAP_HDR_SIZE
) {
4207 BT_ERR("Frame is too short (len %d)", skb
->len
);
4208 l2cap_conn_unreliable(conn
, ECOMM
);
4212 hdr
= (struct l2cap_hdr
*) skb
->data
;
4213 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4214 cid
= __le16_to_cpu(hdr
->cid
);
4216 if (len
== skb
->len
) {
4217 /* Complete frame received */
4218 l2cap_recv_frame(conn
, skb
);
4222 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4224 if (skb
->len
> len
) {
4225 BT_ERR("Frame is too long (len %d, expected len %d)",
4227 l2cap_conn_unreliable(conn
, ECOMM
);
4231 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4233 if (chan
&& chan
->sk
) {
4234 struct sock
*sk
= chan
->sk
;
4236 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4237 BT_ERR("Frame exceeding recv MTU (len %d, "
4241 l2cap_conn_unreliable(conn
, ECOMM
);
4247 /* Allocate skb for the complete frame (with header) */
4248 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4252 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4254 conn
->rx_len
= len
- skb
->len
;
4256 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4258 if (!conn
->rx_len
) {
4259 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4260 l2cap_conn_unreliable(conn
, ECOMM
);
4264 if (skb
->len
> conn
->rx_len
) {
4265 BT_ERR("Fragment is too long (len %d, expected %d)",
4266 skb
->len
, conn
->rx_len
);
4267 kfree_skb(conn
->rx_skb
);
4268 conn
->rx_skb
= NULL
;
4270 l2cap_conn_unreliable(conn
, ECOMM
);
4274 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4276 conn
->rx_len
-= skb
->len
;
4278 if (!conn
->rx_len
) {
4279 /* Complete frame received */
4280 l2cap_recv_frame(conn
, conn
->rx_skb
);
4281 conn
->rx_skb
= NULL
;
4290 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4292 struct l2cap_chan
*c
;
4294 read_lock_bh(&chan_list_lock
);
4296 list_for_each_entry(c
, &chan_list
, global_l
) {
4297 struct sock
*sk
= c
->sk
;
4299 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4300 batostr(&bt_sk(sk
)->src
),
4301 batostr(&bt_sk(sk
)->dst
),
4302 c
->state
, __le16_to_cpu(c
->psm
),
4303 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
4304 c
->sec_level
, c
->mode
);
4307 read_unlock_bh(&chan_list_lock
);
4312 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4314 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4317 static const struct file_operations l2cap_debugfs_fops
= {
4318 .open
= l2cap_debugfs_open
,
4320 .llseek
= seq_lseek
,
4321 .release
= single_release
,
/* debugfs dentry created in l2cap_init, removed in l2cap_exit */
static struct dentry *l2cap_debugfs;
4326 static struct hci_proto l2cap_hci_proto
= {
4328 .id
= HCI_PROTO_L2CAP
,
4329 .connect_ind
= l2cap_connect_ind
,
4330 .connect_cfm
= l2cap_connect_cfm
,
4331 .disconn_ind
= l2cap_disconn_ind
,
4332 .disconn_cfm
= l2cap_disconn_cfm
,
4333 .security_cfm
= l2cap_security_cfm
,
4334 .recv_acldata
= l2cap_recv_acldata
4337 int __init
l2cap_init(void)
4341 err
= l2cap_init_sockets();
4345 err
= hci_register_proto(&l2cap_hci_proto
);
4347 BT_ERR("L2CAP protocol registration failed");
4348 bt_sock_unregister(BTPROTO_L2CAP
);
4353 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4354 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4356 BT_ERR("Failed to create L2CAP debug file");
4362 l2cap_cleanup_sockets();
4366 void l2cap_exit(void)
4368 debugfs_remove(l2cap_debugfs
);
4370 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4371 BT_ERR("L2CAP protocol unregistration failed");
4373 l2cap_cleanup_sockets();
4376 module_param(disable_ertm
, bool, 0644);
4377 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");
4379 module_param(enable_hs
, bool, 0644);
4380 MODULE_PARM_DESC(enable_hs
, "Enable High Speed");