2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
49 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_SIG_BREDR
| L2CAP_FC_CONNLESS
, };
51 static LIST_HEAD(chan_list
);
52 static DEFINE_RWLOCK(chan_list_lock
);
54 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
55 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
57 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
58 u8 code
, u8 ident
, u16 dlen
, void *data
);
59 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
61 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
62 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
64 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
65 struct sk_buff_head
*skbs
, u8 event
);
67 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
69 if (hcon
->type
== LE_LINK
) {
70 if (type
== ADDR_LE_DEV_PUBLIC
)
71 return BDADDR_LE_PUBLIC
;
73 return BDADDR_LE_RANDOM
;
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
86 list_for_each_entry(c
, &conn
->chan_l
, list
) {
93 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
98 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
110 struct l2cap_chan
*c
;
112 mutex_lock(&conn
->chan_lock
);
113 c
= __l2cap_get_chan_by_scid(conn
, cid
);
116 mutex_unlock(&conn
->chan_lock
);
121 /* Find channel with given DCID.
122 * Returns locked channel.
124 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
127 struct l2cap_chan
*c
;
129 mutex_lock(&conn
->chan_lock
);
130 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
133 mutex_unlock(&conn
->chan_lock
);
138 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
141 struct l2cap_chan
*c
;
143 list_for_each_entry(c
, &conn
->chan_l
, list
) {
144 if (c
->ident
== ident
)
150 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
153 struct l2cap_chan
*c
;
155 mutex_lock(&conn
->chan_lock
);
156 c
= __l2cap_get_chan_by_ident(conn
, ident
);
159 mutex_unlock(&conn
->chan_lock
);
164 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
166 struct l2cap_chan
*c
;
168 list_for_each_entry(c
, &chan_list
, global_l
) {
169 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
175 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
179 write_lock(&chan_list_lock
);
181 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
194 for (p
= 0x1001; p
< 0x1100; p
+= 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
196 chan
->psm
= cpu_to_le16(p
);
197 chan
->sport
= cpu_to_le16(p
);
204 write_unlock(&chan_list_lock
);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm
);
209 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
211 write_lock(&chan_list_lock
);
215 write_unlock(&chan_list_lock
);
220 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
224 if (conn
->hcon
->type
== LE_LINK
)
225 dyn_end
= L2CAP_CID_LE_DYN_END
;
227 dyn_end
= L2CAP_CID_DYN_END
;
229 for (cid
= L2CAP_CID_DYN_START
; cid
< dyn_end
; cid
++) {
230 if (!__l2cap_get_chan_by_scid(conn
, cid
))
237 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
239 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
240 state_to_string(state
));
243 chan
->ops
->state_change(chan
, state
, 0);
246 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
250 chan
->ops
->state_change(chan
, chan
->state
, err
);
253 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
255 chan
->ops
->state_change(chan
, chan
->state
, err
);
/* Arm the ERTM retransmission timer.  It is only started when a
 * retransmission timeout has been negotiated and the monitor timer is
 * not already pending (the two timers are mutually exclusive).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
/* Switch from the retransmission timer to the monitor timer.  The
 * retransmission timer is always cancelled first; the monitor timer is
 * armed only when a monitor timeout has been configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
/* Linear scan of @head for the skb whose ERTM control block carries tx
 * sequence number @seq.  Returns the matching skb, or NULL when no
 * queued frame has that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
289 /* ---- L2CAP sequence number lists ---- */
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
300 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
302 size_t alloc_size
, i
;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
308 alloc_size
= roundup_pow_of_two(size
);
310 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
314 seq_list
->mask
= alloc_size
- 1;
315 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
316 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
317 for (i
= 0; i
< alloc_size
; i
++)
318 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
/* O(1) membership test: a slot still holding L2CAP_SEQ_LIST_CLEAR
 * means @seq is not currently on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
/* Remove and return the sequence number at the head of the list in
 * constant time.  The vacated slot is reset to CLEAR; if the new head
 * slot holds the TAIL sentinel the list has just become empty, so both
 * head and tail are reset.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry, clearing the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
/* Empty the list in O(n): wipe every slot and reset both ends.  A
 * cleared head means the list is already empty, so the scan can be
 * skipped entirely.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
/* Add @seq at the tail of the list in constant time.  Duplicate
 * appends are ignored: a slot that is not CLEAR already holds either a
 * link to the next entry or the TAIL sentinel.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* List was empty: @seq becomes the head too */
		seq_list->head = seq;
	else
		/* Link the old tail slot to the new entry */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
383 static void l2cap_chan_timeout(struct work_struct
*work
)
385 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
387 struct l2cap_conn
*conn
= chan
->conn
;
390 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
392 mutex_lock(&conn
->chan_lock
);
393 l2cap_chan_lock(chan
);
395 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
396 reason
= ECONNREFUSED
;
397 else if (chan
->state
== BT_CONNECT
&&
398 chan
->sec_level
!= BT_SECURITY_SDP
)
399 reason
= ECONNREFUSED
;
403 l2cap_chan_close(chan
, reason
);
405 l2cap_chan_unlock(chan
);
407 chan
->ops
->close(chan
);
408 mutex_unlock(&conn
->chan_lock
);
410 l2cap_chan_put(chan
);
413 struct l2cap_chan
*l2cap_chan_create(void)
415 struct l2cap_chan
*chan
;
417 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
421 mutex_init(&chan
->lock
);
423 write_lock(&chan_list_lock
);
424 list_add(&chan
->global_l
, &chan_list
);
425 write_unlock(&chan_list_lock
);
427 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
429 chan
->state
= BT_OPEN
;
431 kref_init(&chan
->kref
);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
436 BT_DBG("chan %p", chan
);
440 EXPORT_SYMBOL_GPL(l2cap_chan_create
);
442 static void l2cap_chan_destroy(struct kref
*kref
)
444 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
446 BT_DBG("chan %p", chan
);
448 write_lock(&chan_list_lock
);
449 list_del(&chan
->global_l
);
450 write_unlock(&chan_list_lock
);
455 void l2cap_chan_hold(struct l2cap_chan
*c
)
457 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
/* Drop a reference to @c; when the count reaches zero the channel is
 * unlinked from the global channel list and freed via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel's tunable parameters to their protocol defaults.
 * Called before (re)configuration so a reused channel does not inherit
 * values negotiated on a previous connection.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Mirror our own limits until the peer negotiates otherwise */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	/* Drop all CONF_* bits from any previous configuration round */
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
489 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
492 chan
->sdu_last_frag
= NULL
;
494 chan
->tx_credits
= 0;
495 chan
->rx_credits
= le_max_credits
;
496 chan
->mps
= min_t(u16
, chan
->imtu
, le_default_mps
);
498 skb_queue_head_init(&chan
->tx_q
);
501 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
503 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
504 __le16_to_cpu(chan
->psm
), chan
->dcid
);
506 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
510 switch (chan
->chan_type
) {
511 case L2CAP_CHAN_CONN_ORIENTED
:
512 /* Alloc CID for connection-oriented socket */
513 chan
->scid
= l2cap_alloc_cid(conn
);
514 if (conn
->hcon
->type
== ACL_LINK
)
515 chan
->omtu
= L2CAP_DEFAULT_MTU
;
518 case L2CAP_CHAN_CONN_LESS
:
519 /* Connectionless socket */
520 chan
->scid
= L2CAP_CID_CONN_LESS
;
521 chan
->dcid
= L2CAP_CID_CONN_LESS
;
522 chan
->omtu
= L2CAP_DEFAULT_MTU
;
525 case L2CAP_CHAN_FIXED
:
526 /* Caller will set CID and CID specific MTU values */
530 /* Raw socket can send/recv signalling messages only */
531 chan
->scid
= L2CAP_CID_SIGNALING
;
532 chan
->dcid
= L2CAP_CID_SIGNALING
;
533 chan
->omtu
= L2CAP_DEFAULT_MTU
;
536 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
537 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
538 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
539 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
540 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
541 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
543 l2cap_chan_hold(chan
);
545 hci_conn_hold(conn
->hcon
);
547 list_add(&chan
->list
, &conn
->chan_l
);
550 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
552 mutex_lock(&conn
->chan_lock
);
553 __l2cap_chan_add(conn
, chan
);
554 mutex_unlock(&conn
->chan_lock
);
557 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
559 struct l2cap_conn
*conn
= chan
->conn
;
561 __clear_chan_timer(chan
);
563 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
566 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
567 /* Delete from channel list */
568 list_del(&chan
->list
);
570 l2cap_chan_put(chan
);
574 if (chan
->scid
!= L2CAP_CID_A2MP
)
575 hci_conn_drop(conn
->hcon
);
577 if (mgr
&& mgr
->bredr_chan
== chan
)
578 mgr
->bredr_chan
= NULL
;
581 if (chan
->hs_hchan
) {
582 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
584 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
585 amp_disconnect_logical_link(hs_hchan
);
588 chan
->ops
->teardown(chan
, err
);
590 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
594 case L2CAP_MODE_BASIC
:
597 case L2CAP_MODE_LE_FLOWCTL
:
598 skb_queue_purge(&chan
->tx_q
);
601 case L2CAP_MODE_ERTM
:
602 __clear_retrans_timer(chan
);
603 __clear_monitor_timer(chan
);
604 __clear_ack_timer(chan
);
606 skb_queue_purge(&chan
->srej_q
);
608 l2cap_seq_list_free(&chan
->srej_list
);
609 l2cap_seq_list_free(&chan
->retrans_list
);
613 case L2CAP_MODE_STREAMING
:
614 skb_queue_purge(&chan
->tx_q
);
620 EXPORT_SYMBOL_GPL(l2cap_chan_del
);
622 void l2cap_conn_update_id_addr(struct hci_conn
*hcon
)
624 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
625 struct l2cap_chan
*chan
;
627 mutex_lock(&conn
->chan_lock
);
629 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
630 l2cap_chan_lock(chan
);
631 bacpy(&chan
->dst
, &hcon
->dst
);
632 chan
->dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
633 l2cap_chan_unlock(chan
);
636 mutex_unlock(&conn
->chan_lock
);
639 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
641 struct l2cap_conn
*conn
= chan
->conn
;
642 struct l2cap_le_conn_rsp rsp
;
645 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
646 result
= L2CAP_CR_AUTHORIZATION
;
648 result
= L2CAP_CR_BAD_PSM
;
650 l2cap_state_change(chan
, BT_DISCONN
);
652 rsp
.dcid
= cpu_to_le16(chan
->scid
);
653 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
654 rsp
.mps
= cpu_to_le16(chan
->mps
);
655 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
656 rsp
.result
= cpu_to_le16(result
);
658 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
662 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
664 struct l2cap_conn
*conn
= chan
->conn
;
665 struct l2cap_conn_rsp rsp
;
668 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
669 result
= L2CAP_CR_SEC_BLOCK
;
671 result
= L2CAP_CR_BAD_PSM
;
673 l2cap_state_change(chan
, BT_DISCONN
);
675 rsp
.scid
= cpu_to_le16(chan
->dcid
);
676 rsp
.dcid
= cpu_to_le16(chan
->scid
);
677 rsp
.result
= cpu_to_le16(result
);
678 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
680 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
683 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
685 struct l2cap_conn
*conn
= chan
->conn
;
687 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
689 switch (chan
->state
) {
691 chan
->ops
->teardown(chan
, 0);
696 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
697 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
698 l2cap_send_disconn_req(chan
, reason
);
700 l2cap_chan_del(chan
, reason
);
704 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
705 if (conn
->hcon
->type
== ACL_LINK
)
706 l2cap_chan_connect_reject(chan
);
707 else if (conn
->hcon
->type
== LE_LINK
)
708 l2cap_chan_le_connect_reject(chan
);
711 l2cap_chan_del(chan
, reason
);
716 l2cap_chan_del(chan
, reason
);
720 chan
->ops
->teardown(chan
, 0);
724 EXPORT_SYMBOL(l2cap_chan_close
);
726 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
728 switch (chan
->chan_type
) {
730 switch (chan
->sec_level
) {
731 case BT_SECURITY_HIGH
:
732 case BT_SECURITY_FIPS
:
733 return HCI_AT_DEDICATED_BONDING_MITM
;
734 case BT_SECURITY_MEDIUM
:
735 return HCI_AT_DEDICATED_BONDING
;
737 return HCI_AT_NO_BONDING
;
740 case L2CAP_CHAN_CONN_LESS
:
741 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
742 if (chan
->sec_level
== BT_SECURITY_LOW
)
743 chan
->sec_level
= BT_SECURITY_SDP
;
745 if (chan
->sec_level
== BT_SECURITY_HIGH
||
746 chan
->sec_level
== BT_SECURITY_FIPS
)
747 return HCI_AT_NO_BONDING_MITM
;
749 return HCI_AT_NO_BONDING
;
751 case L2CAP_CHAN_CONN_ORIENTED
:
752 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
753 if (chan
->sec_level
== BT_SECURITY_LOW
)
754 chan
->sec_level
= BT_SECURITY_SDP
;
756 if (chan
->sec_level
== BT_SECURITY_HIGH
||
757 chan
->sec_level
== BT_SECURITY_FIPS
)
758 return HCI_AT_NO_BONDING_MITM
;
760 return HCI_AT_NO_BONDING
;
764 switch (chan
->sec_level
) {
765 case BT_SECURITY_HIGH
:
766 case BT_SECURITY_FIPS
:
767 return HCI_AT_GENERAL_BONDING_MITM
;
768 case BT_SECURITY_MEDIUM
:
769 return HCI_AT_GENERAL_BONDING
;
771 return HCI_AT_NO_BONDING
;
777 /* Service level security */
778 int l2cap_chan_check_security(struct l2cap_chan
*chan
, bool initiator
)
780 struct l2cap_conn
*conn
= chan
->conn
;
783 if (conn
->hcon
->type
== LE_LINK
)
784 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
786 auth_type
= l2cap_get_auth_type(chan
);
788 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
,
792 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
796 /* Get next available identificator.
797 * 1 - 128 are used by kernel.
798 * 129 - 199 are reserved.
799 * 200 - 254 are used by utilities like l2ping, etc.
802 mutex_lock(&conn
->ident_lock
);
804 if (++conn
->tx_ident
> 128)
809 mutex_unlock(&conn
->ident_lock
);
814 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
817 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
820 BT_DBG("code 0x%2.2x", code
);
825 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
826 flags
= ACL_START_NO_FLUSH
;
830 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
831 skb
->priority
= HCI_PRIO_MAX
;
833 hci_send_acl(conn
->hchan
, skb
, flags
);
836 static bool __chan_is_moving(struct l2cap_chan
*chan
)
838 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
839 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
842 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
844 struct hci_conn
*hcon
= chan
->conn
->hcon
;
847 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
850 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
852 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
859 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
860 lmp_no_flush_capable(hcon
->hdev
))
861 flags
= ACL_START_NO_FLUSH
;
865 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
866 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
/* Decode a 16-bit enhanced ERTM control field into @control.  The
 * frame-type bit selects between the S-frame (supervisory) and I-frame
 * (information) layouts; fields belonging to the other frame type are
 * zeroed so callers can test them unconditionally.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
/* Decode a 32-bit extended ERTM control field into @control.  Same
 * structure as __unpack_enhanced_control() but using the extended
 * (wider sequence number) bit layout.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
/* Strip the control field (enhanced or extended, depending on the
 * channel's FLAG_EXT_CTRL) from the front of @skb and decode it into
 * the skb's control block.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
/* Encode @control into a 32-bit extended control field.  S-frames set
 * the frame-type bit plus poll/supervise; I-frames carry SAR and the
 * tx sequence number instead.
 */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
/* Encode @control into a 16-bit enhanced control field; the enhanced
 * counterpart of __pack_extended_control().
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
/* Write the packed control field into @skb immediately after the basic
 * L2CAP header.  The caller must already have reserved the space; this
 * only overwrites it in place.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
982 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
984 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
985 return L2CAP_EXT_HDR_SIZE
;
987 return L2CAP_ENH_HDR_SIZE
;
990 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
994 struct l2cap_hdr
*lh
;
995 int hlen
= __ertm_hdr_size(chan
);
997 if (chan
->fcs
== L2CAP_FCS_CRC16
)
998 hlen
+= L2CAP_FCS_SIZE
;
1000 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1003 return ERR_PTR(-ENOMEM
);
1005 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1006 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1007 lh
->cid
= cpu_to_le16(chan
->dcid
);
1009 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1010 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1012 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1014 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1015 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1016 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1019 skb
->priority
= HCI_PRIO_MAX
;
1023 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1024 struct l2cap_ctrl
*control
)
1026 struct sk_buff
*skb
;
1029 BT_DBG("chan %p, control %p", chan
, control
);
1031 if (!control
->sframe
)
1034 if (__chan_is_moving(chan
))
1037 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1041 if (control
->super
== L2CAP_SUPER_RR
)
1042 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1043 else if (control
->super
== L2CAP_SUPER_RNR
)
1044 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1046 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1047 chan
->last_acked_seq
= control
->reqseq
;
1048 __clear_ack_timer(chan
);
1051 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1052 control
->final
, control
->poll
, control
->super
);
1054 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1055 control_field
= __pack_extended_control(control
);
1057 control_field
= __pack_enhanced_control(control
);
1059 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1061 l2cap_do_send(chan
, skb
);
/* Send a Receiver Ready supervisory frame -- or Receiver Not Ready
 * while the local side is busy -- acknowledging everything up to
 * buffer_seq.  @poll sets the P-bit to solicit a response from the
 * peer.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
/* True when no connect request is currently outstanding for this
 * channel.
 */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1088 static bool __amp_capable(struct l2cap_chan
*chan
)
1090 struct l2cap_conn
*conn
= chan
->conn
;
1091 struct hci_dev
*hdev
;
1092 bool amp_available
= false;
1094 if (!conn
->hs_enabled
)
1097 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1100 read_lock(&hci_dev_list_lock
);
1101 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1102 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1103 test_bit(HCI_UP
, &hdev
->flags
)) {
1104 amp_available
= true;
1108 read_unlock(&hci_dev_list_lock
);
1110 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1111 return amp_available
;
1116 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1118 /* Check EFS parameters */
1122 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1124 struct l2cap_conn
*conn
= chan
->conn
;
1125 struct l2cap_conn_req req
;
1127 req
.scid
= cpu_to_le16(chan
->scid
);
1128 req
.psm
= chan
->psm
;
1130 chan
->ident
= l2cap_get_ident(conn
);
1132 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1134 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1137 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1139 struct l2cap_create_chan_req req
;
1140 req
.scid
= cpu_to_le16(chan
->scid
);
1141 req
.psm
= chan
->psm
;
1142 req
.amp_id
= amp_id
;
1144 chan
->ident
= l2cap_get_ident(chan
->conn
);
1146 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1150 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1152 struct sk_buff
*skb
;
1154 BT_DBG("chan %p", chan
);
1156 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1159 __clear_retrans_timer(chan
);
1160 __clear_monitor_timer(chan
);
1161 __clear_ack_timer(chan
);
1163 chan
->retry_count
= 0;
1164 skb_queue_walk(&chan
->tx_q
, skb
) {
1165 if (bt_cb(skb
)->control
.retries
)
1166 bt_cb(skb
)->control
.retries
= 1;
1171 chan
->expected_tx_seq
= chan
->buffer_seq
;
1173 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1174 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1175 l2cap_seq_list_clear(&chan
->retrans_list
);
1176 l2cap_seq_list_clear(&chan
->srej_list
);
1177 skb_queue_purge(&chan
->srej_q
);
1179 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1180 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1182 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1185 static void l2cap_move_done(struct l2cap_chan
*chan
)
1187 u8 move_role
= chan
->move_role
;
1188 BT_DBG("chan %p", chan
);
1190 chan
->move_state
= L2CAP_MOVE_STABLE
;
1191 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1193 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1196 switch (move_role
) {
1197 case L2CAP_MOVE_ROLE_INITIATOR
:
1198 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1199 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1201 case L2CAP_MOVE_ROLE_RESPONDER
:
1202 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1207 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1209 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1210 chan
->conf_state
= 0;
1211 __clear_chan_timer(chan
);
1213 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1214 chan
->ops
->suspend(chan
);
1216 chan
->state
= BT_CONNECTED
;
1218 chan
->ops
->ready(chan
);
1221 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1223 struct l2cap_conn
*conn
= chan
->conn
;
1224 struct l2cap_le_conn_req req
;
1226 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1229 req
.psm
= chan
->psm
;
1230 req
.scid
= cpu_to_le16(chan
->scid
);
1231 req
.mtu
= cpu_to_le16(chan
->imtu
);
1232 req
.mps
= cpu_to_le16(chan
->mps
);
1233 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1235 chan
->ident
= l2cap_get_ident(conn
);
1237 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1241 static void l2cap_le_start(struct l2cap_chan
*chan
)
1243 struct l2cap_conn
*conn
= chan
->conn
;
1245 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1249 l2cap_chan_ready(chan
);
1253 if (chan
->state
== BT_CONNECT
)
1254 l2cap_le_connect(chan
);
1257 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1259 if (__amp_capable(chan
)) {
1260 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1261 a2mp_discover_amp(chan
);
1262 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1263 l2cap_le_start(chan
);
1265 l2cap_send_conn_req(chan
);
1269 static void l2cap_do_start(struct l2cap_chan
*chan
)
1271 struct l2cap_conn
*conn
= chan
->conn
;
1273 if (conn
->hcon
->type
== LE_LINK
) {
1274 l2cap_le_start(chan
);
1278 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1279 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1282 if (l2cap_chan_check_security(chan
, true) &&
1283 __l2cap_no_conn_pending(chan
)) {
1284 l2cap_start_connection(chan
);
1287 struct l2cap_info_req req
;
1288 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1290 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1291 conn
->info_ident
= l2cap_get_ident(conn
);
1293 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1295 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1300 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1302 u32 local_feat_mask
= l2cap_feat_mask
;
1304 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1307 case L2CAP_MODE_ERTM
:
1308 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1309 case L2CAP_MODE_STREAMING
:
1310 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1316 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1318 struct l2cap_conn
*conn
= chan
->conn
;
1319 struct l2cap_disconn_req req
;
1324 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1325 __clear_retrans_timer(chan
);
1326 __clear_monitor_timer(chan
);
1327 __clear_ack_timer(chan
);
1330 if (chan
->scid
== L2CAP_CID_A2MP
) {
1331 l2cap_state_change(chan
, BT_DISCONN
);
1335 req
.dcid
= cpu_to_le16(chan
->dcid
);
1336 req
.scid
= cpu_to_le16(chan
->scid
);
1337 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1340 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1343 /* ---- L2CAP connections ---- */
1344 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1346 struct l2cap_chan
*chan
, *tmp
;
1348 BT_DBG("conn %p", conn
);
1350 mutex_lock(&conn
->chan_lock
);
1352 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1353 l2cap_chan_lock(chan
);
1355 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1356 l2cap_chan_unlock(chan
);
1360 if (chan
->state
== BT_CONNECT
) {
1361 if (!l2cap_chan_check_security(chan
, true) ||
1362 !__l2cap_no_conn_pending(chan
)) {
1363 l2cap_chan_unlock(chan
);
1367 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1368 && test_bit(CONF_STATE2_DEVICE
,
1369 &chan
->conf_state
)) {
1370 l2cap_chan_close(chan
, ECONNRESET
);
1371 l2cap_chan_unlock(chan
);
1375 l2cap_start_connection(chan
);
1377 } else if (chan
->state
== BT_CONNECT2
) {
1378 struct l2cap_conn_rsp rsp
;
1380 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1381 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1383 if (l2cap_chan_check_security(chan
, false)) {
1384 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1385 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1386 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1387 chan
->ops
->defer(chan
);
1390 l2cap_state_change(chan
, BT_CONFIG
);
1391 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1392 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1395 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1396 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1399 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1402 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1403 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1404 l2cap_chan_unlock(chan
);
1408 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1409 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1410 l2cap_build_conf_req(chan
, buf
), buf
);
1411 chan
->num_conf_req
++;
1414 l2cap_chan_unlock(chan
);
1417 mutex_unlock(&conn
->chan_lock
);
1420 /* Find socket with cid and source/destination bdaddr.
1421 * Returns closest match, locked.
1423 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1427 struct l2cap_chan
*c
, *c1
= NULL
;
1429 read_lock(&chan_list_lock
);
1431 list_for_each_entry(c
, &chan_list
, global_l
) {
1432 if (state
&& c
->state
!= state
)
1435 if (c
->scid
== cid
) {
1436 int src_match
, dst_match
;
1437 int src_any
, dst_any
;
1440 src_match
= !bacmp(&c
->src
, src
);
1441 dst_match
= !bacmp(&c
->dst
, dst
);
1442 if (src_match
&& dst_match
) {
1443 read_unlock(&chan_list_lock
);
1448 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1449 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1450 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1451 (src_any
&& dst_any
))
1456 read_unlock(&chan_list_lock
);
1461 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1463 struct hci_conn
*hcon
= conn
->hcon
;
1464 struct hci_dev
*hdev
= hcon
->hdev
;
1465 struct l2cap_chan
*chan
, *pchan
;
1470 /* Check if we have socket listening on cid */
1471 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1472 &hcon
->src
, &hcon
->dst
);
1476 /* Client ATT sockets should override the server one */
1477 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1480 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
1482 /* If device is blocked, do not create a channel for it */
1483 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &hcon
->dst
, dst_type
))
1486 /* For LE slave connections, make sure the connection interval
1487 * is in the range of the minium and maximum interval that has
1488 * been configured for this connection. If not, then trigger
1489 * the connection update procedure.
1491 if (hcon
->role
== HCI_ROLE_SLAVE
&&
1492 (hcon
->le_conn_interval
< hcon
->le_conn_min_interval
||
1493 hcon
->le_conn_interval
> hcon
->le_conn_max_interval
)) {
1494 struct l2cap_conn_param_update_req req
;
1496 req
.min
= cpu_to_le16(hcon
->le_conn_min_interval
);
1497 req
.max
= cpu_to_le16(hcon
->le_conn_max_interval
);
1498 req
.latency
= cpu_to_le16(hcon
->le_conn_latency
);
1499 req
.to_multiplier
= cpu_to_le16(hcon
->le_supv_timeout
);
1501 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1502 L2CAP_CONN_PARAM_UPDATE_REQ
, sizeof(req
), &req
);
1505 l2cap_chan_lock(pchan
);
1507 chan
= pchan
->ops
->new_connection(pchan
);
1511 bacpy(&chan
->src
, &hcon
->src
);
1512 bacpy(&chan
->dst
, &hcon
->dst
);
1513 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1514 chan
->dst_type
= dst_type
;
1516 __l2cap_chan_add(conn
, chan
);
1519 l2cap_chan_unlock(pchan
);
1522 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1524 struct l2cap_chan
*chan
;
1525 struct hci_conn
*hcon
= conn
->hcon
;
1527 BT_DBG("conn %p", conn
);
1529 /* For outgoing pairing which doesn't necessarily have an
1530 * associated socket (e.g. mgmt_pair_device).
1532 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1533 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1535 mutex_lock(&conn
->chan_lock
);
1537 if (hcon
->type
== LE_LINK
)
1538 l2cap_le_conn_ready(conn
);
1540 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1542 l2cap_chan_lock(chan
);
1544 if (chan
->scid
== L2CAP_CID_A2MP
) {
1545 l2cap_chan_unlock(chan
);
1549 if (hcon
->type
== LE_LINK
) {
1550 l2cap_le_start(chan
);
1551 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1552 l2cap_chan_ready(chan
);
1554 } else if (chan
->state
== BT_CONNECT
) {
1555 l2cap_do_start(chan
);
1558 l2cap_chan_unlock(chan
);
1561 mutex_unlock(&conn
->chan_lock
);
1563 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1566 /* Notify sockets that we cannot guaranty reliability anymore */
1567 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1569 struct l2cap_chan
*chan
;
1571 BT_DBG("conn %p", conn
);
1573 mutex_lock(&conn
->chan_lock
);
1575 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1576 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1577 l2cap_chan_set_err(chan
, err
);
1580 mutex_unlock(&conn
->chan_lock
);
1583 static void l2cap_info_timeout(struct work_struct
*work
)
1585 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1588 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1589 conn
->info_ident
= 0;
1591 l2cap_conn_start(conn
);
1596 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1597 * callback is called during registration. The ->remove callback is called
1598 * during unregistration.
1599 * An l2cap_user object can either be explicitly unregistered or when the
1600 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1601 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1602 * External modules must own a reference to the l2cap_conn object if they intend
1603 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1604 * any time if they don't.
1607 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1609 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1612 /* We need to check whether l2cap_conn is registered. If it is not, we
1613 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1614 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1615 * relies on the parent hci_conn object to be locked. This itself relies
1616 * on the hci_dev object to be locked. So we must lock the hci device
1621 if (user
->list
.next
|| user
->list
.prev
) {
1626 /* conn->hchan is NULL after l2cap_conn_del() was called */
1632 ret
= user
->probe(conn
, user
);
1636 list_add(&user
->list
, &conn
->users
);
1640 hci_dev_unlock(hdev
);
1643 EXPORT_SYMBOL(l2cap_register_user
);
1645 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1647 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1651 if (!user
->list
.next
|| !user
->list
.prev
)
1654 list_del(&user
->list
);
1655 user
->list
.next
= NULL
;
1656 user
->list
.prev
= NULL
;
1657 user
->remove(conn
, user
);
1660 hci_dev_unlock(hdev
);
1662 EXPORT_SYMBOL(l2cap_unregister_user
);
1664 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1666 struct l2cap_user
*user
;
1668 while (!list_empty(&conn
->users
)) {
1669 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1670 list_del(&user
->list
);
1671 user
->list
.next
= NULL
;
1672 user
->list
.prev
= NULL
;
1673 user
->remove(conn
, user
);
1677 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1679 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1680 struct l2cap_chan
*chan
, *l
;
1685 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1687 kfree_skb(conn
->rx_skb
);
1689 skb_queue_purge(&conn
->pending_rx
);
1691 /* We can not call flush_work(&conn->pending_rx_work) here since we
1692 * might block if we are running on a worker from the same workqueue
1693 * pending_rx_work is waiting on.
1695 if (work_pending(&conn
->pending_rx_work
))
1696 cancel_work_sync(&conn
->pending_rx_work
);
1698 l2cap_unregister_all_users(conn
);
1700 mutex_lock(&conn
->chan_lock
);
1703 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1704 l2cap_chan_hold(chan
);
1705 l2cap_chan_lock(chan
);
1707 l2cap_chan_del(chan
, err
);
1709 l2cap_chan_unlock(chan
);
1711 chan
->ops
->close(chan
);
1712 l2cap_chan_put(chan
);
1715 mutex_unlock(&conn
->chan_lock
);
1717 hci_chan_del(conn
->hchan
);
1719 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1720 cancel_delayed_work_sync(&conn
->info_timer
);
1722 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1723 cancel_delayed_work_sync(&conn
->security_timer
);
1724 smp_chan_destroy(conn
);
1727 hcon
->l2cap_data
= NULL
;
1729 l2cap_conn_put(conn
);
1732 static void security_timeout(struct work_struct
*work
)
1734 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1735 security_timer
.work
);
1737 BT_DBG("conn %p", conn
);
1739 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1740 smp_chan_destroy(conn
);
1741 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1745 static void l2cap_conn_free(struct kref
*ref
)
1747 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1749 hci_conn_put(conn
->hcon
);
1753 void l2cap_conn_get(struct l2cap_conn
*conn
)
1755 kref_get(&conn
->ref
);
1757 EXPORT_SYMBOL(l2cap_conn_get
);
1759 void l2cap_conn_put(struct l2cap_conn
*conn
)
1761 kref_put(&conn
->ref
, l2cap_conn_free
);
1763 EXPORT_SYMBOL(l2cap_conn_put
);
1765 /* ---- Socket interface ---- */
1767 /* Find socket with psm and source / destination bdaddr.
1768 * Returns closest match.
1770 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1775 struct l2cap_chan
*c
, *c1
= NULL
;
1777 read_lock(&chan_list_lock
);
1779 list_for_each_entry(c
, &chan_list
, global_l
) {
1780 if (state
&& c
->state
!= state
)
1783 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1786 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1789 if (c
->psm
== psm
) {
1790 int src_match
, dst_match
;
1791 int src_any
, dst_any
;
1794 src_match
= !bacmp(&c
->src
, src
);
1795 dst_match
= !bacmp(&c
->dst
, dst
);
1796 if (src_match
&& dst_match
) {
1797 read_unlock(&chan_list_lock
);
1802 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1803 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1804 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1805 (src_any
&& dst_any
))
1810 read_unlock(&chan_list_lock
);
1815 static void l2cap_monitor_timeout(struct work_struct
*work
)
1817 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1818 monitor_timer
.work
);
1820 BT_DBG("chan %p", chan
);
1822 l2cap_chan_lock(chan
);
1825 l2cap_chan_unlock(chan
);
1826 l2cap_chan_put(chan
);
1830 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1832 l2cap_chan_unlock(chan
);
1833 l2cap_chan_put(chan
);
1836 static void l2cap_retrans_timeout(struct work_struct
*work
)
1838 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1839 retrans_timer
.work
);
1841 BT_DBG("chan %p", chan
);
1843 l2cap_chan_lock(chan
);
1846 l2cap_chan_unlock(chan
);
1847 l2cap_chan_put(chan
);
1851 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1852 l2cap_chan_unlock(chan
);
1853 l2cap_chan_put(chan
);
1856 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1857 struct sk_buff_head
*skbs
)
1859 struct sk_buff
*skb
;
1860 struct l2cap_ctrl
*control
;
1862 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1864 if (__chan_is_moving(chan
))
1867 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1869 while (!skb_queue_empty(&chan
->tx_q
)) {
1871 skb
= skb_dequeue(&chan
->tx_q
);
1873 bt_cb(skb
)->control
.retries
= 1;
1874 control
= &bt_cb(skb
)->control
;
1876 control
->reqseq
= 0;
1877 control
->txseq
= chan
->next_tx_seq
;
1879 __pack_control(chan
, control
, skb
);
1881 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1882 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1883 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1886 l2cap_do_send(chan
, skb
);
1888 BT_DBG("Sent txseq %u", control
->txseq
);
1890 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1891 chan
->frames_sent
++;
1895 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1897 struct sk_buff
*skb
, *tx_skb
;
1898 struct l2cap_ctrl
*control
;
1901 BT_DBG("chan %p", chan
);
1903 if (chan
->state
!= BT_CONNECTED
)
1906 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1909 if (__chan_is_moving(chan
))
1912 while (chan
->tx_send_head
&&
1913 chan
->unacked_frames
< chan
->remote_tx_win
&&
1914 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1916 skb
= chan
->tx_send_head
;
1918 bt_cb(skb
)->control
.retries
= 1;
1919 control
= &bt_cb(skb
)->control
;
1921 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1924 control
->reqseq
= chan
->buffer_seq
;
1925 chan
->last_acked_seq
= chan
->buffer_seq
;
1926 control
->txseq
= chan
->next_tx_seq
;
1928 __pack_control(chan
, control
, skb
);
1930 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1931 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1932 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1935 /* Clone after data has been modified. Data is assumed to be
1936 read-only (for locking purposes) on cloned sk_buffs.
1938 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1943 __set_retrans_timer(chan
);
1945 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1946 chan
->unacked_frames
++;
1947 chan
->frames_sent
++;
1950 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1951 chan
->tx_send_head
= NULL
;
1953 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1955 l2cap_do_send(chan
, tx_skb
);
1956 BT_DBG("Sent txseq %u", control
->txseq
);
1959 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1960 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1965 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1967 struct l2cap_ctrl control
;
1968 struct sk_buff
*skb
;
1969 struct sk_buff
*tx_skb
;
1972 BT_DBG("chan %p", chan
);
1974 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1977 if (__chan_is_moving(chan
))
1980 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1981 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1983 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1985 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1990 bt_cb(skb
)->control
.retries
++;
1991 control
= bt_cb(skb
)->control
;
1993 if (chan
->max_tx
!= 0 &&
1994 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1995 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1996 l2cap_send_disconn_req(chan
, ECONNRESET
);
1997 l2cap_seq_list_clear(&chan
->retrans_list
);
2001 control
.reqseq
= chan
->buffer_seq
;
2002 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2007 if (skb_cloned(skb
)) {
2008 /* Cloned sk_buffs are read-only, so we need a
2011 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2013 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2017 l2cap_seq_list_clear(&chan
->retrans_list
);
2021 /* Update skb contents */
2022 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2023 put_unaligned_le32(__pack_extended_control(&control
),
2024 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2026 put_unaligned_le16(__pack_enhanced_control(&control
),
2027 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2030 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2031 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
2032 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2036 l2cap_do_send(chan
, tx_skb
);
2038 BT_DBG("Resent txseq %d", control
.txseq
);
2040 chan
->last_acked_seq
= chan
->buffer_seq
;
2044 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2045 struct l2cap_ctrl
*control
)
2047 BT_DBG("chan %p, control %p", chan
, control
);
2049 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2050 l2cap_ertm_resend(chan
);
2053 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2054 struct l2cap_ctrl
*control
)
2056 struct sk_buff
*skb
;
2058 BT_DBG("chan %p, control %p", chan
, control
);
2061 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2063 l2cap_seq_list_clear(&chan
->retrans_list
);
2065 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2068 if (chan
->unacked_frames
) {
2069 skb_queue_walk(&chan
->tx_q
, skb
) {
2070 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2071 skb
== chan
->tx_send_head
)
2075 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2076 if (skb
== chan
->tx_send_head
)
2079 l2cap_seq_list_append(&chan
->retrans_list
,
2080 bt_cb(skb
)->control
.txseq
);
2083 l2cap_ertm_resend(chan
);
2087 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2089 struct l2cap_ctrl control
;
2090 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2091 chan
->last_acked_seq
);
2094 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2095 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2097 memset(&control
, 0, sizeof(control
));
2100 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2101 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2102 __clear_ack_timer(chan
);
2103 control
.super
= L2CAP_SUPER_RNR
;
2104 control
.reqseq
= chan
->buffer_seq
;
2105 l2cap_send_sframe(chan
, &control
);
2107 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2108 l2cap_ertm_send(chan
);
2109 /* If any i-frames were sent, they included an ack */
2110 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2114 /* Ack now if the window is 3/4ths full.
2115 * Calculate without mul or div
2117 threshold
= chan
->ack_win
;
2118 threshold
+= threshold
<< 1;
2121 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2124 if (frames_to_ack
>= threshold
) {
2125 __clear_ack_timer(chan
);
2126 control
.super
= L2CAP_SUPER_RR
;
2127 control
.reqseq
= chan
->buffer_seq
;
2128 l2cap_send_sframe(chan
, &control
);
2133 __set_ack_timer(chan
);
2137 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2138 struct msghdr
*msg
, int len
,
2139 int count
, struct sk_buff
*skb
)
2141 struct l2cap_conn
*conn
= chan
->conn
;
2142 struct sk_buff
**frag
;
2145 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(skb
, count
),
2146 msg
->msg_iov
, count
))
2152 /* Continuation fragments (no L2CAP header) */
2153 frag
= &skb_shinfo(skb
)->frag_list
;
2155 struct sk_buff
*tmp
;
2157 count
= min_t(unsigned int, conn
->mtu
, len
);
2159 tmp
= chan
->ops
->alloc_skb(chan
, 0, count
,
2160 msg
->msg_flags
& MSG_DONTWAIT
);
2162 return PTR_ERR(tmp
);
2166 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(*frag
, count
),
2167 msg
->msg_iov
, count
))
2173 skb
->len
+= (*frag
)->len
;
2174 skb
->data_len
+= (*frag
)->len
;
2176 frag
= &(*frag
)->next
;
2182 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2183 struct msghdr
*msg
, size_t len
)
2185 struct l2cap_conn
*conn
= chan
->conn
;
2186 struct sk_buff
*skb
;
2187 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2188 struct l2cap_hdr
*lh
;
2190 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2191 __le16_to_cpu(chan
->psm
), len
);
2193 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2195 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2196 msg
->msg_flags
& MSG_DONTWAIT
);
2200 /* Create L2CAP header */
2201 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2202 lh
->cid
= cpu_to_le16(chan
->dcid
);
2203 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2204 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2206 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2207 if (unlikely(err
< 0)) {
2209 return ERR_PTR(err
);
2214 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2215 struct msghdr
*msg
, size_t len
)
2217 struct l2cap_conn
*conn
= chan
->conn
;
2218 struct sk_buff
*skb
;
2220 struct l2cap_hdr
*lh
;
2222 BT_DBG("chan %p len %zu", chan
, len
);
2224 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2226 skb
= chan
->ops
->alloc_skb(chan
, L2CAP_HDR_SIZE
, count
,
2227 msg
->msg_flags
& MSG_DONTWAIT
);
2231 /* Create L2CAP header */
2232 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2233 lh
->cid
= cpu_to_le16(chan
->dcid
);
2234 lh
->len
= cpu_to_le16(len
);
2236 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2237 if (unlikely(err
< 0)) {
2239 return ERR_PTR(err
);
2244 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2245 struct msghdr
*msg
, size_t len
,
2248 struct l2cap_conn
*conn
= chan
->conn
;
2249 struct sk_buff
*skb
;
2250 int err
, count
, hlen
;
2251 struct l2cap_hdr
*lh
;
2253 BT_DBG("chan %p len %zu", chan
, len
);
2256 return ERR_PTR(-ENOTCONN
);
2258 hlen
= __ertm_hdr_size(chan
);
2261 hlen
+= L2CAP_SDULEN_SIZE
;
2263 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2264 hlen
+= L2CAP_FCS_SIZE
;
2266 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2268 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2269 msg
->msg_flags
& MSG_DONTWAIT
);
2273 /* Create L2CAP header */
2274 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2275 lh
->cid
= cpu_to_le16(chan
->dcid
);
2276 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2278 /* Control header is populated later */
2279 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2280 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2282 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2285 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2287 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2288 if (unlikely(err
< 0)) {
2290 return ERR_PTR(err
);
2293 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2294 bt_cb(skb
)->control
.retries
= 0;
2298 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2299 struct sk_buff_head
*seg_queue
,
2300 struct msghdr
*msg
, size_t len
)
2302 struct sk_buff
*skb
;
2307 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2309 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2310 * so fragmented skbs are not used. The HCI layer's handling
2311 * of fragmented skbs is not compatible with ERTM's queueing.
2314 /* PDU size is derived from the HCI MTU */
2315 pdu_len
= chan
->conn
->mtu
;
2317 /* Constrain PDU size for BR/EDR connections */
2319 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2321 /* Adjust for largest possible L2CAP overhead. */
2323 pdu_len
-= L2CAP_FCS_SIZE
;
2325 pdu_len
-= __ertm_hdr_size(chan
);
2327 /* Remote device may have requested smaller PDUs */
2328 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2330 if (len
<= pdu_len
) {
2331 sar
= L2CAP_SAR_UNSEGMENTED
;
2335 sar
= L2CAP_SAR_START
;
2337 pdu_len
-= L2CAP_SDULEN_SIZE
;
2341 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2344 __skb_queue_purge(seg_queue
);
2345 return PTR_ERR(skb
);
2348 bt_cb(skb
)->control
.sar
= sar
;
2349 __skb_queue_tail(seg_queue
, skb
);
2354 pdu_len
+= L2CAP_SDULEN_SIZE
;
2357 if (len
<= pdu_len
) {
2358 sar
= L2CAP_SAR_END
;
2361 sar
= L2CAP_SAR_CONTINUE
;
2368 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2370 size_t len
, u16 sdulen
)
2372 struct l2cap_conn
*conn
= chan
->conn
;
2373 struct sk_buff
*skb
;
2374 int err
, count
, hlen
;
2375 struct l2cap_hdr
*lh
;
2377 BT_DBG("chan %p len %zu", chan
, len
);
2380 return ERR_PTR(-ENOTCONN
);
2382 hlen
= L2CAP_HDR_SIZE
;
2385 hlen
+= L2CAP_SDULEN_SIZE
;
2387 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2389 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2390 msg
->msg_flags
& MSG_DONTWAIT
);
2394 /* Create L2CAP header */
2395 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2396 lh
->cid
= cpu_to_le16(chan
->dcid
);
2397 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2400 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2402 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2403 if (unlikely(err
< 0)) {
2405 return ERR_PTR(err
);
2411 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2412 struct sk_buff_head
*seg_queue
,
2413 struct msghdr
*msg
, size_t len
)
2415 struct sk_buff
*skb
;
2419 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2421 pdu_len
= chan
->conn
->mtu
- L2CAP_HDR_SIZE
;
2423 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2426 pdu_len
-= L2CAP_SDULEN_SIZE
;
2432 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2434 __skb_queue_purge(seg_queue
);
2435 return PTR_ERR(skb
);
2438 __skb_queue_tail(seg_queue
, skb
);
2444 pdu_len
+= L2CAP_SDULEN_SIZE
;
2451 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
2453 struct sk_buff
*skb
;
2455 struct sk_buff_head seg_queue
;
2460 /* Connectionless channel */
2461 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2462 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
2464 return PTR_ERR(skb
);
2466 /* Channel lock is released before requesting new skb and then
2467 * reacquired thus we need to recheck channel state.
2469 if (chan
->state
!= BT_CONNECTED
) {
2474 l2cap_do_send(chan
, skb
);
2478 switch (chan
->mode
) {
2479 case L2CAP_MODE_LE_FLOWCTL
:
2480 /* Check outgoing MTU */
2481 if (len
> chan
->omtu
)
2484 if (!chan
->tx_credits
)
2487 __skb_queue_head_init(&seg_queue
);
2489 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2491 if (chan
->state
!= BT_CONNECTED
) {
2492 __skb_queue_purge(&seg_queue
);
2499 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2501 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2502 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2506 if (!chan
->tx_credits
)
2507 chan
->ops
->suspend(chan
);
2513 case L2CAP_MODE_BASIC
:
2514 /* Check outgoing MTU */
2515 if (len
> chan
->omtu
)
2518 /* Create a basic PDU */
2519 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
2521 return PTR_ERR(skb
);
2523 /* Channel lock is released before requesting new skb and then
2524 * reacquired thus we need to recheck channel state.
2526 if (chan
->state
!= BT_CONNECTED
) {
2531 l2cap_do_send(chan
, skb
);
2535 case L2CAP_MODE_ERTM
:
2536 case L2CAP_MODE_STREAMING
:
2537 /* Check outgoing MTU */
2538 if (len
> chan
->omtu
) {
2543 __skb_queue_head_init(&seg_queue
);
2545 /* Do segmentation before calling in to the state machine,
2546 * since it's possible to block while waiting for memory
2549 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2551 /* The channel could have been closed while segmenting,
2552 * check that it is still connected.
2554 if (chan
->state
!= BT_CONNECTED
) {
2555 __skb_queue_purge(&seg_queue
);
2562 if (chan
->mode
== L2CAP_MODE_ERTM
)
2563 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2565 l2cap_streaming_send(chan
, &seg_queue
);
2569 /* If the skbs were not queued for sending, they'll still be in
2570 * seg_queue and need to be purged.
2572 __skb_queue_purge(&seg_queue
);
2576 BT_DBG("bad state %1.1x", chan
->mode
);
2582 EXPORT_SYMBOL_GPL(l2cap_chan_send
);
2584 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2586 struct l2cap_ctrl control
;
2589 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2591 memset(&control
, 0, sizeof(control
));
2593 control
.super
= L2CAP_SUPER_SREJ
;
2595 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2596 seq
= __next_seq(chan
, seq
)) {
2597 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2598 control
.reqseq
= seq
;
2599 l2cap_send_sframe(chan
, &control
);
2600 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2604 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2607 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2609 struct l2cap_ctrl control
;
2611 BT_DBG("chan %p", chan
);
2613 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2616 memset(&control
, 0, sizeof(control
));
2618 control
.super
= L2CAP_SUPER_SREJ
;
2619 control
.reqseq
= chan
->srej_list
.tail
;
2620 l2cap_send_sframe(chan
, &control
);
2623 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2625 struct l2cap_ctrl control
;
2629 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2631 memset(&control
, 0, sizeof(control
));
2633 control
.super
= L2CAP_SUPER_SREJ
;
2635 /* Capture initial list head to allow only one pass through the list. */
2636 initial_head
= chan
->srej_list
.head
;
2639 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2640 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2643 control
.reqseq
= seq
;
2644 l2cap_send_sframe(chan
, &control
);
2645 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2646 } while (chan
->srej_list
.head
!= initial_head
);
2649 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2651 struct sk_buff
*acked_skb
;
2654 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2656 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2659 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2660 chan
->expected_ack_seq
, chan
->unacked_frames
);
2662 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2663 ackseq
= __next_seq(chan
, ackseq
)) {
2665 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2667 skb_unlink(acked_skb
, &chan
->tx_q
);
2668 kfree_skb(acked_skb
);
2669 chan
->unacked_frames
--;
2673 chan
->expected_ack_seq
= reqseq
;
2675 if (chan
->unacked_frames
== 0)
2676 __clear_retrans_timer(chan
);
2678 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2681 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2683 BT_DBG("chan %p", chan
);
2685 chan
->expected_tx_seq
= chan
->buffer_seq
;
2686 l2cap_seq_list_clear(&chan
->srej_list
);
2687 skb_queue_purge(&chan
->srej_q
);
2688 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2691 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2692 struct l2cap_ctrl
*control
,
2693 struct sk_buff_head
*skbs
, u8 event
)
2695 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2699 case L2CAP_EV_DATA_REQUEST
:
2700 if (chan
->tx_send_head
== NULL
)
2701 chan
->tx_send_head
= skb_peek(skbs
);
2703 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2704 l2cap_ertm_send(chan
);
2706 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2707 BT_DBG("Enter LOCAL_BUSY");
2708 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2710 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2711 /* The SREJ_SENT state must be aborted if we are to
2712 * enter the LOCAL_BUSY state.
2714 l2cap_abort_rx_srej_sent(chan
);
2717 l2cap_send_ack(chan
);
2720 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2721 BT_DBG("Exit LOCAL_BUSY");
2722 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2724 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2725 struct l2cap_ctrl local_control
;
2727 memset(&local_control
, 0, sizeof(local_control
));
2728 local_control
.sframe
= 1;
2729 local_control
.super
= L2CAP_SUPER_RR
;
2730 local_control
.poll
= 1;
2731 local_control
.reqseq
= chan
->buffer_seq
;
2732 l2cap_send_sframe(chan
, &local_control
);
2734 chan
->retry_count
= 1;
2735 __set_monitor_timer(chan
);
2736 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2739 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2740 l2cap_process_reqseq(chan
, control
->reqseq
);
2742 case L2CAP_EV_EXPLICIT_POLL
:
2743 l2cap_send_rr_or_rnr(chan
, 1);
2744 chan
->retry_count
= 1;
2745 __set_monitor_timer(chan
);
2746 __clear_ack_timer(chan
);
2747 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2749 case L2CAP_EV_RETRANS_TO
:
2750 l2cap_send_rr_or_rnr(chan
, 1);
2751 chan
->retry_count
= 1;
2752 __set_monitor_timer(chan
);
2753 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2755 case L2CAP_EV_RECV_FBIT
:
2756 /* Nothing to process */
2763 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2764 struct l2cap_ctrl
*control
,
2765 struct sk_buff_head
*skbs
, u8 event
)
2767 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2771 case L2CAP_EV_DATA_REQUEST
:
2772 if (chan
->tx_send_head
== NULL
)
2773 chan
->tx_send_head
= skb_peek(skbs
);
2774 /* Queue data, but don't send. */
2775 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2777 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2778 BT_DBG("Enter LOCAL_BUSY");
2779 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2781 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2782 /* The SREJ_SENT state must be aborted if we are to
2783 * enter the LOCAL_BUSY state.
2785 l2cap_abort_rx_srej_sent(chan
);
2788 l2cap_send_ack(chan
);
2791 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2792 BT_DBG("Exit LOCAL_BUSY");
2793 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2795 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2796 struct l2cap_ctrl local_control
;
2797 memset(&local_control
, 0, sizeof(local_control
));
2798 local_control
.sframe
= 1;
2799 local_control
.super
= L2CAP_SUPER_RR
;
2800 local_control
.poll
= 1;
2801 local_control
.reqseq
= chan
->buffer_seq
;
2802 l2cap_send_sframe(chan
, &local_control
);
2804 chan
->retry_count
= 1;
2805 __set_monitor_timer(chan
);
2806 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2809 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2810 l2cap_process_reqseq(chan
, control
->reqseq
);
2814 case L2CAP_EV_RECV_FBIT
:
2815 if (control
&& control
->final
) {
2816 __clear_monitor_timer(chan
);
2817 if (chan
->unacked_frames
> 0)
2818 __set_retrans_timer(chan
);
2819 chan
->retry_count
= 0;
2820 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2821 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2824 case L2CAP_EV_EXPLICIT_POLL
:
2827 case L2CAP_EV_MONITOR_TO
:
2828 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2829 l2cap_send_rr_or_rnr(chan
, 1);
2830 __set_monitor_timer(chan
);
2831 chan
->retry_count
++;
2833 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2841 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2842 struct sk_buff_head
*skbs
, u8 event
)
2844 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2845 chan
, control
, skbs
, event
, chan
->tx_state
);
2847 switch (chan
->tx_state
) {
2848 case L2CAP_TX_STATE_XMIT
:
2849 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2851 case L2CAP_TX_STATE_WAIT_F
:
2852 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2860 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2861 struct l2cap_ctrl
*control
)
2863 BT_DBG("chan %p, control %p", chan
, control
);
2864 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2867 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2868 struct l2cap_ctrl
*control
)
2870 BT_DBG("chan %p, control %p", chan
, control
);
2871 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2874 /* Copy frame to all raw sockets on that connection */
2875 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2877 struct sk_buff
*nskb
;
2878 struct l2cap_chan
*chan
;
2880 BT_DBG("conn %p", conn
);
2882 mutex_lock(&conn
->chan_lock
);
2884 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2885 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2888 /* Don't send frame to the channel it came from */
2889 if (bt_cb(skb
)->chan
== chan
)
2892 nskb
= skb_clone(skb
, GFP_KERNEL
);
2895 if (chan
->ops
->recv(chan
, nskb
))
2899 mutex_unlock(&conn
->chan_lock
);
2902 /* ---- L2CAP signalling commands ---- */
2903 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2904 u8 ident
, u16 dlen
, void *data
)
2906 struct sk_buff
*skb
, **frag
;
2907 struct l2cap_cmd_hdr
*cmd
;
2908 struct l2cap_hdr
*lh
;
2911 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2912 conn
, code
, ident
, dlen
);
2914 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2917 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2918 count
= min_t(unsigned int, conn
->mtu
, len
);
2920 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2924 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2925 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2927 if (conn
->hcon
->type
== LE_LINK
)
2928 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2930 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2932 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2935 cmd
->len
= cpu_to_le16(dlen
);
2938 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2939 memcpy(skb_put(skb
, count
), data
, count
);
2945 /* Continuation fragments (no L2CAP header) */
2946 frag
= &skb_shinfo(skb
)->frag_list
;
2948 count
= min_t(unsigned int, conn
->mtu
, len
);
2950 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2954 memcpy(skb_put(*frag
, count
), data
, count
);
2959 frag
= &(*frag
)->next
;
2969 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2972 struct l2cap_conf_opt
*opt
= *ptr
;
2975 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2983 *val
= *((u8
*) opt
->val
);
2987 *val
= get_unaligned_le16(opt
->val
);
2991 *val
= get_unaligned_le32(opt
->val
);
2995 *val
= (unsigned long) opt
->val
;
2999 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
3003 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
3005 struct l2cap_conf_opt
*opt
= *ptr
;
3007 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
3014 *((u8
*) opt
->val
) = val
;
3018 put_unaligned_le16(val
, opt
->val
);
3022 put_unaligned_le32(val
, opt
->val
);
3026 memcpy(opt
->val
, (void *) val
, len
);
3030 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
3033 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3035 struct l2cap_conf_efs efs
;
3037 switch (chan
->mode
) {
3038 case L2CAP_MODE_ERTM
:
3039 efs
.id
= chan
->local_id
;
3040 efs
.stype
= chan
->local_stype
;
3041 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3042 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3043 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3044 efs
.flush_to
= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3047 case L2CAP_MODE_STREAMING
:
3049 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3050 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3051 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3060 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3061 (unsigned long) &efs
);
3064 static void l2cap_ack_timeout(struct work_struct
*work
)
3066 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3070 BT_DBG("chan %p", chan
);
3072 l2cap_chan_lock(chan
);
3074 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3075 chan
->last_acked_seq
);
3078 l2cap_send_rr_or_rnr(chan
, 0);
3080 l2cap_chan_unlock(chan
);
3081 l2cap_chan_put(chan
);
3084 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3088 chan
->next_tx_seq
= 0;
3089 chan
->expected_tx_seq
= 0;
3090 chan
->expected_ack_seq
= 0;
3091 chan
->unacked_frames
= 0;
3092 chan
->buffer_seq
= 0;
3093 chan
->frames_sent
= 0;
3094 chan
->last_acked_seq
= 0;
3096 chan
->sdu_last_frag
= NULL
;
3099 skb_queue_head_init(&chan
->tx_q
);
3101 chan
->local_amp_id
= AMP_ID_BREDR
;
3102 chan
->move_id
= AMP_ID_BREDR
;
3103 chan
->move_state
= L2CAP_MOVE_STABLE
;
3104 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3106 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3109 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3110 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3112 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3113 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3114 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3116 skb_queue_head_init(&chan
->srej_q
);
3118 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3122 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3124 l2cap_seq_list_free(&chan
->srej_list
);
3129 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3132 case L2CAP_MODE_STREAMING
:
3133 case L2CAP_MODE_ERTM
:
3134 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3138 return L2CAP_MODE_BASIC
;
3142 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3144 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3147 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3149 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3152 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3153 struct l2cap_conf_rfc
*rfc
)
3155 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3156 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3158 /* Class 1 devices have must have ERTM timeouts
3159 * exceeding the Link Supervision Timeout. The
3160 * default Link Supervision Timeout for AMP
3161 * controllers is 10 seconds.
3163 * Class 1 devices use 0xffffffff for their
3164 * best-effort flush timeout, so the clamping logic
3165 * will result in a timeout that meets the above
3166 * requirement. ERTM timeouts are 16-bit values, so
3167 * the maximum timeout is 65.535 seconds.
3170 /* Convert timeout to milliseconds and round */
3171 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3173 /* This is the recommended formula for class 2 devices
3174 * that start ERTM timers when packets are sent to the
3177 ertm_to
= 3 * ertm_to
+ 500;
3179 if (ertm_to
> 0xffff)
3182 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3183 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3185 rfc
->retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3186 rfc
->monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3190 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3192 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3193 __l2cap_ews_supported(chan
->conn
)) {
3194 /* use extended control field */
3195 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3196 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3198 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3199 L2CAP_DEFAULT_TX_WINDOW
);
3200 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3202 chan
->ack_win
= chan
->tx_win
;
3205 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3207 struct l2cap_conf_req
*req
= data
;
3208 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3209 void *ptr
= req
->data
;
3212 BT_DBG("chan %p", chan
);
3214 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3217 switch (chan
->mode
) {
3218 case L2CAP_MODE_STREAMING
:
3219 case L2CAP_MODE_ERTM
:
3220 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3223 if (__l2cap_efs_supported(chan
->conn
))
3224 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3228 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3233 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3234 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3236 switch (chan
->mode
) {
3237 case L2CAP_MODE_BASIC
:
3241 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3242 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3245 rfc
.mode
= L2CAP_MODE_BASIC
;
3247 rfc
.max_transmit
= 0;
3248 rfc
.retrans_timeout
= 0;
3249 rfc
.monitor_timeout
= 0;
3250 rfc
.max_pdu_size
= 0;
3252 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3253 (unsigned long) &rfc
);
3256 case L2CAP_MODE_ERTM
:
3257 rfc
.mode
= L2CAP_MODE_ERTM
;
3258 rfc
.max_transmit
= chan
->max_tx
;
3260 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3262 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3263 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3265 rfc
.max_pdu_size
= cpu_to_le16(size
);
3267 l2cap_txwin_setup(chan
);
3269 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3270 L2CAP_DEFAULT_TX_WINDOW
);
3272 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3273 (unsigned long) &rfc
);
3275 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3276 l2cap_add_opt_efs(&ptr
, chan
);
3278 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3279 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3282 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3283 if (chan
->fcs
== L2CAP_FCS_NONE
||
3284 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3285 chan
->fcs
= L2CAP_FCS_NONE
;
3286 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3291 case L2CAP_MODE_STREAMING
:
3292 l2cap_txwin_setup(chan
);
3293 rfc
.mode
= L2CAP_MODE_STREAMING
;
3295 rfc
.max_transmit
= 0;
3296 rfc
.retrans_timeout
= 0;
3297 rfc
.monitor_timeout
= 0;
3299 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3300 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3302 rfc
.max_pdu_size
= cpu_to_le16(size
);
3304 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3305 (unsigned long) &rfc
);
3307 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3308 l2cap_add_opt_efs(&ptr
, chan
);
3310 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3311 if (chan
->fcs
== L2CAP_FCS_NONE
||
3312 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3313 chan
->fcs
= L2CAP_FCS_NONE
;
3314 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3320 req
->dcid
= cpu_to_le16(chan
->dcid
);
3321 req
->flags
= cpu_to_le16(0);
/* Parse the accumulated Configure Request from the remote (chan->conf_req)
 * and build our Configure Response into 'data'.  Negotiates MTU, mode,
 * FCS, EFS, and extended windows; unknown non-hint options are echoed
 * back with L2CAP_CONF_UNKNOWN.  Returns the response length, or a
 * negative errno when the request must be refused outright.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3540 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3541 void *data
, u16
*result
)
3543 struct l2cap_conf_req
*req
= data
;
3544 void *ptr
= req
->data
;
3547 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3548 struct l2cap_conf_efs efs
;
3550 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3552 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3553 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3556 case L2CAP_CONF_MTU
:
3557 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3558 *result
= L2CAP_CONF_UNACCEPT
;
3559 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3562 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3565 case L2CAP_CONF_FLUSH_TO
:
3566 chan
->flush_to
= val
;
3567 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3571 case L2CAP_CONF_RFC
:
3572 if (olen
== sizeof(rfc
))
3573 memcpy(&rfc
, (void *)val
, olen
);
3575 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3576 rfc
.mode
!= chan
->mode
)
3577 return -ECONNREFUSED
;
3581 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3582 sizeof(rfc
), (unsigned long) &rfc
);
3585 case L2CAP_CONF_EWS
:
3586 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3587 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3591 case L2CAP_CONF_EFS
:
3592 if (olen
== sizeof(efs
))
3593 memcpy(&efs
, (void *)val
, olen
);
3595 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3596 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3597 efs
.stype
!= chan
->local_stype
)
3598 return -ECONNREFUSED
;
3600 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3601 (unsigned long) &efs
);
3604 case L2CAP_CONF_FCS
:
3605 if (*result
== L2CAP_CONF_PENDING
)
3606 if (val
== L2CAP_FCS_NONE
)
3607 set_bit(CONF_RECV_NO_FCS
,
3613 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3614 return -ECONNREFUSED
;
3616 chan
->mode
= rfc
.mode
;
3618 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3620 case L2CAP_MODE_ERTM
:
3621 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3622 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3623 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3624 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3625 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3628 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3629 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3630 chan
->local_sdu_itime
=
3631 le32_to_cpu(efs
.sdu_itime
);
3632 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3633 chan
->local_flush_to
=
3634 le32_to_cpu(efs
.flush_to
);
3638 case L2CAP_MODE_STREAMING
:
3639 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3643 req
->dcid
= cpu_to_le16(chan
->dcid
);
3644 req
->flags
= cpu_to_le16(0);
3649 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3650 u16 result
, u16 flags
)
3652 struct l2cap_conf_rsp
*rsp
= data
;
3653 void *ptr
= rsp
->data
;
3655 BT_DBG("chan %p", chan
);
3657 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3658 rsp
->result
= cpu_to_le16(result
);
3659 rsp
->flags
= cpu_to_le16(flags
);
3664 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3666 struct l2cap_le_conn_rsp rsp
;
3667 struct l2cap_conn
*conn
= chan
->conn
;
3669 BT_DBG("chan %p", chan
);
3671 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3672 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3673 rsp
.mps
= cpu_to_le16(chan
->mps
);
3674 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3675 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3677 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3681 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3683 struct l2cap_conn_rsp rsp
;
3684 struct l2cap_conn
*conn
= chan
->conn
;
3688 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3689 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3690 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3691 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3694 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3696 rsp_code
= L2CAP_CONN_RSP
;
3698 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3700 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3702 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3705 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3706 l2cap_build_conf_req(chan
, buf
), buf
);
3707 chan
->num_conf_req
++;
3710 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3714 /* Use sane default values in case a misbehaving remote device
3715 * did not send an RFC or extended window size option.
3717 u16 txwin_ext
= chan
->ack_win
;
3718 struct l2cap_conf_rfc rfc
= {
3720 .retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3721 .monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3722 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3723 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3726 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3728 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3731 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3732 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3735 case L2CAP_CONF_RFC
:
3736 if (olen
== sizeof(rfc
))
3737 memcpy(&rfc
, (void *)val
, olen
);
3739 case L2CAP_CONF_EWS
:
3746 case L2CAP_MODE_ERTM
:
3747 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3748 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3749 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3750 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3751 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3753 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3756 case L2CAP_MODE_STREAMING
:
3757 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3761 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3762 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3765 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3767 if (cmd_len
< sizeof(*rej
))
3770 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3773 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3774 cmd
->ident
== conn
->info_ident
) {
3775 cancel_delayed_work(&conn
->info_timer
);
3777 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3778 conn
->info_ident
= 0;
3780 l2cap_conn_start(conn
);
3786 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3787 struct l2cap_cmd_hdr
*cmd
,
3788 u8
*data
, u8 rsp_code
, u8 amp_id
)
3790 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3791 struct l2cap_conn_rsp rsp
;
3792 struct l2cap_chan
*chan
= NULL
, *pchan
;
3793 int result
, status
= L2CAP_CS_NO_INFO
;
3795 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3796 __le16 psm
= req
->psm
;
3798 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3800 /* Check if we have socket listening on psm */
3801 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3802 &conn
->hcon
->dst
, ACL_LINK
);
3804 result
= L2CAP_CR_BAD_PSM
;
3808 mutex_lock(&conn
->chan_lock
);
3809 l2cap_chan_lock(pchan
);
3811 /* Check if the ACL is secure enough (if not SDP) */
3812 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3813 !hci_conn_check_link_mode(conn
->hcon
)) {
3814 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3815 result
= L2CAP_CR_SEC_BLOCK
;
3819 result
= L2CAP_CR_NO_MEM
;
3821 /* Check if we already have channel with that dcid */
3822 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3825 chan
= pchan
->ops
->new_connection(pchan
);
3829 /* For certain devices (ex: HID mouse), support for authentication,
3830 * pairing and bonding is optional. For such devices, inorder to avoid
3831 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3832 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3834 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3836 bacpy(&chan
->src
, &conn
->hcon
->src
);
3837 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3838 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3839 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3842 chan
->local_amp_id
= amp_id
;
3844 __l2cap_chan_add(conn
, chan
);
3848 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3850 chan
->ident
= cmd
->ident
;
3852 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3853 if (l2cap_chan_check_security(chan
, false)) {
3854 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3855 l2cap_state_change(chan
, BT_CONNECT2
);
3856 result
= L2CAP_CR_PEND
;
3857 status
= L2CAP_CS_AUTHOR_PEND
;
3858 chan
->ops
->defer(chan
);
3860 /* Force pending result for AMP controllers.
3861 * The connection will succeed after the
3862 * physical link is up.
3864 if (amp_id
== AMP_ID_BREDR
) {
3865 l2cap_state_change(chan
, BT_CONFIG
);
3866 result
= L2CAP_CR_SUCCESS
;
3868 l2cap_state_change(chan
, BT_CONNECT2
);
3869 result
= L2CAP_CR_PEND
;
3871 status
= L2CAP_CS_NO_INFO
;
3874 l2cap_state_change(chan
, BT_CONNECT2
);
3875 result
= L2CAP_CR_PEND
;
3876 status
= L2CAP_CS_AUTHEN_PEND
;
3879 l2cap_state_change(chan
, BT_CONNECT2
);
3880 result
= L2CAP_CR_PEND
;
3881 status
= L2CAP_CS_NO_INFO
;
3885 l2cap_chan_unlock(pchan
);
3886 mutex_unlock(&conn
->chan_lock
);
3889 rsp
.scid
= cpu_to_le16(scid
);
3890 rsp
.dcid
= cpu_to_le16(dcid
);
3891 rsp
.result
= cpu_to_le16(result
);
3892 rsp
.status
= cpu_to_le16(status
);
3893 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3895 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3896 struct l2cap_info_req info
;
3897 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3899 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3900 conn
->info_ident
= l2cap_get_ident(conn
);
3902 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3904 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3905 sizeof(info
), &info
);
3908 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3909 result
== L2CAP_CR_SUCCESS
) {
3911 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3912 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3913 l2cap_build_conf_req(chan
, buf
), buf
);
3914 chan
->num_conf_req
++;
3920 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3921 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3923 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3924 struct hci_conn
*hcon
= conn
->hcon
;
3926 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3930 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3931 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3932 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3933 hcon
->dst_type
, 0, NULL
, 0,
3935 hci_dev_unlock(hdev
);
3937 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3941 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3942 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3945 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3946 u16 scid
, dcid
, result
, status
;
3947 struct l2cap_chan
*chan
;
3951 if (cmd_len
< sizeof(*rsp
))
3954 scid
= __le16_to_cpu(rsp
->scid
);
3955 dcid
= __le16_to_cpu(rsp
->dcid
);
3956 result
= __le16_to_cpu(rsp
->result
);
3957 status
= __le16_to_cpu(rsp
->status
);
3959 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3960 dcid
, scid
, result
, status
);
3962 mutex_lock(&conn
->chan_lock
);
3965 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3971 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3980 l2cap_chan_lock(chan
);
3983 case L2CAP_CR_SUCCESS
:
3984 l2cap_state_change(chan
, BT_CONFIG
);
3987 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3989 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3992 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3993 l2cap_build_conf_req(chan
, req
), req
);
3994 chan
->num_conf_req
++;
3998 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4002 l2cap_chan_del(chan
, ECONNREFUSED
);
4006 l2cap_chan_unlock(chan
);
4009 mutex_unlock(&conn
->chan_lock
);
4014 static inline void set_default_fcs(struct l2cap_chan
*chan
)
4016 /* FCS is enabled only in ERTM or streaming mode, if one or both
4019 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
4020 chan
->fcs
= L2CAP_FCS_NONE
;
4021 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
4022 chan
->fcs
= L2CAP_FCS_CRC16
;
4025 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
4026 u8 ident
, u16 flags
)
4028 struct l2cap_conn
*conn
= chan
->conn
;
4030 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4033 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4034 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4036 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4037 l2cap_build_conf_rsp(chan
, data
,
4038 L2CAP_CONF_SUCCESS
, flags
), data
);
4041 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
4044 struct l2cap_cmd_rej_cid rej
;
4046 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4047 rej
.scid
= __cpu_to_le16(scid
);
4048 rej
.dcid
= __cpu_to_le16(dcid
);
4050 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4053 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4054 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4057 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4060 struct l2cap_chan
*chan
;
4063 if (cmd_len
< sizeof(*req
))
4066 dcid
= __le16_to_cpu(req
->dcid
);
4067 flags
= __le16_to_cpu(req
->flags
);
4069 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4071 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4073 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4077 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4078 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4083 /* Reject if config buffer is too small. */
4084 len
= cmd_len
- sizeof(*req
);
4085 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4086 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4087 l2cap_build_conf_rsp(chan
, rsp
,
4088 L2CAP_CONF_REJECT
, flags
), rsp
);
4093 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4094 chan
->conf_len
+= len
;
4096 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4097 /* Incomplete config. Send empty response. */
4098 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4099 l2cap_build_conf_rsp(chan
, rsp
,
4100 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4104 /* Complete config. */
4105 len
= l2cap_parse_conf_req(chan
, rsp
);
4107 l2cap_send_disconn_req(chan
, ECONNRESET
);
4111 chan
->ident
= cmd
->ident
;
4112 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4113 chan
->num_conf_rsp
++;
4115 /* Reset config buffer. */
4118 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4121 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4122 set_default_fcs(chan
);
4124 if (chan
->mode
== L2CAP_MODE_ERTM
||
4125 chan
->mode
== L2CAP_MODE_STREAMING
)
4126 err
= l2cap_ertm_init(chan
);
4129 l2cap_send_disconn_req(chan
, -err
);
4131 l2cap_chan_ready(chan
);
4136 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4138 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4139 l2cap_build_conf_req(chan
, buf
), buf
);
4140 chan
->num_conf_req
++;
4143 /* Got Conf Rsp PENDING from remote side and asume we sent
4144 Conf Rsp PENDING in the code above */
4145 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4146 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4148 /* check compatibility */
4150 /* Send rsp for BR/EDR channel */
4152 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4154 chan
->ident
= cmd
->ident
;
4158 l2cap_chan_unlock(chan
);
4162 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4163 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4166 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4167 u16 scid
, flags
, result
;
4168 struct l2cap_chan
*chan
;
4169 int len
= cmd_len
- sizeof(*rsp
);
4172 if (cmd_len
< sizeof(*rsp
))
4175 scid
= __le16_to_cpu(rsp
->scid
);
4176 flags
= __le16_to_cpu(rsp
->flags
);
4177 result
= __le16_to_cpu(rsp
->result
);
4179 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4182 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4187 case L2CAP_CONF_SUCCESS
:
4188 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4189 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4192 case L2CAP_CONF_PENDING
:
4193 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4195 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4198 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4201 l2cap_send_disconn_req(chan
, ECONNRESET
);
4205 if (!chan
->hs_hcon
) {
4206 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4209 if (l2cap_check_efs(chan
)) {
4210 amp_create_logical_link(chan
);
4211 chan
->ident
= cmd
->ident
;
4217 case L2CAP_CONF_UNACCEPT
:
4218 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4221 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4222 l2cap_send_disconn_req(chan
, ECONNRESET
);
4226 /* throw out any old stored conf requests */
4227 result
= L2CAP_CONF_SUCCESS
;
4228 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4231 l2cap_send_disconn_req(chan
, ECONNRESET
);
4235 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4236 L2CAP_CONF_REQ
, len
, req
);
4237 chan
->num_conf_req
++;
4238 if (result
!= L2CAP_CONF_SUCCESS
)
4244 l2cap_chan_set_err(chan
, ECONNRESET
);
4246 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4247 l2cap_send_disconn_req(chan
, ECONNRESET
);
4251 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4254 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4256 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4257 set_default_fcs(chan
);
4259 if (chan
->mode
== L2CAP_MODE_ERTM
||
4260 chan
->mode
== L2CAP_MODE_STREAMING
)
4261 err
= l2cap_ertm_init(chan
);
4264 l2cap_send_disconn_req(chan
, -err
);
4266 l2cap_chan_ready(chan
);
4270 l2cap_chan_unlock(chan
);
4274 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4275 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4278 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4279 struct l2cap_disconn_rsp rsp
;
4281 struct l2cap_chan
*chan
;
4283 if (cmd_len
!= sizeof(*req
))
4286 scid
= __le16_to_cpu(req
->scid
);
4287 dcid
= __le16_to_cpu(req
->dcid
);
4289 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4291 mutex_lock(&conn
->chan_lock
);
4293 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4295 mutex_unlock(&conn
->chan_lock
);
4296 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4300 l2cap_chan_lock(chan
);
4302 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4303 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4304 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4306 chan
->ops
->set_shutdown(chan
);
4308 l2cap_chan_hold(chan
);
4309 l2cap_chan_del(chan
, ECONNRESET
);
4311 l2cap_chan_unlock(chan
);
4313 chan
->ops
->close(chan
);
4314 l2cap_chan_put(chan
);
4316 mutex_unlock(&conn
->chan_lock
);
4321 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4322 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4325 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4327 struct l2cap_chan
*chan
;
4329 if (cmd_len
!= sizeof(*rsp
))
4332 scid
= __le16_to_cpu(rsp
->scid
);
4333 dcid
= __le16_to_cpu(rsp
->dcid
);
4335 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4337 mutex_lock(&conn
->chan_lock
);
4339 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4341 mutex_unlock(&conn
->chan_lock
);
4345 l2cap_chan_lock(chan
);
4347 l2cap_chan_hold(chan
);
4348 l2cap_chan_del(chan
, 0);
4350 l2cap_chan_unlock(chan
);
4352 chan
->ops
->close(chan
);
4353 l2cap_chan_put(chan
);
4355 mutex_unlock(&conn
->chan_lock
);
4360 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4361 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4364 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4367 if (cmd_len
!= sizeof(*req
))
4370 type
= __le16_to_cpu(req
->type
);
4372 BT_DBG("type 0x%4.4x", type
);
4374 if (type
== L2CAP_IT_FEAT_MASK
) {
4376 u32 feat_mask
= l2cap_feat_mask
;
4377 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4378 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4379 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4381 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4383 if (conn
->hs_enabled
)
4384 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4385 | L2CAP_FEAT_EXT_WINDOW
;
4387 put_unaligned_le32(feat_mask
, rsp
->data
);
4388 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4390 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4392 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4394 if (conn
->hs_enabled
)
4395 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4397 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4399 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4400 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4401 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4402 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4405 struct l2cap_info_rsp rsp
;
4406 rsp
.type
= cpu_to_le16(type
);
4407 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4408 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4415 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4416 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4419 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4422 if (cmd_len
< sizeof(*rsp
))
4425 type
= __le16_to_cpu(rsp
->type
);
4426 result
= __le16_to_cpu(rsp
->result
);
4428 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4430 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4431 if (cmd
->ident
!= conn
->info_ident
||
4432 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4435 cancel_delayed_work(&conn
->info_timer
);
4437 if (result
!= L2CAP_IR_SUCCESS
) {
4438 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4439 conn
->info_ident
= 0;
4441 l2cap_conn_start(conn
);
4447 case L2CAP_IT_FEAT_MASK
:
4448 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4450 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4451 struct l2cap_info_req req
;
4452 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4454 conn
->info_ident
= l2cap_get_ident(conn
);
4456 l2cap_send_cmd(conn
, conn
->info_ident
,
4457 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4459 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4460 conn
->info_ident
= 0;
4462 l2cap_conn_start(conn
);
4466 case L2CAP_IT_FIXED_CHAN
:
4467 conn
->fixed_chan_mask
= rsp
->data
[0];
4468 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4469 conn
->info_ident
= 0;
4471 l2cap_conn_start(conn
);
4478 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4479 struct l2cap_cmd_hdr
*cmd
,
4480 u16 cmd_len
, void *data
)
4482 struct l2cap_create_chan_req
*req
= data
;
4483 struct l2cap_create_chan_rsp rsp
;
4484 struct l2cap_chan
*chan
;
4485 struct hci_dev
*hdev
;
4488 if (cmd_len
!= sizeof(*req
))
4491 if (!conn
->hs_enabled
)
4494 psm
= le16_to_cpu(req
->psm
);
4495 scid
= le16_to_cpu(req
->scid
);
4497 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4499 /* For controller id 0 make BR/EDR connection */
4500 if (req
->amp_id
== AMP_ID_BREDR
) {
4501 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4506 /* Validate AMP controller id */
4507 hdev
= hci_dev_get(req
->amp_id
);
4511 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4516 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4519 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4520 struct hci_conn
*hs_hcon
;
4522 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4526 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4531 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4533 mgr
->bredr_chan
= chan
;
4534 chan
->hs_hcon
= hs_hcon
;
4535 chan
->fcs
= L2CAP_FCS_NONE
;
4536 conn
->mtu
= hdev
->block_mtu
;
4545 rsp
.scid
= cpu_to_le16(scid
);
4546 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4547 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4549 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4555 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4557 struct l2cap_move_chan_req req
;
4560 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4562 ident
= l2cap_get_ident(chan
->conn
);
4563 chan
->ident
= ident
;
4565 req
.icid
= cpu_to_le16(chan
->scid
);
4566 req
.dest_amp_id
= dest_amp_id
;
4568 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4571 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4574 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4576 struct l2cap_move_chan_rsp rsp
;
4578 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4580 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4581 rsp
.result
= cpu_to_le16(result
);
4583 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4587 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4589 struct l2cap_move_chan_cfm cfm
;
4591 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4593 chan
->ident
= l2cap_get_ident(chan
->conn
);
4595 cfm
.icid
= cpu_to_le16(chan
->scid
);
4596 cfm
.result
= cpu_to_le16(result
);
4598 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4601 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4604 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4606 struct l2cap_move_chan_cfm cfm
;
4608 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4610 cfm
.icid
= cpu_to_le16(icid
);
4611 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4613 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4617 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4620 struct l2cap_move_chan_cfm_rsp rsp
;
4622 BT_DBG("icid 0x%4.4x", icid
);
4624 rsp
.icid
= cpu_to_le16(icid
);
4625 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4628 static void __release_logical_link(struct l2cap_chan
*chan
)
4630 chan
->hs_hchan
= NULL
;
4631 chan
->hs_hcon
= NULL
;
4633 /* Placeholder - release the logical link */
4636 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4638 /* Logical link setup failed */
4639 if (chan
->state
!= BT_CONNECTED
) {
4640 /* Create channel failure, disconnect */
4641 l2cap_send_disconn_req(chan
, ECONNRESET
);
4645 switch (chan
->move_role
) {
4646 case L2CAP_MOVE_ROLE_RESPONDER
:
4647 l2cap_move_done(chan
);
4648 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4650 case L2CAP_MOVE_ROLE_INITIATOR
:
4651 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4652 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4653 /* Remote has only sent pending or
4654 * success responses, clean up
4656 l2cap_move_done(chan
);
4659 /* Other amp move states imply that the move
4660 * has already aborted
4662 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4667 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4668 struct hci_chan
*hchan
)
4670 struct l2cap_conf_rsp rsp
;
4672 chan
->hs_hchan
= hchan
;
4673 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4675 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4677 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4680 set_default_fcs(chan
);
4682 err
= l2cap_ertm_init(chan
);
4684 l2cap_send_disconn_req(chan
, -err
);
4686 l2cap_chan_ready(chan
);
4690 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4691 struct hci_chan
*hchan
)
4693 chan
->hs_hcon
= hchan
->conn
;
4694 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4696 BT_DBG("move_state %d", chan
->move_state
);
4698 switch (chan
->move_state
) {
4699 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4700 /* Move confirm will be sent after a success
4701 * response is received
4703 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4705 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4706 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4707 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4708 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4709 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4710 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4711 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4712 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4713 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4717 /* Move was not in expected state, free the channel */
4718 __release_logical_link(chan
);
4720 chan
->move_state
= L2CAP_MOVE_STABLE
;
4724 /* Call with chan locked */
4725 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4728 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4731 l2cap_logical_fail(chan
);
4732 __release_logical_link(chan
);
4736 if (chan
->state
!= BT_CONNECTED
) {
4737 /* Ignore logical link if channel is on BR/EDR */
4738 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4739 l2cap_logical_finish_create(chan
, hchan
);
4741 l2cap_logical_finish_move(chan
, hchan
);
4745 void l2cap_move_start(struct l2cap_chan
*chan
)
4747 BT_DBG("chan %p", chan
);
4749 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4750 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4752 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4753 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4754 /* Placeholder - start physical link setup */
4756 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4757 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4759 l2cap_move_setup(chan
);
4760 l2cap_send_move_chan_req(chan
, 0);
4764 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4765 u8 local_amp_id
, u8 remote_amp_id
)
4767 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4768 local_amp_id
, remote_amp_id
);
4770 chan
->fcs
= L2CAP_FCS_NONE
;
4772 /* Outgoing channel on AMP */
4773 if (chan
->state
== BT_CONNECT
) {
4774 if (result
== L2CAP_CR_SUCCESS
) {
4775 chan
->local_amp_id
= local_amp_id
;
4776 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4778 /* Revert to BR/EDR connect */
4779 l2cap_send_conn_req(chan
);
4785 /* Incoming channel on AMP */
4786 if (__l2cap_no_conn_pending(chan
)) {
4787 struct l2cap_conn_rsp rsp
;
4789 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4790 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4792 if (result
== L2CAP_CR_SUCCESS
) {
4793 /* Send successful response */
4794 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4795 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4797 /* Send negative response */
4798 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4799 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4802 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4805 if (result
== L2CAP_CR_SUCCESS
) {
4806 l2cap_state_change(chan
, BT_CONFIG
);
4807 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4808 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4810 l2cap_build_conf_req(chan
, buf
), buf
);
4811 chan
->num_conf_req
++;
4816 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4819 l2cap_move_setup(chan
);
4820 chan
->move_id
= local_amp_id
;
4821 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4823 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4826 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4828 struct hci_chan
*hchan
= NULL
;
4830 /* Placeholder - get hci_chan for logical link */
4833 if (hchan
->state
== BT_CONNECTED
) {
4834 /* Logical link is ready to go */
4835 chan
->hs_hcon
= hchan
->conn
;
4836 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4837 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4838 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4840 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4842 /* Wait for logical link to be ready */
4843 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4846 /* Logical link not available */
4847 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4851 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4853 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4855 if (result
== -EINVAL
)
4856 rsp_result
= L2CAP_MR_BAD_ID
;
4858 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4860 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4863 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4864 chan
->move_state
= L2CAP_MOVE_STABLE
;
4866 /* Restart data transmission */
4867 l2cap_ertm_send(chan
);
4870 /* Invoke with locked chan */
4871 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4873 u8 local_amp_id
= chan
->local_amp_id
;
4874 u8 remote_amp_id
= chan
->remote_amp_id
;
4876 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4877 chan
, result
, local_amp_id
, remote_amp_id
);
4879 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4880 l2cap_chan_unlock(chan
);
4884 if (chan
->state
!= BT_CONNECTED
) {
4885 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4886 } else if (result
!= L2CAP_MR_SUCCESS
) {
4887 l2cap_do_move_cancel(chan
, result
);
4889 switch (chan
->move_role
) {
4890 case L2CAP_MOVE_ROLE_INITIATOR
:
4891 l2cap_do_move_initiate(chan
, local_amp_id
,
4894 case L2CAP_MOVE_ROLE_RESPONDER
:
4895 l2cap_do_move_respond(chan
, result
);
4898 l2cap_do_move_cancel(chan
, result
);
4904 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4905 struct l2cap_cmd_hdr
*cmd
,
4906 u16 cmd_len
, void *data
)
4908 struct l2cap_move_chan_req
*req
= data
;
4909 struct l2cap_move_chan_rsp rsp
;
4910 struct l2cap_chan
*chan
;
4912 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4914 if (cmd_len
!= sizeof(*req
))
4917 icid
= le16_to_cpu(req
->icid
);
4919 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4921 if (!conn
->hs_enabled
)
4924 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4926 rsp
.icid
= cpu_to_le16(icid
);
4927 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4928 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4933 chan
->ident
= cmd
->ident
;
4935 if (chan
->scid
< L2CAP_CID_DYN_START
||
4936 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4937 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4938 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4939 result
= L2CAP_MR_NOT_ALLOWED
;
4940 goto send_move_response
;
4943 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4944 result
= L2CAP_MR_SAME_ID
;
4945 goto send_move_response
;
4948 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4949 struct hci_dev
*hdev
;
4950 hdev
= hci_dev_get(req
->dest_amp_id
);
4951 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4952 !test_bit(HCI_UP
, &hdev
->flags
)) {
4956 result
= L2CAP_MR_BAD_ID
;
4957 goto send_move_response
;
4962 /* Detect a move collision. Only send a collision response
4963 * if this side has "lost", otherwise proceed with the move.
4964 * The winner has the larger bd_addr.
4966 if ((__chan_is_moving(chan
) ||
4967 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4968 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4969 result
= L2CAP_MR_COLLISION
;
4970 goto send_move_response
;
4973 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4974 l2cap_move_setup(chan
);
4975 chan
->move_id
= req
->dest_amp_id
;
4978 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4979 /* Moving to BR/EDR */
4980 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4981 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4982 result
= L2CAP_MR_PEND
;
4984 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4985 result
= L2CAP_MR_SUCCESS
;
4988 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4989 /* Placeholder - uncomment when amp functions are available */
4990 /*amp_accept_physical(chan, req->dest_amp_id);*/
4991 result
= L2CAP_MR_PEND
;
4995 l2cap_send_move_chan_rsp(chan
, result
);
4997 l2cap_chan_unlock(chan
);
5002 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
5004 struct l2cap_chan
*chan
;
5005 struct hci_chan
*hchan
= NULL
;
5007 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5009 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5013 __clear_chan_timer(chan
);
5014 if (result
== L2CAP_MR_PEND
)
5015 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
5017 switch (chan
->move_state
) {
5018 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
5019 /* Move confirm will be sent when logical link
5022 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5024 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
5025 if (result
== L2CAP_MR_PEND
) {
5027 } else if (test_bit(CONN_LOCAL_BUSY
,
5028 &chan
->conn_state
)) {
5029 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5031 /* Logical link is up or moving to BR/EDR,
5034 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
5035 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5038 case L2CAP_MOVE_WAIT_RSP
:
5040 if (result
== L2CAP_MR_SUCCESS
) {
5041 /* Remote is ready, send confirm immediately
5042 * after logical link is ready
5044 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5046 /* Both logical link and move success
5047 * are required to confirm
5049 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5052 /* Placeholder - get hci_chan for logical link */
5054 /* Logical link not available */
5055 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5059 /* If the logical link is not yet connected, do not
5060 * send confirmation.
5062 if (hchan
->state
!= BT_CONNECTED
)
5065 /* Logical link is already ready to go */
5067 chan
->hs_hcon
= hchan
->conn
;
5068 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5070 if (result
== L2CAP_MR_SUCCESS
) {
5071 /* Can confirm now */
5072 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5074 /* Now only need move success
5077 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5080 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5083 /* Any other amp move state means the move failed. */
5084 chan
->move_id
= chan
->local_amp_id
;
5085 l2cap_move_done(chan
);
5086 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5089 l2cap_chan_unlock(chan
);
5092 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5095 struct l2cap_chan
*chan
;
5097 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5099 /* Could not locate channel, icid is best guess */
5100 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5104 __clear_chan_timer(chan
);
5106 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5107 if (result
== L2CAP_MR_COLLISION
) {
5108 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5110 /* Cleanup - cancel move */
5111 chan
->move_id
= chan
->local_amp_id
;
5112 l2cap_move_done(chan
);
5116 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5118 l2cap_chan_unlock(chan
);
5121 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5122 struct l2cap_cmd_hdr
*cmd
,
5123 u16 cmd_len
, void *data
)
5125 struct l2cap_move_chan_rsp
*rsp
= data
;
5128 if (cmd_len
!= sizeof(*rsp
))
5131 icid
= le16_to_cpu(rsp
->icid
);
5132 result
= le16_to_cpu(rsp
->result
);
5134 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5136 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5137 l2cap_move_continue(conn
, icid
, result
);
5139 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5144 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5145 struct l2cap_cmd_hdr
*cmd
,
5146 u16 cmd_len
, void *data
)
5148 struct l2cap_move_chan_cfm
*cfm
= data
;
5149 struct l2cap_chan
*chan
;
5152 if (cmd_len
!= sizeof(*cfm
))
5155 icid
= le16_to_cpu(cfm
->icid
);
5156 result
= le16_to_cpu(cfm
->result
);
5158 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5160 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5162 /* Spec requires a response even if the icid was not found */
5163 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5167 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5168 if (result
== L2CAP_MC_CONFIRMED
) {
5169 chan
->local_amp_id
= chan
->move_id
;
5170 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5171 __release_logical_link(chan
);
5173 chan
->move_id
= chan
->local_amp_id
;
5176 l2cap_move_done(chan
);
5179 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5181 l2cap_chan_unlock(chan
);
5186 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5187 struct l2cap_cmd_hdr
*cmd
,
5188 u16 cmd_len
, void *data
)
5190 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5191 struct l2cap_chan
*chan
;
5194 if (cmd_len
!= sizeof(*rsp
))
5197 icid
= le16_to_cpu(rsp
->icid
);
5199 BT_DBG("icid 0x%4.4x", icid
);
5201 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5205 __clear_chan_timer(chan
);
5207 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5208 chan
->local_amp_id
= chan
->move_id
;
5210 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5211 __release_logical_link(chan
);
5213 l2cap_move_done(chan
);
5216 l2cap_chan_unlock(chan
);
5221 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5222 struct l2cap_cmd_hdr
*cmd
,
5223 u16 cmd_len
, u8
*data
)
5225 struct hci_conn
*hcon
= conn
->hcon
;
5226 struct l2cap_conn_param_update_req
*req
;
5227 struct l2cap_conn_param_update_rsp rsp
;
5228 u16 min
, max
, latency
, to_multiplier
;
5231 if (hcon
->role
!= HCI_ROLE_MASTER
)
5234 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5237 req
= (struct l2cap_conn_param_update_req
*) data
;
5238 min
= __le16_to_cpu(req
->min
);
5239 max
= __le16_to_cpu(req
->max
);
5240 latency
= __le16_to_cpu(req
->latency
);
5241 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5243 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5244 min
, max
, latency
, to_multiplier
);
5246 memset(&rsp
, 0, sizeof(rsp
));
5248 err
= hci_check_conn_params(min
, max
, latency
, to_multiplier
);
5250 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5252 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5254 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5260 store_hint
= hci_le_conn_update(hcon
, min
, max
, latency
,
5262 mgmt_new_conn_param(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
,
5263 store_hint
, min
, max
, latency
,
5271 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5272 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5275 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5276 u16 dcid
, mtu
, mps
, credits
, result
;
5277 struct l2cap_chan
*chan
;
5280 if (cmd_len
< sizeof(*rsp
))
5283 dcid
= __le16_to_cpu(rsp
->dcid
);
5284 mtu
= __le16_to_cpu(rsp
->mtu
);
5285 mps
= __le16_to_cpu(rsp
->mps
);
5286 credits
= __le16_to_cpu(rsp
->credits
);
5287 result
= __le16_to_cpu(rsp
->result
);
5289 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5292 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5293 dcid
, mtu
, mps
, credits
, result
);
5295 mutex_lock(&conn
->chan_lock
);
5297 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5305 l2cap_chan_lock(chan
);
5308 case L2CAP_CR_SUCCESS
:
5312 chan
->remote_mps
= mps
;
5313 chan
->tx_credits
= credits
;
5314 l2cap_chan_ready(chan
);
5318 l2cap_chan_del(chan
, ECONNREFUSED
);
5322 l2cap_chan_unlock(chan
);
5325 mutex_unlock(&conn
->chan_lock
);
5330 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5331 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5336 switch (cmd
->code
) {
5337 case L2CAP_COMMAND_REJ
:
5338 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5341 case L2CAP_CONN_REQ
:
5342 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5345 case L2CAP_CONN_RSP
:
5346 case L2CAP_CREATE_CHAN_RSP
:
5347 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5350 case L2CAP_CONF_REQ
:
5351 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5354 case L2CAP_CONF_RSP
:
5355 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5358 case L2CAP_DISCONN_REQ
:
5359 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5362 case L2CAP_DISCONN_RSP
:
5363 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5366 case L2CAP_ECHO_REQ
:
5367 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5370 case L2CAP_ECHO_RSP
:
5373 case L2CAP_INFO_REQ
:
5374 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5377 case L2CAP_INFO_RSP
:
5378 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5381 case L2CAP_CREATE_CHAN_REQ
:
5382 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5385 case L2CAP_MOVE_CHAN_REQ
:
5386 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5389 case L2CAP_MOVE_CHAN_RSP
:
5390 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5393 case L2CAP_MOVE_CHAN_CFM
:
5394 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5397 case L2CAP_MOVE_CHAN_CFM_RSP
:
5398 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5402 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5410 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5411 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5414 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5415 struct l2cap_le_conn_rsp rsp
;
5416 struct l2cap_chan
*chan
, *pchan
;
5417 u16 dcid
, scid
, credits
, mtu
, mps
;
5421 if (cmd_len
!= sizeof(*req
))
5424 scid
= __le16_to_cpu(req
->scid
);
5425 mtu
= __le16_to_cpu(req
->mtu
);
5426 mps
= __le16_to_cpu(req
->mps
);
5431 if (mtu
< 23 || mps
< 23)
5434 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5437 /* Check if we have socket listening on psm */
5438 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5439 &conn
->hcon
->dst
, LE_LINK
);
5441 result
= L2CAP_CR_BAD_PSM
;
5446 mutex_lock(&conn
->chan_lock
);
5447 l2cap_chan_lock(pchan
);
5449 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5450 result
= L2CAP_CR_AUTHENTICATION
;
5452 goto response_unlock
;
5455 /* Check if we already have channel with that dcid */
5456 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5457 result
= L2CAP_CR_NO_MEM
;
5459 goto response_unlock
;
5462 chan
= pchan
->ops
->new_connection(pchan
);
5464 result
= L2CAP_CR_NO_MEM
;
5465 goto response_unlock
;
5468 l2cap_le_flowctl_init(chan
);
5470 bacpy(&chan
->src
, &conn
->hcon
->src
);
5471 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5472 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5473 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5477 chan
->remote_mps
= mps
;
5478 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5480 __l2cap_chan_add(conn
, chan
);
5482 credits
= chan
->rx_credits
;
5484 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5486 chan
->ident
= cmd
->ident
;
5488 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5489 l2cap_state_change(chan
, BT_CONNECT2
);
5490 result
= L2CAP_CR_PEND
;
5491 chan
->ops
->defer(chan
);
5493 l2cap_chan_ready(chan
);
5494 result
= L2CAP_CR_SUCCESS
;
5498 l2cap_chan_unlock(pchan
);
5499 mutex_unlock(&conn
->chan_lock
);
5501 if (result
== L2CAP_CR_PEND
)
5506 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5507 rsp
.mps
= cpu_to_le16(chan
->mps
);
5513 rsp
.dcid
= cpu_to_le16(dcid
);
5514 rsp
.credits
= cpu_to_le16(credits
);
5515 rsp
.result
= cpu_to_le16(result
);
5517 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5522 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5523 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5526 struct l2cap_le_credits
*pkt
;
5527 struct l2cap_chan
*chan
;
5528 u16 cid
, credits
, max_credits
;
5530 if (cmd_len
!= sizeof(*pkt
))
5533 pkt
= (struct l2cap_le_credits
*) data
;
5534 cid
= __le16_to_cpu(pkt
->cid
);
5535 credits
= __le16_to_cpu(pkt
->credits
);
5537 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5539 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5543 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5544 if (credits
> max_credits
) {
5545 BT_ERR("LE credits overflow");
5546 l2cap_send_disconn_req(chan
, ECONNRESET
);
5548 /* Return 0 so that we don't trigger an unnecessary
5549 * command reject packet.
5554 chan
->tx_credits
+= credits
;
5556 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
5557 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
5561 if (chan
->tx_credits
)
5562 chan
->ops
->resume(chan
);
5564 l2cap_chan_unlock(chan
);
5569 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5570 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5573 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5574 struct l2cap_chan
*chan
;
5576 if (cmd_len
< sizeof(*rej
))
5579 mutex_lock(&conn
->chan_lock
);
5581 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5585 l2cap_chan_lock(chan
);
5586 l2cap_chan_del(chan
, ECONNREFUSED
);
5587 l2cap_chan_unlock(chan
);
5590 mutex_unlock(&conn
->chan_lock
);
5594 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5595 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5600 switch (cmd
->code
) {
5601 case L2CAP_COMMAND_REJ
:
5602 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5605 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5606 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5609 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5612 case L2CAP_LE_CONN_RSP
:
5613 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5616 case L2CAP_LE_CONN_REQ
:
5617 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5620 case L2CAP_LE_CREDITS
:
5621 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5624 case L2CAP_DISCONN_REQ
:
5625 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5628 case L2CAP_DISCONN_RSP
:
5629 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5633 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5641 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5642 struct sk_buff
*skb
)
5644 struct hci_conn
*hcon
= conn
->hcon
;
5645 struct l2cap_cmd_hdr
*cmd
;
5649 if (hcon
->type
!= LE_LINK
)
5652 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5655 cmd
= (void *) skb
->data
;
5656 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5658 len
= le16_to_cpu(cmd
->len
);
5660 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5662 if (len
!= skb
->len
|| !cmd
->ident
) {
5663 BT_DBG("corrupted command");
5667 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5669 struct l2cap_cmd_rej_unk rej
;
5671 BT_ERR("Wrong link type (%d)", err
);
5673 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5674 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5682 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5683 struct sk_buff
*skb
)
5685 struct hci_conn
*hcon
= conn
->hcon
;
5686 u8
*data
= skb
->data
;
5688 struct l2cap_cmd_hdr cmd
;
5691 l2cap_raw_recv(conn
, skb
);
5693 if (hcon
->type
!= ACL_LINK
)
5696 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5698 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5699 data
+= L2CAP_CMD_HDR_SIZE
;
5700 len
-= L2CAP_CMD_HDR_SIZE
;
5702 cmd_len
= le16_to_cpu(cmd
.len
);
5704 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5707 if (cmd_len
> len
|| !cmd
.ident
) {
5708 BT_DBG("corrupted command");
5712 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5714 struct l2cap_cmd_rej_unk rej
;
5716 BT_ERR("Wrong link type (%d)", err
);
5718 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5719 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5731 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5733 u16 our_fcs
, rcv_fcs
;
5736 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5737 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5739 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5741 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5742 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5743 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5744 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5746 if (our_fcs
!= rcv_fcs
)
5752 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5754 struct l2cap_ctrl control
;
5756 BT_DBG("chan %p", chan
);
5758 memset(&control
, 0, sizeof(control
));
5761 control
.reqseq
= chan
->buffer_seq
;
5762 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5764 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5765 control
.super
= L2CAP_SUPER_RNR
;
5766 l2cap_send_sframe(chan
, &control
);
5769 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5770 chan
->unacked_frames
> 0)
5771 __set_retrans_timer(chan
);
5773 /* Send pending iframes */
5774 l2cap_ertm_send(chan
);
5776 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5777 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5778 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5781 control
.super
= L2CAP_SUPER_RR
;
5782 l2cap_send_sframe(chan
, &control
);
5786 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5787 struct sk_buff
**last_frag
)
5789 /* skb->len reflects data in skb as well as all fragments
5790 * skb->data_len reflects only data in fragments
5792 if (!skb_has_frag_list(skb
))
5793 skb_shinfo(skb
)->frag_list
= new_frag
;
5795 new_frag
->next
= NULL
;
5797 (*last_frag
)->next
= new_frag
;
5798 *last_frag
= new_frag
;
5800 skb
->len
+= new_frag
->len
;
5801 skb
->data_len
+= new_frag
->len
;
5802 skb
->truesize
+= new_frag
->truesize
;
5805 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5806 struct l2cap_ctrl
*control
)
5810 switch (control
->sar
) {
5811 case L2CAP_SAR_UNSEGMENTED
:
5815 err
= chan
->ops
->recv(chan
, skb
);
5818 case L2CAP_SAR_START
:
5822 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5823 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5825 if (chan
->sdu_len
> chan
->imtu
) {
5830 if (skb
->len
>= chan
->sdu_len
)
5834 chan
->sdu_last_frag
= skb
;
5840 case L2CAP_SAR_CONTINUE
:
5844 append_skb_frag(chan
->sdu
, skb
,
5845 &chan
->sdu_last_frag
);
5848 if (chan
->sdu
->len
>= chan
->sdu_len
)
5858 append_skb_frag(chan
->sdu
, skb
,
5859 &chan
->sdu_last_frag
);
5862 if (chan
->sdu
->len
!= chan
->sdu_len
)
5865 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5868 /* Reassembly complete */
5870 chan
->sdu_last_frag
= NULL
;
5878 kfree_skb(chan
->sdu
);
5880 chan
->sdu_last_frag
= NULL
;
/* Re-segment queued frames after a channel move changes the MPS.
 * Placeholder: upstream assumes no resegmentation is needed.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5893 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5897 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5900 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5901 l2cap_tx(chan
, NULL
, NULL
, event
);
5904 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5907 /* Pass sequential frames to l2cap_reassemble_sdu()
5908 * until a gap is encountered.
5911 BT_DBG("chan %p", chan
);
5913 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5914 struct sk_buff
*skb
;
5915 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5916 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5918 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5923 skb_unlink(skb
, &chan
->srej_q
);
5924 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5925 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5930 if (skb_queue_empty(&chan
->srej_q
)) {
5931 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5932 l2cap_send_ack(chan
);
5938 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5939 struct l2cap_ctrl
*control
)
5941 struct sk_buff
*skb
;
5943 BT_DBG("chan %p, control %p", chan
, control
);
5945 if (control
->reqseq
== chan
->next_tx_seq
) {
5946 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5947 l2cap_send_disconn_req(chan
, ECONNRESET
);
5951 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5954 BT_DBG("Seq %d not available for retransmission",
5959 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5960 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5961 l2cap_send_disconn_req(chan
, ECONNRESET
);
5965 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5967 if (control
->poll
) {
5968 l2cap_pass_to_tx(chan
, control
);
5970 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5971 l2cap_retransmit(chan
, control
);
5972 l2cap_ertm_send(chan
);
5974 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5975 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5976 chan
->srej_save_reqseq
= control
->reqseq
;
5979 l2cap_pass_to_tx_fbit(chan
, control
);
5981 if (control
->final
) {
5982 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5983 !test_and_clear_bit(CONN_SREJ_ACT
,
5985 l2cap_retransmit(chan
, control
);
5987 l2cap_retransmit(chan
, control
);
5988 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5989 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5990 chan
->srej_save_reqseq
= control
->reqseq
;
5996 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5997 struct l2cap_ctrl
*control
)
5999 struct sk_buff
*skb
;
6001 BT_DBG("chan %p, control %p", chan
, control
);
6003 if (control
->reqseq
== chan
->next_tx_seq
) {
6004 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
6005 l2cap_send_disconn_req(chan
, ECONNRESET
);
6009 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6011 if (chan
->max_tx
&& skb
&&
6012 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
6013 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6014 l2cap_send_disconn_req(chan
, ECONNRESET
);
6018 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6020 l2cap_pass_to_tx(chan
, control
);
6022 if (control
->final
) {
6023 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
6024 l2cap_retransmit_all(chan
, control
);
6026 l2cap_retransmit_all(chan
, control
);
6027 l2cap_ertm_send(chan
);
6028 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
6029 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
6033 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6035 BT_DBG("chan %p, txseq %d", chan
, txseq
);
6037 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6038 chan
->expected_tx_seq
);
6040 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6041 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6043 /* See notes below regarding "double poll" and
6046 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6047 BT_DBG("Invalid/Ignore - after SREJ");
6048 return L2CAP_TXSEQ_INVALID_IGNORE
;
6050 BT_DBG("Invalid - in window after SREJ sent");
6051 return L2CAP_TXSEQ_INVALID
;
6055 if (chan
->srej_list
.head
== txseq
) {
6056 BT_DBG("Expected SREJ");
6057 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6060 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6061 BT_DBG("Duplicate SREJ - txseq already stored");
6062 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6065 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6066 BT_DBG("Unexpected SREJ - not requested");
6067 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6071 if (chan
->expected_tx_seq
== txseq
) {
6072 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6074 BT_DBG("Invalid - txseq outside tx window");
6075 return L2CAP_TXSEQ_INVALID
;
6078 return L2CAP_TXSEQ_EXPECTED
;
6082 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6083 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6084 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6085 return L2CAP_TXSEQ_DUPLICATE
;
6088 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6089 /* A source of invalid packets is a "double poll" condition,
6090 * where delays cause us to send multiple poll packets. If
6091 * the remote stack receives and processes both polls,
6092 * sequence numbers can wrap around in such a way that a
6093 * resent frame has a sequence number that looks like new data
6094 * with a sequence gap. This would trigger an erroneous SREJ
6097 * Fortunately, this is impossible with a tx window that's
6098 * less than half of the maximum sequence number, which allows
6099 * invalid frames to be safely ignored.
6101 * With tx window sizes greater than half of the tx window
6102 * maximum, the frame is invalid and cannot be ignored. This
6103 * causes a disconnect.
6106 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6107 BT_DBG("Invalid/Ignore - txseq outside tx window");
6108 return L2CAP_TXSEQ_INVALID_IGNORE
;
6110 BT_DBG("Invalid - txseq outside tx window");
6111 return L2CAP_TXSEQ_INVALID
;
6114 BT_DBG("Unexpected - txseq indicates missing frames");
6115 return L2CAP_TXSEQ_UNEXPECTED
;
6119 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6120 struct l2cap_ctrl
*control
,
6121 struct sk_buff
*skb
, u8 event
)
6124 bool skb_in_use
= false;
6126 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6130 case L2CAP_EV_RECV_IFRAME
:
6131 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6132 case L2CAP_TXSEQ_EXPECTED
:
6133 l2cap_pass_to_tx(chan
, control
);
6135 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6136 BT_DBG("Busy, discarding expected seq %d",
6141 chan
->expected_tx_seq
= __next_seq(chan
,
6144 chan
->buffer_seq
= chan
->expected_tx_seq
;
6147 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6151 if (control
->final
) {
6152 if (!test_and_clear_bit(CONN_REJ_ACT
,
6153 &chan
->conn_state
)) {
6155 l2cap_retransmit_all(chan
, control
);
6156 l2cap_ertm_send(chan
);
6160 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6161 l2cap_send_ack(chan
);
6163 case L2CAP_TXSEQ_UNEXPECTED
:
6164 l2cap_pass_to_tx(chan
, control
);
6166 /* Can't issue SREJ frames in the local busy state.
6167 * Drop this frame, it will be seen as missing
6168 * when local busy is exited.
6170 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6171 BT_DBG("Busy, discarding unexpected seq %d",
6176 /* There was a gap in the sequence, so an SREJ
6177 * must be sent for each missing frame. The
6178 * current frame is stored for later use.
6180 skb_queue_tail(&chan
->srej_q
, skb
);
6182 BT_DBG("Queued %p (queue len %d)", skb
,
6183 skb_queue_len(&chan
->srej_q
));
6185 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6186 l2cap_seq_list_clear(&chan
->srej_list
);
6187 l2cap_send_srej(chan
, control
->txseq
);
6189 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6191 case L2CAP_TXSEQ_DUPLICATE
:
6192 l2cap_pass_to_tx(chan
, control
);
6194 case L2CAP_TXSEQ_INVALID_IGNORE
:
6196 case L2CAP_TXSEQ_INVALID
:
6198 l2cap_send_disconn_req(chan
, ECONNRESET
);
6202 case L2CAP_EV_RECV_RR
:
6203 l2cap_pass_to_tx(chan
, control
);
6204 if (control
->final
) {
6205 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6207 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6208 !__chan_is_moving(chan
)) {
6210 l2cap_retransmit_all(chan
, control
);
6213 l2cap_ertm_send(chan
);
6214 } else if (control
->poll
) {
6215 l2cap_send_i_or_rr_or_rnr(chan
);
6217 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6218 &chan
->conn_state
) &&
6219 chan
->unacked_frames
)
6220 __set_retrans_timer(chan
);
6222 l2cap_ertm_send(chan
);
6225 case L2CAP_EV_RECV_RNR
:
6226 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6227 l2cap_pass_to_tx(chan
, control
);
6228 if (control
&& control
->poll
) {
6229 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6230 l2cap_send_rr_or_rnr(chan
, 0);
6232 __clear_retrans_timer(chan
);
6233 l2cap_seq_list_clear(&chan
->retrans_list
);
6235 case L2CAP_EV_RECV_REJ
:
6236 l2cap_handle_rej(chan
, control
);
6238 case L2CAP_EV_RECV_SREJ
:
6239 l2cap_handle_srej(chan
, control
);
6245 if (skb
&& !skb_in_use
) {
6246 BT_DBG("Freeing %p", skb
);
6253 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6254 struct l2cap_ctrl
*control
,
6255 struct sk_buff
*skb
, u8 event
)
6258 u16 txseq
= control
->txseq
;
6259 bool skb_in_use
= false;
6261 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6265 case L2CAP_EV_RECV_IFRAME
:
6266 switch (l2cap_classify_txseq(chan
, txseq
)) {
6267 case L2CAP_TXSEQ_EXPECTED
:
6268 /* Keep frame for reassembly later */
6269 l2cap_pass_to_tx(chan
, control
);
6270 skb_queue_tail(&chan
->srej_q
, skb
);
6272 BT_DBG("Queued %p (queue len %d)", skb
,
6273 skb_queue_len(&chan
->srej_q
));
6275 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6277 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6278 l2cap_seq_list_pop(&chan
->srej_list
);
6280 l2cap_pass_to_tx(chan
, control
);
6281 skb_queue_tail(&chan
->srej_q
, skb
);
6283 BT_DBG("Queued %p (queue len %d)", skb
,
6284 skb_queue_len(&chan
->srej_q
));
6286 err
= l2cap_rx_queued_iframes(chan
);
6291 case L2CAP_TXSEQ_UNEXPECTED
:
6292 /* Got a frame that can't be reassembled yet.
6293 * Save it for later, and send SREJs to cover
6294 * the missing frames.
6296 skb_queue_tail(&chan
->srej_q
, skb
);
6298 BT_DBG("Queued %p (queue len %d)", skb
,
6299 skb_queue_len(&chan
->srej_q
));
6301 l2cap_pass_to_tx(chan
, control
);
6302 l2cap_send_srej(chan
, control
->txseq
);
6304 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6305 /* This frame was requested with an SREJ, but
6306 * some expected retransmitted frames are
6307 * missing. Request retransmission of missing
6310 skb_queue_tail(&chan
->srej_q
, skb
);
6312 BT_DBG("Queued %p (queue len %d)", skb
,
6313 skb_queue_len(&chan
->srej_q
));
6315 l2cap_pass_to_tx(chan
, control
);
6316 l2cap_send_srej_list(chan
, control
->txseq
);
6318 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6319 /* We've already queued this frame. Drop this copy. */
6320 l2cap_pass_to_tx(chan
, control
);
6322 case L2CAP_TXSEQ_DUPLICATE
:
6323 /* Expecting a later sequence number, so this frame
6324 * was already received. Ignore it completely.
6327 case L2CAP_TXSEQ_INVALID_IGNORE
:
6329 case L2CAP_TXSEQ_INVALID
:
6331 l2cap_send_disconn_req(chan
, ECONNRESET
);
6335 case L2CAP_EV_RECV_RR
:
6336 l2cap_pass_to_tx(chan
, control
);
6337 if (control
->final
) {
6338 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6340 if (!test_and_clear_bit(CONN_REJ_ACT
,
6341 &chan
->conn_state
)) {
6343 l2cap_retransmit_all(chan
, control
);
6346 l2cap_ertm_send(chan
);
6347 } else if (control
->poll
) {
6348 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6349 &chan
->conn_state
) &&
6350 chan
->unacked_frames
) {
6351 __set_retrans_timer(chan
);
6354 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6355 l2cap_send_srej_tail(chan
);
6357 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6358 &chan
->conn_state
) &&
6359 chan
->unacked_frames
)
6360 __set_retrans_timer(chan
);
6362 l2cap_send_ack(chan
);
6365 case L2CAP_EV_RECV_RNR
:
6366 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6367 l2cap_pass_to_tx(chan
, control
);
6368 if (control
->poll
) {
6369 l2cap_send_srej_tail(chan
);
6371 struct l2cap_ctrl rr_control
;
6372 memset(&rr_control
, 0, sizeof(rr_control
));
6373 rr_control
.sframe
= 1;
6374 rr_control
.super
= L2CAP_SUPER_RR
;
6375 rr_control
.reqseq
= chan
->buffer_seq
;
6376 l2cap_send_sframe(chan
, &rr_control
);
6380 case L2CAP_EV_RECV_REJ
:
6381 l2cap_handle_rej(chan
, control
);
6383 case L2CAP_EV_RECV_SREJ
:
6384 l2cap_handle_srej(chan
, control
);
6388 if (skb
&& !skb_in_use
) {
6389 BT_DBG("Freeing %p", skb
);
6396 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6398 BT_DBG("chan %p", chan
);
6400 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6403 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6405 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6407 return l2cap_resegment(chan
);
6410 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6411 struct l2cap_ctrl
*control
,
6412 struct sk_buff
*skb
, u8 event
)
6416 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6422 l2cap_process_reqseq(chan
, control
->reqseq
);
6424 if (!skb_queue_empty(&chan
->tx_q
))
6425 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6427 chan
->tx_send_head
= NULL
;
6429 /* Rewind next_tx_seq to the point expected
6432 chan
->next_tx_seq
= control
->reqseq
;
6433 chan
->unacked_frames
= 0;
6435 err
= l2cap_finish_move(chan
);
6439 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6440 l2cap_send_i_or_rr_or_rnr(chan
);
6442 if (event
== L2CAP_EV_RECV_IFRAME
)
6445 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6448 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6449 struct l2cap_ctrl
*control
,
6450 struct sk_buff
*skb
, u8 event
)
6454 if (!control
->final
)
6457 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6459 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6460 l2cap_process_reqseq(chan
, control
->reqseq
);
6462 if (!skb_queue_empty(&chan
->tx_q
))
6463 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6465 chan
->tx_send_head
= NULL
;
6467 /* Rewind next_tx_seq to the point expected
6470 chan
->next_tx_seq
= control
->reqseq
;
6471 chan
->unacked_frames
= 0;
6474 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6476 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6478 err
= l2cap_resegment(chan
);
6481 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6486 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6488 /* Make sure reqseq is for a packet that has been sent but not acked */
6491 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6492 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6495 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6496 struct sk_buff
*skb
, u8 event
)
6500 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6501 control
, skb
, event
, chan
->rx_state
);
6503 if (__valid_reqseq(chan
, control
->reqseq
)) {
6504 switch (chan
->rx_state
) {
6505 case L2CAP_RX_STATE_RECV
:
6506 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6508 case L2CAP_RX_STATE_SREJ_SENT
:
6509 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6512 case L2CAP_RX_STATE_WAIT_P
:
6513 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6515 case L2CAP_RX_STATE_WAIT_F
:
6516 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6523 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6524 control
->reqseq
, chan
->next_tx_seq
,
6525 chan
->expected_ack_seq
);
6526 l2cap_send_disconn_req(chan
, ECONNRESET
);
6532 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6533 struct sk_buff
*skb
)
6537 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6540 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6541 L2CAP_TXSEQ_EXPECTED
) {
6542 l2cap_pass_to_tx(chan
, control
);
6544 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6545 __next_seq(chan
, chan
->buffer_seq
));
6547 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6549 l2cap_reassemble_sdu(chan
, skb
, control
);
6552 kfree_skb(chan
->sdu
);
6555 chan
->sdu_last_frag
= NULL
;
6559 BT_DBG("Freeing %p", skb
);
6564 chan
->last_acked_seq
= control
->txseq
;
6565 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6570 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6572 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6576 __unpack_control(chan
, skb
);
6581 * We can just drop the corrupted I-frame here.
6582 * Receiver will miss it and start proper recovery
6583 * procedures and ask for retransmission.
6585 if (l2cap_check_fcs(chan
, skb
))
6588 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6589 len
-= L2CAP_SDULEN_SIZE
;
6591 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6592 len
-= L2CAP_FCS_SIZE
;
6594 if (len
> chan
->mps
) {
6595 l2cap_send_disconn_req(chan
, ECONNRESET
);
6599 if (!control
->sframe
) {
6602 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6603 control
->sar
, control
->reqseq
, control
->final
,
6606 /* Validate F-bit - F=0 always valid, F=1 only
6607 * valid in TX WAIT_F
6609 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6612 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6613 event
= L2CAP_EV_RECV_IFRAME
;
6614 err
= l2cap_rx(chan
, control
, skb
, event
);
6616 err
= l2cap_stream_rx(chan
, control
, skb
);
6620 l2cap_send_disconn_req(chan
, ECONNRESET
);
6622 const u8 rx_func_to_event
[4] = {
6623 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6624 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6627 /* Only I-frames are expected in streaming mode */
6628 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6631 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6632 control
->reqseq
, control
->final
, control
->poll
,
6636 BT_ERR("Trailing bytes: %d in sframe", len
);
6637 l2cap_send_disconn_req(chan
, ECONNRESET
);
6641 /* Validate F and P bits */
6642 if (control
->final
&& (control
->poll
||
6643 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6646 event
= rx_func_to_event
[control
->super
];
6647 if (l2cap_rx(chan
, control
, skb
, event
))
6648 l2cap_send_disconn_req(chan
, ECONNRESET
);
6658 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6660 struct l2cap_conn
*conn
= chan
->conn
;
6661 struct l2cap_le_credits pkt
;
6664 /* We return more credits to the sender only after the amount of
6665 * credits falls below half of the initial amount.
6667 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6670 return_credits
= le_max_credits
- chan
->rx_credits
;
6672 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6674 chan
->rx_credits
+= return_credits
;
6676 pkt
.cid
= cpu_to_le16(chan
->scid
);
6677 pkt
.credits
= cpu_to_le16(return_credits
);
6679 chan
->ident
= l2cap_get_ident(conn
);
6681 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6684 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6688 if (!chan
->rx_credits
) {
6689 BT_ERR("No credits to receive LE L2CAP data");
6690 l2cap_send_disconn_req(chan
, ECONNRESET
);
6694 if (chan
->imtu
< skb
->len
) {
6695 BT_ERR("Too big LE L2CAP PDU");
6700 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6702 l2cap_chan_le_send_credits(chan
);
6709 sdu_len
= get_unaligned_le16(skb
->data
);
6710 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6712 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6713 sdu_len
, skb
->len
, chan
->imtu
);
6715 if (sdu_len
> chan
->imtu
) {
6716 BT_ERR("Too big LE L2CAP SDU length received");
6721 if (skb
->len
> sdu_len
) {
6722 BT_ERR("Too much LE L2CAP data received");
6727 if (skb
->len
== sdu_len
)
6728 return chan
->ops
->recv(chan
, skb
);
6731 chan
->sdu_len
= sdu_len
;
6732 chan
->sdu_last_frag
= skb
;
6737 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6738 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6740 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6741 BT_ERR("Too much LE L2CAP data received");
6746 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6749 if (chan
->sdu
->len
== chan
->sdu_len
) {
6750 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6753 chan
->sdu_last_frag
= NULL
;
6761 kfree_skb(chan
->sdu
);
6763 chan
->sdu_last_frag
= NULL
;
6767 /* We can't return an error here since we took care of the skb
6768 * freeing internally. An error return would cause the caller to
6769 * do a double-free of the skb.
6774 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6775 struct sk_buff
*skb
)
6777 struct l2cap_chan
*chan
;
6779 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6781 if (cid
== L2CAP_CID_A2MP
) {
6782 chan
= a2mp_channel_create(conn
, skb
);
6788 l2cap_chan_lock(chan
);
6790 BT_DBG("unknown cid 0x%4.4x", cid
);
6791 /* Drop packet and return */
6797 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6799 if (chan
->state
!= BT_CONNECTED
)
6802 switch (chan
->mode
) {
6803 case L2CAP_MODE_LE_FLOWCTL
:
6804 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6809 case L2CAP_MODE_BASIC
:
6810 /* If socket recv buffers overflows we drop data here
6811 * which is *bad* because L2CAP has to be reliable.
6812 * But we don't have any other choice. L2CAP doesn't
6813 * provide flow control mechanism. */
6815 if (chan
->imtu
< skb
->len
) {
6816 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6820 if (!chan
->ops
->recv(chan
, skb
))
6824 case L2CAP_MODE_ERTM
:
6825 case L2CAP_MODE_STREAMING
:
6826 l2cap_data_rcv(chan
, skb
);
6830 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6838 l2cap_chan_unlock(chan
);
6841 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6842 struct sk_buff
*skb
)
6844 struct hci_conn
*hcon
= conn
->hcon
;
6845 struct l2cap_chan
*chan
;
6847 if (hcon
->type
!= ACL_LINK
)
6850 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6855 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6857 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6860 if (chan
->imtu
< skb
->len
)
6863 /* Store remote BD_ADDR and PSM for msg_name */
6864 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6865 bt_cb(skb
)->psm
= psm
;
6867 if (!chan
->ops
->recv(chan
, skb
))
6874 static void l2cap_att_channel(struct l2cap_conn
*conn
,
6875 struct sk_buff
*skb
)
6877 struct hci_conn
*hcon
= conn
->hcon
;
6878 struct l2cap_chan
*chan
;
6880 if (hcon
->type
!= LE_LINK
)
6883 chan
= l2cap_global_chan_by_scid(BT_CONNECTED
, L2CAP_CID_ATT
,
6884 &hcon
->src
, &hcon
->dst
);
6888 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6890 if (chan
->imtu
< skb
->len
)
6893 if (!chan
->ops
->recv(chan
, skb
))
6900 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6902 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6903 struct hci_conn
*hcon
= conn
->hcon
;
6907 if (hcon
->state
!= BT_CONNECTED
) {
6908 BT_DBG("queueing pending rx skb");
6909 skb_queue_tail(&conn
->pending_rx
, skb
);
6913 skb_pull(skb
, L2CAP_HDR_SIZE
);
6914 cid
= __le16_to_cpu(lh
->cid
);
6915 len
= __le16_to_cpu(lh
->len
);
6917 if (len
!= skb
->len
) {
6922 /* Since we can't actively block incoming LE connections we must
6923 * at least ensure that we ignore incoming data from them.
6925 if (hcon
->type
== LE_LINK
&&
6926 hci_bdaddr_list_lookup(&hcon
->hdev
->blacklist
, &hcon
->dst
,
6927 bdaddr_type(hcon
, hcon
->dst_type
))) {
6932 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6935 case L2CAP_CID_SIGNALING
:
6936 l2cap_sig_channel(conn
, skb
);
6939 case L2CAP_CID_CONN_LESS
:
6940 psm
= get_unaligned((__le16
*) skb
->data
);
6941 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6942 l2cap_conless_channel(conn
, psm
, skb
);
6946 l2cap_att_channel(conn
, skb
);
6949 case L2CAP_CID_LE_SIGNALING
:
6950 l2cap_le_sig_channel(conn
, skb
);
6954 if (smp_sig_channel(conn
, skb
))
6955 l2cap_conn_del(conn
->hcon
, EACCES
);
6959 l2cap_data_channel(conn
, cid
, skb
);
6964 static void process_pending_rx(struct work_struct
*work
)
6966 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
6968 struct sk_buff
*skb
;
6972 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
6973 l2cap_recv_frame(conn
, skb
);
6976 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6978 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6979 struct hci_chan
*hchan
;
6984 hchan
= hci_chan_create(hcon
);
6988 conn
= kzalloc(sizeof(*conn
), GFP_KERNEL
);
6990 hci_chan_del(hchan
);
6994 kref_init(&conn
->ref
);
6995 hcon
->l2cap_data
= conn
;
6997 hci_conn_get(conn
->hcon
);
6998 conn
->hchan
= hchan
;
7000 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
7002 switch (hcon
->type
) {
7004 if (hcon
->hdev
->le_mtu
) {
7005 conn
->mtu
= hcon
->hdev
->le_mtu
;
7010 conn
->mtu
= hcon
->hdev
->acl_mtu
;
7014 conn
->feat_mask
= 0;
7016 if (hcon
->type
== ACL_LINK
)
7017 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
7018 &hcon
->hdev
->dev_flags
);
7020 mutex_init(&conn
->ident_lock
);
7021 mutex_init(&conn
->chan_lock
);
7023 INIT_LIST_HEAD(&conn
->chan_l
);
7024 INIT_LIST_HEAD(&conn
->users
);
7026 if (hcon
->type
== LE_LINK
)
7027 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
7029 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
7031 skb_queue_head_init(&conn
->pending_rx
);
7032 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
7034 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
7039 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
7043 if (bdaddr_type_is_le(dst_type
))
7044 return (psm
<= 0x00ff);
7046 /* PSM must be odd and lsb of upper byte must be 0 */
7047 return ((psm
& 0x0101) == 0x0001);
7050 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
7051 bdaddr_t
*dst
, u8 dst_type
)
7053 struct l2cap_conn
*conn
;
7054 struct hci_conn
*hcon
;
7055 struct hci_dev
*hdev
;
7058 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
7059 dst_type
, __le16_to_cpu(psm
));
7061 hdev
= hci_get_route(dst
, &chan
->src
);
7063 return -EHOSTUNREACH
;
7067 l2cap_chan_lock(chan
);
7069 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
7070 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
7075 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
7080 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
7085 switch (chan
->mode
) {
7086 case L2CAP_MODE_BASIC
:
7088 case L2CAP_MODE_LE_FLOWCTL
:
7089 l2cap_le_flowctl_init(chan
);
7091 case L2CAP_MODE_ERTM
:
7092 case L2CAP_MODE_STREAMING
:
7101 switch (chan
->state
) {
7105 /* Already connecting */
7110 /* Already connected */
7124 /* Set destination address and psm */
7125 bacpy(&chan
->dst
, dst
);
7126 chan
->dst_type
= dst_type
;
7131 if (bdaddr_type_is_le(dst_type
)) {
7134 /* Convert from L2CAP channel address type to HCI address type
7136 if (dst_type
== BDADDR_LE_PUBLIC
)
7137 dst_type
= ADDR_LE_DEV_PUBLIC
;
7139 dst_type
= ADDR_LE_DEV_RANDOM
;
7141 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
7142 role
= HCI_ROLE_SLAVE
;
7144 role
= HCI_ROLE_MASTER
;
7146 hcon
= hci_connect_le(hdev
, dst
, dst_type
, chan
->sec_level
,
7147 HCI_LE_CONN_TIMEOUT
, role
);
7149 u8 auth_type
= l2cap_get_auth_type(chan
);
7150 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7154 err
= PTR_ERR(hcon
);
7158 conn
= l2cap_conn_add(hcon
);
7160 hci_conn_drop(hcon
);
7165 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7166 hci_conn_drop(hcon
);
7171 /* Update source addr of the socket */
7172 bacpy(&chan
->src
, &hcon
->src
);
7173 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7175 l2cap_chan_unlock(chan
);
7176 l2cap_chan_add(conn
, chan
);
7177 l2cap_chan_lock(chan
);
7179 /* l2cap_chan_add takes its own ref so we can drop this one */
7180 hci_conn_drop(hcon
);
7182 l2cap_state_change(chan
, BT_CONNECT
);
7183 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7185 /* Release chan->sport so that it can be reused by other
7186 * sockets (as it's only used for listening sockets).
7188 write_lock(&chan_list_lock
);
7190 write_unlock(&chan_list_lock
);
7192 if (hcon
->state
== BT_CONNECTED
) {
7193 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7194 __clear_chan_timer(chan
);
7195 if (l2cap_chan_check_security(chan
, true))
7196 l2cap_state_change(chan
, BT_CONNECTED
);
7198 l2cap_do_start(chan
);
7204 l2cap_chan_unlock(chan
);
7205 hci_dev_unlock(hdev
);
7209 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
7211 /* ---- L2CAP interface with lower layer (HCI) ---- */
7213 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7215 int exact
= 0, lm1
= 0, lm2
= 0;
7216 struct l2cap_chan
*c
;
7218 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7220 /* Find listening sockets and check their link_mode */
7221 read_lock(&chan_list_lock
);
7222 list_for_each_entry(c
, &chan_list
, global_l
) {
7223 if (c
->state
!= BT_LISTEN
)
7226 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7227 lm1
|= HCI_LM_ACCEPT
;
7228 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7229 lm1
|= HCI_LM_MASTER
;
7231 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7232 lm2
|= HCI_LM_ACCEPT
;
7233 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7234 lm2
|= HCI_LM_MASTER
;
7237 read_unlock(&chan_list_lock
);
7239 return exact
? lm1
: lm2
;
7242 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7244 struct l2cap_conn
*conn
;
7246 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7249 conn
= l2cap_conn_add(hcon
);
7251 l2cap_conn_ready(conn
);
7253 l2cap_conn_del(hcon
, bt_to_errno(status
));
7257 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7259 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7261 BT_DBG("hcon %p", hcon
);
7264 return HCI_ERROR_REMOTE_USER_TERM
;
7265 return conn
->disc_reason
;
7268 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7270 BT_DBG("hcon %p reason %d", hcon
, reason
);
7272 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7275 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7277 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7280 if (encrypt
== 0x00) {
7281 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7282 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7283 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7284 chan
->sec_level
== BT_SECURITY_FIPS
)
7285 l2cap_chan_close(chan
, ECONNREFUSED
);
7287 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7288 __clear_chan_timer(chan
);
7292 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7294 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7295 struct l2cap_chan
*chan
;
7300 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7302 if (hcon
->type
== LE_LINK
) {
7303 if (!status
&& encrypt
)
7304 smp_distribute_keys(conn
);
7305 cancel_delayed_work(&conn
->security_timer
);
7308 mutex_lock(&conn
->chan_lock
);
7310 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7311 l2cap_chan_lock(chan
);
7313 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7314 state_to_string(chan
->state
));
7316 if (chan
->scid
== L2CAP_CID_A2MP
) {
7317 l2cap_chan_unlock(chan
);
7321 if (chan
->scid
== L2CAP_CID_ATT
) {
7322 if (!status
&& encrypt
) {
7323 chan
->sec_level
= hcon
->sec_level
;
7324 l2cap_chan_ready(chan
);
7327 l2cap_chan_unlock(chan
);
7331 if (!__l2cap_no_conn_pending(chan
)) {
7332 l2cap_chan_unlock(chan
);
7336 if (!status
&& (chan
->state
== BT_CONNECTED
||
7337 chan
->state
== BT_CONFIG
)) {
7338 chan
->ops
->resume(chan
);
7339 l2cap_check_encryption(chan
, encrypt
);
7340 l2cap_chan_unlock(chan
);
7344 if (chan
->state
== BT_CONNECT
) {
7346 l2cap_start_connection(chan
);
7348 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7349 } else if (chan
->state
== BT_CONNECT2
) {
7350 struct l2cap_conn_rsp rsp
;
7354 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7355 res
= L2CAP_CR_PEND
;
7356 stat
= L2CAP_CS_AUTHOR_PEND
;
7357 chan
->ops
->defer(chan
);
7359 l2cap_state_change(chan
, BT_CONFIG
);
7360 res
= L2CAP_CR_SUCCESS
;
7361 stat
= L2CAP_CS_NO_INFO
;
7364 l2cap_state_change(chan
, BT_DISCONN
);
7365 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7366 res
= L2CAP_CR_SEC_BLOCK
;
7367 stat
= L2CAP_CS_NO_INFO
;
7370 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7371 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7372 rsp
.result
= cpu_to_le16(res
);
7373 rsp
.status
= cpu_to_le16(stat
);
7374 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7377 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7378 res
== L2CAP_CR_SUCCESS
) {
7380 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7381 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7383 l2cap_build_conf_req(chan
, buf
),
7385 chan
->num_conf_req
++;
7389 l2cap_chan_unlock(chan
);
7392 mutex_unlock(&conn
->chan_lock
);
7397 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7399 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7400 struct l2cap_hdr
*hdr
;
7403 /* For AMP controller do not create l2cap conn */
7404 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7408 conn
= l2cap_conn_add(hcon
);
7413 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7417 case ACL_START_NO_FLUSH
:
7420 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7421 kfree_skb(conn
->rx_skb
);
7422 conn
->rx_skb
= NULL
;
7424 l2cap_conn_unreliable(conn
, ECOMM
);
7427 /* Start fragment always begin with Basic L2CAP header */
7428 if (skb
->len
< L2CAP_HDR_SIZE
) {
7429 BT_ERR("Frame is too short (len %d)", skb
->len
);
7430 l2cap_conn_unreliable(conn
, ECOMM
);
7434 hdr
= (struct l2cap_hdr
*) skb
->data
;
7435 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7437 if (len
== skb
->len
) {
7438 /* Complete frame received */
7439 l2cap_recv_frame(conn
, skb
);
7443 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7445 if (skb
->len
> len
) {
7446 BT_ERR("Frame is too long (len %d, expected len %d)",
7448 l2cap_conn_unreliable(conn
, ECOMM
);
7452 /* Allocate skb for the complete frame (with header) */
7453 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7457 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7459 conn
->rx_len
= len
- skb
->len
;
7463 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7465 if (!conn
->rx_len
) {
7466 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7467 l2cap_conn_unreliable(conn
, ECOMM
);
7471 if (skb
->len
> conn
->rx_len
) {
7472 BT_ERR("Fragment is too long (len %d, expected %d)",
7473 skb
->len
, conn
->rx_len
);
7474 kfree_skb(conn
->rx_skb
);
7475 conn
->rx_skb
= NULL
;
7477 l2cap_conn_unreliable(conn
, ECOMM
);
7481 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7483 conn
->rx_len
-= skb
->len
;
7485 if (!conn
->rx_len
) {
7486 /* Complete frame received. l2cap_recv_frame
7487 * takes ownership of the skb so set the global
7488 * rx_skb pointer to NULL first.
7490 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7491 conn
->rx_skb
= NULL
;
7492 l2cap_recv_frame(conn
, rx_skb
);
7502 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7504 struct l2cap_chan
*c
;
7506 read_lock(&chan_list_lock
);
7508 list_for_each_entry(c
, &chan_list
, global_l
) {
7509 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7511 c
->state
, __le16_to_cpu(c
->psm
),
7512 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7513 c
->sec_level
, c
->mode
);
7516 read_unlock(&chan_list_lock
);
7521 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7523 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7526 static const struct file_operations l2cap_debugfs_fops
= {
7527 .open
= l2cap_debugfs_open
,
7529 .llseek
= seq_lseek
,
7530 .release
= single_release
,
7533 static struct dentry
*l2cap_debugfs
;
7535 int __init
l2cap_init(void)
7539 err
= l2cap_init_sockets();
7543 if (IS_ERR_OR_NULL(bt_debugfs
))
7546 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7547 NULL
, &l2cap_debugfs_fops
);
7549 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs
,
7551 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs
,
7557 void l2cap_exit(void)
7559 debugfs_remove(l2cap_debugfs
);
7560 l2cap_cleanup_sockets();
7563 module_param(disable_ertm
, bool, 0644);
7564 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");