/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/amp.h>
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}

static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
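/* Worked example (illustrative, assuming an allocated size of 8, mask 0x7):
 * appending sequence numbers 3, 5 and 6 gives head = 3, tail = 6 and
 *   list[3] = 5, list[5] = 6, list[6] = L2CAP_SEQ_LIST_TAIL
 * A membership test is one array lookup, an append only updates the old
 * tail's slot, and popping the head just follows one link.
 */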
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;

		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
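/* In this file, chan->retrans_list collects the txseq values queued for
 * retransmission (appended in l2cap_retransmit() and l2cap_retransmit_all(),
 * popped in l2cap_ertm_resend()), while chan->srej_list tracks the sequence
 * numbers for which SREJ frames have been sent (see l2cap_send_srej()).
 */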
396 static void l2cap_chan_timeout(struct work_struct
*work
)
398 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
400 struct l2cap_conn
*conn
= chan
->conn
;
403 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
405 mutex_lock(&conn
->chan_lock
);
406 l2cap_chan_lock(chan
);
408 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
409 reason
= ECONNREFUSED
;
410 else if (chan
->state
== BT_CONNECT
&&
411 chan
->sec_level
!= BT_SECURITY_SDP
)
412 reason
= ECONNREFUSED
;
416 l2cap_chan_close(chan
, reason
);
418 l2cap_chan_unlock(chan
);
420 chan
->ops
->close(chan
);
421 mutex_unlock(&conn
->chan_lock
);
423 l2cap_chan_put(chan
);
426 struct l2cap_chan
*l2cap_chan_create(void)
428 struct l2cap_chan
*chan
;
430 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
434 mutex_init(&chan
->lock
);
436 write_lock(&chan_list_lock
);
437 list_add(&chan
->global_l
, &chan_list
);
438 write_unlock(&chan_list_lock
);
440 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
442 chan
->state
= BT_OPEN
;
444 kref_init(&chan
->kref
);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
449 BT_DBG("chan %p", chan
);
454 static void l2cap_chan_destroy(struct kref
*kref
)
456 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
458 BT_DBG("chan %p", chan
);
460 write_lock(&chan_list_lock
);
461 list_del(&chan
->global_l
);
462 write_unlock(&chan_list_lock
);
467 void l2cap_chan_hold(struct l2cap_chan
*c
)
469 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
474 void l2cap_chan_put(struct l2cap_chan
*c
)
476 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
478 kref_put(&c
->kref
, l2cap_chan_destroy
);
481 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
483 chan
->fcs
= L2CAP_FCS_CRC16
;
484 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
485 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
486 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
487 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
488 chan
->sec_level
= BT_SECURITY_LOW
;
490 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
493 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
496 __le16_to_cpu(chan
->psm
), chan
->dcid
);
498 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
502 switch (chan
->chan_type
) {
503 case L2CAP_CHAN_CONN_ORIENTED
:
504 if (conn
->hcon
->type
== LE_LINK
) {
506 chan
->omtu
= L2CAP_DEFAULT_MTU
;
507 chan
->scid
= L2CAP_CID_LE_DATA
;
508 chan
->dcid
= L2CAP_CID_LE_DATA
;
510 /* Alloc CID for connection-oriented socket */
511 chan
->scid
= l2cap_alloc_cid(conn
);
512 chan
->omtu
= L2CAP_DEFAULT_MTU
;
516 case L2CAP_CHAN_CONN_LESS
:
517 /* Connectionless socket */
518 chan
->scid
= L2CAP_CID_CONN_LESS
;
519 chan
->dcid
= L2CAP_CID_CONN_LESS
;
520 chan
->omtu
= L2CAP_DEFAULT_MTU
;
523 case L2CAP_CHAN_CONN_FIX_A2MP
:
524 chan
->scid
= L2CAP_CID_A2MP
;
525 chan
->dcid
= L2CAP_CID_A2MP
;
526 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
527 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
531 /* Raw socket can send/recv signalling messages only */
532 chan
->scid
= L2CAP_CID_SIGNALING
;
533 chan
->dcid
= L2CAP_CID_SIGNALING
;
534 chan
->omtu
= L2CAP_DEFAULT_MTU
;
537 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
538 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
539 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
540 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
541 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
542 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
544 l2cap_chan_hold(chan
);
546 list_add(&chan
->list
, &conn
->chan_l
);
549 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
551 mutex_lock(&conn
->chan_lock
);
552 __l2cap_chan_add(conn
, chan
);
553 mutex_unlock(&conn
->chan_lock
);
556 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
558 struct l2cap_conn
*conn
= chan
->conn
;
560 __clear_chan_timer(chan
);
562 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
565 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
566 /* Delete from channel list */
567 list_del(&chan
->list
);
569 l2cap_chan_put(chan
);
573 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
574 hci_conn_put(conn
->hcon
);
576 if (mgr
&& mgr
->bredr_chan
== chan
)
577 mgr
->bredr_chan
= NULL
;
580 if (chan
->hs_hchan
) {
581 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
583 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
584 amp_disconnect_logical_link(hs_hchan
);
587 chan
->ops
->teardown(chan
, err
);
589 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
593 case L2CAP_MODE_BASIC
:
596 case L2CAP_MODE_ERTM
:
597 __clear_retrans_timer(chan
);
598 __clear_monitor_timer(chan
);
599 __clear_ack_timer(chan
);
601 skb_queue_purge(&chan
->srej_q
);
603 l2cap_seq_list_free(&chan
->srej_list
);
604 l2cap_seq_list_free(&chan
->retrans_list
);
608 case L2CAP_MODE_STREAMING
:
609 skb_queue_purge(&chan
->tx_q
);
616 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
618 struct l2cap_conn
*conn
= chan
->conn
;
619 struct sock
*sk
= chan
->sk
;
621 BT_DBG("chan %p state %s sk %p", chan
, state_to_string(chan
->state
),
624 switch (chan
->state
) {
626 chan
->ops
->teardown(chan
, 0);
631 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
632 conn
->hcon
->type
== ACL_LINK
) {
633 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
634 l2cap_send_disconn_req(chan
, reason
);
636 l2cap_chan_del(chan
, reason
);
640 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
641 conn
->hcon
->type
== ACL_LINK
) {
642 struct l2cap_conn_rsp rsp
;
645 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
646 result
= L2CAP_CR_SEC_BLOCK
;
648 result
= L2CAP_CR_BAD_PSM
;
649 l2cap_state_change(chan
, BT_DISCONN
);
651 rsp
.scid
= cpu_to_le16(chan
->dcid
);
652 rsp
.dcid
= cpu_to_le16(chan
->scid
);
653 rsp
.result
= cpu_to_le16(result
);
654 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
655 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
659 l2cap_chan_del(chan
, reason
);
664 l2cap_chan_del(chan
, reason
);
668 chan
->ops
->teardown(chan
, 0);
673 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
675 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
676 switch (chan
->sec_level
) {
677 case BT_SECURITY_HIGH
:
678 return HCI_AT_DEDICATED_BONDING_MITM
;
679 case BT_SECURITY_MEDIUM
:
680 return HCI_AT_DEDICATED_BONDING
;
682 return HCI_AT_NO_BONDING
;
684 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
685 if (chan
->sec_level
== BT_SECURITY_LOW
)
686 chan
->sec_level
= BT_SECURITY_SDP
;
688 if (chan
->sec_level
== BT_SECURITY_HIGH
)
689 return HCI_AT_NO_BONDING_MITM
;
691 return HCI_AT_NO_BONDING
;
693 switch (chan
->sec_level
) {
694 case BT_SECURITY_HIGH
:
695 return HCI_AT_GENERAL_BONDING_MITM
;
696 case BT_SECURITY_MEDIUM
:
697 return HCI_AT_GENERAL_BONDING
;
699 return HCI_AT_NO_BONDING
;
704 /* Service level security */
705 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
707 struct l2cap_conn
*conn
= chan
->conn
;
710 auth_type
= l2cap_get_auth_type(chan
);
712 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
715 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
	/* Get next available identifier.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */
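	/* The tx_ident counter below stays within the 1 - 128 kernel range;
	 * once it passes 128 it wraps back to the start of that range rather
	 * than running into the reserved identifiers.
	 */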
725 spin_lock(&conn
->lock
);
727 if (++conn
->tx_ident
> 128)
732 spin_unlock(&conn
->lock
);
737 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
740 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
743 BT_DBG("code 0x%2.2x", code
);
748 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
749 flags
= ACL_START_NO_FLUSH
;
753 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
754 skb
->priority
= HCI_PRIO_MAX
;
756 hci_send_acl(conn
->hchan
, skb
, flags
);
759 static bool __chan_is_moving(struct l2cap_chan
*chan
)
761 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
762 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
765 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
767 struct hci_conn
*hcon
= chan
->conn
->hcon
;
770 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
773 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
775 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
782 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
783 lmp_no_flush_capable(hcon
->hdev
))
784 flags
= ACL_START_NO_FLUSH
;
788 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
789 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
792 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
794 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
795 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
797 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
800 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
801 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
808 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
809 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
816 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
818 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
819 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
821 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
824 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
825 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
832 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
833 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
840 static inline void __unpack_control(struct l2cap_chan
*chan
,
843 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
844 __unpack_extended_control(get_unaligned_le32(skb
->data
),
845 &bt_cb(skb
)->control
);
846 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
848 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
849 &bt_cb(skb
)->control
);
850 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
854 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
858 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
859 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
861 if (control
->sframe
) {
862 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
863 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
864 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
866 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
867 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
873 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
877 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
878 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
880 if (control
->sframe
) {
881 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
882 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
883 packed
|= L2CAP_CTRL_FRAME_TYPE
;
885 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
886 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
892 static inline void __pack_control(struct l2cap_chan
*chan
,
893 struct l2cap_ctrl
*control
,
896 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
897 put_unaligned_le32(__pack_extended_control(control
),
898 skb
->data
+ L2CAP_HDR_SIZE
);
900 put_unaligned_le16(__pack_enhanced_control(control
),
901 skb
->data
+ L2CAP_HDR_SIZE
);
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
907 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
908 return L2CAP_EXT_HDR_SIZE
;
910 return L2CAP_ENH_HDR_SIZE
;
913 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
917 struct l2cap_hdr
*lh
;
918 int hlen
= __ertm_hdr_size(chan
);
920 if (chan
->fcs
== L2CAP_FCS_CRC16
)
921 hlen
+= L2CAP_FCS_SIZE
;
923 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
926 return ERR_PTR(-ENOMEM
);
928 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
929 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
930 lh
->cid
= cpu_to_le16(chan
->dcid
);
932 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
933 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
935 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
937 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
938 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
939 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
942 skb
->priority
= HCI_PRIO_MAX
;
946 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
947 struct l2cap_ctrl
*control
)
952 BT_DBG("chan %p, control %p", chan
, control
);
954 if (!control
->sframe
)
957 if (__chan_is_moving(chan
))
960 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
964 if (control
->super
== L2CAP_SUPER_RR
)
965 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
966 else if (control
->super
== L2CAP_SUPER_RNR
)
967 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
969 if (control
->super
!= L2CAP_SUPER_SREJ
) {
970 chan
->last_acked_seq
= control
->reqseq
;
971 __clear_ack_timer(chan
);
974 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
975 control
->final
, control
->poll
, control
->super
);
977 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
978 control_field
= __pack_extended_control(control
);
980 control_field
= __pack_enhanced_control(control
);
982 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
984 l2cap_do_send(chan
, skb
);
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
989 struct l2cap_ctrl control
;
991 BT_DBG("chan %p, poll %d", chan
, poll
);
993 memset(&control
, 0, sizeof(control
));
997 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
998 control
.super
= L2CAP_SUPER_RNR
;
1000 control
.super
= L2CAP_SUPER_RR
;
1002 control
.reqseq
= chan
->buffer_seq
;
1003 l2cap_send_sframe(chan
, &control
);
1006 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1008 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1011 static bool __amp_capable(struct l2cap_chan
*chan
)
1013 struct l2cap_conn
*conn
= chan
->conn
;
1016 hci_amp_capable() &&
1017 chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
&&
1018 conn
->fixed_chan_mask
& L2CAP_FC_A2MP
)
1024 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1026 /* Check EFS parameters */
1030 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1032 struct l2cap_conn
*conn
= chan
->conn
;
1033 struct l2cap_conn_req req
;
1035 req
.scid
= cpu_to_le16(chan
->scid
);
1036 req
.psm
= chan
->psm
;
1038 chan
->ident
= l2cap_get_ident(conn
);
1040 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1042 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1045 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1047 struct l2cap_create_chan_req req
;
1048 req
.scid
= cpu_to_le16(chan
->scid
);
1049 req
.psm
= chan
->psm
;
1050 req
.amp_id
= amp_id
;
1052 chan
->ident
= l2cap_get_ident(chan
->conn
);
1054 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1058 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1060 struct sk_buff
*skb
;
1062 BT_DBG("chan %p", chan
);
1064 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1067 __clear_retrans_timer(chan
);
1068 __clear_monitor_timer(chan
);
1069 __clear_ack_timer(chan
);
1071 chan
->retry_count
= 0;
1072 skb_queue_walk(&chan
->tx_q
, skb
) {
1073 if (bt_cb(skb
)->control
.retries
)
1074 bt_cb(skb
)->control
.retries
= 1;
1079 chan
->expected_tx_seq
= chan
->buffer_seq
;
1081 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1082 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1083 l2cap_seq_list_clear(&chan
->retrans_list
);
1084 l2cap_seq_list_clear(&chan
->srej_list
);
1085 skb_queue_purge(&chan
->srej_q
);
1087 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1088 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1090 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1093 static void l2cap_move_done(struct l2cap_chan
*chan
)
1095 u8 move_role
= chan
->move_role
;
1096 BT_DBG("chan %p", chan
);
1098 chan
->move_state
= L2CAP_MOVE_STABLE
;
1099 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1101 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1104 switch (move_role
) {
1105 case L2CAP_MOVE_ROLE_INITIATOR
:
1106 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1107 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1109 case L2CAP_MOVE_ROLE_RESPONDER
:
1110 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1115 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1117 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 chan
->conf_state
= 0;
1119 __clear_chan_timer(chan
);
1121 chan
->state
= BT_CONNECTED
;
1123 chan
->ops
->ready(chan
);
1126 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1128 if (__amp_capable(chan
)) {
1129 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1130 a2mp_discover_amp(chan
);
1132 l2cap_send_conn_req(chan
);
1136 static void l2cap_do_start(struct l2cap_chan
*chan
)
1138 struct l2cap_conn
*conn
= chan
->conn
;
1140 if (conn
->hcon
->type
== LE_LINK
) {
1141 l2cap_chan_ready(chan
);
1145 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1146 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1149 if (l2cap_chan_check_security(chan
) &&
1150 __l2cap_no_conn_pending(chan
)) {
1151 l2cap_start_connection(chan
);
1154 struct l2cap_info_req req
;
1155 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1157 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1158 conn
->info_ident
= l2cap_get_ident(conn
);
1160 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1162 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1167 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1169 u32 local_feat_mask
= l2cap_feat_mask
;
1171 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1174 case L2CAP_MODE_ERTM
:
1175 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1176 case L2CAP_MODE_STREAMING
:
1177 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1183 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1185 struct sock
*sk
= chan
->sk
;
1186 struct l2cap_conn
*conn
= chan
->conn
;
1187 struct l2cap_disconn_req req
;
1192 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1193 __clear_retrans_timer(chan
);
1194 __clear_monitor_timer(chan
);
1195 __clear_ack_timer(chan
);
1198 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1199 l2cap_state_change(chan
, BT_DISCONN
);
1203 req
.dcid
= cpu_to_le16(chan
->dcid
);
1204 req
.scid
= cpu_to_le16(chan
->scid
);
1205 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1209 __l2cap_state_change(chan
, BT_DISCONN
);
1210 __l2cap_chan_set_err(chan
, err
);
1214 /* ---- L2CAP connections ---- */
1215 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1217 struct l2cap_chan
*chan
, *tmp
;
1219 BT_DBG("conn %p", conn
);
1221 mutex_lock(&conn
->chan_lock
);
1223 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1224 struct sock
*sk
= chan
->sk
;
1226 l2cap_chan_lock(chan
);
1228 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1229 l2cap_chan_unlock(chan
);
1233 if (chan
->state
== BT_CONNECT
) {
1234 if (!l2cap_chan_check_security(chan
) ||
1235 !__l2cap_no_conn_pending(chan
)) {
1236 l2cap_chan_unlock(chan
);
1240 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1241 && test_bit(CONF_STATE2_DEVICE
,
1242 &chan
->conf_state
)) {
1243 l2cap_chan_close(chan
, ECONNRESET
);
1244 l2cap_chan_unlock(chan
);
1248 l2cap_start_connection(chan
);
1250 } else if (chan
->state
== BT_CONNECT2
) {
1251 struct l2cap_conn_rsp rsp
;
1253 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1254 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1256 if (l2cap_chan_check_security(chan
)) {
1258 if (test_bit(BT_SK_DEFER_SETUP
,
1259 &bt_sk(sk
)->flags
)) {
1260 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1261 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1262 chan
->ops
->defer(chan
);
1265 __l2cap_state_change(chan
, BT_CONFIG
);
1266 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1267 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1271 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1272 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1275 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1278 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1279 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1280 l2cap_chan_unlock(chan
);
1284 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1285 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1286 l2cap_build_conf_req(chan
, buf
), buf
);
1287 chan
->num_conf_req
++;
1290 l2cap_chan_unlock(chan
);
1293 mutex_unlock(&conn
->chan_lock
);
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 */
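/* An exact source/destination address match is returned immediately; failing
 * that, the first channel whose addresses only match through the BDADDR_ANY
 * wildcard is kept as the closest match.
 */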
1299 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1303 struct l2cap_chan
*c
, *c1
= NULL
;
1305 read_lock(&chan_list_lock
);
1307 list_for_each_entry(c
, &chan_list
, global_l
) {
1308 struct sock
*sk
= c
->sk
;
1310 if (state
&& c
->state
!= state
)
1313 if (c
->scid
== cid
) {
1314 int src_match
, dst_match
;
1315 int src_any
, dst_any
;
1318 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1319 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1320 if (src_match
&& dst_match
) {
1321 read_unlock(&chan_list_lock
);
1326 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1327 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1328 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1329 (src_any
&& dst_any
))
1334 read_unlock(&chan_list_lock
);
1339 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1341 struct sock
*parent
, *sk
;
1342 struct l2cap_chan
*chan
, *pchan
;
1346 /* Check if we have socket listening on cid */
1347 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1348 conn
->src
, conn
->dst
);
1356 chan
= pchan
->ops
->new_connection(pchan
);
1362 hci_conn_hold(conn
->hcon
);
1363 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
1365 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1366 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1368 l2cap_chan_add(conn
, chan
);
1370 l2cap_chan_ready(chan
);
1373 release_sock(parent
);
1376 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1378 struct l2cap_chan
*chan
;
1379 struct hci_conn
*hcon
= conn
->hcon
;
1381 BT_DBG("conn %p", conn
);
1383 if (!hcon
->out
&& hcon
->type
== LE_LINK
)
1384 l2cap_le_conn_ready(conn
);
1386 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1387 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1389 mutex_lock(&conn
->chan_lock
);
1391 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1393 l2cap_chan_lock(chan
);
1395 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1396 l2cap_chan_unlock(chan
);
1400 if (hcon
->type
== LE_LINK
) {
1401 if (smp_conn_security(hcon
, chan
->sec_level
))
1402 l2cap_chan_ready(chan
);
1404 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1405 struct sock
*sk
= chan
->sk
;
1406 __clear_chan_timer(chan
);
1408 __l2cap_state_change(chan
, BT_CONNECTED
);
1409 sk
->sk_state_change(sk
);
1412 } else if (chan
->state
== BT_CONNECT
)
1413 l2cap_do_start(chan
);
1415 l2cap_chan_unlock(chan
);
1418 mutex_unlock(&conn
->chan_lock
);
/* Notify sockets that we cannot guarantee reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1424 struct l2cap_chan
*chan
;
1426 BT_DBG("conn %p", conn
);
1428 mutex_lock(&conn
->chan_lock
);
1430 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1431 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1432 l2cap_chan_set_err(chan
, err
);
1435 mutex_unlock(&conn
->chan_lock
);
1438 static void l2cap_info_timeout(struct work_struct
*work
)
1440 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1443 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1444 conn
->info_ident
= 0;
1446 l2cap_conn_start(conn
);
1449 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1451 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1452 struct l2cap_chan
*chan
, *l
;
1457 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1459 kfree_skb(conn
->rx_skb
);
1461 mutex_lock(&conn
->chan_lock
);
1464 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1465 l2cap_chan_hold(chan
);
1466 l2cap_chan_lock(chan
);
1468 l2cap_chan_del(chan
, err
);
1470 l2cap_chan_unlock(chan
);
1472 chan
->ops
->close(chan
);
1473 l2cap_chan_put(chan
);
1476 mutex_unlock(&conn
->chan_lock
);
1478 hci_chan_del(conn
->hchan
);
1480 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1481 cancel_delayed_work_sync(&conn
->info_timer
);
1483 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1484 cancel_delayed_work_sync(&conn
->security_timer
);
1485 smp_chan_destroy(conn
);
1488 hcon
->l2cap_data
= NULL
;
1492 static void security_timeout(struct work_struct
*work
)
1494 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1495 security_timer
.work
);
1497 BT_DBG("conn %p", conn
);
1499 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1500 smp_chan_destroy(conn
);
1501 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1505 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1507 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1508 struct hci_chan
*hchan
;
1513 hchan
= hci_chan_create(hcon
);
1517 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1519 hci_chan_del(hchan
);
1523 hcon
->l2cap_data
= conn
;
1525 conn
->hchan
= hchan
;
1527 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1529 switch (hcon
->type
) {
1531 if (hcon
->hdev
->le_mtu
) {
1532 conn
->mtu
= hcon
->hdev
->le_mtu
;
1537 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1541 conn
->src
= &hcon
->hdev
->bdaddr
;
1542 conn
->dst
= &hcon
->dst
;
1544 conn
->feat_mask
= 0;
1546 spin_lock_init(&conn
->lock
);
1547 mutex_init(&conn
->chan_lock
);
1549 INIT_LIST_HEAD(&conn
->chan_l
);
1551 if (hcon
->type
== LE_LINK
)
1552 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1554 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1556 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1561 /* ---- Socket interface ---- */
1563 /* Find socket with psm and source / destination bdaddr.
1564 * Returns closest match.
1566 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1570 struct l2cap_chan
*c
, *c1
= NULL
;
1572 read_lock(&chan_list_lock
);
1574 list_for_each_entry(c
, &chan_list
, global_l
) {
1575 struct sock
*sk
= c
->sk
;
1577 if (state
&& c
->state
!= state
)
1580 if (c
->psm
== psm
) {
1581 int src_match
, dst_match
;
1582 int src_any
, dst_any
;
1585 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1586 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1587 if (src_match
&& dst_match
) {
1588 read_unlock(&chan_list_lock
);
1593 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1594 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1595 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1596 (src_any
&& dst_any
))
1601 read_unlock(&chan_list_lock
);
1606 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1607 bdaddr_t
*dst
, u8 dst_type
)
1609 struct sock
*sk
= chan
->sk
;
1610 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1611 struct l2cap_conn
*conn
;
1612 struct hci_conn
*hcon
;
1613 struct hci_dev
*hdev
;
1617 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src
, dst
,
1618 dst_type
, __le16_to_cpu(psm
));
1620 hdev
= hci_get_route(dst
, src
);
1622 return -EHOSTUNREACH
;
1626 l2cap_chan_lock(chan
);
1628 /* PSM must be odd and lsb of upper byte must be 0 */
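	/* For example, 0x1001 passes the check below (low byte 0x01 is odd,
	 * bit 0 of the upper byte is clear), while 0x1002 and 0x0101 do not.
	 */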
1629 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1630 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1635 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1640 switch (chan
->mode
) {
1641 case L2CAP_MODE_BASIC
:
1643 case L2CAP_MODE_ERTM
:
1644 case L2CAP_MODE_STREAMING
:
1653 switch (chan
->state
) {
1657 /* Already connecting */
1662 /* Already connected */
1676 /* Set destination address and psm */
1678 bacpy(&bt_sk(sk
)->dst
, dst
);
1684 auth_type
= l2cap_get_auth_type(chan
);
1686 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1687 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1688 chan
->sec_level
, auth_type
);
1690 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1691 chan
->sec_level
, auth_type
);
1694 err
= PTR_ERR(hcon
);
1698 conn
= l2cap_conn_add(hcon
, 0);
1705 if (hcon
->type
== LE_LINK
) {
1708 if (!list_empty(&conn
->chan_l
)) {
1717 /* Update source addr of the socket */
1718 bacpy(src
, conn
->src
);
1720 l2cap_chan_unlock(chan
);
1721 l2cap_chan_add(conn
, chan
);
1722 l2cap_chan_lock(chan
);
1724 l2cap_state_change(chan
, BT_CONNECT
);
1725 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1727 if (hcon
->state
== BT_CONNECTED
) {
1728 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1729 __clear_chan_timer(chan
);
1730 if (l2cap_chan_check_security(chan
))
1731 l2cap_state_change(chan
, BT_CONNECTED
);
1733 l2cap_do_start(chan
);
1739 l2cap_chan_unlock(chan
);
1740 hci_dev_unlock(hdev
);
1745 int __l2cap_wait_ack(struct sock
*sk
)
1747 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1748 DECLARE_WAITQUEUE(wait
, current
);
1752 add_wait_queue(sk_sleep(sk
), &wait
);
1753 set_current_state(TASK_INTERRUPTIBLE
);
1754 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1758 if (signal_pending(current
)) {
1759 err
= sock_intr_errno(timeo
);
1764 timeo
= schedule_timeout(timeo
);
1766 set_current_state(TASK_INTERRUPTIBLE
);
1768 err
= sock_error(sk
);
1772 set_current_state(TASK_RUNNING
);
1773 remove_wait_queue(sk_sleep(sk
), &wait
);
1777 static void l2cap_monitor_timeout(struct work_struct
*work
)
1779 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1780 monitor_timer
.work
);
1782 BT_DBG("chan %p", chan
);
1784 l2cap_chan_lock(chan
);
1787 l2cap_chan_unlock(chan
);
1788 l2cap_chan_put(chan
);
1792 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1794 l2cap_chan_unlock(chan
);
1795 l2cap_chan_put(chan
);
1798 static void l2cap_retrans_timeout(struct work_struct
*work
)
1800 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1801 retrans_timer
.work
);
1803 BT_DBG("chan %p", chan
);
1805 l2cap_chan_lock(chan
);
1808 l2cap_chan_unlock(chan
);
1809 l2cap_chan_put(chan
);
1813 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1814 l2cap_chan_unlock(chan
);
1815 l2cap_chan_put(chan
);
1818 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1819 struct sk_buff_head
*skbs
)
1821 struct sk_buff
*skb
;
1822 struct l2cap_ctrl
*control
;
1824 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1826 if (__chan_is_moving(chan
))
1829 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1831 while (!skb_queue_empty(&chan
->tx_q
)) {
1833 skb
= skb_dequeue(&chan
->tx_q
);
1835 bt_cb(skb
)->control
.retries
= 1;
1836 control
= &bt_cb(skb
)->control
;
1838 control
->reqseq
= 0;
1839 control
->txseq
= chan
->next_tx_seq
;
1841 __pack_control(chan
, control
, skb
);
1843 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1844 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1845 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1848 l2cap_do_send(chan
, skb
);
1850 BT_DBG("Sent txseq %u", control
->txseq
);
1852 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1853 chan
->frames_sent
++;
1857 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1859 struct sk_buff
*skb
, *tx_skb
;
1860 struct l2cap_ctrl
*control
;
1863 BT_DBG("chan %p", chan
);
1865 if (chan
->state
!= BT_CONNECTED
)
1868 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1871 if (__chan_is_moving(chan
))
1874 while (chan
->tx_send_head
&&
1875 chan
->unacked_frames
< chan
->remote_tx_win
&&
1876 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1878 skb
= chan
->tx_send_head
;
1880 bt_cb(skb
)->control
.retries
= 1;
1881 control
= &bt_cb(skb
)->control
;
1883 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1886 control
->reqseq
= chan
->buffer_seq
;
1887 chan
->last_acked_seq
= chan
->buffer_seq
;
1888 control
->txseq
= chan
->next_tx_seq
;
1890 __pack_control(chan
, control
, skb
);
1892 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1893 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1894 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
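		/* The retransmit path makes the complementary choice:
		 * l2cap_ertm_resend() uses skb_copy() instead of skb_clone()
		 * when the queued skb is already cloned, so its control field
		 * can be rewritten safely.
		 */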
1900 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1905 __set_retrans_timer(chan
);
1907 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1908 chan
->unacked_frames
++;
1909 chan
->frames_sent
++;
1912 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1913 chan
->tx_send_head
= NULL
;
1915 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1917 l2cap_do_send(chan
, tx_skb
);
1918 BT_DBG("Sent txseq %u", control
->txseq
);
1921 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1922 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1927 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1929 struct l2cap_ctrl control
;
1930 struct sk_buff
*skb
;
1931 struct sk_buff
*tx_skb
;
1934 BT_DBG("chan %p", chan
);
1936 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1939 if (__chan_is_moving(chan
))
1942 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1943 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1945 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1947 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1952 bt_cb(skb
)->control
.retries
++;
1953 control
= bt_cb(skb
)->control
;
1955 if (chan
->max_tx
!= 0 &&
1956 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1957 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1958 l2cap_send_disconn_req(chan
, ECONNRESET
);
1959 l2cap_seq_list_clear(&chan
->retrans_list
);
1963 control
.reqseq
= chan
->buffer_seq
;
1964 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1969 if (skb_cloned(skb
)) {
1970 /* Cloned sk_buffs are read-only, so we need a
1973 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1975 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1979 l2cap_seq_list_clear(&chan
->retrans_list
);
1983 /* Update skb contents */
1984 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1985 put_unaligned_le32(__pack_extended_control(&control
),
1986 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1988 put_unaligned_le16(__pack_enhanced_control(&control
),
1989 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1992 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1993 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1994 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1998 l2cap_do_send(chan
, tx_skb
);
2000 BT_DBG("Resent txseq %d", control
.txseq
);
2002 chan
->last_acked_seq
= chan
->buffer_seq
;
2006 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2007 struct l2cap_ctrl
*control
)
2009 BT_DBG("chan %p, control %p", chan
, control
);
2011 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2012 l2cap_ertm_resend(chan
);
2015 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2016 struct l2cap_ctrl
*control
)
2018 struct sk_buff
*skb
;
2020 BT_DBG("chan %p, control %p", chan
, control
);
2023 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2025 l2cap_seq_list_clear(&chan
->retrans_list
);
2027 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2030 if (chan
->unacked_frames
) {
2031 skb_queue_walk(&chan
->tx_q
, skb
) {
2032 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2033 skb
== chan
->tx_send_head
)
2037 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2038 if (skb
== chan
->tx_send_head
)
2041 l2cap_seq_list_append(&chan
->retrans_list
,
2042 bt_cb(skb
)->control
.txseq
);
2045 l2cap_ertm_resend(chan
);
2049 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2051 struct l2cap_ctrl control
;
2052 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2053 chan
->last_acked_seq
);
2056 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2057 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2059 memset(&control
, 0, sizeof(control
));
2062 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2063 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2064 __clear_ack_timer(chan
);
2065 control
.super
= L2CAP_SUPER_RNR
;
2066 control
.reqseq
= chan
->buffer_seq
;
2067 l2cap_send_sframe(chan
, &control
);
2069 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2070 l2cap_ertm_send(chan
);
2071 /* If any i-frames were sent, they included an ack */
2072 if (chan
->buffer_seq
== chan
->last_acked_seq
)
		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;
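		/* e.g. ack_win = 8 gives threshold = 6, so an acknowledgment
		 * is forced once 6 of the 8 receive credits are used up. */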
2083 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2086 if (frames_to_ack
>= threshold
) {
2087 __clear_ack_timer(chan
);
2088 control
.super
= L2CAP_SUPER_RR
;
2089 control
.reqseq
= chan
->buffer_seq
;
2090 l2cap_send_sframe(chan
, &control
);
2095 __set_ack_timer(chan
);
2099 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2100 struct msghdr
*msg
, int len
,
2101 int count
, struct sk_buff
*skb
)
2103 struct l2cap_conn
*conn
= chan
->conn
;
2104 struct sk_buff
**frag
;
2107 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2113 /* Continuation fragments (no L2CAP header) */
2114 frag
= &skb_shinfo(skb
)->frag_list
;
2116 struct sk_buff
*tmp
;
2118 count
= min_t(unsigned int, conn
->mtu
, len
);
2120 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2121 msg
->msg_flags
& MSG_DONTWAIT
);
2123 return PTR_ERR(tmp
);
2127 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2130 (*frag
)->priority
= skb
->priority
;
2135 skb
->len
+= (*frag
)->len
;
2136 skb
->data_len
+= (*frag
)->len
;
2138 frag
= &(*frag
)->next
;
2144 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2145 struct msghdr
*msg
, size_t len
,
2148 struct l2cap_conn
*conn
= chan
->conn
;
2149 struct sk_buff
*skb
;
2150 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2151 struct l2cap_hdr
*lh
;
2153 BT_DBG("chan %p len %zu priority %u", chan
, len
, priority
);
2155 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2157 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2158 msg
->msg_flags
& MSG_DONTWAIT
);
2162 skb
->priority
= priority
;
2164 /* Create L2CAP header */
2165 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2166 lh
->cid
= cpu_to_le16(chan
->dcid
);
2167 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2168 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2170 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2171 if (unlikely(err
< 0)) {
2173 return ERR_PTR(err
);
2178 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2179 struct msghdr
*msg
, size_t len
,
2182 struct l2cap_conn
*conn
= chan
->conn
;
2183 struct sk_buff
*skb
;
2185 struct l2cap_hdr
*lh
;
2187 BT_DBG("chan %p len %zu", chan
, len
);
2189 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2191 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2192 msg
->msg_flags
& MSG_DONTWAIT
);
2196 skb
->priority
= priority
;
2198 /* Create L2CAP header */
2199 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2200 lh
->cid
= cpu_to_le16(chan
->dcid
);
2201 lh
->len
= cpu_to_le16(len
);
2203 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2204 if (unlikely(err
< 0)) {
2206 return ERR_PTR(err
);
2211 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2212 struct msghdr
*msg
, size_t len
,
2215 struct l2cap_conn
*conn
= chan
->conn
;
2216 struct sk_buff
*skb
;
2217 int err
, count
, hlen
;
2218 struct l2cap_hdr
*lh
;
2220 BT_DBG("chan %p len %zu", chan
, len
);
2223 return ERR_PTR(-ENOTCONN
);
2225 hlen
= __ertm_hdr_size(chan
);
2228 hlen
+= L2CAP_SDULEN_SIZE
;
2230 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2231 hlen
+= L2CAP_FCS_SIZE
;
2233 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2235 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2236 msg
->msg_flags
& MSG_DONTWAIT
);
2240 /* Create L2CAP header */
2241 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2242 lh
->cid
= cpu_to_le16(chan
->dcid
);
2243 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2245 /* Control header is populated later */
2246 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2247 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2249 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2252 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2254 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2255 if (unlikely(err
< 0)) {
2257 return ERR_PTR(err
);
2260 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2261 bt_cb(skb
)->control
.retries
= 0;
2265 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2266 struct sk_buff_head
*seg_queue
,
2267 struct msghdr
*msg
, size_t len
)
2269 struct sk_buff
*skb
;
2274 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
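	/* e.g. an HCI MTU of 672 with CRC16 FCS and the 6 byte enhanced ERTM
	 * header leaves 672 - 2 - 6 = 664 bytes of payload per PDU, before
	 * the remote MPS cap is applied. */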
2297 if (len
<= pdu_len
) {
2298 sar
= L2CAP_SAR_UNSEGMENTED
;
2302 sar
= L2CAP_SAR_START
;
2304 pdu_len
-= L2CAP_SDULEN_SIZE
;
2308 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2311 __skb_queue_purge(seg_queue
);
2312 return PTR_ERR(skb
);
2315 bt_cb(skb
)->control
.sar
= sar
;
2316 __skb_queue_tail(seg_queue
, skb
);
2321 pdu_len
+= L2CAP_SDULEN_SIZE
;
2324 if (len
<= pdu_len
) {
2325 sar
= L2CAP_SAR_END
;
2328 sar
= L2CAP_SAR_CONTINUE
;
2335 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2338 struct sk_buff
*skb
;
2340 struct sk_buff_head seg_queue
;
2342 /* Connectionless channel */
2343 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2344 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2346 return PTR_ERR(skb
);
2348 l2cap_do_send(chan
, skb
);
2352 switch (chan
->mode
) {
2353 case L2CAP_MODE_BASIC
:
2354 /* Check outgoing MTU */
2355 if (len
> chan
->omtu
)
2358 /* Create a basic PDU */
2359 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2361 return PTR_ERR(skb
);
2363 l2cap_do_send(chan
, skb
);
2367 case L2CAP_MODE_ERTM
:
2368 case L2CAP_MODE_STREAMING
:
2369 /* Check outgoing MTU */
2370 if (len
> chan
->omtu
) {
2375 __skb_queue_head_init(&seg_queue
);
2377 /* Do segmentation before calling in to the state machine,
2378 * since it's possible to block while waiting for memory
2381 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2383 /* The channel could have been closed while segmenting,
2384 * check that it is still connected.
2386 if (chan
->state
!= BT_CONNECTED
) {
2387 __skb_queue_purge(&seg_queue
);
2394 if (chan
->mode
== L2CAP_MODE_ERTM
)
2395 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2397 l2cap_streaming_send(chan
, &seg_queue
);
2401 /* If the skbs were not queued for sending, they'll still be in
2402 * seg_queue and need to be purged.
2404 __skb_queue_purge(&seg_queue
);
2408 BT_DBG("bad state %1.1x", chan
->mode
);
2415 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2417 struct l2cap_ctrl control
;
2420 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2422 memset(&control
, 0, sizeof(control
));
2424 control
.super
= L2CAP_SUPER_SREJ
;
2426 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2427 seq
= __next_seq(chan
, seq
)) {
2428 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2429 control
.reqseq
= seq
;
2430 l2cap_send_sframe(chan
, &control
);
2431 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2435 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2438 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2440 struct l2cap_ctrl control
;
2442 BT_DBG("chan %p", chan
);
2444 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2447 memset(&control
, 0, sizeof(control
));
2449 control
.super
= L2CAP_SUPER_SREJ
;
2450 control
.reqseq
= chan
->srej_list
.tail
;
2451 l2cap_send_sframe(chan
, &control
);
2454 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2456 struct l2cap_ctrl control
;
2460 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2462 memset(&control
, 0, sizeof(control
));
2464 control
.super
= L2CAP_SUPER_SREJ
;
2466 /* Capture initial list head to allow only one pass through the list. */
2467 initial_head
= chan
->srej_list
.head
;
2470 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2471 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2474 control
.reqseq
= seq
;
2475 l2cap_send_sframe(chan
, &control
);
2476 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2477 } while (chan
->srej_list
.head
!= initial_head
);
2480 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2482 struct sk_buff
*acked_skb
;
2485 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2487 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2490 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2491 chan
->expected_ack_seq
, chan
->unacked_frames
);
2493 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2494 ackseq
= __next_seq(chan
, ackseq
)) {
2496 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2498 skb_unlink(acked_skb
, &chan
->tx_q
);
2499 kfree_skb(acked_skb
);
2500 chan
->unacked_frames
--;
2504 chan
->expected_ack_seq
= reqseq
;
2506 if (chan
->unacked_frames
== 0)
2507 __clear_retrans_timer(chan
);
2509 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2512 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2514 BT_DBG("chan %p", chan
);
2516 chan
->expected_tx_seq
= chan
->buffer_seq
;
2517 l2cap_seq_list_clear(&chan
->srej_list
);
2518 skb_queue_purge(&chan
->srej_q
);
2519 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
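/* Dispatch a transmit-path event to the handler for the current ERTM
 * transmit state.
 */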
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}
}
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}

static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;

		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */
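/* Build a signalling command PDU. The command header and payload are placed
 * in one skb; anything beyond the connection MTU is carried in continuation
 * fragments chained on frag_list.
 */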
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
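/* Append an EFS (Extended Flow Specification) option describing the local
 * service type, SDU size and timing for ERTM or streaming mode.
 */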
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
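/* Reset all ERTM/streaming sequence state for a channel and, for ERTM mode,
 * set up the retransmission, monitor and ack timers plus the SREJ and
 * retransmission sequence lists.
 */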
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}

static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}

static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
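/* Select the transmit window: use the extended control field when the
 * requested window exceeds the default and the peer supports extended
 * windows, otherwise clamp to the default window size.
 */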
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
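/* Build the options for an outgoing Configuration Request (MTU, RFC, and
 * optionally EFS, EWS and FCS) based on the negotiated channel mode.
 */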
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode         = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3152 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3154 struct l2cap_conf_rsp
*rsp
= data
;
3155 void *ptr
= rsp
->data
;
3156 void *req
= chan
->conf_req
;
3157 int len
= chan
->conf_len
;
3158 int type
, hint
, olen
;
3160 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3161 struct l2cap_conf_efs efs
;
3163 u16 mtu
= L2CAP_DEFAULT_MTU
;
3164 u16 result
= L2CAP_CONF_SUCCESS
;
3167 BT_DBG("chan %p", chan
);
3169 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3170 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3172 hint
= type
& L2CAP_CONF_HINT
;
3173 type
&= L2CAP_CONF_MASK
;
3176 case L2CAP_CONF_MTU
:
3180 case L2CAP_CONF_FLUSH_TO
:
3181 chan
->flush_to
= val
;
3184 case L2CAP_CONF_QOS
:
3187 case L2CAP_CONF_RFC
:
3188 if (olen
== sizeof(rfc
))
3189 memcpy(&rfc
, (void *) val
, olen
);
3192 case L2CAP_CONF_FCS
:
3193 if (val
== L2CAP_FCS_NONE
)
3194 set_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
);
3197 case L2CAP_CONF_EFS
:
3199 if (olen
== sizeof(efs
))
3200 memcpy(&efs
, (void *) val
, olen
);
3203 case L2CAP_CONF_EWS
:
3205 return -ECONNREFUSED
;
3207 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3208 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3209 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3210 chan
->remote_tx_win
= val
;
3217 result
= L2CAP_CONF_UNKNOWN
;
3218 *((u8
*) ptr
++) = type
;
3223 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3226 switch (chan
->mode
) {
3227 case L2CAP_MODE_STREAMING
:
3228 case L2CAP_MODE_ERTM
:
3229 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3230 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3231 chan
->conn
->feat_mask
);
3236 if (__l2cap_efs_supported(chan
))
3237 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3239 return -ECONNREFUSED
;
3242 if (chan
->mode
!= rfc
.mode
)
3243 return -ECONNREFUSED
;
3249 if (chan
->mode
!= rfc
.mode
) {
3250 result
= L2CAP_CONF_UNACCEPT
;
3251 rfc
.mode
= chan
->mode
;
3253 if (chan
->num_conf_rsp
== 1)
3254 return -ECONNREFUSED
;
3256 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3257 (unsigned long) &rfc
);
3260 if (result
== L2CAP_CONF_SUCCESS
) {
3261 /* Configure output options and let the other side know
3262 * which ones we don't like. */
3264 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3265 result
= L2CAP_CONF_UNACCEPT
;
3268 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3270 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3273 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3274 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3275 efs
.stype
!= chan
->local_stype
) {
3277 result
= L2CAP_CONF_UNACCEPT
;
3279 if (chan
->num_conf_req
>= 1)
3280 return -ECONNREFUSED
;
3282 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3284 (unsigned long) &efs
);
3286 /* Send PENDING Conf Rsp */
3287 result
= L2CAP_CONF_PENDING
;
3288 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3293 case L2CAP_MODE_BASIC
:
3294 chan
->fcs
= L2CAP_FCS_NONE
;
3295 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3298 case L2CAP_MODE_ERTM
:
3299 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3300 chan
->remote_tx_win
= rfc
.txwin_size
;
3302 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3304 chan
->remote_max_tx
= rfc
.max_transmit
;
3306 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3307 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3308 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3309 rfc
.max_pdu_size
= cpu_to_le16(size
);
3310 chan
->remote_mps
= size
;
3312 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3314 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3316 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3317 sizeof(rfc
), (unsigned long) &rfc
);
3319 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3320 chan
->remote_id
= efs
.id
;
3321 chan
->remote_stype
= efs
.stype
;
3322 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3323 chan
->remote_flush_to
=
3324 le32_to_cpu(efs
.flush_to
);
3325 chan
->remote_acc_lat
=
3326 le32_to_cpu(efs
.acc_lat
);
3327 chan
->remote_sdu_itime
=
3328 le32_to_cpu(efs
.sdu_itime
);
3329 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3331 (unsigned long) &efs
);
3335 case L2CAP_MODE_STREAMING
:
3336 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3337 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3338 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3339 rfc
.max_pdu_size
= cpu_to_le16(size
);
3340 chan
->remote_mps
= size
;
3342 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3344 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3345 (unsigned long) &rfc
);
3350 result
= L2CAP_CONF_UNACCEPT
;
3352 memset(&rfc
, 0, sizeof(rfc
));
3353 rfc
.mode
= chan
->mode
;
3356 if (result
== L2CAP_CONF_SUCCESS
)
3357 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3359 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3360 rsp
->result
= cpu_to_le16(result
);
3361 rsp
->flags
= __constant_cpu_to_le16(0);
3366 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3367 void *data
, u16
*result
)
3369 struct l2cap_conf_req
*req
= data
;
3370 void *ptr
= req
->data
;
3373 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3374 struct l2cap_conf_efs efs
;
3376 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3378 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3379 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3382 case L2CAP_CONF_MTU
:
3383 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3384 *result
= L2CAP_CONF_UNACCEPT
;
3385 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3388 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3391 case L2CAP_CONF_FLUSH_TO
:
3392 chan
->flush_to
= val
;
3393 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3397 case L2CAP_CONF_RFC
:
3398 if (olen
== sizeof(rfc
))
3399 memcpy(&rfc
, (void *)val
, olen
);
3401 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3402 rfc
.mode
!= chan
->mode
)
3403 return -ECONNREFUSED
;
3407 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3408 sizeof(rfc
), (unsigned long) &rfc
);
3411 case L2CAP_CONF_EWS
:
3412 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3413 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3417 case L2CAP_CONF_EFS
:
3418 if (olen
== sizeof(efs
))
3419 memcpy(&efs
, (void *)val
, olen
);
3421 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3422 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3423 efs
.stype
!= chan
->local_stype
)
3424 return -ECONNREFUSED
;
3426 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3427 (unsigned long) &efs
);
3430 case L2CAP_CONF_FCS
:
3431 if (*result
== L2CAP_CONF_PENDING
)
3432 if (val
== L2CAP_FCS_NONE
)
3433 set_bit(CONF_RECV_NO_FCS
,
3439 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3440 return -ECONNREFUSED
;
3442 chan
->mode
= rfc
.mode
;
3444 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3446 case L2CAP_MODE_ERTM
:
3447 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3448 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3449 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3450 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3451 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3454 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3455 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3456 chan
->local_sdu_itime
=
3457 le32_to_cpu(efs
.sdu_itime
);
3458 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3459 chan
->local_flush_to
=
3460 le32_to_cpu(efs
.flush_to
);
3464 case L2CAP_MODE_STREAMING
:
3465 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3469 req
->dcid
= cpu_to_le16(chan
->dcid
);
3470 req
->flags
= __constant_cpu_to_le16(0);
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3519 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3523 /* Use sane default values in case a misbehaving remote device
3524 * did not send an RFC or extended window size option.
3526 u16 txwin_ext
= chan
->ack_win
;
3527 struct l2cap_conf_rfc rfc
= {
3529 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3530 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3531 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3532 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3535 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3537 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3540 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3541 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3544 case L2CAP_CONF_RFC
:
3545 if (olen
== sizeof(rfc
))
3546 memcpy(&rfc
, (void *)val
, olen
);
3548 case L2CAP_CONF_EWS
:
3555 case L2CAP_MODE_ERTM
:
3556 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3557 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3558 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3559 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3560 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3562 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3565 case L2CAP_MODE_STREAMING
:
3566 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3591 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3592 struct l2cap_cmd_hdr
*cmd
,
3593 u8
*data
, u8 rsp_code
, u8 amp_id
)
3595 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3596 struct l2cap_conn_rsp rsp
;
3597 struct l2cap_chan
*chan
= NULL
, *pchan
;
3598 struct sock
*parent
, *sk
= NULL
;
3599 int result
, status
= L2CAP_CS_NO_INFO
;
3601 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3602 __le16 psm
= req
->psm
;
3604 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3606 /* Check if we have socket listening on psm */
3607 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3609 result
= L2CAP_CR_BAD_PSM
;
3615 mutex_lock(&conn
->chan_lock
);
3618 /* Check if the ACL is secure enough (if not SDP) */
3619 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3620 !hci_conn_check_link_mode(conn
->hcon
)) {
3621 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3622 result
= L2CAP_CR_SEC_BLOCK
;
3626 result
= L2CAP_CR_NO_MEM
;
3628 /* Check if we already have channel with that dcid */
3629 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3632 chan
= pchan
->ops
->new_connection(pchan
);
3638 hci_conn_hold(conn
->hcon
);
3640 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3641 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3644 chan
->local_amp_id
= amp_id
;
3646 __l2cap_chan_add(conn
, chan
);
3650 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3652 chan
->ident
= cmd
->ident
;
3654 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3655 if (l2cap_chan_check_security(chan
)) {
3656 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3657 __l2cap_state_change(chan
, BT_CONNECT2
);
3658 result
= L2CAP_CR_PEND
;
3659 status
= L2CAP_CS_AUTHOR_PEND
;
3660 chan
->ops
->defer(chan
);
3662 /* Force pending result for AMP controllers.
3663 * The connection will succeed after the
3664 * physical link is up.
3667 __l2cap_state_change(chan
, BT_CONNECT2
);
3668 result
= L2CAP_CR_PEND
;
3670 __l2cap_state_change(chan
, BT_CONFIG
);
3671 result
= L2CAP_CR_SUCCESS
;
3673 status
= L2CAP_CS_NO_INFO
;
3676 __l2cap_state_change(chan
, BT_CONNECT2
);
3677 result
= L2CAP_CR_PEND
;
3678 status
= L2CAP_CS_AUTHEN_PEND
;
3681 __l2cap_state_change(chan
, BT_CONNECT2
);
3682 result
= L2CAP_CR_PEND
;
3683 status
= L2CAP_CS_NO_INFO
;
3687 release_sock(parent
);
3688 mutex_unlock(&conn
->chan_lock
);
3691 rsp
.scid
= cpu_to_le16(scid
);
3692 rsp
.dcid
= cpu_to_le16(dcid
);
3693 rsp
.result
= cpu_to_le16(result
);
3694 rsp
.status
= cpu_to_le16(status
);
3695 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3697 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3698 struct l2cap_info_req info
;
3699 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3701 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3702 conn
->info_ident
= l2cap_get_ident(conn
);
3704 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3706 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3707 sizeof(info
), &info
);
3710 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3711 result
== L2CAP_CR_SUCCESS
) {
3713 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3714 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3715 l2cap_build_conf_req(chan
, buf
), buf
);
3716 chan
->num_conf_req
++;
3722 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3723 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3725 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3726 struct hci_conn
*hcon
= conn
->hcon
;
3729 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3730 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3731 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3732 hcon
->dst_type
, 0, NULL
, 0,
3734 hci_dev_unlock(hdev
);
3736 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3740 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3741 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3743 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3744 u16 scid
, dcid
, result
, status
;
3745 struct l2cap_chan
*chan
;
3749 scid
= __le16_to_cpu(rsp
->scid
);
3750 dcid
= __le16_to_cpu(rsp
->dcid
);
3751 result
= __le16_to_cpu(rsp
->result
);
3752 status
= __le16_to_cpu(rsp
->status
);
3754 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3755 dcid
, scid
, result
, status
);
3757 mutex_lock(&conn
->chan_lock
);
3760 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3766 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3775 l2cap_chan_lock(chan
);
3778 case L2CAP_CR_SUCCESS
:
3779 l2cap_state_change(chan
, BT_CONFIG
);
3782 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3784 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3787 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3788 l2cap_build_conf_req(chan
, req
), req
);
3789 chan
->num_conf_req
++;
3793 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3797 l2cap_chan_del(chan
, ECONNREFUSED
);
3801 l2cap_chan_unlock(chan
);
3804 mutex_unlock(&conn
->chan_lock
);
static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}
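/* Send a Configuration Response with result SUCCESS once the local side of
 * an EFS negotiation has completed.
 */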
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
3836 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3837 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3840 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3843 struct l2cap_chan
*chan
;
3846 dcid
= __le16_to_cpu(req
->dcid
);
3847 flags
= __le16_to_cpu(req
->flags
);
3849 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3851 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3855 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3856 struct l2cap_cmd_rej_cid rej
;
3858 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3859 rej
.scid
= cpu_to_le16(chan
->scid
);
3860 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3862 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3867 /* Reject if config buffer is too small. */
3868 len
= cmd_len
- sizeof(*req
);
3869 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3870 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3871 l2cap_build_conf_rsp(chan
, rsp
,
3872 L2CAP_CONF_REJECT
, flags
), rsp
);
3877 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3878 chan
->conf_len
+= len
;
3880 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3881 /* Incomplete config. Send empty response. */
3882 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3883 l2cap_build_conf_rsp(chan
, rsp
,
3884 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3888 /* Complete config. */
3889 len
= l2cap_parse_conf_req(chan
, rsp
);
3891 l2cap_send_disconn_req(chan
, ECONNRESET
);
3895 chan
->ident
= cmd
->ident
;
3896 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3897 chan
->num_conf_rsp
++;
3899 /* Reset config buffer. */
3902 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3905 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3906 set_default_fcs(chan
);
3908 if (chan
->mode
== L2CAP_MODE_ERTM
||
3909 chan
->mode
== L2CAP_MODE_STREAMING
)
3910 err
= l2cap_ertm_init(chan
);
3913 l2cap_send_disconn_req(chan
, -err
);
3915 l2cap_chan_ready(chan
);
3920 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3922 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3923 l2cap_build_conf_req(chan
, buf
), buf
);
3924 chan
->num_conf_req
++;
3927 /* Got Conf Rsp PENDING from remote side and asume we sent
3928 Conf Rsp PENDING in the code above */
3929 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3930 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3932 /* check compatibility */
3934 /* Send rsp for BR/EDR channel */
3936 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
3938 chan
->ident
= cmd
->ident
;
3942 l2cap_chan_unlock(chan
);
3946 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
3947 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3949 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3950 u16 scid
, flags
, result
;
3951 struct l2cap_chan
*chan
;
3952 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3955 scid
= __le16_to_cpu(rsp
->scid
);
3956 flags
= __le16_to_cpu(rsp
->flags
);
3957 result
= __le16_to_cpu(rsp
->result
);
3959 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3962 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3967 case L2CAP_CONF_SUCCESS
:
3968 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3969 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3972 case L2CAP_CONF_PENDING
:
3973 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3975 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3978 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3981 l2cap_send_disconn_req(chan
, ECONNRESET
);
3985 if (!chan
->hs_hcon
) {
3986 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
3989 if (l2cap_check_efs(chan
)) {
3990 amp_create_logical_link(chan
);
3991 chan
->ident
= cmd
->ident
;
3997 case L2CAP_CONF_UNACCEPT
:
3998 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4001 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4002 l2cap_send_disconn_req(chan
, ECONNRESET
);
4006 /* throw out any old stored conf requests */
4007 result
= L2CAP_CONF_SUCCESS
;
4008 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4011 l2cap_send_disconn_req(chan
, ECONNRESET
);
4015 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4016 L2CAP_CONF_REQ
, len
, req
);
4017 chan
->num_conf_req
++;
4018 if (result
!= L2CAP_CONF_SUCCESS
)
4024 l2cap_chan_set_err(chan
, ECONNRESET
);
4026 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4027 l2cap_send_disconn_req(chan
, ECONNRESET
);
4031 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4034 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4036 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4037 set_default_fcs(chan
);
4039 if (chan
->mode
== L2CAP_MODE_ERTM
||
4040 chan
->mode
== L2CAP_MODE_STREAMING
)
4041 err
= l2cap_ertm_init(chan
);
4044 l2cap_send_disconn_req(chan
, -err
);
4046 l2cap_chan_ready(chan
);
4050 l2cap_chan_unlock(chan
);
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4136 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4137 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4139 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4142 type
= __le16_to_cpu(req
->type
);
4144 BT_DBG("type 0x%4.4x", type
);
4146 if (type
== L2CAP_IT_FEAT_MASK
) {
4148 u32 feat_mask
= l2cap_feat_mask
;
4149 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4150 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4151 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4153 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4156 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4157 | L2CAP_FEAT_EXT_WINDOW
;
4159 put_unaligned_le32(feat_mask
, rsp
->data
);
4160 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4162 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4164 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4167 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4169 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4171 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4172 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4173 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4174 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4177 struct l2cap_info_rsp rsp
;
4178 rsp
.type
= cpu_to_le16(type
);
4179 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4180 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4187 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4188 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4190 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4193 type
= __le16_to_cpu(rsp
->type
);
4194 result
= __le16_to_cpu(rsp
->result
);
4196 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4198 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4199 if (cmd
->ident
!= conn
->info_ident
||
4200 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4203 cancel_delayed_work(&conn
->info_timer
);
4205 if (result
!= L2CAP_IR_SUCCESS
) {
4206 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4207 conn
->info_ident
= 0;
4209 l2cap_conn_start(conn
);
4215 case L2CAP_IT_FEAT_MASK
:
4216 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4218 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4219 struct l2cap_info_req req
;
4220 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4222 conn
->info_ident
= l2cap_get_ident(conn
);
4224 l2cap_send_cmd(conn
, conn
->info_ident
,
4225 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4227 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4228 conn
->info_ident
= 0;
4230 l2cap_conn_start(conn
);
4234 case L2CAP_IT_FIXED_CHAN
:
4235 conn
->fixed_chan_mask
= rsp
->data
[0];
4236 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4237 conn
->info_ident
= 0;
4239 l2cap_conn_start(conn
);
4246 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4247 struct l2cap_cmd_hdr
*cmd
,
4248 u16 cmd_len
, void *data
)
4250 struct l2cap_create_chan_req
*req
= data
;
4251 struct l2cap_create_chan_rsp rsp
;
4252 struct l2cap_chan
*chan
;
4253 struct hci_dev
*hdev
;
4256 if (cmd_len
!= sizeof(*req
))
4262 psm
= le16_to_cpu(req
->psm
);
4263 scid
= le16_to_cpu(req
->scid
);
4265 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4267 /* For controller id 0 make BR/EDR connection */
4268 if (req
->amp_id
== HCI_BREDR_ID
) {
4269 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4274 /* Validate AMP controller id */
4275 hdev
= hci_dev_get(req
->amp_id
);
4279 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4284 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4287 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4288 struct hci_conn
*hs_hcon
;
4290 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
, conn
->dst
);
4296 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4298 mgr
->bredr_chan
= chan
;
4299 chan
->hs_hcon
= hs_hcon
;
4300 chan
->fcs
= L2CAP_FCS_NONE
;
4301 conn
->mtu
= hdev
->block_mtu
;
4310 rsp
.scid
= cpu_to_le16(scid
);
4311 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4312 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4314 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
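/* Helpers for AMP channel move signalling: Move Channel Request, Response,
 * Confirm and Confirm Response PDUs.
 */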
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
	struct l2cap_move_chan_req req;
	u8 ident;

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	ident = l2cap_get_ident(chan->conn);
	chan->ident = ident;

	req.icid = cpu_to_le16(chan->scid);
	req.dest_amp_id = dest_amp_id;

	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
		       &req);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_rsp rsp;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	rsp.icid = cpu_to_le16(chan->dcid);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
		       sizeof(rsp), &rsp);
}
static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	chan->ident = l2cap_get_ident(chan->conn);

	cfm.icid = cpu_to_le16(chan->scid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	cfm.icid = cpu_to_le16(icid);
	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);
}

static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					 u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp;

	BT_DBG("icid 0x%4.4x", icid);

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4401 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4403 /* Logical link setup failed */
4404 if (chan
->state
!= BT_CONNECTED
) {
4405 /* Create channel failure, disconnect */
4406 l2cap_send_disconn_req(chan
, ECONNRESET
);
4410 switch (chan
->move_role
) {
4411 case L2CAP_MOVE_ROLE_RESPONDER
:
4412 l2cap_move_done(chan
);
4413 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4415 case L2CAP_MOVE_ROLE_INITIATOR
:
4416 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4417 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4418 /* Remote has only sent pending or
4419 * success responses, clean up
4421 l2cap_move_done(chan
);
4424 /* Other amp move states imply that the move
4425 * has already aborted
4427 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4432 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4433 struct hci_chan
*hchan
)
4435 struct l2cap_conf_rsp rsp
;
4437 chan
->hs_hchan
= hchan
;
4438 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4440 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4442 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4445 set_default_fcs(chan
);
4447 err
= l2cap_ertm_init(chan
);
4449 l2cap_send_disconn_req(chan
, -err
);
4451 l2cap_chan_ready(chan
);
4455 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4456 struct hci_chan
*hchan
)
4458 chan
->hs_hcon
= hchan
->conn
;
4459 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4461 BT_DBG("move_state %d", chan
->move_state
);
4463 switch (chan
->move_state
) {
4464 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4465 /* Move confirm will be sent after a success
4466 * response is received
4468 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4470 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4471 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4472 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4473 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4474 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4475 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4476 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4477 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4478 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4482 /* Move was not in expected state, free the channel */
4483 __release_logical_link(chan
);
4485 chan
->move_state
= L2CAP_MOVE_STABLE
;
4489 /* Call with chan locked */
4490 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4493 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4496 l2cap_logical_fail(chan
);
4497 __release_logical_link(chan
);
4501 if (chan
->state
!= BT_CONNECTED
) {
4502 /* Ignore logical link if channel is on BR/EDR */
4503 if (chan
->local_amp_id
)
4504 l2cap_logical_finish_create(chan
, hchan
);
4506 l2cap_logical_finish_move(chan
, hchan
);
4510 void l2cap_move_start(struct l2cap_chan
*chan
)
4512 BT_DBG("chan %p", chan
);
4514 if (chan
->local_amp_id
== HCI_BREDR_ID
) {
4515 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4517 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4518 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4519 /* Placeholder - start physical link setup */
4521 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4522 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4524 l2cap_move_setup(chan
);
4525 l2cap_send_move_chan_req(chan
, 0);
4529 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4530 u8 local_amp_id
, u8 remote_amp_id
)
4532 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4533 local_amp_id
, remote_amp_id
);
4535 chan
->fcs
= L2CAP_FCS_NONE
;
4537 /* Outgoing channel on AMP */
4538 if (chan
->state
== BT_CONNECT
) {
4539 if (result
== L2CAP_CR_SUCCESS
) {
4540 chan
->local_amp_id
= local_amp_id
;
4541 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4543 /* Revert to BR/EDR connect */
4544 l2cap_send_conn_req(chan
);
4550 /* Incoming channel on AMP */
4551 if (__l2cap_no_conn_pending(chan
)) {
4552 struct l2cap_conn_rsp rsp
;
4554 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4555 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4557 if (result
== L2CAP_CR_SUCCESS
) {
4558 /* Send successful response */
4559 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
4560 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4562 /* Send negative response */
4563 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4564 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4567 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4570 if (result
== L2CAP_CR_SUCCESS
) {
4571 __l2cap_state_change(chan
, BT_CONFIG
);
4572 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4573 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4575 l2cap_build_conf_req(chan
, buf
), buf
);
4576 chan
->num_conf_req
++;
4581 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4584 l2cap_move_setup(chan
);
4585 chan
->move_id
= local_amp_id
;
4586 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4588 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4591 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4593 struct hci_chan
*hchan
= NULL
;
4595 /* Placeholder - get hci_chan for logical link */
4598 if (hchan
->state
== BT_CONNECTED
) {
4599 /* Logical link is ready to go */
4600 chan
->hs_hcon
= hchan
->conn
;
4601 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4602 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4603 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4605 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4607 /* Wait for logical link to be ready */
4608 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4611 /* Logical link not available */
4612 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4616 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4618 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4620 if (result
== -EINVAL
)
4621 rsp_result
= L2CAP_MR_BAD_ID
;
4623 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4625 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4628 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4629 chan
->move_state
= L2CAP_MOVE_STABLE
;
4631 /* Restart data transmission */
4632 l2cap_ertm_send(chan
);
4635 /* Invoke with locked chan */
4636 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4638 u8 local_amp_id
= chan
->local_amp_id
;
4639 u8 remote_amp_id
= chan
->remote_amp_id
;
4641 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4642 chan
, result
, local_amp_id
, remote_amp_id
);
4644 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4645 l2cap_chan_unlock(chan
);
4649 if (chan
->state
!= BT_CONNECTED
) {
4650 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4651 } else if (result
!= L2CAP_MR_SUCCESS
) {
4652 l2cap_do_move_cancel(chan
, result
);
4654 switch (chan
->move_role
) {
4655 case L2CAP_MOVE_ROLE_INITIATOR
:
4656 l2cap_do_move_initiate(chan
, local_amp_id
,
4659 case L2CAP_MOVE_ROLE_RESPONDER
:
4660 l2cap_do_move_respond(chan
, result
);
4663 l2cap_do_move_cancel(chan
, result
);
4669 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4670 struct l2cap_cmd_hdr
*cmd
,
4671 u16 cmd_len
, void *data
)
4673 struct l2cap_move_chan_req
*req
= data
;
4674 struct l2cap_move_chan_rsp rsp
;
4675 struct l2cap_chan
*chan
;
4677 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4679 if (cmd_len
!= sizeof(*req
))
4682 icid
= le16_to_cpu(req
->icid
);
4684 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4689 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4691 rsp
.icid
= cpu_to_le16(icid
);
4692 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4693 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4698 chan
->ident
= cmd
->ident
;
4700 if (chan
->scid
< L2CAP_CID_DYN_START
||
4701 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4702 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4703 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4704 result
= L2CAP_MR_NOT_ALLOWED
;
4705 goto send_move_response
;
4708 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4709 result
= L2CAP_MR_SAME_ID
;
4710 goto send_move_response
;
4713 if (req
->dest_amp_id
) {
4714 struct hci_dev
*hdev
;
4715 hdev
= hci_dev_get(req
->dest_amp_id
);
4716 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4717 !test_bit(HCI_UP
, &hdev
->flags
)) {
4721 result
= L2CAP_MR_BAD_ID
;
4722 goto send_move_response
;
4727 /* Detect a move collision. Only send a collision response
4728 * if this side has "lost", otherwise proceed with the move.
4729 * The winner has the larger bd_addr.
4731 if ((__chan_is_moving(chan
) ||
4732 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4733 bacmp(conn
->src
, conn
->dst
) > 0) {
4734 result
= L2CAP_MR_COLLISION
;
4735 goto send_move_response
;
4738 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4739 l2cap_move_setup(chan
);
4740 chan
->move_id
= req
->dest_amp_id
;
4743 if (!req
->dest_amp_id
) {
4744 /* Moving to BR/EDR */
4745 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4746 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4747 result
= L2CAP_MR_PEND
;
4749 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4750 result
= L2CAP_MR_SUCCESS
;
4753 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4754 /* Placeholder - uncomment when amp functions are available */
4755 /*amp_accept_physical(chan, req->dest_amp_id);*/
4756 result
= L2CAP_MR_PEND
;
4760 l2cap_send_move_chan_rsp(chan
, result
);
4762 l2cap_chan_unlock(chan
);
4767 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4769 struct l2cap_chan
*chan
;
4770 struct hci_chan
*hchan
= NULL
;
4772 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4774 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4778 __clear_chan_timer(chan
);
4779 if (result
== L2CAP_MR_PEND
)
4780 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4782 switch (chan
->move_state
) {
4783 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4784 /* Move confirm will be sent when logical link
4787 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4789 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4790 if (result
== L2CAP_MR_PEND
) {
4792 } else if (test_bit(CONN_LOCAL_BUSY
,
4793 &chan
->conn_state
)) {
4794 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4796 /* Logical link is up or moving to BR/EDR,
4799 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4800 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4803 case L2CAP_MOVE_WAIT_RSP
:
4805 if (result
== L2CAP_MR_SUCCESS
) {
4806 /* Remote is ready, send confirm immediately
4807 * after logical link is ready
4809 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4811 /* Both logical link and move success
4812 * are required to confirm
4814 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
4817 /* Placeholder - get hci_chan for logical link */
4819 /* Logical link not available */
4820 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4824 /* If the logical link is not yet connected, do not
4825 * send confirmation.
4827 if (hchan
->state
!= BT_CONNECTED
)
4830 /* Logical link is already ready to go */
4832 chan
->hs_hcon
= hchan
->conn
;
4833 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4835 if (result
== L2CAP_MR_SUCCESS
) {
4836 /* Can confirm now */
4837 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4839 /* Now only need move success
4842 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4845 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4848 /* Any other amp move state means the move failed. */
4849 chan
->move_id
= chan
->local_amp_id
;
4850 l2cap_move_done(chan
);
4851 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4854 l2cap_chan_unlock(chan
);
4857 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
4860 struct l2cap_chan
*chan
;
4862 chan
= l2cap_get_chan_by_ident(conn
, ident
);
4864 /* Could not locate channel, icid is best guess */
4865 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4869 __clear_chan_timer(chan
);
4871 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4872 if (result
== L2CAP_MR_COLLISION
) {
4873 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4875 /* Cleanup - cancel move */
4876 chan
->move_id
= chan
->local_amp_id
;
4877 l2cap_move_done(chan
);
4881 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4883 l2cap_chan_unlock(chan
);
4886 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4887 struct l2cap_cmd_hdr
*cmd
,
4888 u16 cmd_len
, void *data
)
4890 struct l2cap_move_chan_rsp
*rsp
= data
;
4893 if (cmd_len
!= sizeof(*rsp
))
4896 icid
= le16_to_cpu(rsp
->icid
);
4897 result
= le16_to_cpu(rsp
->result
);
4899 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4901 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
4902 l2cap_move_continue(conn
, icid
, result
);
4904 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
4909 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4910 struct l2cap_cmd_hdr
*cmd
,
4911 u16 cmd_len
, void *data
)
4913 struct l2cap_move_chan_cfm
*cfm
= data
;
4914 struct l2cap_chan
*chan
;
4917 if (cmd_len
!= sizeof(*cfm
))
4920 icid
= le16_to_cpu(cfm
->icid
);
4921 result
= le16_to_cpu(cfm
->result
);
4923 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4925 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4927 /* Spec requires a response even if the icid was not found */
4928 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4932 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
4933 if (result
== L2CAP_MC_CONFIRMED
) {
4934 chan
->local_amp_id
= chan
->move_id
;
4935 if (!chan
->local_amp_id
)
4936 __release_logical_link(chan
);
4938 chan
->move_id
= chan
->local_amp_id
;
4941 l2cap_move_done(chan
);
4944 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4946 l2cap_chan_unlock(chan
);
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
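
/* Validate a requested LE connection parameter set: the interval
 * range, latency and supervision timeout multiplier must be within
 * bounds and mutually consistent before the update is accepted.
 */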
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
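
/* Dispatch a single BR/EDR signaling command to its handler. An
 * unknown opcode is reported and the error is returned so the
 * caller can send a Command Reject.
 */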
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_connect_create_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}
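
/* Walk all signaling commands contained in one C-frame and hand
 * each of them to the LE or BR/EDR dispatcher. A failing command
 * is answered with a Command Reject (reason "not understood").
 */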
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}

	return 0;
}
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
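
/* Reassemble an SDU from I-frames according to the SAR bits:
 * unsegmented frames are delivered directly, start/continue/end
 * fragments are collected on chan->sdu until the announced SDU
 * length is reached. Any inconsistency discards the partial SDU.
 */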
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}

void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;

	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
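
/* Classify an incoming I-frame sequence number relative to the
 * receive window and any outstanding SREJ requests. The result
 * drives the ERTM receive state machines below.
 */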
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
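
/* Main ERTM receive entry point: validate reqseq and route the
 * event to the handler for the current receive state.
 */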
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
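
/* Deliver a data frame to the channel identified by its CID,
 * creating the A2MP channel on demand. Basic mode data goes
 * straight to the channel ops, ERTM/streaming data goes through
 * l2cap_data_rcv().
 */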
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
			      struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
/* ---- L2CAP interface with lower layer (HCI) ---- */

int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else {
		l2cap_conn_del(hcon, bt_to_errno(status));
	}
}

int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
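
/* Receive ACL data from the HCI layer and reassemble fragmented
 * L2CAP frames: a start fragment allocates conn->rx_skb, and
 * continuation fragments are appended until the length announced
 * in the basic L2CAP header is complete.
 */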
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;

int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
						    NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;
}

void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");