2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Locally supported L2CAP extended-feature mask, reported in the
 * Information Response for L2CAP_IT_FEAT_MASK. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap advertised to peers. NOTE(review): only bit 1
 * (0x02) is set here — presumably the L2CAP signalling channel per the
 * Core Specification fixed-channel numbering; confirm against spec. */
static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Deferred-work queue; presumably serviced by l2cap_busy_work (forward
 * declared below) for local-busy handling — confirm at the usage sites. */
static struct workqueue_struct *_busy_wq;
65 struct bt_sock_list l2cap_sk_list
= {
66 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
69 static void l2cap_busy_work(struct work_struct
*work
);
71 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
72 u8 code
, u8 ident
, u16 dlen
, void *data
);
74 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
76 /* ---- L2CAP channels ---- */
77 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
80 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
81 if (l2cap_pi(s
)->dcid
== cid
)
87 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
90 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
91 if (l2cap_pi(s
)->scid
== cid
)
97 /* Find channel with given SCID.
98 * Returns locked socket */
99 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
103 s
= __l2cap_get_chan_by_scid(l
, cid
);
106 read_unlock(&l
->lock
);
110 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
113 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
114 if (l2cap_pi(s
)->ident
== ident
)
120 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
124 s
= __l2cap_get_chan_by_ident(l
, ident
);
127 read_unlock(&l
->lock
);
131 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
133 u16 cid
= L2CAP_CID_DYN_START
;
135 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
136 if (!__l2cap_get_chan_by_scid(l
, cid
))
143 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
148 l2cap_pi(l
->head
)->prev_c
= sk
;
150 l2cap_pi(sk
)->next_c
= l
->head
;
151 l2cap_pi(sk
)->prev_c
= NULL
;
155 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
157 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
159 write_lock_bh(&l
->lock
);
164 l2cap_pi(next
)->prev_c
= prev
;
166 l2cap_pi(prev
)->next_c
= next
;
167 write_unlock_bh(&l
->lock
);
172 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
)
174 struct l2cap_chan_list
*l
= &conn
->chan_list
;
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
177 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
179 conn
->disc_reason
= 0x13;
181 l2cap_pi(sk
)->conn
= conn
;
183 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
184 if (conn
->hcon
->type
== LE_LINK
) {
186 l2cap_pi(sk
)->omtu
= L2CAP_LE_DEFAULT_MTU
;
187 l2cap_pi(sk
)->scid
= L2CAP_CID_LE_DATA
;
188 l2cap_pi(sk
)->dcid
= L2CAP_CID_LE_DATA
;
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
192 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
194 } else if (sk
->sk_type
== SOCK_DGRAM
) {
195 /* Connectionless socket */
196 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
197 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
198 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
200 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
202 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
203 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
206 __l2cap_chan_link(l
, sk
);
210 * Must be called on the locked socket. */
211 void l2cap_chan_del(struct sock
*sk
, int err
)
213 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
214 struct sock
*parent
= bt_sk(sk
)->parent
;
216 l2cap_sock_clear_timer(sk
);
218 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
221 /* Unlink from channel list */
222 l2cap_chan_unlink(&conn
->chan_list
, sk
);
223 l2cap_pi(sk
)->conn
= NULL
;
224 hci_conn_put(conn
->hcon
);
227 sk
->sk_state
= BT_CLOSED
;
228 sock_set_flag(sk
, SOCK_ZAPPED
);
234 bt_accept_unlink(sk
);
235 parent
->sk_data_ready(parent
, 0);
237 sk
->sk_state_change(sk
);
239 skb_queue_purge(TX_QUEUE(sk
));
241 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
242 struct srej_list
*l
, *tmp
;
244 del_timer(&l2cap_pi(sk
)->retrans_timer
);
245 del_timer(&l2cap_pi(sk
)->monitor_timer
);
246 del_timer(&l2cap_pi(sk
)->ack_timer
);
248 skb_queue_purge(SREJ_QUEUE(sk
));
249 skb_queue_purge(BUSY_QUEUE(sk
));
251 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
258 static inline u8
l2cap_get_auth_type(struct sock
*sk
)
260 if (sk
->sk_type
== SOCK_RAW
) {
261 switch (l2cap_pi(sk
)->sec_level
) {
262 case BT_SECURITY_HIGH
:
263 return HCI_AT_DEDICATED_BONDING_MITM
;
264 case BT_SECURITY_MEDIUM
:
265 return HCI_AT_DEDICATED_BONDING
;
267 return HCI_AT_NO_BONDING
;
269 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
270 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
271 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
273 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
274 return HCI_AT_NO_BONDING_MITM
;
276 return HCI_AT_NO_BONDING
;
278 switch (l2cap_pi(sk
)->sec_level
) {
279 case BT_SECURITY_HIGH
:
280 return HCI_AT_GENERAL_BONDING_MITM
;
281 case BT_SECURITY_MEDIUM
:
282 return HCI_AT_GENERAL_BONDING
;
284 return HCI_AT_NO_BONDING
;
289 /* Service level security */
290 static inline int l2cap_check_security(struct sock
*sk
)
292 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
295 auth_type
= l2cap_get_auth_type(sk
);
297 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
301 u8
l2cap_get_ident(struct l2cap_conn
*conn
)
305 /* Get next available identificator.
306 * 1 - 128 are used by kernel.
307 * 129 - 199 are reserved.
308 * 200 - 254 are used by utilities like l2ping, etc.
311 spin_lock_bh(&conn
->lock
);
313 if (++conn
->tx_ident
> 128)
318 spin_unlock_bh(&conn
->lock
);
323 void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
325 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
328 BT_DBG("code 0x%2.2x", code
);
333 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
334 flags
= ACL_START_NO_FLUSH
;
338 hci_send_acl(conn
->hcon
, skb
, flags
);
341 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
344 struct l2cap_hdr
*lh
;
345 struct l2cap_conn
*conn
= pi
->conn
;
346 struct sock
*sk
= (struct sock
*)pi
;
347 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
350 if (sk
->sk_state
!= BT_CONNECTED
)
353 if (pi
->fcs
== L2CAP_FCS_CRC16
)
356 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
358 count
= min_t(unsigned int, conn
->mtu
, hlen
);
359 control
|= L2CAP_CTRL_FRAME_TYPE
;
361 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
362 control
|= L2CAP_CTRL_FINAL
;
363 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
366 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
367 control
|= L2CAP_CTRL_POLL
;
368 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
371 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
375 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
376 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
377 lh
->cid
= cpu_to_le16(pi
->dcid
);
378 put_unaligned_le16(control
, skb_put(skb
, 2));
380 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
381 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
382 put_unaligned_le16(fcs
, skb_put(skb
, 2));
385 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
386 flags
= ACL_START_NO_FLUSH
;
390 hci_send_acl(pi
->conn
->hcon
, skb
, flags
);
393 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
395 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
396 control
|= L2CAP_SUPER_RCV_NOT_READY
;
397 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
399 control
|= L2CAP_SUPER_RCV_READY
;
401 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
403 l2cap_send_sframe(pi
, control
);
406 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
408 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
411 static void l2cap_do_start(struct sock
*sk
)
413 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
415 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
416 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
419 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
420 struct l2cap_conn_req req
;
421 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
422 req
.psm
= l2cap_pi(sk
)->psm
;
424 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
425 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
427 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
428 L2CAP_CONN_REQ
, sizeof(req
), &req
);
431 struct l2cap_info_req req
;
432 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
434 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
435 conn
->info_ident
= l2cap_get_ident(conn
);
437 mod_timer(&conn
->info_timer
, jiffies
+
438 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
440 l2cap_send_cmd(conn
, conn
->info_ident
,
441 L2CAP_INFO_REQ
, sizeof(req
), &req
);
445 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
447 u32 local_feat_mask
= l2cap_feat_mask
;
449 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
452 case L2CAP_MODE_ERTM
:
453 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
454 case L2CAP_MODE_STREAMING
:
455 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
461 void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
463 struct l2cap_disconn_req req
;
468 skb_queue_purge(TX_QUEUE(sk
));
470 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
471 del_timer(&l2cap_pi(sk
)->retrans_timer
);
472 del_timer(&l2cap_pi(sk
)->monitor_timer
);
473 del_timer(&l2cap_pi(sk
)->ack_timer
);
476 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
477 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
478 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
479 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
481 sk
->sk_state
= BT_DISCONN
;
485 /* ---- L2CAP connections ---- */
486 static void l2cap_conn_start(struct l2cap_conn
*conn
)
488 struct l2cap_chan_list
*l
= &conn
->chan_list
;
489 struct sock_del_list del
, *tmp1
, *tmp2
;
492 BT_DBG("conn %p", conn
);
494 INIT_LIST_HEAD(&del
.list
);
498 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
501 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
502 sk
->sk_type
!= SOCK_STREAM
) {
507 if (sk
->sk_state
== BT_CONNECT
) {
508 struct l2cap_conn_req req
;
510 if (!l2cap_check_security(sk
) ||
511 !__l2cap_no_conn_pending(sk
)) {
516 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
518 && l2cap_pi(sk
)->conf_state
&
519 L2CAP_CONF_STATE2_DEVICE
) {
520 tmp1
= kzalloc(sizeof(struct sock_del_list
),
523 list_add_tail(&tmp1
->list
, &del
.list
);
528 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
529 req
.psm
= l2cap_pi(sk
)->psm
;
531 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
532 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
534 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
535 L2CAP_CONN_REQ
, sizeof(req
), &req
);
537 } else if (sk
->sk_state
== BT_CONNECT2
) {
538 struct l2cap_conn_rsp rsp
;
540 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
541 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
543 if (l2cap_check_security(sk
)) {
544 if (bt_sk(sk
)->defer_setup
) {
545 struct sock
*parent
= bt_sk(sk
)->parent
;
546 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
547 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
548 parent
->sk_data_ready(parent
, 0);
551 sk
->sk_state
= BT_CONFIG
;
552 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
553 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
556 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
557 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
560 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
561 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
563 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
564 rsp
.result
!= L2CAP_CR_SUCCESS
) {
569 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
570 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
571 l2cap_build_conf_req(sk
, buf
), buf
);
572 l2cap_pi(sk
)->num_conf_req
++;
578 read_unlock(&l
->lock
);
580 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
581 bh_lock_sock(tmp1
->sk
);
582 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
583 bh_unlock_sock(tmp1
->sk
);
584 list_del(&tmp1
->list
);
589 /* Find socket with cid and source bdaddr.
590 * Returns closest match, locked.
592 static struct sock
*l2cap_get_sock_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
594 struct sock
*s
, *sk
= NULL
, *sk1
= NULL
;
595 struct hlist_node
*node
;
597 read_lock(&l2cap_sk_list
.lock
);
599 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
600 if (state
&& sk
->sk_state
!= state
)
603 if (l2cap_pi(sk
)->scid
== cid
) {
605 if (!bacmp(&bt_sk(sk
)->src
, src
))
609 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
616 read_unlock(&l2cap_sk_list
.lock
);
621 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
623 struct l2cap_chan_list
*list
= &conn
->chan_list
;
624 struct sock
*parent
, *uninitialized_var(sk
);
628 /* Check if we have socket listening on cid */
629 parent
= l2cap_get_sock_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
634 /* Check for backlog size */
635 if (sk_acceptq_is_full(parent
)) {
636 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
640 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
644 write_lock_bh(&list
->lock
);
646 hci_conn_hold(conn
->hcon
);
648 l2cap_sock_init(sk
, parent
);
649 bacpy(&bt_sk(sk
)->src
, conn
->src
);
650 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
652 bt_accept_enqueue(parent
, sk
);
654 __l2cap_chan_add(conn
, sk
);
656 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
658 sk
->sk_state
= BT_CONNECTED
;
659 parent
->sk_data_ready(parent
, 0);
661 write_unlock_bh(&list
->lock
);
664 bh_unlock_sock(parent
);
667 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
669 struct l2cap_chan_list
*l
= &conn
->chan_list
;
672 BT_DBG("conn %p", conn
);
674 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
675 l2cap_le_conn_ready(conn
);
679 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
682 if (conn
->hcon
->type
== LE_LINK
) {
683 l2cap_sock_clear_timer(sk
);
684 sk
->sk_state
= BT_CONNECTED
;
685 sk
->sk_state_change(sk
);
688 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
689 sk
->sk_type
!= SOCK_STREAM
) {
690 l2cap_sock_clear_timer(sk
);
691 sk
->sk_state
= BT_CONNECTED
;
692 sk
->sk_state_change(sk
);
693 } else if (sk
->sk_state
== BT_CONNECT
)
699 read_unlock(&l
->lock
);
702 /* Notify sockets that we cannot guaranty reliability anymore */
703 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
705 struct l2cap_chan_list
*l
= &conn
->chan_list
;
708 BT_DBG("conn %p", conn
);
712 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
713 if (l2cap_pi(sk
)->force_reliable
)
717 read_unlock(&l
->lock
);
720 static void l2cap_info_timeout(unsigned long arg
)
722 struct l2cap_conn
*conn
= (void *) arg
;
724 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
725 conn
->info_ident
= 0;
727 l2cap_conn_start(conn
);
730 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
732 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
737 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
741 hcon
->l2cap_data
= conn
;
744 BT_DBG("hcon %p conn %p", hcon
, conn
);
746 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
747 conn
->mtu
= hcon
->hdev
->le_mtu
;
749 conn
->mtu
= hcon
->hdev
->acl_mtu
;
751 conn
->src
= &hcon
->hdev
->bdaddr
;
752 conn
->dst
= &hcon
->dst
;
756 spin_lock_init(&conn
->lock
);
757 rwlock_init(&conn
->chan_list
.lock
);
759 if (hcon
->type
!= LE_LINK
)
760 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
761 (unsigned long) conn
);
763 conn
->disc_reason
= 0x13;
768 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
770 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
776 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
778 kfree_skb(conn
->rx_skb
);
781 while ((sk
= conn
->chan_list
.head
)) {
783 l2cap_chan_del(sk
, err
);
788 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
789 del_timer_sync(&conn
->info_timer
);
791 hcon
->l2cap_data
= NULL
;
795 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
)
797 struct l2cap_chan_list
*l
= &conn
->chan_list
;
798 write_lock_bh(&l
->lock
);
799 __l2cap_chan_add(conn
, sk
);
800 write_unlock_bh(&l
->lock
);
803 /* ---- Socket interface ---- */
805 /* Find socket with psm and source bdaddr.
806 * Returns closest match.
808 static struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
810 struct sock
*sk
= NULL
, *sk1
= NULL
;
811 struct hlist_node
*node
;
813 read_lock(&l2cap_sk_list
.lock
);
815 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
816 if (state
&& sk
->sk_state
!= state
)
819 if (l2cap_pi(sk
)->psm
== psm
) {
821 if (!bacmp(&bt_sk(sk
)->src
, src
))
825 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
830 read_unlock(&l2cap_sk_list
.lock
);
832 return node
? sk
: sk1
;
835 int l2cap_do_connect(struct sock
*sk
)
837 bdaddr_t
*src
= &bt_sk(sk
)->src
;
838 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
839 struct l2cap_conn
*conn
;
840 struct hci_conn
*hcon
;
841 struct hci_dev
*hdev
;
845 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
848 hdev
= hci_get_route(dst
, src
);
850 return -EHOSTUNREACH
;
852 hci_dev_lock_bh(hdev
);
854 auth_type
= l2cap_get_auth_type(sk
);
856 if (l2cap_pi(sk
)->dcid
== L2CAP_CID_LE_DATA
)
857 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
858 l2cap_pi(sk
)->sec_level
, auth_type
);
860 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
861 l2cap_pi(sk
)->sec_level
, auth_type
);
868 conn
= l2cap_conn_add(hcon
, 0);
875 /* Update source addr of the socket */
876 bacpy(src
, conn
->src
);
878 l2cap_chan_add(conn
, sk
);
880 sk
->sk_state
= BT_CONNECT
;
881 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
883 if (hcon
->state
== BT_CONNECTED
) {
884 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
885 sk
->sk_type
!= SOCK_STREAM
) {
886 l2cap_sock_clear_timer(sk
);
887 if (l2cap_check_security(sk
))
888 sk
->sk_state
= BT_CONNECTED
;
896 hci_dev_unlock_bh(hdev
);
901 int __l2cap_wait_ack(struct sock
*sk
)
903 DECLARE_WAITQUEUE(wait
, current
);
907 add_wait_queue(sk_sleep(sk
), &wait
);
908 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
909 set_current_state(TASK_INTERRUPTIBLE
);
914 if (signal_pending(current
)) {
915 err
= sock_intr_errno(timeo
);
920 timeo
= schedule_timeout(timeo
);
923 err
= sock_error(sk
);
927 set_current_state(TASK_RUNNING
);
928 remove_wait_queue(sk_sleep(sk
), &wait
);
932 static void l2cap_monitor_timeout(unsigned long arg
)
934 struct sock
*sk
= (void *) arg
;
939 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
940 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
945 l2cap_pi(sk
)->retry_count
++;
946 __mod_monitor_timer();
948 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
952 static void l2cap_retrans_timeout(unsigned long arg
)
954 struct sock
*sk
= (void *) arg
;
959 l2cap_pi(sk
)->retry_count
= 1;
960 __mod_monitor_timer();
962 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
964 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
968 static void l2cap_drop_acked_frames(struct sock
*sk
)
972 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
973 l2cap_pi(sk
)->unacked_frames
) {
974 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
977 skb
= skb_dequeue(TX_QUEUE(sk
));
980 l2cap_pi(sk
)->unacked_frames
--;
983 if (!l2cap_pi(sk
)->unacked_frames
)
984 del_timer(&l2cap_pi(sk
)->retrans_timer
);
987 void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
989 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
990 struct hci_conn
*hcon
= pi
->conn
->hcon
;
993 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
995 if (!pi
->flushable
&& lmp_no_flush_capable(hcon
->hdev
))
996 flags
= ACL_START_NO_FLUSH
;
1000 hci_send_acl(hcon
, skb
, flags
);
1003 void l2cap_streaming_send(struct sock
*sk
)
1005 struct sk_buff
*skb
;
1006 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1009 while ((skb
= skb_dequeue(TX_QUEUE(sk
)))) {
1010 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1011 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1012 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1014 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1015 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1016 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1019 l2cap_do_send(sk
, skb
);
1021 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1025 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1027 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1028 struct sk_buff
*skb
, *tx_skb
;
1031 skb
= skb_peek(TX_QUEUE(sk
));
1036 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1039 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1042 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1044 if (pi
->remote_max_tx
&&
1045 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1046 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1050 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1051 bt_cb(skb
)->retries
++;
1052 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1054 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1055 control
|= L2CAP_CTRL_FINAL
;
1056 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1059 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1060 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1062 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1064 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1065 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1066 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1069 l2cap_do_send(sk
, tx_skb
);
1072 int l2cap_ertm_send(struct sock
*sk
)
1074 struct sk_buff
*skb
, *tx_skb
;
1075 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1079 if (sk
->sk_state
!= BT_CONNECTED
)
1082 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1084 if (pi
->remote_max_tx
&&
1085 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1086 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1090 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1092 bt_cb(skb
)->retries
++;
1094 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1095 control
&= L2CAP_CTRL_SAR
;
1097 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1098 control
|= L2CAP_CTRL_FINAL
;
1099 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1101 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1102 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1103 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1106 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1107 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1108 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1111 l2cap_do_send(sk
, tx_skb
);
1113 __mod_retrans_timer();
1115 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1116 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1118 if (bt_cb(skb
)->retries
== 1)
1119 pi
->unacked_frames
++;
1123 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1124 sk
->sk_send_head
= NULL
;
1126 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1134 static int l2cap_retransmit_frames(struct sock
*sk
)
1136 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1139 if (!skb_queue_empty(TX_QUEUE(sk
)))
1140 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1142 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1143 ret
= l2cap_ertm_send(sk
);
1147 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1149 struct sock
*sk
= (struct sock
*)pi
;
1152 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1154 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1155 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1156 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1157 l2cap_send_sframe(pi
, control
);
1161 if (l2cap_ertm_send(sk
) > 0)
1164 control
|= L2CAP_SUPER_RCV_READY
;
1165 l2cap_send_sframe(pi
, control
);
1168 static void l2cap_send_srejtail(struct sock
*sk
)
1170 struct srej_list
*tail
;
1173 control
= L2CAP_SUPER_SELECT_REJECT
;
1174 control
|= L2CAP_CTRL_FINAL
;
1176 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1177 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1179 l2cap_send_sframe(l2cap_pi(sk
), control
);
1182 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1184 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1185 struct sk_buff
**frag
;
1188 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1194 /* Continuation fragments (no L2CAP header) */
1195 frag
= &skb_shinfo(skb
)->frag_list
;
1197 count
= min_t(unsigned int, conn
->mtu
, len
);
1199 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1202 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1208 frag
= &(*frag
)->next
;
1214 struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1216 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1217 struct sk_buff
*skb
;
1218 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1219 struct l2cap_hdr
*lh
;
1221 BT_DBG("sk %p len %d", sk
, (int)len
);
1223 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1224 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1225 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1227 return ERR_PTR(err
);
1229 /* Create L2CAP header */
1230 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1231 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1232 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1233 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1235 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1236 if (unlikely(err
< 0)) {
1238 return ERR_PTR(err
);
1243 struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1245 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1246 struct sk_buff
*skb
;
1247 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1248 struct l2cap_hdr
*lh
;
1250 BT_DBG("sk %p len %d", sk
, (int)len
);
1252 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1253 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1254 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1256 return ERR_PTR(err
);
1258 /* Create L2CAP header */
1259 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1260 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1261 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1263 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1264 if (unlikely(err
< 0)) {
1266 return ERR_PTR(err
);
1271 struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1273 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1274 struct sk_buff
*skb
;
1275 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1276 struct l2cap_hdr
*lh
;
1278 BT_DBG("sk %p len %d", sk
, (int)len
);
1281 return ERR_PTR(-ENOTCONN
);
1286 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1289 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1290 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1291 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1293 return ERR_PTR(err
);
1295 /* Create L2CAP header */
1296 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1297 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1298 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1299 put_unaligned_le16(control
, skb_put(skb
, 2));
1301 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1303 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1304 if (unlikely(err
< 0)) {
1306 return ERR_PTR(err
);
1309 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1310 put_unaligned_le16(0, skb_put(skb
, 2));
1312 bt_cb(skb
)->retries
= 0;
1316 int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1318 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1319 struct sk_buff
*skb
;
1320 struct sk_buff_head sar_queue
;
1324 skb_queue_head_init(&sar_queue
);
1325 control
= L2CAP_SDU_START
;
1326 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1328 return PTR_ERR(skb
);
1330 __skb_queue_tail(&sar_queue
, skb
);
1331 len
-= pi
->remote_mps
;
1332 size
+= pi
->remote_mps
;
1337 if (len
> pi
->remote_mps
) {
1338 control
= L2CAP_SDU_CONTINUE
;
1339 buflen
= pi
->remote_mps
;
1341 control
= L2CAP_SDU_END
;
1345 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1347 skb_queue_purge(&sar_queue
);
1348 return PTR_ERR(skb
);
1351 __skb_queue_tail(&sar_queue
, skb
);
1355 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1356 if (sk
->sk_send_head
== NULL
)
1357 sk
->sk_send_head
= sar_queue
.next
;
1362 static void l2cap_chan_ready(struct sock
*sk
)
1364 struct sock
*parent
= bt_sk(sk
)->parent
;
1366 BT_DBG("sk %p, parent %p", sk
, parent
);
1368 l2cap_pi(sk
)->conf_state
= 0;
1369 l2cap_sock_clear_timer(sk
);
1372 /* Outgoing channel.
1373 * Wake up socket sleeping on connect.
1375 sk
->sk_state
= BT_CONNECTED
;
1376 sk
->sk_state_change(sk
);
1378 /* Incoming channel.
1379 * Wake up socket sleeping on accept.
1381 parent
->sk_data_ready(parent
, 0);
1385 /* Copy frame to all raw sockets on that connection */
1386 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1388 struct l2cap_chan_list
*l
= &conn
->chan_list
;
1389 struct sk_buff
*nskb
;
1392 BT_DBG("conn %p", conn
);
1394 read_lock(&l
->lock
);
1395 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
1396 if (sk
->sk_type
!= SOCK_RAW
)
1399 /* Don't send frame to the socket it came from */
1402 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1406 if (sock_queue_rcv_skb(sk
, nskb
))
1409 read_unlock(&l
->lock
);
1412 /* ---- L2CAP signalling commands ---- */
1413 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1414 u8 code
, u8 ident
, u16 dlen
, void *data
)
1416 struct sk_buff
*skb
, **frag
;
1417 struct l2cap_cmd_hdr
*cmd
;
1418 struct l2cap_hdr
*lh
;
1421 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1422 conn
, code
, ident
, dlen
);
1424 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1425 count
= min_t(unsigned int, conn
->mtu
, len
);
1427 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1431 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1432 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1434 if (conn
->hcon
->type
== LE_LINK
)
1435 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1437 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1439 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1442 cmd
->len
= cpu_to_le16(dlen
);
1445 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1446 memcpy(skb_put(skb
, count
), data
, count
);
1452 /* Continuation fragments (no L2CAP header) */
1453 frag
= &skb_shinfo(skb
)->frag_list
;
1455 count
= min_t(unsigned int, conn
->mtu
, len
);
1457 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1461 memcpy(skb_put(*frag
, count
), data
, count
);
1466 frag
= &(*frag
)->next
;
1476 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1478 struct l2cap_conf_opt
*opt
= *ptr
;
1481 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1489 *val
= *((u8
*) opt
->val
);
1493 *val
= get_unaligned_le16(opt
->val
);
1497 *val
= get_unaligned_le32(opt
->val
);
1501 *val
= (unsigned long) opt
->val
;
1505 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1509 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1511 struct l2cap_conf_opt
*opt
= *ptr
;
1513 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1520 *((u8
*) opt
->val
) = val
;
1524 put_unaligned_le16(val
, opt
->val
);
1528 put_unaligned_le32(val
, opt
->val
);
1532 memcpy(opt
->val
, (void *) val
, len
);
1536 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
/* ERTM ack timer: send a pending acknowledgement for the channel. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1548 static inline void l2cap_ertm_init(struct sock
*sk
)
1550 l2cap_pi(sk
)->expected_ack_seq
= 0;
1551 l2cap_pi(sk
)->unacked_frames
= 0;
1552 l2cap_pi(sk
)->buffer_seq
= 0;
1553 l2cap_pi(sk
)->num_acked
= 0;
1554 l2cap_pi(sk
)->frames_sent
= 0;
1556 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
1557 l2cap_retrans_timeout
, (unsigned long) sk
);
1558 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
1559 l2cap_monitor_timeout
, (unsigned long) sk
);
1560 setup_timer(&l2cap_pi(sk
)->ack_timer
,
1561 l2cap_ack_timeout
, (unsigned long) sk
);
1563 __skb_queue_head_init(SREJ_QUEUE(sk
));
1564 __skb_queue_head_init(BUSY_QUEUE(sk
));
1566 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
1568 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1571 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1574 case L2CAP_MODE_STREAMING
:
1575 case L2CAP_MODE_ERTM
:
1576 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1580 return L2CAP_MODE_BASIC
;
1584 int l2cap_build_conf_req(struct sock
*sk
, void *data
)
1586 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1587 struct l2cap_conf_req
*req
= data
;
1588 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
1589 void *ptr
= req
->data
;
1591 BT_DBG("sk %p", sk
);
1593 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
1597 case L2CAP_MODE_STREAMING
:
1598 case L2CAP_MODE_ERTM
:
1599 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
1604 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
1609 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
1610 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
1613 case L2CAP_MODE_BASIC
:
1614 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
1615 !(pi
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
1618 rfc
.mode
= L2CAP_MODE_BASIC
;
1620 rfc
.max_transmit
= 0;
1621 rfc
.retrans_timeout
= 0;
1622 rfc
.monitor_timeout
= 0;
1623 rfc
.max_pdu_size
= 0;
1625 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1626 (unsigned long) &rfc
);
1629 case L2CAP_MODE_ERTM
:
1630 rfc
.mode
= L2CAP_MODE_ERTM
;
1631 rfc
.txwin_size
= pi
->tx_win
;
1632 rfc
.max_transmit
= pi
->max_tx
;
1633 rfc
.retrans_timeout
= 0;
1634 rfc
.monitor_timeout
= 0;
1635 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1636 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
1637 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1639 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1640 (unsigned long) &rfc
);
1642 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1645 if (pi
->fcs
== L2CAP_FCS_NONE
||
1646 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1647 pi
->fcs
= L2CAP_FCS_NONE
;
1648 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
1652 case L2CAP_MODE_STREAMING
:
1653 rfc
.mode
= L2CAP_MODE_STREAMING
;
1655 rfc
.max_transmit
= 0;
1656 rfc
.retrans_timeout
= 0;
1657 rfc
.monitor_timeout
= 0;
1658 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1659 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
1660 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1662 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1663 (unsigned long) &rfc
);
1665 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1668 if (pi
->fcs
== L2CAP_FCS_NONE
||
1669 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1670 pi
->fcs
= L2CAP_FCS_NONE
;
1671 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
1676 req
->dcid
= cpu_to_le16(pi
->dcid
);
1677 req
->flags
= cpu_to_le16(0);
1682 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
1684 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1685 struct l2cap_conf_rsp
*rsp
= data
;
1686 void *ptr
= rsp
->data
;
1687 void *req
= pi
->conf_req
;
1688 int len
= pi
->conf_len
;
1689 int type
, hint
, olen
;
1691 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
1692 u16 mtu
= L2CAP_DEFAULT_MTU
;
1693 u16 result
= L2CAP_CONF_SUCCESS
;
1695 BT_DBG("sk %p", sk
);
1697 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1698 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
1700 hint
= type
& L2CAP_CONF_HINT
;
1701 type
&= L2CAP_CONF_MASK
;
1704 case L2CAP_CONF_MTU
:
1708 case L2CAP_CONF_FLUSH_TO
:
1712 case L2CAP_CONF_QOS
:
1715 case L2CAP_CONF_RFC
:
1716 if (olen
== sizeof(rfc
))
1717 memcpy(&rfc
, (void *) val
, olen
);
1720 case L2CAP_CONF_FCS
:
1721 if (val
== L2CAP_FCS_NONE
)
1722 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
1730 result
= L2CAP_CONF_UNKNOWN
;
1731 *((u8
*) ptr
++) = type
;
1736 if (pi
->num_conf_rsp
|| pi
->num_conf_req
> 1)
1740 case L2CAP_MODE_STREAMING
:
1741 case L2CAP_MODE_ERTM
:
1742 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
1743 pi
->mode
= l2cap_select_mode(rfc
.mode
,
1744 pi
->conn
->feat_mask
);
1748 if (pi
->mode
!= rfc
.mode
)
1749 return -ECONNREFUSED
;
1755 if (pi
->mode
!= rfc
.mode
) {
1756 result
= L2CAP_CONF_UNACCEPT
;
1757 rfc
.mode
= pi
->mode
;
1759 if (pi
->num_conf_rsp
== 1)
1760 return -ECONNREFUSED
;
1762 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1763 sizeof(rfc
), (unsigned long) &rfc
);
1767 if (result
== L2CAP_CONF_SUCCESS
) {
1768 /* Configure output options and let the other side know
1769 * which ones we don't like. */
1771 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
1772 result
= L2CAP_CONF_UNACCEPT
;
1775 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
1777 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
1780 case L2CAP_MODE_BASIC
:
1781 pi
->fcs
= L2CAP_FCS_NONE
;
1782 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1785 case L2CAP_MODE_ERTM
:
1786 pi
->remote_tx_win
= rfc
.txwin_size
;
1787 pi
->remote_max_tx
= rfc
.max_transmit
;
1789 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
1790 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1792 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1794 rfc
.retrans_timeout
=
1795 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
1796 rfc
.monitor_timeout
=
1797 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
1799 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1801 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1802 sizeof(rfc
), (unsigned long) &rfc
);
1806 case L2CAP_MODE_STREAMING
:
1807 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
1808 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1810 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1812 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1814 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1815 sizeof(rfc
), (unsigned long) &rfc
);
1820 result
= L2CAP_CONF_UNACCEPT
;
1822 memset(&rfc
, 0, sizeof(rfc
));
1823 rfc
.mode
= pi
->mode
;
1826 if (result
== L2CAP_CONF_SUCCESS
)
1827 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
1829 rsp
->scid
= cpu_to_le16(pi
->dcid
);
1830 rsp
->result
= cpu_to_le16(result
);
1831 rsp
->flags
= cpu_to_le16(0x0000);
1836 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
1838 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1839 struct l2cap_conf_req
*req
= data
;
1840 void *ptr
= req
->data
;
1843 struct l2cap_conf_rfc rfc
;
1845 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
1847 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1848 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
1851 case L2CAP_CONF_MTU
:
1852 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
1853 *result
= L2CAP_CONF_UNACCEPT
;
1854 pi
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
1857 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
1860 case L2CAP_CONF_FLUSH_TO
:
1862 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
1866 case L2CAP_CONF_RFC
:
1867 if (olen
== sizeof(rfc
))
1868 memcpy(&rfc
, (void *)val
, olen
);
1870 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
1871 rfc
.mode
!= pi
->mode
)
1872 return -ECONNREFUSED
;
1876 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1877 sizeof(rfc
), (unsigned long) &rfc
);
1882 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
1883 return -ECONNREFUSED
;
1885 pi
->mode
= rfc
.mode
;
1887 if (*result
== L2CAP_CONF_SUCCESS
) {
1889 case L2CAP_MODE_ERTM
:
1890 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
1891 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
1892 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1894 case L2CAP_MODE_STREAMING
:
1895 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1899 req
->dcid
= cpu_to_le16(pi
->dcid
);
1900 req
->flags
= cpu_to_le16(0x0000);
1905 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
1907 struct l2cap_conf_rsp
*rsp
= data
;
1908 void *ptr
= rsp
->data
;
1910 BT_DBG("sk %p", sk
);
1912 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1913 rsp
->result
= cpu_to_le16(result
);
1914 rsp
->flags
= cpu_to_le16(flags
);
1919 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
1921 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1924 struct l2cap_conf_rfc rfc
;
1926 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
1928 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
1931 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1932 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
1935 case L2CAP_CONF_RFC
:
1936 if (olen
== sizeof(rfc
))
1937 memcpy(&rfc
, (void *)val
, olen
);
1944 case L2CAP_MODE_ERTM
:
1945 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
1946 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
1947 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1949 case L2CAP_MODE_STREAMING
:
1950 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1954 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
1956 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
1958 if (rej
->reason
!= 0x0000)
1961 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
1962 cmd
->ident
== conn
->info_ident
) {
1963 del_timer(&conn
->info_timer
);
1965 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1966 conn
->info_ident
= 0;
1968 l2cap_conn_start(conn
);
1974 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
1976 struct l2cap_chan_list
*list
= &conn
->chan_list
;
1977 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
1978 struct l2cap_conn_rsp rsp
;
1979 struct sock
*parent
, *sk
= NULL
;
1980 int result
, status
= L2CAP_CS_NO_INFO
;
1982 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
1983 __le16 psm
= req
->psm
;
1985 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
1987 /* Check if we have socket listening on psm */
1988 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
1990 result
= L2CAP_CR_BAD_PSM
;
1994 bh_lock_sock(parent
);
1996 /* Check if the ACL is secure enough (if not SDP) */
1997 if (psm
!= cpu_to_le16(0x0001) &&
1998 !hci_conn_check_link_mode(conn
->hcon
)) {
1999 conn
->disc_reason
= 0x05;
2000 result
= L2CAP_CR_SEC_BLOCK
;
2004 result
= L2CAP_CR_NO_MEM
;
2006 /* Check for backlog size */
2007 if (sk_acceptq_is_full(parent
)) {
2008 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2012 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2016 write_lock_bh(&list
->lock
);
2018 /* Check if we already have channel with that dcid */
2019 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2020 write_unlock_bh(&list
->lock
);
2021 sock_set_flag(sk
, SOCK_ZAPPED
);
2022 l2cap_sock_kill(sk
);
2026 hci_conn_hold(conn
->hcon
);
2028 l2cap_sock_init(sk
, parent
);
2029 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2030 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2031 l2cap_pi(sk
)->psm
= psm
;
2032 l2cap_pi(sk
)->dcid
= scid
;
2034 bt_accept_enqueue(parent
, sk
);
2036 __l2cap_chan_add(conn
, sk
);
2037 dcid
= l2cap_pi(sk
)->scid
;
2039 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2041 l2cap_pi(sk
)->ident
= cmd
->ident
;
2043 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2044 if (l2cap_check_security(sk
)) {
2045 if (bt_sk(sk
)->defer_setup
) {
2046 sk
->sk_state
= BT_CONNECT2
;
2047 result
= L2CAP_CR_PEND
;
2048 status
= L2CAP_CS_AUTHOR_PEND
;
2049 parent
->sk_data_ready(parent
, 0);
2051 sk
->sk_state
= BT_CONFIG
;
2052 result
= L2CAP_CR_SUCCESS
;
2053 status
= L2CAP_CS_NO_INFO
;
2056 sk
->sk_state
= BT_CONNECT2
;
2057 result
= L2CAP_CR_PEND
;
2058 status
= L2CAP_CS_AUTHEN_PEND
;
2061 sk
->sk_state
= BT_CONNECT2
;
2062 result
= L2CAP_CR_PEND
;
2063 status
= L2CAP_CS_NO_INFO
;
2066 write_unlock_bh(&list
->lock
);
2069 bh_unlock_sock(parent
);
2072 rsp
.scid
= cpu_to_le16(scid
);
2073 rsp
.dcid
= cpu_to_le16(dcid
);
2074 rsp
.result
= cpu_to_le16(result
);
2075 rsp
.status
= cpu_to_le16(status
);
2076 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2078 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2079 struct l2cap_info_req info
;
2080 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2082 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2083 conn
->info_ident
= l2cap_get_ident(conn
);
2085 mod_timer(&conn
->info_timer
, jiffies
+
2086 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2088 l2cap_send_cmd(conn
, conn
->info_ident
,
2089 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2092 if (sk
&& !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2093 result
== L2CAP_CR_SUCCESS
) {
2095 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2096 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2097 l2cap_build_conf_req(sk
, buf
), buf
);
2098 l2cap_pi(sk
)->num_conf_req
++;
2104 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2106 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2107 u16 scid
, dcid
, result
, status
;
2111 scid
= __le16_to_cpu(rsp
->scid
);
2112 dcid
= __le16_to_cpu(rsp
->dcid
);
2113 result
= __le16_to_cpu(rsp
->result
);
2114 status
= __le16_to_cpu(rsp
->status
);
2116 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2119 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2123 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2129 case L2CAP_CR_SUCCESS
:
2130 sk
->sk_state
= BT_CONFIG
;
2131 l2cap_pi(sk
)->ident
= 0;
2132 l2cap_pi(sk
)->dcid
= dcid
;
2133 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2135 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
2138 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2140 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2141 l2cap_build_conf_req(sk
, req
), req
);
2142 l2cap_pi(sk
)->num_conf_req
++;
2146 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2150 /* don't delete l2cap channel if sk is owned by user */
2151 if (sock_owned_by_user(sk
)) {
2152 sk
->sk_state
= BT_DISCONN
;
2153 l2cap_sock_clear_timer(sk
);
2154 l2cap_sock_set_timer(sk
, HZ
/ 5);
2158 l2cap_chan_del(sk
, ECONNREFUSED
);
2166 static inline void set_default_fcs(struct l2cap_pinfo
*pi
)
2168 /* FCS is enabled only in ERTM or streaming mode, if one or both
2171 if (pi
->mode
!= L2CAP_MODE_ERTM
&& pi
->mode
!= L2CAP_MODE_STREAMING
)
2172 pi
->fcs
= L2CAP_FCS_NONE
;
2173 else if (!(pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
2174 pi
->fcs
= L2CAP_FCS_CRC16
;
2177 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2179 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2185 dcid
= __le16_to_cpu(req
->dcid
);
2186 flags
= __le16_to_cpu(req
->flags
);
2188 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2190 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2194 if (sk
->sk_state
!= BT_CONFIG
) {
2195 struct l2cap_cmd_rej rej
;
2197 rej
.reason
= cpu_to_le16(0x0002);
2198 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2203 /* Reject if config buffer is too small. */
2204 len
= cmd_len
- sizeof(*req
);
2205 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2206 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2207 l2cap_build_conf_rsp(sk
, rsp
,
2208 L2CAP_CONF_REJECT
, flags
), rsp
);
2213 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2214 l2cap_pi(sk
)->conf_len
+= len
;
2216 if (flags
& 0x0001) {
2217 /* Incomplete config. Send empty response. */
2218 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2219 l2cap_build_conf_rsp(sk
, rsp
,
2220 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2224 /* Complete config. */
2225 len
= l2cap_parse_conf_req(sk
, rsp
);
2227 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2231 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2232 l2cap_pi(sk
)->num_conf_rsp
++;
2234 /* Reset config buffer. */
2235 l2cap_pi(sk
)->conf_len
= 0;
2237 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2240 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2241 set_default_fcs(l2cap_pi(sk
));
2243 sk
->sk_state
= BT_CONNECTED
;
2245 l2cap_pi(sk
)->next_tx_seq
= 0;
2246 l2cap_pi(sk
)->expected_tx_seq
= 0;
2247 __skb_queue_head_init(TX_QUEUE(sk
));
2248 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2249 l2cap_ertm_init(sk
);
2251 l2cap_chan_ready(sk
);
2255 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2257 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2258 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2259 l2cap_build_conf_req(sk
, buf
), buf
);
2260 l2cap_pi(sk
)->num_conf_req
++;
2268 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2270 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2271 u16 scid
, flags
, result
;
2273 int len
= cmd
->len
- sizeof(*rsp
);
2275 scid
= __le16_to_cpu(rsp
->scid
);
2276 flags
= __le16_to_cpu(rsp
->flags
);
2277 result
= __le16_to_cpu(rsp
->result
);
2279 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2280 scid
, flags
, result
);
2282 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2287 case L2CAP_CONF_SUCCESS
:
2288 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
2291 case L2CAP_CONF_UNACCEPT
:
2292 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2295 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2296 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2300 /* throw out any old stored conf requests */
2301 result
= L2CAP_CONF_SUCCESS
;
2302 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2305 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2309 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2310 L2CAP_CONF_REQ
, len
, req
);
2311 l2cap_pi(sk
)->num_conf_req
++;
2312 if (result
!= L2CAP_CONF_SUCCESS
)
2318 sk
->sk_err
= ECONNRESET
;
2319 l2cap_sock_set_timer(sk
, HZ
* 5);
2320 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2327 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2329 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2330 set_default_fcs(l2cap_pi(sk
));
2332 sk
->sk_state
= BT_CONNECTED
;
2333 l2cap_pi(sk
)->next_tx_seq
= 0;
2334 l2cap_pi(sk
)->expected_tx_seq
= 0;
2335 __skb_queue_head_init(TX_QUEUE(sk
));
2336 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2337 l2cap_ertm_init(sk
);
2339 l2cap_chan_ready(sk
);
2347 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2349 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2350 struct l2cap_disconn_rsp rsp
;
2354 scid
= __le16_to_cpu(req
->scid
);
2355 dcid
= __le16_to_cpu(req
->dcid
);
2357 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2359 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2363 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2364 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2365 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2367 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2369 /* don't delete l2cap channel if sk is owned by user */
2370 if (sock_owned_by_user(sk
)) {
2371 sk
->sk_state
= BT_DISCONN
;
2372 l2cap_sock_clear_timer(sk
);
2373 l2cap_sock_set_timer(sk
, HZ
/ 5);
2378 l2cap_chan_del(sk
, ECONNRESET
);
2381 l2cap_sock_kill(sk
);
2385 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2387 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2391 scid
= __le16_to_cpu(rsp
->scid
);
2392 dcid
= __le16_to_cpu(rsp
->dcid
);
2394 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2396 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2400 /* don't delete l2cap channel if sk is owned by user */
2401 if (sock_owned_by_user(sk
)) {
2402 sk
->sk_state
= BT_DISCONN
;
2403 l2cap_sock_clear_timer(sk
);
2404 l2cap_sock_set_timer(sk
, HZ
/ 5);
2409 l2cap_chan_del(sk
, 0);
2412 l2cap_sock_kill(sk
);
2416 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2418 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2421 type
= __le16_to_cpu(req
->type
);
2423 BT_DBG("type 0x%4.4x", type
);
2425 if (type
== L2CAP_IT_FEAT_MASK
) {
2427 u32 feat_mask
= l2cap_feat_mask
;
2428 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2429 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2430 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2432 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2434 put_unaligned_le32(feat_mask
, rsp
->data
);
2435 l2cap_send_cmd(conn
, cmd
->ident
,
2436 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2437 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2439 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2440 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2441 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2442 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2443 l2cap_send_cmd(conn
, cmd
->ident
,
2444 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2446 struct l2cap_info_rsp rsp
;
2447 rsp
.type
= cpu_to_le16(type
);
2448 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2449 l2cap_send_cmd(conn
, cmd
->ident
,
2450 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2456 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2458 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2461 type
= __le16_to_cpu(rsp
->type
);
2462 result
= __le16_to_cpu(rsp
->result
);
2464 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2466 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2467 if (cmd
->ident
!= conn
->info_ident
||
2468 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
2471 del_timer(&conn
->info_timer
);
2473 if (result
!= L2CAP_IR_SUCCESS
) {
2474 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2475 conn
->info_ident
= 0;
2477 l2cap_conn_start(conn
);
2482 if (type
== L2CAP_IT_FEAT_MASK
) {
2483 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2485 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2486 struct l2cap_info_req req
;
2487 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2489 conn
->info_ident
= l2cap_get_ident(conn
);
2491 l2cap_send_cmd(conn
, conn
->info_ident
,
2492 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2494 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2495 conn
->info_ident
= 0;
2497 l2cap_conn_start(conn
);
2499 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2500 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2501 conn
->info_ident
= 0;
2503 l2cap_conn_start(conn
);
2509 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
2514 if (min
> max
|| min
< 6 || max
> 3200)
2517 if (to_multiplier
< 10 || to_multiplier
> 3200)
2520 if (max
>= to_multiplier
* 8)
2523 max_latency
= (to_multiplier
* 8 / max
) - 1;
2524 if (latency
> 499 || latency
> max_latency
)
2530 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
2531 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2533 struct hci_conn
*hcon
= conn
->hcon
;
2534 struct l2cap_conn_param_update_req
*req
;
2535 struct l2cap_conn_param_update_rsp rsp
;
2536 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
2539 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
2542 cmd_len
= __le16_to_cpu(cmd
->len
);
2543 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
2546 req
= (struct l2cap_conn_param_update_req
*) data
;
2547 min
= __le16_to_cpu(req
->min
);
2548 max
= __le16_to_cpu(req
->max
);
2549 latency
= __le16_to_cpu(req
->latency
);
2550 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
2552 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2553 min
, max
, latency
, to_multiplier
);
2555 memset(&rsp
, 0, sizeof(rsp
));
2557 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
2559 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
2561 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
2563 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
2567 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
2572 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
2573 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2577 switch (cmd
->code
) {
2578 case L2CAP_COMMAND_REJ
:
2579 l2cap_command_rej(conn
, cmd
, data
);
2582 case L2CAP_CONN_REQ
:
2583 err
= l2cap_connect_req(conn
, cmd
, data
);
2586 case L2CAP_CONN_RSP
:
2587 err
= l2cap_connect_rsp(conn
, cmd
, data
);
2590 case L2CAP_CONF_REQ
:
2591 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
2594 case L2CAP_CONF_RSP
:
2595 err
= l2cap_config_rsp(conn
, cmd
, data
);
2598 case L2CAP_DISCONN_REQ
:
2599 err
= l2cap_disconnect_req(conn
, cmd
, data
);
2602 case L2CAP_DISCONN_RSP
:
2603 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
2606 case L2CAP_ECHO_REQ
:
2607 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
2610 case L2CAP_ECHO_RSP
:
2613 case L2CAP_INFO_REQ
:
2614 err
= l2cap_information_req(conn
, cmd
, data
);
2617 case L2CAP_INFO_RSP
:
2618 err
= l2cap_information_rsp(conn
, cmd
, data
);
2622 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
2630 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
2631 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2633 switch (cmd
->code
) {
2634 case L2CAP_COMMAND_REJ
:
2637 case L2CAP_CONN_PARAM_UPDATE_REQ
:
2638 return l2cap_conn_param_update_req(conn
, cmd
, data
);
2640 case L2CAP_CONN_PARAM_UPDATE_RSP
:
2644 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
2649 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
2650 struct sk_buff
*skb
)
2652 u8
*data
= skb
->data
;
2654 struct l2cap_cmd_hdr cmd
;
2657 l2cap_raw_recv(conn
, skb
);
2659 while (len
>= L2CAP_CMD_HDR_SIZE
) {
2661 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
2662 data
+= L2CAP_CMD_HDR_SIZE
;
2663 len
-= L2CAP_CMD_HDR_SIZE
;
2665 cmd_len
= le16_to_cpu(cmd
.len
);
2667 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
2669 if (cmd_len
> len
|| !cmd
.ident
) {
2670 BT_DBG("corrupted command");
2674 if (conn
->hcon
->type
== LE_LINK
)
2675 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
2677 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
2680 struct l2cap_cmd_rej rej
;
2682 BT_ERR("Wrong link type (%d)", err
);
2684 /* FIXME: Map err to a valid reason */
2685 rej
.reason
= cpu_to_le16(0);
2686 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
2696 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
2698 u16 our_fcs
, rcv_fcs
;
2699 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
2701 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
2702 skb_trim(skb
, skb
->len
- 2);
2703 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
2704 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
2706 if (our_fcs
!= rcv_fcs
)
2712 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
2714 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2717 pi
->frames_sent
= 0;
2719 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2721 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
2722 control
|= L2CAP_SUPER_RCV_NOT_READY
;
2723 l2cap_send_sframe(pi
, control
);
2724 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
2727 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
2728 l2cap_retransmit_frames(sk
);
2730 l2cap_ertm_send(sk
);
2732 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
2733 pi
->frames_sent
== 0) {
2734 control
|= L2CAP_SUPER_RCV_READY
;
2735 l2cap_send_sframe(pi
, control
);
2739 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
2741 struct sk_buff
*next_skb
;
2742 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2743 int tx_seq_offset
, next_tx_seq_offset
;
2745 bt_cb(skb
)->tx_seq
= tx_seq
;
2746 bt_cb(skb
)->sar
= sar
;
2748 next_skb
= skb_peek(SREJ_QUEUE(sk
));
2750 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
2754 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
2755 if (tx_seq_offset
< 0)
2756 tx_seq_offset
+= 64;
2759 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
2762 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
2763 pi
->buffer_seq
) % 64;
2764 if (next_tx_seq_offset
< 0)
2765 next_tx_seq_offset
+= 64;
2767 if (next_tx_seq_offset
> tx_seq_offset
) {
2768 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
2772 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
2775 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
2777 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
/*
 * l2cap_ertm_reassembly_sdu() - reassemble one ERTM I-frame into the
 * in-progress SDU, dispatching on the frame's SAR (segmentation and
 * reassembly) bits, and deliver a completed SDU to the socket via
 * sock_queue_rcv_skb().
 * NOTE(review): this extraction is lossy — statements are split across
 * rows and structural lines (braces, gotos, returns, case exits) are
 * missing between the embedded original line numbers. Comments only.
 */
2782 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
2784 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2785 struct sk_buff
*_skb
;
/* Dispatch on the SAR field of the I-frame control word. */
2788 switch (control
& L2CAP_CTRL_SAR
) {
/* Unsegmented SDU: deliver directly; a pending SAR session means
 * protocol violation (handling of that branch not visible here). */
2789 case L2CAP_SDU_UNSEGMENTED
:
2790 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
2793 err
= sock_queue_rcv_skb(sk
, skb
);
/* Start segment: first two payload bytes carry the total SDU length
 * (little-endian), which must not exceed the incoming MTU. */
2799 case L2CAP_SDU_START
:
2800 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
2803 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
2805 if (pi
->sdu_len
> pi
->imtu
)
2808 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
2812 /* pull sdu_len bytes only after alloc, because of Local Busy
2813 * condition we have to be sure that this will be executed
2814 * only once, i.e., when alloc does not fail */
2817 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2819 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
2820 pi
->partial_sdu_len
= skb
->len
;
/* Continuation segment: append payload; running length must stay
 * within the advertised sdu_len. */
2823 case L2CAP_SDU_CONTINUE
:
2824 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
2830 pi
->partial_sdu_len
+= skb
->len
;
2831 if (pi
->partial_sdu_len
> pi
->sdu_len
)
2834 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
/* End segment (presumably L2CAP_SDU_END — case label lost in this
 * extraction): finish the SDU and queue it to the socket. SAR_RETRY
 * marks a delivery retried after a Local Busy condition so the final
 * fragment is appended only once. */
2839 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
2845 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
2846 pi
->partial_sdu_len
+= skb
->len
;
2848 if (pi
->partial_sdu_len
> pi
->imtu
)
2851 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
2854 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
/* Clone so the assembled SDU survives if sock_queue_rcv_skb() fails
 * and the delivery must be retried later. */
2857 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
2859 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2863 err
= sock_queue_rcv_skb(sk
, _skb
);
2866 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
/* Successful delivery: clear the SAR session state. */
2870 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
2871 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
/* Error path (label lost in extraction): tear the channel down. */
2885 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
/*
 * l2cap_try_push_rx_skb() - drain the local-busy backlog (BUSY_QUEUE)
 * through the ERTM reassembly path; on success exit the Local Busy
 * state and tell the peer we are ready again (RR with Poll bit).
 * NOTE(review): lossy extraction — error/early-return lines between the
 * embedded original line numbers are missing. Comments only.
 */
2890 static int l2cap_try_push_rx_skb(struct sock
*sk
)
2892 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2893 struct sk_buff
*skb
;
/* Re-feed each deferred frame; on failure the frame is put back at the
 * head of the queue (we are presumably still busy). */
2897 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
2898 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
2899 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
2901 skb_queue_head(BUSY_QUEUE(sk
), skb
);
/* Frame accepted: advance the receive window (modulo-64 seq space). */
2905 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
2908 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
/* We previously sent RNR: send RR with the Poll bit to signal the peer
 * that local busy has cleared, then arm the monitor timer and wait for
 * the Final bit (WAIT_F). */
2911 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2912 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
2913 l2cap_send_sframe(pi
, control
);
2914 l2cap_pi(sk
)->retry_count
= 1;
2916 del_timer(&pi
->retrans_timer
);
2917 __mod_monitor_timer();
2919 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
2922 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
2923 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
2925 BT_DBG("sk %p, Exit local busy", sk
);
/*
 * l2cap_busy_work() - workqueue handler (queued on _busy_wq) that keeps
 * retrying to drain the local-busy backlog, sleeping HZ/5 between
 * attempts, until the queue empties, a signal arrives, a socket error
 * is set, or L2CAP_LOCAL_BUSY_TRIES is exceeded (then disconnect with
 * EBUSY). Classic wait-queue loop: add_wait_queue /
 * set_current_state(TASK_INTERRUPTIBLE) / schedule_timeout.
 * NOTE(review): lossy extraction — break/locking lines between the
 * embedded original line numbers are missing. Comments only.
 */
2930 static void l2cap_busy_work(struct work_struct
*work
)
2932 DECLARE_WAITQUEUE(wait
, current
);
/* Recover the pinfo from the embedded work item; the pinfo doubles as
 * the struct sock (first member layout). */
2933 struct l2cap_pinfo
*pi
=
2934 container_of(work
, struct l2cap_pinfo
, busy_work
);
2935 struct sock
*sk
= (struct sock
*)pi
;
2936 int n_tries
= 0, timeo
= HZ
/5, err
;
2937 struct sk_buff
*skb
;
2941 add_wait_queue(sk_sleep(sk
), &wait
);
2942 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
2943 set_current_state(TASK_INTERRUPTIBLE
);
/* Give up after too many attempts: force a disconnect. */
2945 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
2947 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
2954 if (signal_pending(current
)) {
2955 err
= sock_intr_errno(timeo
);
2960 timeo
= schedule_timeout(timeo
);
2963 err
= sock_error(sk
);
/* Retry pushing the backlog; zero means local busy fully cleared. */
2967 if (l2cap_try_push_rx_skb(sk
) == 0)
2971 set_current_state(TASK_RUNNING
);
2972 remove_wait_queue(sk_sleep(sk
), &wait
);
/*
 * l2cap_push_rx_skb() - hand a received I-frame to reassembly, entering
 * the Local Busy state when delivery fails (receive buffer full):
 * queue the frame, send RNR to the peer, and kick the busy workqueue.
 * NOTE(review): lossy extraction — return/declaration lines between the
 * embedded original line numbers are missing (e.g. the declarations of
 * err/sctrl are not visible). Comments only.
 */
2977 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
2979 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
/* Already busy: stash the frame behind the backlog and try to drain. */
2982 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
2983 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
2984 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
2985 return l2cap_try_push_rx_skb(sk
);
/* Fast path: deliver immediately and advance the receive window. */
2990 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
2992 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
2996 /* Busy Condition */
2997 BT_DBG("sk %p, Enter local busy", sk
);
2999 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3000 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3001 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
/* Tell the peer to stop sending: RNR S-frame with current ReqSeq. */
3003 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3004 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3005 l2cap_send_sframe(pi
, sctrl
);
3007 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3009 del_timer(&pi
->ack_timer
);
/* Defer further draining to the dedicated single-thread workqueue. */
3011 queue_work(_busy_wq
, &pi
->busy_work
);
/*
 * l2cap_streaming_reassembly_sdu() - SAR reassembly for Streaming mode.
 * Unlike the ERTM variant there is no retry machinery: lost or
 * oversized SDUs are simply abandoned (state cleared, data dropped).
 * NOTE(review): lossy extraction — braces, drop paths, and the
 * L2CAP_SDU_END case label between embedded line numbers are missing.
 * Comments only.
 */
3016 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3018 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3019 struct sk_buff
*_skb
;
3023 * TODO: We have to notify the userland if some data is lost with the
3027 switch (control
& L2CAP_CTRL_SAR
) {
3028 case L2CAP_SDU_UNSEGMENTED
:
/* An unfinished SAR session is abandoned when an unsegmented SDU
 * arrives (cleanup lines not visible here). */
3029 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3034 err
= sock_queue_rcv_skb(sk
, skb
);
3040 case L2CAP_SDU_START
:
3041 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
/* Total SDU length rides in the first two little-endian bytes. */
3046 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3049 if (pi
->sdu_len
> pi
->imtu
) {
3054 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3060 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3062 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3063 pi
->partial_sdu_len
= skb
->len
;
3067 case L2CAP_SDU_CONTINUE
:
3068 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3071 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3073 pi
->partial_sdu_len
+= skb
->len
;
3074 if (pi
->partial_sdu_len
> pi
->sdu_len
)
/* End segment (case label lost in extraction): append, close the SAR
 * session, and deliver only when the accumulated length matches the
 * advertised sdu_len exactly. */
3082 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3085 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3087 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3088 pi
->partial_sdu_len
+= skb
->len
;
3090 if (pi
->partial_sdu_len
> pi
->imtu
)
3093 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3094 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3095 err
= sock_queue_rcv_skb(sk
, _skb
);
/*
 * l2cap_check_srej_gap() - after a missing frame was retransmitted,
 * release the run of consecutively-sequenced frames buffered in
 * SREJ_QUEUE starting at tx_seq, feeding each through reassembly and
 * advancing buffer_seq_srej (modulo-64).
 * NOTE(review): lossy extraction — the break on a sequence gap and the
 * declaration of `control` are not visible. Comments only.
 */
3110 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3112 struct sk_buff
*skb
;
3115 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
/* Stop at the first out-of-order frame: the gap is not yet filled. */
3116 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3119 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3120 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3121 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3122 l2cap_pi(sk
)->buffer_seq_srej
=
3123 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3124 tx_seq
= (tx_seq
+ 1) % 64;
/*
 * l2cap_resend_srejframe() - walk the pending-SREJ list and resend a
 * SREJ S-frame for every entry up to (and including, per list order)
 * the one matching tx_seq; matched entries are presumably removed and
 * freed (those lines are lost in this extraction), others are rotated
 * to the list tail.
 * NOTE(review): lossy extraction — `control` declaration and the
 * list_del/kfree/return inside the match branch are not visible.
 */
3128 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3130 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3131 struct srej_list
*l
, *tmp
;
/* _safe iteration: entries are detached/re-added while walking. */
3134 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3135 if (l
->tx_seq
== tx_seq
) {
3140 control
= L2CAP_SUPER_SELECT_REJECT
;
3141 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3142 l2cap_send_sframe(pi
, control
);
3144 list_add_tail(&l
->list
, SREJ_LIST(sk
));
/*
 * l2cap_send_srejframe() - request selective retransmission of every
 * sequence number between expected_tx_seq and the received tx_seq:
 * send one SREJ S-frame per missing frame and record it on SREJ_LIST.
 * NOTE(review): `new = kzalloc(..., GFP_ATOMIC)` has no visible NULL
 * check before `new->tx_seq` — a real defect in this kernel vintage
 * (fixed upstream later), though the check may simply be among the
 * lines this lossy extraction dropped. Verify against upstream.
 */
3148 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3150 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3151 struct srej_list
*new;
3154 while (tx_seq
!= pi
->expected_tx_seq
) {
3155 control
= L2CAP_SUPER_SELECT_REJECT
;
3156 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3157 l2cap_send_sframe(pi
, control
);
3159 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3160 new->tx_seq
= pi
->expected_tx_seq
;
3161 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3162 list_add_tail(&new->list
, SREJ_LIST(sk
));
/* Skip past the frame that triggered the SREJ run. */
3164 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
/*
 * l2cap_data_channel_iframe() - core ERTM receive path for I-frames:
 * handle the Final bit (exit WAIT_F), ack the peer's ReqSeq, validate
 * tx_seq against the receive window, manage SREJ recovery (buffering
 * out-of-order frames, sending/resending SREJ), and finally push
 * in-order frames to reassembly, acking every num_to_ack frames.
 * NOTE(review): lossy extraction — gotos, braces, else-arms and several
 * statements between embedded line numbers are missing. Comments only.
 */
3167 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3169 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3170 u8 tx_seq
= __get_txseq(rx_control
);
3171 u8 req_seq
= __get_reqseq(rx_control
);
3172 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3173 int tx_seq_offset
, expected_tx_seq_offset
;
/* Ack threshold derived from the transmit window. */
3174 int num_to_ack
= (pi
->tx_win
/6) + 1;
3177 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
/* Final bit while waiting for it: stop the monitor timer and resume
 * retransmission bookkeeping. */
3180 if (L2CAP_CTRL_FINAL
& rx_control
&&
3181 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3182 del_timer(&pi
->monitor_timer
);
3183 if (pi
->unacked_frames
> 0)
3184 __mod_retrans_timer();
3185 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
/* The piggybacked ReqSeq acknowledges our outstanding frames. */
3188 pi
->expected_ack_seq
= req_seq
;
3189 l2cap_drop_acked_frames(sk
);
3191 if (tx_seq
== pi
->expected_tx_seq
)
/* Distance of tx_seq from the window base, in modulo-64 space. */
3194 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3195 if (tx_seq_offset
< 0)
3196 tx_seq_offset
+= 64;
3198 /* invalid tx_seq */
3199 if (tx_seq_offset
>= pi
->tx_win
) {
3200 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
/* NOTE(review): `==` on a bitmask field looks wrong — conn_state holds
 * OR-ed flags, so this only matches when LOCAL_BUSY is the sole flag.
 * Upstream later changed this to a `&` test; confirm before relying
 * on it. */
3204 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
/* SREJ recovery in progress: either this frame fills the oldest gap
 * (release the consecutive run), or it is buffered / re-SREJed. */
3207 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3208 struct srej_list
*first
;
3210 first
= list_first_entry(SREJ_LIST(sk
),
3211 struct srej_list
, list
);
3212 if (tx_seq
== first
->tx_seq
) {
3213 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3214 l2cap_check_srej_gap(sk
, tx_seq
);
3216 list_del(&first
->list
);
/* All gaps filled: leave SREJ_SENT and restore the window base. */
3219 if (list_empty(SREJ_LIST(sk
))) {
3220 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3221 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3223 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
3226 struct srej_list
*l
;
3228 /* duplicated tx_seq */
3229 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3232 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3233 if (l
->tx_seq
== tx_seq
) {
3234 l2cap_resend_srejframe(sk
, tx_seq
);
3238 l2cap_send_srejframe(sk
, tx_seq
);
/* Not in SREJ recovery: decide between a duplicate and a new gap. */
3241 expected_tx_seq_offset
=
3242 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3243 if (expected_tx_seq_offset
< 0)
3244 expected_tx_seq_offset
+= 64;
3246 /* duplicated tx_seq */
3247 if (tx_seq_offset
< expected_tx_seq_offset
)
/* New gap detected: enter SREJ recovery. */
3250 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3252 BT_DBG("sk %p, Enter SREJ", sk
);
3254 INIT_LIST_HEAD(SREJ_LIST(sk
));
3255 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3257 __skb_queue_head_init(SREJ_QUEUE(sk
));
3258 __skb_queue_head_init(BUSY_QUEUE(sk
));
3259 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3261 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3263 l2cap_send_srejframe(sk
, tx_seq
);
3265 del_timer(&pi
->ack_timer
);
/* Expected (in-order) frame path. */
3270 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3272 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3273 bt_cb(skb
)->tx_seq
= tx_seq
;
3274 bt_cb(skb
)->sar
= sar
;
3275 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3279 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
3283 if (rx_control
& L2CAP_CTRL_FINAL
) {
3284 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3285 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3287 l2cap_retransmit_frames(sk
);
/* Send an ack once enough frames have accumulated. */
3292 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
3293 if (pi
->num_acked
== num_to_ack
- 1)
/*
 * l2cap_data_channel_rrframe() - handle a Receiver Ready S-frame:
 * acknowledge frames up to ReqSeq, then branch on the Poll/Final bits
 * (Poll: respond with F-bit set; Final: possibly retransmit after a
 * REJ exchange; otherwise clear remote-busy and resume sending).
 * NOTE(review): lossy extraction — braces/else lines missing between
 * embedded line numbers. Comments only.
 */
3303 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
3305 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3307 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
3310 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3311 l2cap_drop_acked_frames(sk
);
3313 if (rx_control
& L2CAP_CTRL_POLL
) {
/* Peer polls us: we must answer with the Final bit set. */
3314 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3315 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3316 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3317 (pi
->unacked_frames
> 0))
3318 __mod_retrans_timer();
3320 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3321 l2cap_send_srejtail(sk
);
3323 l2cap_send_i_or_rr_or_rnr(sk
);
3326 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3327 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
/* A Final RR after our REJ: the retransmission round is complete
 * unless REJ is still pending. */
3329 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3330 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3332 l2cap_retransmit_frames(sk
);
/* Plain RR: restart the retransmission timer if the peer was busy and
 * we still have unacked frames, then keep transmitting. */
3335 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3336 (pi
->unacked_frames
> 0))
3337 __mod_retrans_timer();
3339 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3340 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)
3343 l2cap_ertm_send(sk
);
/*
 * l2cap_data_channel_rejframe() - handle a Reject S-frame: drop acked
 * frames up to ReqSeq and retransmit everything from there. REJ_ACT
 * suppresses a duplicate retransmission round while WAIT_F is pending.
 * NOTE(review): lossy extraction — the else between the two
 * l2cap_retransmit_frames() calls is implied by the embedded line
 * numbers (3363 vs 3365) but not visible. Comments only.
 */
3347 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
3349 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3350 u8 tx_seq
= __get_reqseq(rx_control
);
3352 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3354 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3356 pi
->expected_ack_seq
= tx_seq
;
3357 l2cap_drop_acked_frames(sk
);
3359 if (rx_control
& L2CAP_CTRL_FINAL
) {
3360 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3361 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3363 l2cap_retransmit_frames(sk
);
3365 l2cap_retransmit_frames(sk
);
3367 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
3368 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
/*
 * l2cap_data_channel_srejframe() - handle a Selective Reject S-frame:
 * retransmit exactly the frame the peer asks for (tx_seq = ReqSeq).
 * Poll variant also acks and answers with the F-bit; Final variant
 * clears a matching SREJ_ACT; plain variant just retransmits. SREJ_ACT
 * + srej_save_reqseq pair a poll-era SREJ with its Final response.
 * NOTE(review): lossy extraction — braces/else lines missing between
 * embedded line numbers. Comments only.
 */
3371 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
3373 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3374 u8 tx_seq
= __get_reqseq(rx_control
);
3376 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3378 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3380 if (rx_control
& L2CAP_CTRL_POLL
) {
3381 pi
->expected_ack_seq
= tx_seq
;
3382 l2cap_drop_acked_frames(sk
);
3384 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3385 l2cap_retransmit_one_frame(sk
, tx_seq
);
3387 l2cap_ertm_send(sk
);
3389 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3390 pi
->srej_save_reqseq
= tx_seq
;
3391 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3393 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3394 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3395 pi
->srej_save_reqseq
== tx_seq
)
3396 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3398 l2cap_retransmit_one_frame(sk
, tx_seq
);
3400 l2cap_retransmit_one_frame(sk
, tx_seq
);
3401 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3402 pi
->srej_save_reqseq
= tx_seq
;
3403 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
/*
 * l2cap_data_channel_rnrframe() - handle a Receiver Not Ready S-frame:
 * mark the peer busy, ack up to ReqSeq, and stop retransmitting. With
 * the Poll bit set we must respond with the F-bit (RR/RNR, or SREJ
 * tail while in SREJ recovery).
 * NOTE(review): lossy extraction — braces/else lines missing between
 * embedded line numbers. Comments only.
 */
3408 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
3410 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3411 u8 tx_seq
= __get_reqseq(rx_control
);
3413 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3415 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3416 pi
->expected_ack_seq
= tx_seq
;
3417 l2cap_drop_acked_frames(sk
);
3419 if (rx_control
& L2CAP_CTRL_POLL
)
3420 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3422 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
/* Peer busy: no point retransmitting until it recovers. */
3423 del_timer(&pi
->retrans_timer
);
3424 if (rx_control
& L2CAP_CTRL_POLL
)
3425 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
/* In SREJ recovery, answer a poll with the outstanding SREJ tail,
 * otherwise a plain RR. */
3429 if (rx_control
& L2CAP_CTRL_POLL
)
3430 l2cap_send_srejtail(sk
);
3432 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
/*
 * l2cap_data_channel_sframe() - demultiplex a supervisory frame
 * (RR / REJ / SREJ / RNR) to its handler after processing a Final bit
 * received while in the WAIT_F state.
 * NOTE(review): lossy extraction — break statements, kfree_skb and the
 * return are missing between embedded line numbers. Comments only.
 */
3435 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3437 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
/* Final bit answers our earlier Poll: stop the monitor timer. */
3439 if (L2CAP_CTRL_FINAL
& rx_control
&&
3440 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3441 del_timer(&l2cap_pi(sk
)->monitor_timer
);
3442 if (l2cap_pi(sk
)->unacked_frames
> 0)
3443 __mod_retrans_timer();
3444 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3447 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3448 case L2CAP_SUPER_RCV_READY
:
3449 l2cap_data_channel_rrframe(sk
, rx_control
);
3452 case L2CAP_SUPER_REJECT
:
3453 l2cap_data_channel_rejframe(sk
, rx_control
);
3456 case L2CAP_SUPER_SELECT_REJECT
:
3457 l2cap_data_channel_srejframe(sk
, rx_control
);
3460 case L2CAP_SUPER_RCV_NOT_READY
:
3461 l2cap_data_channel_rnrframe(sk
, rx_control
);
/*
 * l2cap_ertm_data_rcv() - validate an incoming ERTM frame (FCS, length
 * vs MPS, ReqSeq within the unacked window) and dispatch it as an
 * I-frame or S-frame. Invalid frames trigger a disconnect request.
 * NOTE(review): lossy extraction — variable declarations (control,
 * req_seq), skb_pull calls, length computation, gotos and returns are
 * missing between embedded line numbers. Comments only.
 */
3469 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3471 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3474 int len
, next_tx_seq_offset
, req_seq_offset
;
/* 16-bit little-endian control field heads the payload. */
3476 control
= get_unaligned_le16(skb
->data
);
3481 * We can just drop the corrupted I-frame here.
3482 * Receiver will miss it and start proper recovery
3483 * procedures and ask retransmission.
3485 if (l2cap_check_fcs(pi
, skb
))
/* SAR-start I-frames carry a 2-byte SDU length to subtract from len
 * (the adjustment lines are lost in this extraction). */
3488 if (__is_sar_start(control
) && __is_iframe(control
))
3491 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3494 if (len
> pi
->mps
) {
3495 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
/* ReqSeq must fall inside [expected_ack_seq, next_tx_seq] measured in
 * modulo-64 space; anything else acknowledges frames never sent. */
3499 req_seq
= __get_reqseq(control
);
3500 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
3501 if (req_seq_offset
< 0)
3502 req_seq_offset
+= 64;
3504 next_tx_seq_offset
=
3505 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
3506 if (next_tx_seq_offset
< 0)
3507 next_tx_seq_offset
+= 64;
3509 /* check for invalid req-seq */
3510 if (req_seq_offset
> next_tx_seq_offset
) {
3511 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3515 if (__is_iframe(control
)) {
/* (Visible upstream: a zero-length I-frame is invalid.) */
3517 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3521 l2cap_data_channel_iframe(sk
, control
, skb
);
/* S-frame branch: a non-empty S-frame is a protocol error. */
3525 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3529 l2cap_data_channel_sframe(sk
, control
, skb
);
/*
 * l2cap_data_channel() - route an inbound frame on a connection-
 * oriented channel (looked up by source CID) according to the channel
 * mode: Basic (straight to the socket), ERTM (full receive state
 * machine, via backlog if the socket is owned by user context), or
 * Streaming (no retransmission; skipped frames just advance
 * expected_tx_seq).
 * NOTE(review): lossy extraction — the switch header, gotos, drop/done
 * labels, bh_unlock_sock and declarations (sk, control, tx_seq, len)
 * are missing between embedded line numbers. Comments only.
 */
3539 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3542 struct l2cap_pinfo
*pi
;
3547 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3549 BT_DBG("unknown cid 0x%4.4x", cid
);
3555 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3557 if (sk
->sk_state
!= BT_CONNECTED
)
3561 case L2CAP_MODE_BASIC
:
3562 /* If socket recv buffers overflows we drop data here
3563 * which is *bad* because L2CAP has to be reliable.
3564 * But we don't have any other choice. L2CAP doesn't
3565 * provide flow control mechanism. */
3567 if (pi
->imtu
< skb
->len
)
3570 if (!sock_queue_rcv_skb(sk
, skb
))
3574 case L2CAP_MODE_ERTM
:
/* If the socket is locked by a process, defer to the backlog so the
 * state machine never runs concurrently with user context. */
3575 if (!sock_owned_by_user(sk
)) {
3576 l2cap_ertm_data_rcv(sk
, skb
);
3578 if (sk_add_backlog(sk
, skb
))
3584 case L2CAP_MODE_STREAMING
:
3585 control
= get_unaligned_le16(skb
->data
);
3589 if (l2cap_check_fcs(pi
, skb
))
3592 if (__is_sar_start(control
))
3595 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3598 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
3601 tx_seq
= __get_txseq(control
);
/* Streaming tolerates loss: a sequence jump just resyncs the window
 * instead of requesting retransmission. */
3603 if (pi
->expected_tx_seq
== tx_seq
)
3604 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3606 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3608 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
3613 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
/*
 * l2cap_conless_channel() - deliver a connectionless (G-frame) payload
 * to the socket bound to the given PSM on this adapter's source
 * address. Frames exceeding the socket's incoming MTU, or arriving for
 * sockets not in BT_BOUND/BT_CONNECTED, are presumably dropped (those
 * drop/return lines are lost in this extraction).
 */
3627 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3631 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3637 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3639 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3642 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3645 if (!sock_queue_rcv_skb(sk
, skb
))
/*
 * l2cap_recv_frame() - parse the Basic L2CAP header of a complete
 * frame and route by CID: signaling channels, the connectionless
 * channel (PSM-prefixed), or a connection-oriented data channel.
 * NOTE(review): lossy extraction — declarations (cid, len, psm), the
 * drop on length mismatch, switch header, breaks and the default label
 * are missing between embedded line numbers. Comments only.
 */
3657 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3659 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3663 skb_pull(skb
, L2CAP_HDR_SIZE
);
3664 cid
= __le16_to_cpu(lh
->cid
);
3665 len
= __le16_to_cpu(lh
->len
);
/* Header length must match the payload exactly for a complete frame. */
3667 if (len
!= skb
->len
) {
3672 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3675 case L2CAP_CID_LE_SIGNALING
:
3676 case L2CAP_CID_SIGNALING
:
3677 l2cap_sig_channel(conn
, skb
);
3680 case L2CAP_CID_CONN_LESS
:
/* Connectionless frames carry a 2-byte PSM before the payload. */
3681 psm
= get_unaligned_le16(skb
->data
);
3683 l2cap_conless_channel(conn
, psm
, skb
);
3687 l2cap_data_channel(conn
, cid
, skb
);
3692 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * l2cap_connect_ind() - HCI callback: decide whether to accept an
 * incoming ACL connection by scanning listening L2CAP sockets. lm1
 * accumulates the link mode for exact local-address matches, lm2 for
 * wildcard (BDADDR_ANY) listeners.
 * NOTE(review): `exact` is returned as the selector but no assignment
 * to it is visible — the `exact++` line was almost certainly lost in
 * this extraction along with the early return and `continue`.
 */
3694 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3696 int exact
= 0, lm1
= 0, lm2
= 0;
3697 register struct sock
*sk
;
3698 struct hlist_node
*node
;
3700 if (type
!= ACL_LINK
)
3703 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3705 /* Find listening sockets and check their link_mode */
3706 read_lock(&l2cap_sk_list
.lock
);
3707 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3708 if (sk
->sk_state
!= BT_LISTEN
)
3711 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3712 lm1
|= HCI_LM_ACCEPT
;
3713 if (l2cap_pi(sk
)->role_switch
)
3714 lm1
|= HCI_LM_MASTER
;
3716 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3717 lm2
|= HCI_LM_ACCEPT
;
3718 if (l2cap_pi(sk
)->role_switch
)
3719 lm2
|= HCI_LM_MASTER
;
3722 read_unlock(&l2cap_sk_list
.lock
);
/* Exact-address listeners take precedence over wildcard listeners. */
3724 return exact
? lm1
: lm2
;
/*
 * l2cap_connect_cfm() - HCI callback on connection completion: for
 * ACL/LE links, create the L2CAP connection object and mark it ready
 * on success, or tear it down with a mapped error on failure.
 * NOTE(review): lossy extraction — the status branch structure and
 * return statements between embedded line numbers are missing.
 */
3727 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3729 struct l2cap_conn
*conn
;
3731 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3733 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
3737 conn
= l2cap_conn_add(hcon
, status
);
3739 l2cap_conn_ready(conn
);
/* Failure path: bt_err() maps the HCI status to an errno. */
3741 l2cap_conn_del(hcon
, bt_err(status
));
/*
 * l2cap_disconn_ind() - HCI callback asking which disconnect reason to
 * use for this link; returns the reason recorded on the L2CAP
 * connection (the fallback return for non-ACL/missing conn is lost in
 * this extraction).
 */
3746 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3748 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3750 BT_DBG("hcon %p", hcon
);
3752 if (hcon
->type
!= ACL_LINK
|| !conn
)
3755 return conn
->disc_reason
;
/*
 * l2cap_disconn_cfm() - HCI callback on link disconnection: tear down
 * the L2CAP connection for ACL/LE links, mapping the HCI reason code
 * to an errno via bt_err() (return statements lost in extraction).
 */
3758 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3760 BT_DBG("hcon %p reason %d", hcon
, reason
);
3762 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
3765 l2cap_conn_del(hcon
, bt_err(reason
));
/*
 * l2cap_check_encryption() - react to a link encryption change on a
 * SEQPACKET/STREAM socket: on loss of encryption, give MEDIUM-security
 * channels a 5s grace timer and close HIGH-security channels outright;
 * on (re)encryption, clear the pending timer for MEDIUM security.
 * NOTE(review): the else arm between the two branches is implied by
 * the embedded line numbers but not visible in this extraction.
 */
3770 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3772 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
3775 if (encrypt
== 0x00) {
3776 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3777 l2cap_sock_clear_timer(sk
);
3778 l2cap_sock_set_timer(sk
, HZ
* 5);
3779 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3780 __l2cap_sock_close(sk
, ECONNREFUSED
);
3782 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3783 l2cap_sock_clear_timer(sk
);
/*
 * l2cap_security_cfm() - HCI callback after an authentication /
 * encryption request completes: walk every channel on the connection
 * and advance its state machine — established channels get the
 * encryption check; BT_CONNECT channels finally send their deferred
 * Connection Request; BT_CONNECT2 channels answer the peer's pending
 * Connection Request with success or security-block.
 * NOTE(review): lossy extraction — bh_lock/unlock_sock, continue
 * statements, the status checks guarding each branch, and declarations
 * (sk, result) are missing between embedded line numbers.
 */
3787 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3789 struct l2cap_chan_list
*l
;
3790 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3796 l
= &conn
->chan_list
;
3798 BT_DBG("conn %p", conn
);
3800 read_lock(&l
->lock
);
/* Channels are chained through l2cap_pinfo.next_c. */
3802 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
3805 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3810 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3811 sk
->sk_state
== BT_CONFIG
)) {
3812 l2cap_check_encryption(sk
, encrypt
);
/* Security now satisfied: send the Connection Request we deferred
 * until authentication completed. */
3817 if (sk
->sk_state
== BT_CONNECT
) {
3819 struct l2cap_conn_req req
;
3820 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3821 req
.psm
= l2cap_pi(sk
)->psm
;
3823 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
3824 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3826 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3827 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3829 l2cap_sock_clear_timer(sk
);
3830 l2cap_sock_set_timer(sk
, HZ
/ 10);
/* Incoming connection waiting on security: answer it now. */
3832 } else if (sk
->sk_state
== BT_CONNECT2
) {
3833 struct l2cap_conn_rsp rsp
;
3837 sk
->sk_state
= BT_CONFIG
;
3838 result
= L2CAP_CR_SUCCESS
;
3840 sk
->sk_state
= BT_DISCONN
;
3841 l2cap_sock_set_timer(sk
, HZ
/ 10);
3842 result
= L2CAP_CR_SEC_BLOCK
;
3845 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3846 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3847 rsp
.result
= cpu_to_le16(result
);
3848 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3849 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3850 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3856 read_unlock(&l
->lock
);
/*
 * l2cap_recv_acldata() - HCI entry point for inbound ACL data.
 * Reassembles L2CAP frames fragmented across ACL packets: a start
 * fragment (no ACL_CONT) yields the total length from the Basic
 * header; continuation fragments are appended to conn->rx_skb until
 * rx_len reaches zero, at which point the complete frame goes to
 * l2cap_recv_frame(). Malformed sequences mark the connection
 * unreliable (ECOMM).
 * NOTE(review): lossy extraction — declarations (len, cid, sk, flags
 * handling), gotos to the drop label, kfree_skb calls and returns are
 * missing between embedded line numbers. Comments only.
 */
3861 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3863 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
/* Lazily create the connection object on first data. */
3866 conn
= l2cap_conn_add(hcon
, 0);
3871 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3873 if (!(flags
& ACL_CONT
)) {
3874 struct l2cap_hdr
*hdr
;
/* A start fragment while reassembly is in progress means the previous
 * frame was truncated: discard it. */
3880 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3881 kfree_skb(conn
->rx_skb
);
3882 conn
->rx_skb
= NULL
;
3884 l2cap_conn_unreliable(conn
, ECOMM
);
3887 /* Start fragment always begin with Basic L2CAP header */
3888 if (skb
->len
< L2CAP_HDR_SIZE
) {
3889 BT_ERR("Frame is too short (len %d)", skb
->len
);
3890 l2cap_conn_unreliable(conn
, ECOMM
);
3894 hdr
= (struct l2cap_hdr
*) skb
->data
;
3895 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3896 cid
= __le16_to_cpu(hdr
->cid
);
3898 if (len
== skb
->len
) {
3899 /* Complete frame received */
3900 l2cap_recv_frame(conn
, skb
);
3904 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
3906 if (skb
->len
> len
) {
3907 BT_ERR("Frame is too long (len %d, expected len %d)",
3909 l2cap_conn_unreliable(conn
, ECOMM
);
/* Pre-check the target channel's MTU so we never buffer a frame the
 * socket would reject anyway. */
3913 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3915 if (sk
&& l2cap_pi(sk
)->imtu
< len
- L2CAP_HDR_SIZE
) {
3916 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
3917 len
, l2cap_pi(sk
)->imtu
);
3919 l2cap_conn_unreliable(conn
, ECOMM
);
3926 /* Allocate skb for the complete frame (with header) */
3927 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3931 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3933 conn
->rx_len
= len
- skb
->len
;
/* Continuation-fragment path. */
3935 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
3937 if (!conn
->rx_len
) {
3938 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
3939 l2cap_conn_unreliable(conn
, ECOMM
);
3943 if (skb
->len
> conn
->rx_len
) {
3944 BT_ERR("Fragment is too long (len %d, expected %d)",
3945 skb
->len
, conn
->rx_len
);
3946 kfree_skb(conn
->rx_skb
);
3947 conn
->rx_skb
= NULL
;
3949 l2cap_conn_unreliable(conn
, ECOMM
);
3953 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3955 conn
->rx_len
-= skb
->len
;
3957 if (!conn
->rx_len
) {
3958 /* Complete frame received */
3959 l2cap_recv_frame(conn
, conn
->rx_skb
);
3960 conn
->rx_skb
= NULL
;
/*
 * l2cap_debugfs_show() - seq_file dump of every L2CAP socket: source/
 * destination addresses, state, PSM, CIDs, MTUs and security level,
 * one line per socket, under the socket-list read lock (BH-safe).
 * NOTE(review): lossy extraction — the `sk` declaration, scid/dcid
 * printf arguments and the return are missing between embedded line
 * numbers. Comments only.
 */
3969 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
3972 struct hlist_node
*node
;
3974 read_lock_bh(&l2cap_sk_list
.lock
);
3976 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3977 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3979 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3980 batostr(&bt_sk(sk
)->src
),
3981 batostr(&bt_sk(sk
)->dst
),
3982 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
3984 pi
->imtu
, pi
->omtu
, pi
->sec_level
,
3988 read_unlock_bh(&l2cap_sk_list
.lock
);
/* Standard single_open() shim wiring the seq_file show callback. */
3993 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
3995 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
/* debugfs file_operations (the .read = seq_read line appears to be
 * lost in this extraction — standard for a single_open seq_file). */
3998 static const struct file_operations l2cap_debugfs_fops
= {
3999 .open
= l2cap_debugfs_open
,
4001 .llseek
= seq_lseek
,
4002 .release
= single_release
,
/* Handle for the debugfs "l2cap" file, created in l2cap_init(). */
4005 static struct dentry
*l2cap_debugfs
;
/* HCI protocol descriptor registering L2CAP's callbacks with the HCI
 * core (the .name field line appears to be lost in this extraction). */
4007 static struct hci_proto l2cap_hci_proto
= {
4009 .id
= HCI_PROTO_L2CAP
,
4010 .connect_ind
= l2cap_connect_ind
,
4011 .connect_cfm
= l2cap_connect_cfm
,
4012 .disconn_ind
= l2cap_disconn_ind
,
4013 .disconn_cfm
= l2cap_disconn_cfm
,
4014 .security_cfm
= l2cap_security_cfm
,
4015 .recv_acldata
= l2cap_recv_acldata
l2cap_init(void)
4022 err
= l2cap_init_sockets();
4026 _busy_wq
= create_singlethread_workqueue("l2cap");
4032 err
= hci_register_proto(&l2cap_hci_proto
);
4034 BT_ERR("L2CAP protocol registration failed");
4035 bt_sock_unregister(BTPROTO_L2CAP
);
4040 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4041 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4043 BT_ERR("Failed to create L2CAP debug file");
4049 destroy_workqueue(_busy_wq
);
4050 l2cap_cleanup_sockets();
/*
 * l2cap_exit() - module teardown: remove debugfs entry, flush and
 * destroy the busy workqueue (flush first so queued busy_work items
 * finish), unregister from HCI, and release the socket family.
 */
4054 void l2cap_exit(void)
4056 debugfs_remove(l2cap_debugfs
);
4058 flush_workqueue(_busy_wq
);
4059 destroy_workqueue(_busy_wq
);
4061 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4062 BT_ERR("L2CAP protocol unregistration failed");
4064 l2cap_cleanup_sockets();
/* Runtime-writable (0644) module parameter to disable ERTM support. */
4067 module_param(disable_ertm
, bool, 0644);
4068 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");