2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
60 static int disable_ertm
= 0;
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops
;
67 static struct workqueue_struct
*_busy_wq
;
69 static struct bt_sock_list l2cap_sk_list
= {
70 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
73 static void l2cap_busy_work(struct work_struct
*work
);
75 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
76 static void l2cap_sock_close(struct sock
*sk
);
77 static void l2cap_sock_kill(struct sock
*sk
);
79 static int l2cap_build_conf_req(struct sock
*sk
, void *data
);
80 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
81 u8 code
, u8 ident
, u16 dlen
, void *data
);
83 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
85 /* ---- L2CAP timers ---- */
86 static void l2cap_sock_timeout(unsigned long arg
)
88 struct sock
*sk
= (struct sock
*) arg
;
91 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
95 if (sk
->sk_state
== BT_CONNECTED
|| sk
->sk_state
== BT_CONFIG
)
96 reason
= ECONNREFUSED
;
97 else if (sk
->sk_state
== BT_CONNECT
&&
98 l2cap_pi(sk
)->sec_level
!= BT_SECURITY_SDP
)
99 reason
= ECONNREFUSED
;
103 __l2cap_sock_close(sk
, reason
);
111 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
113 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
114 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
117 static void l2cap_sock_clear_timer(struct sock
*sk
)
119 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
120 sk_stop_timer(sk
, &sk
->sk_timer
);
123 /* ---- L2CAP channels ---- */
124 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
127 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
128 if (l2cap_pi(s
)->dcid
== cid
)
134 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
137 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
138 if (l2cap_pi(s
)->scid
== cid
)
144 /* Find channel with given SCID.
145 * Returns locked socket */
146 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
150 s
= __l2cap_get_chan_by_scid(l
, cid
);
153 read_unlock(&l
->lock
);
157 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
160 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
161 if (l2cap_pi(s
)->ident
== ident
)
167 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
171 s
= __l2cap_get_chan_by_ident(l
, ident
);
174 read_unlock(&l
->lock
);
178 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
180 u16 cid
= L2CAP_CID_DYN_START
;
182 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
183 if (!__l2cap_get_chan_by_scid(l
, cid
))
190 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
195 l2cap_pi(l
->head
)->prev_c
= sk
;
197 l2cap_pi(sk
)->next_c
= l
->head
;
198 l2cap_pi(sk
)->prev_c
= NULL
;
202 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
204 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
206 write_lock_bh(&l
->lock
);
211 l2cap_pi(next
)->prev_c
= prev
;
213 l2cap_pi(prev
)->next_c
= next
;
214 write_unlock_bh(&l
->lock
);
219 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
221 struct l2cap_chan_list
*l
= &conn
->chan_list
;
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
224 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
226 conn
->disc_reason
= 0x13;
228 l2cap_pi(sk
)->conn
= conn
;
230 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
233 } else if (sk
->sk_type
== SOCK_DGRAM
) {
234 /* Connectionless socket */
235 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
236 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
237 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
241 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
242 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
245 __l2cap_chan_link(l
, sk
);
248 bt_accept_enqueue(parent
, sk
);
252 * Must be called on the locked socket. */
253 static void l2cap_chan_del(struct sock
*sk
, int err
)
255 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
256 struct sock
*parent
= bt_sk(sk
)->parent
;
258 l2cap_sock_clear_timer(sk
);
260 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
263 /* Unlink from channel list */
264 l2cap_chan_unlink(&conn
->chan_list
, sk
);
265 l2cap_pi(sk
)->conn
= NULL
;
266 hci_conn_put(conn
->hcon
);
269 sk
->sk_state
= BT_CLOSED
;
270 sock_set_flag(sk
, SOCK_ZAPPED
);
276 bt_accept_unlink(sk
);
277 parent
->sk_data_ready(parent
, 0);
279 sk
->sk_state_change(sk
);
281 skb_queue_purge(TX_QUEUE(sk
));
283 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
284 struct srej_list
*l
, *tmp
;
286 del_timer(&l2cap_pi(sk
)->retrans_timer
);
287 del_timer(&l2cap_pi(sk
)->monitor_timer
);
288 del_timer(&l2cap_pi(sk
)->ack_timer
);
290 skb_queue_purge(SREJ_QUEUE(sk
));
291 skb_queue_purge(BUSY_QUEUE(sk
));
293 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
300 /* Service level security */
301 static inline int l2cap_check_security(struct sock
*sk
)
303 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
306 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
308 auth_type
= HCI_AT_NO_BONDING_MITM
;
310 auth_type
= HCI_AT_NO_BONDING
;
312 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
313 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
315 switch (l2cap_pi(sk
)->sec_level
) {
316 case BT_SECURITY_HIGH
:
317 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
319 case BT_SECURITY_MEDIUM
:
320 auth_type
= HCI_AT_GENERAL_BONDING
;
323 auth_type
= HCI_AT_NO_BONDING
;
328 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
332 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
336 /* Get next available identificator.
337 * 1 - 128 are used by kernel.
338 * 129 - 199 are reserved.
339 * 200 - 254 are used by utilities like l2ping, etc.
342 spin_lock_bh(&conn
->lock
);
344 if (++conn
->tx_ident
> 128)
349 spin_unlock_bh(&conn
->lock
);
354 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
356 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
358 BT_DBG("code 0x%2.2x", code
);
363 hci_send_acl(conn
->hcon
, skb
, 0);
366 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
369 struct l2cap_hdr
*lh
;
370 struct l2cap_conn
*conn
= pi
->conn
;
371 struct sock
*sk
= (struct sock
*)pi
;
372 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
374 if (sk
->sk_state
!= BT_CONNECTED
)
377 if (pi
->fcs
== L2CAP_FCS_CRC16
)
380 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
382 count
= min_t(unsigned int, conn
->mtu
, hlen
);
383 control
|= L2CAP_CTRL_FRAME_TYPE
;
385 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
386 control
|= L2CAP_CTRL_FINAL
;
387 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
390 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
391 control
|= L2CAP_CTRL_POLL
;
392 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
395 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
399 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
400 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
401 lh
->cid
= cpu_to_le16(pi
->dcid
);
402 put_unaligned_le16(control
, skb_put(skb
, 2));
404 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
405 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
406 put_unaligned_le16(fcs
, skb_put(skb
, 2));
409 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
414 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
415 control
|= L2CAP_SUPER_RCV_NOT_READY
;
416 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
418 control
|= L2CAP_SUPER_RCV_READY
;
420 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
422 l2cap_send_sframe(pi
, control
);
425 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
427 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
430 static void l2cap_do_start(struct sock
*sk
)
432 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
434 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
435 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
438 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
439 struct l2cap_conn_req req
;
440 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
441 req
.psm
= l2cap_pi(sk
)->psm
;
443 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
444 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
446 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
447 L2CAP_CONN_REQ
, sizeof(req
), &req
);
450 struct l2cap_info_req req
;
451 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
453 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
454 conn
->info_ident
= l2cap_get_ident(conn
);
456 mod_timer(&conn
->info_timer
, jiffies
+
457 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
459 l2cap_send_cmd(conn
, conn
->info_ident
,
460 L2CAP_INFO_REQ
, sizeof(req
), &req
);
464 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
466 u32 local_feat_mask
= l2cap_feat_mask
;
468 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
471 case L2CAP_MODE_ERTM
:
472 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
473 case L2CAP_MODE_STREAMING
:
474 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
480 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
482 struct l2cap_disconn_req req
;
487 skb_queue_purge(TX_QUEUE(sk
));
489 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
490 del_timer(&l2cap_pi(sk
)->retrans_timer
);
491 del_timer(&l2cap_pi(sk
)->monitor_timer
);
492 del_timer(&l2cap_pi(sk
)->ack_timer
);
495 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
496 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
497 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
498 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
500 sk
->sk_state
= BT_DISCONN
;
504 /* ---- L2CAP connections ---- */
505 static void l2cap_conn_start(struct l2cap_conn
*conn
)
507 struct l2cap_chan_list
*l
= &conn
->chan_list
;
508 struct sock_del_list del
, *tmp1
, *tmp2
;
511 BT_DBG("conn %p", conn
);
513 INIT_LIST_HEAD(&del
.list
);
517 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
520 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
521 sk
->sk_type
!= SOCK_STREAM
) {
526 if (sk
->sk_state
== BT_CONNECT
) {
527 struct l2cap_conn_req req
;
529 if (!l2cap_check_security(sk
) ||
530 !__l2cap_no_conn_pending(sk
)) {
535 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
537 && l2cap_pi(sk
)->conf_state
&
538 L2CAP_CONF_STATE2_DEVICE
) {
539 tmp1
= kzalloc(sizeof(struct sock_del_list
),
542 list_add_tail(&tmp1
->list
, &del
.list
);
547 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
548 req
.psm
= l2cap_pi(sk
)->psm
;
550 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
551 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
553 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
554 L2CAP_CONN_REQ
, sizeof(req
), &req
);
556 } else if (sk
->sk_state
== BT_CONNECT2
) {
557 struct l2cap_conn_rsp rsp
;
559 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
560 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
562 if (l2cap_check_security(sk
)) {
563 if (bt_sk(sk
)->defer_setup
) {
564 struct sock
*parent
= bt_sk(sk
)->parent
;
565 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
566 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
567 parent
->sk_data_ready(parent
, 0);
570 sk
->sk_state
= BT_CONFIG
;
571 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
572 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
575 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
576 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
579 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
580 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
582 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
583 rsp
.result
!= L2CAP_CR_SUCCESS
) {
588 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
589 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
590 l2cap_build_conf_req(sk
, buf
), buf
);
591 l2cap_pi(sk
)->num_conf_req
++;
597 read_unlock(&l
->lock
);
599 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
600 bh_lock_sock(tmp1
->sk
);
601 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
602 bh_unlock_sock(tmp1
->sk
);
603 list_del(&tmp1
->list
);
608 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
610 struct l2cap_chan_list
*l
= &conn
->chan_list
;
613 BT_DBG("conn %p", conn
);
617 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
620 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
621 sk
->sk_type
!= SOCK_STREAM
) {
622 l2cap_sock_clear_timer(sk
);
623 sk
->sk_state
= BT_CONNECTED
;
624 sk
->sk_state_change(sk
);
625 } else if (sk
->sk_state
== BT_CONNECT
)
631 read_unlock(&l
->lock
);
634 /* Notify sockets that we cannot guaranty reliability anymore */
635 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
637 struct l2cap_chan_list
*l
= &conn
->chan_list
;
640 BT_DBG("conn %p", conn
);
644 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
645 if (l2cap_pi(sk
)->force_reliable
)
649 read_unlock(&l
->lock
);
652 static void l2cap_info_timeout(unsigned long arg
)
654 struct l2cap_conn
*conn
= (void *) arg
;
656 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
657 conn
->info_ident
= 0;
659 l2cap_conn_start(conn
);
662 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
664 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
669 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
673 hcon
->l2cap_data
= conn
;
676 BT_DBG("hcon %p conn %p", hcon
, conn
);
678 conn
->mtu
= hcon
->hdev
->acl_mtu
;
679 conn
->src
= &hcon
->hdev
->bdaddr
;
680 conn
->dst
= &hcon
->dst
;
684 spin_lock_init(&conn
->lock
);
685 rwlock_init(&conn
->chan_list
.lock
);
687 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
688 (unsigned long) conn
);
690 conn
->disc_reason
= 0x13;
695 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
697 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
703 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
705 kfree_skb(conn
->rx_skb
);
708 while ((sk
= conn
->chan_list
.head
)) {
710 l2cap_chan_del(sk
, err
);
715 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
716 del_timer_sync(&conn
->info_timer
);
718 hcon
->l2cap_data
= NULL
;
722 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
724 struct l2cap_chan_list
*l
= &conn
->chan_list
;
725 write_lock_bh(&l
->lock
);
726 __l2cap_chan_add(conn
, sk
, parent
);
727 write_unlock_bh(&l
->lock
);
730 /* ---- Socket interface ---- */
731 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
734 struct hlist_node
*node
;
735 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
736 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
743 /* Find socket with psm and source bdaddr.
744 * Returns closest match.
746 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
748 struct sock
*sk
= NULL
, *sk1
= NULL
;
749 struct hlist_node
*node
;
751 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
752 if (state
&& sk
->sk_state
!= state
)
755 if (l2cap_pi(sk
)->psm
== psm
) {
757 if (!bacmp(&bt_sk(sk
)->src
, src
))
761 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
765 return node
? sk
: sk1
;
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
770 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
773 read_lock(&l2cap_sk_list
.lock
);
774 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
777 read_unlock(&l2cap_sk_list
.lock
);
781 static void l2cap_sock_destruct(struct sock
*sk
)
785 skb_queue_purge(&sk
->sk_receive_queue
);
786 skb_queue_purge(&sk
->sk_write_queue
);
789 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
793 BT_DBG("parent %p", parent
);
795 /* Close not yet accepted channels */
796 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
797 l2cap_sock_close(sk
);
799 parent
->sk_state
= BT_CLOSED
;
800 sock_set_flag(parent
, SOCK_ZAPPED
);
803 /* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
806 static void l2cap_sock_kill(struct sock
*sk
)
808 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
811 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list
, sk
);
815 sock_set_flag(sk
, SOCK_DEAD
);
819 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
821 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
823 switch (sk
->sk_state
) {
825 l2cap_sock_cleanup_listen(sk
);
830 if (sk
->sk_type
== SOCK_SEQPACKET
||
831 sk
->sk_type
== SOCK_STREAM
) {
832 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
834 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
835 l2cap_send_disconn_req(conn
, sk
, reason
);
837 l2cap_chan_del(sk
, reason
);
841 if (sk
->sk_type
== SOCK_SEQPACKET
||
842 sk
->sk_type
== SOCK_STREAM
) {
843 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
844 struct l2cap_conn_rsp rsp
;
847 if (bt_sk(sk
)->defer_setup
)
848 result
= L2CAP_CR_SEC_BLOCK
;
850 result
= L2CAP_CR_BAD_PSM
;
852 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
853 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
854 rsp
.result
= cpu_to_le16(result
);
855 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
856 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
857 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
859 l2cap_chan_del(sk
, reason
);
864 l2cap_chan_del(sk
, reason
);
868 sock_set_flag(sk
, SOCK_ZAPPED
);
873 /* Must be called on unlocked socket. */
874 static void l2cap_sock_close(struct sock
*sk
)
876 l2cap_sock_clear_timer(sk
);
878 __l2cap_sock_close(sk
, ECONNRESET
);
883 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
885 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
890 sk
->sk_type
= parent
->sk_type
;
891 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
893 pi
->imtu
= l2cap_pi(parent
)->imtu
;
894 pi
->omtu
= l2cap_pi(parent
)->omtu
;
895 pi
->conf_state
= l2cap_pi(parent
)->conf_state
;
896 pi
->mode
= l2cap_pi(parent
)->mode
;
897 pi
->fcs
= l2cap_pi(parent
)->fcs
;
898 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
899 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
900 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
901 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
902 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
904 pi
->imtu
= L2CAP_DEFAULT_MTU
;
906 if (!disable_ertm
&& sk
->sk_type
== SOCK_STREAM
) {
907 pi
->mode
= L2CAP_MODE_ERTM
;
908 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
910 pi
->mode
= L2CAP_MODE_BASIC
;
912 pi
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
913 pi
->fcs
= L2CAP_FCS_CRC16
;
914 pi
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
915 pi
->sec_level
= BT_SECURITY_LOW
;
917 pi
->force_reliable
= 0;
920 /* Default config options */
922 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
923 skb_queue_head_init(TX_QUEUE(sk
));
924 skb_queue_head_init(SREJ_QUEUE(sk
));
925 skb_queue_head_init(BUSY_QUEUE(sk
));
926 INIT_LIST_HEAD(SREJ_LIST(sk
));
929 static struct proto l2cap_proto
= {
931 .owner
= THIS_MODULE
,
932 .obj_size
= sizeof(struct l2cap_pinfo
)
935 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
939 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
943 sock_init_data(sock
, sk
);
944 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
946 sk
->sk_destruct
= l2cap_sock_destruct
;
947 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
949 sock_reset_flag(sk
, SOCK_ZAPPED
);
951 sk
->sk_protocol
= proto
;
952 sk
->sk_state
= BT_OPEN
;
954 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
956 bt_sock_link(&l2cap_sk_list
, sk
);
960 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
965 BT_DBG("sock %p", sock
);
967 sock
->state
= SS_UNCONNECTED
;
969 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
970 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
971 return -ESOCKTNOSUPPORT
;
973 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
976 sock
->ops
= &l2cap_sock_ops
;
978 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
982 l2cap_sock_init(sk
, NULL
);
986 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
988 struct sock
*sk
= sock
->sk
;
989 struct sockaddr_l2 la
;
994 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
997 memset(&la
, 0, sizeof(la
));
998 len
= min_t(unsigned int, sizeof(la
), alen
);
999 memcpy(&la
, addr
, len
);
1006 if (sk
->sk_state
!= BT_OPEN
) {
1011 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
1012 !capable(CAP_NET_BIND_SERVICE
)) {
1017 write_lock_bh(&l2cap_sk_list
.lock
);
1019 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
1022 /* Save source address */
1023 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
1024 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1025 l2cap_pi(sk
)->sport
= la
.l2_psm
;
1026 sk
->sk_state
= BT_BOUND
;
1028 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
1029 __le16_to_cpu(la
.l2_psm
) == 0x0003)
1030 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1033 write_unlock_bh(&l2cap_sk_list
.lock
);
1040 static int l2cap_do_connect(struct sock
*sk
)
1042 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1043 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1044 struct l2cap_conn
*conn
;
1045 struct hci_conn
*hcon
;
1046 struct hci_dev
*hdev
;
1050 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1053 hdev
= hci_get_route(dst
, src
);
1055 return -EHOSTUNREACH
;
1057 hci_dev_lock_bh(hdev
);
1061 if (sk
->sk_type
== SOCK_RAW
) {
1062 switch (l2cap_pi(sk
)->sec_level
) {
1063 case BT_SECURITY_HIGH
:
1064 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
1066 case BT_SECURITY_MEDIUM
:
1067 auth_type
= HCI_AT_DEDICATED_BONDING
;
1070 auth_type
= HCI_AT_NO_BONDING
;
1073 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
1074 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
1075 auth_type
= HCI_AT_NO_BONDING_MITM
;
1077 auth_type
= HCI_AT_NO_BONDING
;
1079 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
1080 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1082 switch (l2cap_pi(sk
)->sec_level
) {
1083 case BT_SECURITY_HIGH
:
1084 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
1086 case BT_SECURITY_MEDIUM
:
1087 auth_type
= HCI_AT_GENERAL_BONDING
;
1090 auth_type
= HCI_AT_NO_BONDING
;
1095 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1096 l2cap_pi(sk
)->sec_level
, auth_type
);
1100 conn
= l2cap_conn_add(hcon
, 0);
1108 /* Update source addr of the socket */
1109 bacpy(src
, conn
->src
);
1111 l2cap_chan_add(conn
, sk
, NULL
);
1113 sk
->sk_state
= BT_CONNECT
;
1114 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1116 if (hcon
->state
== BT_CONNECTED
) {
1117 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1118 sk
->sk_type
!= SOCK_STREAM
) {
1119 l2cap_sock_clear_timer(sk
);
1120 sk
->sk_state
= BT_CONNECTED
;
1126 hci_dev_unlock_bh(hdev
);
1131 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1133 struct sock
*sk
= sock
->sk
;
1134 struct sockaddr_l2 la
;
1137 BT_DBG("sk %p", sk
);
1139 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1140 addr
->sa_family
!= AF_BLUETOOTH
)
1143 memset(&la
, 0, sizeof(la
));
1144 len
= min_t(unsigned int, sizeof(la
), alen
);
1145 memcpy(&la
, addr
, len
);
1152 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1158 switch (l2cap_pi(sk
)->mode
) {
1159 case L2CAP_MODE_BASIC
:
1161 case L2CAP_MODE_ERTM
:
1162 case L2CAP_MODE_STREAMING
:
1171 switch (sk
->sk_state
) {
1175 /* Already connecting */
1179 /* Already connected */
1193 /* Set destination address and psm */
1194 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1195 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1197 err
= l2cap_do_connect(sk
);
1202 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1203 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1209 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1211 struct sock
*sk
= sock
->sk
;
1214 BT_DBG("sk %p backlog %d", sk
, backlog
);
1218 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1219 || sk
->sk_state
!= BT_BOUND
) {
1224 switch (l2cap_pi(sk
)->mode
) {
1225 case L2CAP_MODE_BASIC
:
1227 case L2CAP_MODE_ERTM
:
1228 case L2CAP_MODE_STREAMING
:
1237 if (!l2cap_pi(sk
)->psm
) {
1238 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1243 write_lock_bh(&l2cap_sk_list
.lock
);
1245 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1246 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1247 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1248 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1253 write_unlock_bh(&l2cap_sk_list
.lock
);
1259 sk
->sk_max_ack_backlog
= backlog
;
1260 sk
->sk_ack_backlog
= 0;
1261 sk
->sk_state
= BT_LISTEN
;
1268 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1270 DECLARE_WAITQUEUE(wait
, current
);
1271 struct sock
*sk
= sock
->sk
, *nsk
;
1275 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1277 if (sk
->sk_state
!= BT_LISTEN
) {
1282 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1284 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1286 /* Wait for an incoming connection. (wake-one). */
1287 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1288 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1289 set_current_state(TASK_INTERRUPTIBLE
);
1296 timeo
= schedule_timeout(timeo
);
1297 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1299 if (sk
->sk_state
!= BT_LISTEN
) {
1304 if (signal_pending(current
)) {
1305 err
= sock_intr_errno(timeo
);
1309 set_current_state(TASK_RUNNING
);
1310 remove_wait_queue(sk_sleep(sk
), &wait
);
1315 newsock
->state
= SS_CONNECTED
;
1317 BT_DBG("new socket %p", nsk
);
1324 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1326 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1327 struct sock
*sk
= sock
->sk
;
1329 BT_DBG("sock %p, sk %p", sock
, sk
);
1331 addr
->sa_family
= AF_BLUETOOTH
;
1332 *len
= sizeof(struct sockaddr_l2
);
1335 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1336 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1337 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1339 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1340 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1341 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1347 static int __l2cap_wait_ack(struct sock
*sk
)
1349 DECLARE_WAITQUEUE(wait
, current
);
1353 add_wait_queue(sk_sleep(sk
), &wait
);
1354 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1355 set_current_state(TASK_INTERRUPTIBLE
);
1360 if (signal_pending(current
)) {
1361 err
= sock_intr_errno(timeo
);
1366 timeo
= schedule_timeout(timeo
);
1369 err
= sock_error(sk
);
1373 set_current_state(TASK_RUNNING
);
1374 remove_wait_queue(sk_sleep(sk
), &wait
);
1378 static void l2cap_monitor_timeout(unsigned long arg
)
1380 struct sock
*sk
= (void *) arg
;
1382 BT_DBG("sk %p", sk
);
1385 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1386 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
1391 l2cap_pi(sk
)->retry_count
++;
1392 __mod_monitor_timer();
1394 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1398 static void l2cap_retrans_timeout(unsigned long arg
)
1400 struct sock
*sk
= (void *) arg
;
1402 BT_DBG("sk %p", sk
);
1405 l2cap_pi(sk
)->retry_count
= 1;
1406 __mod_monitor_timer();
1408 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1410 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1414 static void l2cap_drop_acked_frames(struct sock
*sk
)
1416 struct sk_buff
*skb
;
1418 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1419 l2cap_pi(sk
)->unacked_frames
) {
1420 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1423 skb
= skb_dequeue(TX_QUEUE(sk
));
1426 l2cap_pi(sk
)->unacked_frames
--;
1429 if (!l2cap_pi(sk
)->unacked_frames
)
1430 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1433 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1435 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1437 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1439 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1442 static void l2cap_streaming_send(struct sock
*sk
)
1444 struct sk_buff
*skb
;
1445 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1448 while ((skb
= skb_dequeue(TX_QUEUE(sk
)))) {
1449 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1450 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1451 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1453 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1454 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1455 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1458 l2cap_do_send(sk
, skb
);
1460 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1464 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1466 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1467 struct sk_buff
*skb
, *tx_skb
;
1470 skb
= skb_peek(TX_QUEUE(sk
));
1475 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1478 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1481 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1483 if (pi
->remote_max_tx
&&
1484 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1485 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1489 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1490 bt_cb(skb
)->retries
++;
1491 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1493 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1494 control
|= L2CAP_CTRL_FINAL
;
1495 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1498 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1499 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1501 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1503 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1504 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1505 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1508 l2cap_do_send(sk
, tx_skb
);
1511 static int l2cap_ertm_send(struct sock
*sk
)
1513 struct sk_buff
*skb
, *tx_skb
;
1514 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1518 if (sk
->sk_state
!= BT_CONNECTED
)
1521 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1523 if (pi
->remote_max_tx
&&
1524 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1525 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1529 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1531 bt_cb(skb
)->retries
++;
1533 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1534 control
&= L2CAP_CTRL_SAR
;
1536 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1537 control
|= L2CAP_CTRL_FINAL
;
1538 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1540 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1541 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1542 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1545 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1546 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1547 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1550 l2cap_do_send(sk
, tx_skb
);
1552 __mod_retrans_timer();
1554 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1555 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1557 pi
->unacked_frames
++;
1560 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1561 sk
->sk_send_head
= NULL
;
1563 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1571 static int l2cap_retransmit_frames(struct sock
*sk
)
1573 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1576 if (!skb_queue_empty(TX_QUEUE(sk
)))
1577 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1579 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1580 ret
= l2cap_ertm_send(sk
);
1584 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1586 struct sock
*sk
= (struct sock
*)pi
;
1589 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1591 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1592 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1593 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1594 l2cap_send_sframe(pi
, control
);
1598 if (l2cap_ertm_send(sk
) > 0)
1601 control
|= L2CAP_SUPER_RCV_READY
;
1602 l2cap_send_sframe(pi
, control
);
1605 static void l2cap_send_srejtail(struct sock
*sk
)
1607 struct srej_list
*tail
;
1610 control
= L2CAP_SUPER_SELECT_REJECT
;
1611 control
|= L2CAP_CTRL_FINAL
;
1613 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1614 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1616 l2cap_send_sframe(l2cap_pi(sk
), control
);
1619 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1621 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1622 struct sk_buff
**frag
;
1625 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1631 /* Continuation fragments (no L2CAP header) */
1632 frag
= &skb_shinfo(skb
)->frag_list
;
1634 count
= min_t(unsigned int, conn
->mtu
, len
);
1636 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1639 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1645 frag
= &(*frag
)->next
;
1651 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1653 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1654 struct sk_buff
*skb
;
1655 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1656 struct l2cap_hdr
*lh
;
1658 BT_DBG("sk %p len %d", sk
, (int)len
);
1660 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1661 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1662 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1664 return ERR_PTR(-ENOMEM
);
1666 /* Create L2CAP header */
1667 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1668 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1669 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1670 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1672 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1673 if (unlikely(err
< 0)) {
1675 return ERR_PTR(err
);
1680 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1682 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1683 struct sk_buff
*skb
;
1684 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1685 struct l2cap_hdr
*lh
;
1687 BT_DBG("sk %p len %d", sk
, (int)len
);
1689 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1690 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1691 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1693 return ERR_PTR(-ENOMEM
);
1695 /* Create L2CAP header */
1696 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1697 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1698 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1700 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1701 if (unlikely(err
< 0)) {
1703 return ERR_PTR(err
);
1708 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1710 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1711 struct sk_buff
*skb
;
1712 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1713 struct l2cap_hdr
*lh
;
1715 BT_DBG("sk %p len %d", sk
, (int)len
);
1718 return ERR_PTR(-ENOTCONN
);
1723 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1726 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1727 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1728 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1730 return ERR_PTR(-ENOMEM
);
1732 /* Create L2CAP header */
1733 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1734 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1735 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1736 put_unaligned_le16(control
, skb_put(skb
, 2));
1738 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1740 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1741 if (unlikely(err
< 0)) {
1743 return ERR_PTR(err
);
1746 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1747 put_unaligned_le16(0, skb_put(skb
, 2));
1749 bt_cb(skb
)->retries
= 0;
1753 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1755 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1756 struct sk_buff
*skb
;
1757 struct sk_buff_head sar_queue
;
1761 skb_queue_head_init(&sar_queue
);
1762 control
= L2CAP_SDU_START
;
1763 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1765 return PTR_ERR(skb
);
1767 __skb_queue_tail(&sar_queue
, skb
);
1768 len
-= pi
->remote_mps
;
1769 size
+= pi
->remote_mps
;
1774 if (len
> pi
->remote_mps
) {
1775 control
= L2CAP_SDU_CONTINUE
;
1776 buflen
= pi
->remote_mps
;
1778 control
= L2CAP_SDU_END
;
1782 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1784 skb_queue_purge(&sar_queue
);
1785 return PTR_ERR(skb
);
1788 __skb_queue_tail(&sar_queue
, skb
);
1792 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1793 if (sk
->sk_send_head
== NULL
)
1794 sk
->sk_send_head
= sar_queue
.next
;
1799 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1801 struct sock
*sk
= sock
->sk
;
1802 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1803 struct sk_buff
*skb
;
1807 BT_DBG("sock %p, sk %p", sock
, sk
);
1809 err
= sock_error(sk
);
1813 if (msg
->msg_flags
& MSG_OOB
)
1818 if (sk
->sk_state
!= BT_CONNECTED
) {
1823 /* Connectionless channel */
1824 if (sk
->sk_type
== SOCK_DGRAM
) {
1825 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1829 l2cap_do_send(sk
, skb
);
1836 case L2CAP_MODE_BASIC
:
1837 /* Check outgoing MTU */
1838 if (len
> pi
->omtu
) {
1843 /* Create a basic PDU */
1844 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1850 l2cap_do_send(sk
, skb
);
1854 case L2CAP_MODE_ERTM
:
1855 case L2CAP_MODE_STREAMING
:
1856 /* Entire SDU fits into one PDU */
1857 if (len
<= pi
->remote_mps
) {
1858 control
= L2CAP_SDU_UNSEGMENTED
;
1859 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1864 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1866 if (sk
->sk_send_head
== NULL
)
1867 sk
->sk_send_head
= skb
;
1870 /* Segment SDU into multiples PDUs */
1871 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1876 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1877 l2cap_streaming_send(sk
);
1879 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&&
1880 pi
->conn_state
&& L2CAP_CONN_WAIT_F
) {
1884 err
= l2cap_ertm_send(sk
);
1892 BT_DBG("bad state %1.1x", pi
->mode
);
1901 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1903 struct sock
*sk
= sock
->sk
;
1907 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1908 struct l2cap_conn_rsp rsp
;
1909 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1912 sk
->sk_state
= BT_CONFIG
;
1914 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1915 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1916 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1917 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1918 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1919 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1921 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) {
1926 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
1927 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1928 l2cap_build_conf_req(sk
, buf
), buf
);
1929 l2cap_pi(sk
)->num_conf_req
++;
1937 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1940 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1942 struct sock
*sk
= sock
->sk
;
1943 struct l2cap_options opts
;
1947 BT_DBG("sk %p", sk
);
1953 if (sk
->sk_state
== BT_CONNECTED
) {
1958 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1959 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1960 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1961 opts
.mode
= l2cap_pi(sk
)->mode
;
1962 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1963 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1964 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1966 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1967 if (copy_from_user((char *) &opts
, optval
, len
)) {
1972 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1977 l2cap_pi(sk
)->mode
= opts
.mode
;
1978 switch (l2cap_pi(sk
)->mode
) {
1979 case L2CAP_MODE_BASIC
:
1980 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_STATE2_DEVICE
;
1982 case L2CAP_MODE_ERTM
:
1983 case L2CAP_MODE_STREAMING
:
1992 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1993 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1994 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1995 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1996 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
2000 if (get_user(opt
, (u32 __user
*) optval
)) {
2005 if (opt
& L2CAP_LM_AUTH
)
2006 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
2007 if (opt
& L2CAP_LM_ENCRYPT
)
2008 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
2009 if (opt
& L2CAP_LM_SECURE
)
2010 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
2012 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
2013 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
2025 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
2027 struct sock
*sk
= sock
->sk
;
2028 struct bt_security sec
;
2032 BT_DBG("sk %p", sk
);
2034 if (level
== SOL_L2CAP
)
2035 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
2037 if (level
!= SOL_BLUETOOTH
)
2038 return -ENOPROTOOPT
;
2044 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2045 && sk
->sk_type
!= SOCK_RAW
) {
2050 sec
.level
= BT_SECURITY_LOW
;
2052 len
= min_t(unsigned int, sizeof(sec
), optlen
);
2053 if (copy_from_user((char *) &sec
, optval
, len
)) {
2058 if (sec
.level
< BT_SECURITY_LOW
||
2059 sec
.level
> BT_SECURITY_HIGH
) {
2064 l2cap_pi(sk
)->sec_level
= sec
.level
;
2067 case BT_DEFER_SETUP
:
2068 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2073 if (get_user(opt
, (u32 __user
*) optval
)) {
2078 bt_sk(sk
)->defer_setup
= opt
;
2090 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
2092 struct sock
*sk
= sock
->sk
;
2093 struct l2cap_options opts
;
2094 struct l2cap_conninfo cinfo
;
2098 BT_DBG("sk %p", sk
);
2100 if (get_user(len
, optlen
))
2107 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2108 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2109 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2110 opts
.mode
= l2cap_pi(sk
)->mode
;
2111 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2112 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2113 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2115 len
= min_t(unsigned int, len
, sizeof(opts
));
2116 if (copy_to_user(optval
, (char *) &opts
, len
))
2122 switch (l2cap_pi(sk
)->sec_level
) {
2123 case BT_SECURITY_LOW
:
2124 opt
= L2CAP_LM_AUTH
;
2126 case BT_SECURITY_MEDIUM
:
2127 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2129 case BT_SECURITY_HIGH
:
2130 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2138 if (l2cap_pi(sk
)->role_switch
)
2139 opt
|= L2CAP_LM_MASTER
;
2141 if (l2cap_pi(sk
)->force_reliable
)
2142 opt
|= L2CAP_LM_RELIABLE
;
2144 if (put_user(opt
, (u32 __user
*) optval
))
2148 case L2CAP_CONNINFO
:
2149 if (sk
->sk_state
!= BT_CONNECTED
&&
2150 !(sk
->sk_state
== BT_CONNECT2
&&
2151 bt_sk(sk
)->defer_setup
)) {
2156 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2157 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2159 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2160 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2174 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2176 struct sock
*sk
= sock
->sk
;
2177 struct bt_security sec
;
2180 BT_DBG("sk %p", sk
);
2182 if (level
== SOL_L2CAP
)
2183 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2185 if (level
!= SOL_BLUETOOTH
)
2186 return -ENOPROTOOPT
;
2188 if (get_user(len
, optlen
))
2195 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2196 && sk
->sk_type
!= SOCK_RAW
) {
2201 sec
.level
= l2cap_pi(sk
)->sec_level
;
2203 len
= min_t(unsigned int, len
, sizeof(sec
));
2204 if (copy_to_user(optval
, (char *) &sec
, len
))
2209 case BT_DEFER_SETUP
:
2210 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2215 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2229 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2231 struct sock
*sk
= sock
->sk
;
2234 BT_DBG("sock %p, sk %p", sock
, sk
);
2240 if (!sk
->sk_shutdown
) {
2241 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2242 err
= __l2cap_wait_ack(sk
);
2244 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2245 l2cap_sock_clear_timer(sk
);
2246 __l2cap_sock_close(sk
, 0);
2248 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2249 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2253 if (!err
&& sk
->sk_err
)
2260 static int l2cap_sock_release(struct socket
*sock
)
2262 struct sock
*sk
= sock
->sk
;
2265 BT_DBG("sock %p, sk %p", sock
, sk
);
2270 err
= l2cap_sock_shutdown(sock
, 2);
2273 l2cap_sock_kill(sk
);
2277 static void l2cap_chan_ready(struct sock
*sk
)
2279 struct sock
*parent
= bt_sk(sk
)->parent
;
2281 BT_DBG("sk %p, parent %p", sk
, parent
);
2283 l2cap_pi(sk
)->conf_state
= 0;
2284 l2cap_sock_clear_timer(sk
);
2287 /* Outgoing channel.
2288 * Wake up socket sleeping on connect.
2290 sk
->sk_state
= BT_CONNECTED
;
2291 sk
->sk_state_change(sk
);
2293 /* Incoming channel.
2294 * Wake up socket sleeping on accept.
2296 parent
->sk_data_ready(parent
, 0);
2300 /* Copy frame to all raw sockets on that connection */
2301 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2303 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2304 struct sk_buff
*nskb
;
2307 BT_DBG("conn %p", conn
);
2309 read_lock(&l
->lock
);
2310 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2311 if (sk
->sk_type
!= SOCK_RAW
)
2314 /* Don't send frame to the socket it came from */
2317 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2321 if (sock_queue_rcv_skb(sk
, nskb
))
2324 read_unlock(&l
->lock
);
2327 /* ---- L2CAP signalling commands ---- */
2328 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2329 u8 code
, u8 ident
, u16 dlen
, void *data
)
2331 struct sk_buff
*skb
, **frag
;
2332 struct l2cap_cmd_hdr
*cmd
;
2333 struct l2cap_hdr
*lh
;
2336 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2337 conn
, code
, ident
, dlen
);
2339 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2340 count
= min_t(unsigned int, conn
->mtu
, len
);
2342 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2346 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2347 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2348 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2350 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2353 cmd
->len
= cpu_to_le16(dlen
);
2356 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2357 memcpy(skb_put(skb
, count
), data
, count
);
2363 /* Continuation fragments (no L2CAP header) */
2364 frag
= &skb_shinfo(skb
)->frag_list
;
2366 count
= min_t(unsigned int, conn
->mtu
, len
);
2368 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2372 memcpy(skb_put(*frag
, count
), data
, count
);
2377 frag
= &(*frag
)->next
;
2387 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2389 struct l2cap_conf_opt
*opt
= *ptr
;
2392 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2400 *val
= *((u8
*) opt
->val
);
2404 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2408 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2412 *val
= (unsigned long) opt
->val
;
2416 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2420 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2422 struct l2cap_conf_opt
*opt
= *ptr
;
2424 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2431 *((u8
*) opt
->val
) = val
;
2435 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2439 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2443 memcpy(opt
->val
, (void *) val
, len
);
2447 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
/* Ack-timer callback: flush a pending acknowledgement for the channel.
 * NOTE(review): the bh_lock_sock()/bh_unlock_sock() pair was restored
 * from lines lost in extraction — confirm against upstream.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2459 static inline void l2cap_ertm_init(struct sock
*sk
)
2461 l2cap_pi(sk
)->expected_ack_seq
= 0;
2462 l2cap_pi(sk
)->unacked_frames
= 0;
2463 l2cap_pi(sk
)->buffer_seq
= 0;
2464 l2cap_pi(sk
)->num_acked
= 0;
2465 l2cap_pi(sk
)->frames_sent
= 0;
2467 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2468 l2cap_retrans_timeout
, (unsigned long) sk
);
2469 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2470 l2cap_monitor_timeout
, (unsigned long) sk
);
2471 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2472 l2cap_ack_timeout
, (unsigned long) sk
);
2474 __skb_queue_head_init(SREJ_QUEUE(sk
));
2475 __skb_queue_head_init(BUSY_QUEUE(sk
));
2477 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2479 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
2482 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2485 case L2CAP_MODE_STREAMING
:
2486 case L2CAP_MODE_ERTM
:
2487 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2491 return L2CAP_MODE_BASIC
;
2495 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2497 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2498 struct l2cap_conf_req
*req
= data
;
2499 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2500 void *ptr
= req
->data
;
2502 BT_DBG("sk %p", sk
);
2504 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2508 case L2CAP_MODE_STREAMING
:
2509 case L2CAP_MODE_ERTM
:
2510 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
2515 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2521 case L2CAP_MODE_BASIC
:
2522 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2523 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2525 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2526 !(pi
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2529 rfc
.mode
= L2CAP_MODE_BASIC
;
2531 rfc
.max_transmit
= 0;
2532 rfc
.retrans_timeout
= 0;
2533 rfc
.monitor_timeout
= 0;
2534 rfc
.max_pdu_size
= 0;
2536 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2537 (unsigned long) &rfc
);
2540 case L2CAP_MODE_ERTM
:
2541 rfc
.mode
= L2CAP_MODE_ERTM
;
2542 rfc
.txwin_size
= pi
->tx_win
;
2543 rfc
.max_transmit
= pi
->max_tx
;
2544 rfc
.retrans_timeout
= 0;
2545 rfc
.monitor_timeout
= 0;
2546 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2547 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2548 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2550 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2551 (unsigned long) &rfc
);
2553 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2556 if (pi
->fcs
== L2CAP_FCS_NONE
||
2557 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2558 pi
->fcs
= L2CAP_FCS_NONE
;
2559 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2563 case L2CAP_MODE_STREAMING
:
2564 rfc
.mode
= L2CAP_MODE_STREAMING
;
2566 rfc
.max_transmit
= 0;
2567 rfc
.retrans_timeout
= 0;
2568 rfc
.monitor_timeout
= 0;
2569 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2570 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2571 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2573 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2574 (unsigned long) &rfc
);
2576 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2579 if (pi
->fcs
== L2CAP_FCS_NONE
||
2580 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2581 pi
->fcs
= L2CAP_FCS_NONE
;
2582 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2587 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2588 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2590 req
->dcid
= cpu_to_le16(pi
->dcid
);
2591 req
->flags
= cpu_to_le16(0);
2596 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2598 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2599 struct l2cap_conf_rsp
*rsp
= data
;
2600 void *ptr
= rsp
->data
;
2601 void *req
= pi
->conf_req
;
2602 int len
= pi
->conf_len
;
2603 int type
, hint
, olen
;
2605 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2606 u16 mtu
= L2CAP_DEFAULT_MTU
;
2607 u16 result
= L2CAP_CONF_SUCCESS
;
2609 BT_DBG("sk %p", sk
);
2611 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2612 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2614 hint
= type
& L2CAP_CONF_HINT
;
2615 type
&= L2CAP_CONF_MASK
;
2618 case L2CAP_CONF_MTU
:
2622 case L2CAP_CONF_FLUSH_TO
:
2626 case L2CAP_CONF_QOS
:
2629 case L2CAP_CONF_RFC
:
2630 if (olen
== sizeof(rfc
))
2631 memcpy(&rfc
, (void *) val
, olen
);
2634 case L2CAP_CONF_FCS
:
2635 if (val
== L2CAP_FCS_NONE
)
2636 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2644 result
= L2CAP_CONF_UNKNOWN
;
2645 *((u8
*) ptr
++) = type
;
2650 if (pi
->num_conf_rsp
|| pi
->num_conf_req
> 1)
2654 case L2CAP_MODE_STREAMING
:
2655 case L2CAP_MODE_ERTM
:
2656 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2657 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2658 pi
->conn
->feat_mask
);
2662 if (pi
->mode
!= rfc
.mode
)
2663 return -ECONNREFUSED
;
2669 if (pi
->mode
!= rfc
.mode
) {
2670 result
= L2CAP_CONF_UNACCEPT
;
2671 rfc
.mode
= pi
->mode
;
2673 if (pi
->num_conf_rsp
== 1)
2674 return -ECONNREFUSED
;
2676 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2677 sizeof(rfc
), (unsigned long) &rfc
);
2681 if (result
== L2CAP_CONF_SUCCESS
) {
2682 /* Configure output options and let the other side know
2683 * which ones we don't like. */
2685 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2686 result
= L2CAP_CONF_UNACCEPT
;
2689 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2691 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2694 case L2CAP_MODE_BASIC
:
2695 pi
->fcs
= L2CAP_FCS_NONE
;
2696 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2699 case L2CAP_MODE_ERTM
:
2700 pi
->remote_tx_win
= rfc
.txwin_size
;
2701 pi
->remote_max_tx
= rfc
.max_transmit
;
2703 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
2704 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2706 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2708 rfc
.retrans_timeout
=
2709 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2710 rfc
.monitor_timeout
=
2711 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2713 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2715 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2716 sizeof(rfc
), (unsigned long) &rfc
);
2720 case L2CAP_MODE_STREAMING
:
2721 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
2722 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2724 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2726 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2728 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2729 sizeof(rfc
), (unsigned long) &rfc
);
2734 result
= L2CAP_CONF_UNACCEPT
;
2736 memset(&rfc
, 0, sizeof(rfc
));
2737 rfc
.mode
= pi
->mode
;
2740 if (result
== L2CAP_CONF_SUCCESS
)
2741 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2743 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2744 rsp
->result
= cpu_to_le16(result
);
2745 rsp
->flags
= cpu_to_le16(0x0000);
2750 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2752 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2753 struct l2cap_conf_req
*req
= data
;
2754 void *ptr
= req
->data
;
2757 struct l2cap_conf_rfc rfc
;
2759 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2761 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2762 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2765 case L2CAP_CONF_MTU
:
2766 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2767 *result
= L2CAP_CONF_UNACCEPT
;
2768 pi
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2771 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2774 case L2CAP_CONF_FLUSH_TO
:
2776 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2780 case L2CAP_CONF_RFC
:
2781 if (olen
== sizeof(rfc
))
2782 memcpy(&rfc
, (void *)val
, olen
);
2784 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2785 rfc
.mode
!= pi
->mode
)
2786 return -ECONNREFUSED
;
2790 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2791 sizeof(rfc
), (unsigned long) &rfc
);
2796 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
2797 return -ECONNREFUSED
;
2799 pi
->mode
= rfc
.mode
;
2801 if (*result
== L2CAP_CONF_SUCCESS
) {
2803 case L2CAP_MODE_ERTM
:
2804 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2805 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2806 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2808 case L2CAP_MODE_STREAMING
:
2809 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2813 req
->dcid
= cpu_to_le16(pi
->dcid
);
2814 req
->flags
= cpu_to_le16(0x0000);
2819 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2821 struct l2cap_conf_rsp
*rsp
= data
;
2822 void *ptr
= rsp
->data
;
2824 BT_DBG("sk %p", sk
);
2826 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2827 rsp
->result
= cpu_to_le16(result
);
2828 rsp
->flags
= cpu_to_le16(flags
);
2833 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2835 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2838 struct l2cap_conf_rfc rfc
;
2840 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2842 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2845 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2846 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2849 case L2CAP_CONF_RFC
:
2850 if (olen
== sizeof(rfc
))
2851 memcpy(&rfc
, (void *)val
, olen
);
2858 case L2CAP_MODE_ERTM
:
2859 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2860 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2861 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2863 case L2CAP_MODE_STREAMING
:
2864 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2868 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2870 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2872 if (rej
->reason
!= 0x0000)
2875 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2876 cmd
->ident
== conn
->info_ident
) {
2877 del_timer(&conn
->info_timer
);
2879 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2880 conn
->info_ident
= 0;
2882 l2cap_conn_start(conn
);
2888 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2890 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2891 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2892 struct l2cap_conn_rsp rsp
;
2893 struct sock
*parent
, *sk
= NULL
;
2894 int result
, status
= L2CAP_CS_NO_INFO
;
2896 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2897 __le16 psm
= req
->psm
;
2899 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2901 /* Check if we have socket listening on psm */
2902 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2904 result
= L2CAP_CR_BAD_PSM
;
2908 /* Check if the ACL is secure enough (if not SDP) */
2909 if (psm
!= cpu_to_le16(0x0001) &&
2910 !hci_conn_check_link_mode(conn
->hcon
)) {
2911 conn
->disc_reason
= 0x05;
2912 result
= L2CAP_CR_SEC_BLOCK
;
2916 result
= L2CAP_CR_NO_MEM
;
2918 /* Check for backlog size */
2919 if (sk_acceptq_is_full(parent
)) {
2920 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2924 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2928 write_lock_bh(&list
->lock
);
2930 /* Check if we already have channel with that dcid */
2931 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2932 write_unlock_bh(&list
->lock
);
2933 sock_set_flag(sk
, SOCK_ZAPPED
);
2934 l2cap_sock_kill(sk
);
2938 hci_conn_hold(conn
->hcon
);
2940 l2cap_sock_init(sk
, parent
);
2941 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2942 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2943 l2cap_pi(sk
)->psm
= psm
;
2944 l2cap_pi(sk
)->dcid
= scid
;
2946 __l2cap_chan_add(conn
, sk
, parent
);
2947 dcid
= l2cap_pi(sk
)->scid
;
2949 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2951 l2cap_pi(sk
)->ident
= cmd
->ident
;
2953 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2954 if (l2cap_check_security(sk
)) {
2955 if (bt_sk(sk
)->defer_setup
) {
2956 sk
->sk_state
= BT_CONNECT2
;
2957 result
= L2CAP_CR_PEND
;
2958 status
= L2CAP_CS_AUTHOR_PEND
;
2959 parent
->sk_data_ready(parent
, 0);
2961 sk
->sk_state
= BT_CONFIG
;
2962 result
= L2CAP_CR_SUCCESS
;
2963 status
= L2CAP_CS_NO_INFO
;
2966 sk
->sk_state
= BT_CONNECT2
;
2967 result
= L2CAP_CR_PEND
;
2968 status
= L2CAP_CS_AUTHEN_PEND
;
2971 sk
->sk_state
= BT_CONNECT2
;
2972 result
= L2CAP_CR_PEND
;
2973 status
= L2CAP_CS_NO_INFO
;
2976 write_unlock_bh(&list
->lock
);
2979 bh_unlock_sock(parent
);
2982 rsp
.scid
= cpu_to_le16(scid
);
2983 rsp
.dcid
= cpu_to_le16(dcid
);
2984 rsp
.result
= cpu_to_le16(result
);
2985 rsp
.status
= cpu_to_le16(status
);
2986 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2988 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2989 struct l2cap_info_req info
;
2990 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2992 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2993 conn
->info_ident
= l2cap_get_ident(conn
);
2995 mod_timer(&conn
->info_timer
, jiffies
+
2996 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2998 l2cap_send_cmd(conn
, conn
->info_ident
,
2999 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3002 if (sk
&& !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
3003 result
== L2CAP_CR_SUCCESS
) {
3005 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3006 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3007 l2cap_build_conf_req(sk
, buf
), buf
);
3008 l2cap_pi(sk
)->num_conf_req
++;
3014 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3016 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3017 u16 scid
, dcid
, result
, status
;
3021 scid
= __le16_to_cpu(rsp
->scid
);
3022 dcid
= __le16_to_cpu(rsp
->dcid
);
3023 result
= __le16_to_cpu(rsp
->result
);
3024 status
= __le16_to_cpu(rsp
->status
);
3026 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
3029 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3033 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
3039 case L2CAP_CR_SUCCESS
:
3040 sk
->sk_state
= BT_CONFIG
;
3041 l2cap_pi(sk
)->ident
= 0;
3042 l2cap_pi(sk
)->dcid
= dcid
;
3043 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
3045 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
3048 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3050 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3051 l2cap_build_conf_req(sk
, req
), req
);
3052 l2cap_pi(sk
)->num_conf_req
++;
3056 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3060 l2cap_chan_del(sk
, ECONNREFUSED
);
3068 static inline void set_default_fcs(struct l2cap_pinfo
*pi
)
3070 /* FCS is enabled only in ERTM or streaming mode, if one or both
3073 if (pi
->mode
!= L2CAP_MODE_ERTM
&& pi
->mode
!= L2CAP_MODE_STREAMING
)
3074 pi
->fcs
= L2CAP_FCS_NONE
;
3075 else if (!(pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
3076 pi
->fcs
= L2CAP_FCS_CRC16
;
3079 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3081 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3087 dcid
= __le16_to_cpu(req
->dcid
);
3088 flags
= __le16_to_cpu(req
->flags
);
3090 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3092 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3096 if (sk
->sk_state
== BT_DISCONN
)
3099 /* Reject if config buffer is too small. */
3100 len
= cmd_len
- sizeof(*req
);
3101 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
3102 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3103 l2cap_build_conf_rsp(sk
, rsp
,
3104 L2CAP_CONF_REJECT
, flags
), rsp
);
3109 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
3110 l2cap_pi(sk
)->conf_len
+= len
;
3112 if (flags
& 0x0001) {
3113 /* Incomplete config. Send empty response. */
3114 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3115 l2cap_build_conf_rsp(sk
, rsp
,
3116 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3120 /* Complete config. */
3121 len
= l2cap_parse_conf_req(sk
, rsp
);
3123 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3127 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3128 l2cap_pi(sk
)->num_conf_rsp
++;
3130 /* Reset config buffer. */
3131 l2cap_pi(sk
)->conf_len
= 0;
3133 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3136 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3137 set_default_fcs(l2cap_pi(sk
));
3139 sk
->sk_state
= BT_CONNECTED
;
3141 l2cap_pi(sk
)->next_tx_seq
= 0;
3142 l2cap_pi(sk
)->expected_tx_seq
= 0;
3143 __skb_queue_head_init(TX_QUEUE(sk
));
3144 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3145 l2cap_ertm_init(sk
);
3147 l2cap_chan_ready(sk
);
3151 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3153 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3154 l2cap_build_conf_req(sk
, buf
), buf
);
3155 l2cap_pi(sk
)->num_conf_req
++;
3163 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3165 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3166 u16 scid
, flags
, result
;
3168 int len
= cmd
->len
- sizeof(*rsp
);
3170 scid
= __le16_to_cpu(rsp
->scid
);
3171 flags
= __le16_to_cpu(rsp
->flags
);
3172 result
= __le16_to_cpu(rsp
->result
);
3174 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3175 scid
, flags
, result
);
3177 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3182 case L2CAP_CONF_SUCCESS
:
3183 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3186 case L2CAP_CONF_UNACCEPT
:
3187 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3190 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3191 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3195 /* throw out any old stored conf requests */
3196 result
= L2CAP_CONF_SUCCESS
;
3197 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3200 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3204 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3205 L2CAP_CONF_REQ
, len
, req
);
3206 l2cap_pi(sk
)->num_conf_req
++;
3207 if (result
!= L2CAP_CONF_SUCCESS
)
3213 sk
->sk_err
= ECONNRESET
;
3214 l2cap_sock_set_timer(sk
, HZ
* 5);
3215 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3222 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3224 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3225 set_default_fcs(l2cap_pi(sk
));
3227 sk
->sk_state
= BT_CONNECTED
;
3228 l2cap_pi(sk
)->next_tx_seq
= 0;
3229 l2cap_pi(sk
)->expected_tx_seq
= 0;
3230 __skb_queue_head_init(TX_QUEUE(sk
));
3231 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3232 l2cap_ertm_init(sk
);
3234 l2cap_chan_ready(sk
);
3242 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3244 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3245 struct l2cap_disconn_rsp rsp
;
3249 scid
= __le16_to_cpu(req
->scid
);
3250 dcid
= __le16_to_cpu(req
->dcid
);
3252 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3254 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3258 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3259 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3260 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3262 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3264 l2cap_chan_del(sk
, ECONNRESET
);
3267 l2cap_sock_kill(sk
);
3271 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3273 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3277 scid
= __le16_to_cpu(rsp
->scid
);
3278 dcid
= __le16_to_cpu(rsp
->dcid
);
3280 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3282 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3286 l2cap_chan_del(sk
, 0);
3289 l2cap_sock_kill(sk
);
3293 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3295 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3298 type
= __le16_to_cpu(req
->type
);
3300 BT_DBG("type 0x%4.4x", type
);
3302 if (type
== L2CAP_IT_FEAT_MASK
) {
3304 u32 feat_mask
= l2cap_feat_mask
;
3305 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3306 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3307 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3309 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3311 put_unaligned_le32(feat_mask
, rsp
->data
);
3312 l2cap_send_cmd(conn
, cmd
->ident
,
3313 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3314 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3316 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3317 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3318 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3319 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3320 l2cap_send_cmd(conn
, cmd
->ident
,
3321 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3323 struct l2cap_info_rsp rsp
;
3324 rsp
.type
= cpu_to_le16(type
);
3325 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3326 l2cap_send_cmd(conn
, cmd
->ident
,
3327 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3333 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3335 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3338 type
= __le16_to_cpu(rsp
->type
);
3339 result
= __le16_to_cpu(rsp
->result
);
3341 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3343 del_timer(&conn
->info_timer
);
3345 if (result
!= L2CAP_IR_SUCCESS
) {
3346 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3347 conn
->info_ident
= 0;
3349 l2cap_conn_start(conn
);
3354 if (type
== L2CAP_IT_FEAT_MASK
) {
3355 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3357 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3358 struct l2cap_info_req req
;
3359 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3361 conn
->info_ident
= l2cap_get_ident(conn
);
3363 l2cap_send_cmd(conn
, conn
->info_ident
,
3364 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3366 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3367 conn
->info_ident
= 0;
3369 l2cap_conn_start(conn
);
3371 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3372 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3373 conn
->info_ident
= 0;
3375 l2cap_conn_start(conn
);
3381 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3383 u8
*data
= skb
->data
;
3385 struct l2cap_cmd_hdr cmd
;
3388 l2cap_raw_recv(conn
, skb
);
3390 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3392 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3393 data
+= L2CAP_CMD_HDR_SIZE
;
3394 len
-= L2CAP_CMD_HDR_SIZE
;
3396 cmd_len
= le16_to_cpu(cmd
.len
);
3398 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3400 if (cmd_len
> len
|| !cmd
.ident
) {
3401 BT_DBG("corrupted command");
3406 case L2CAP_COMMAND_REJ
:
3407 l2cap_command_rej(conn
, &cmd
, data
);
3410 case L2CAP_CONN_REQ
:
3411 err
= l2cap_connect_req(conn
, &cmd
, data
);
3414 case L2CAP_CONN_RSP
:
3415 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3418 case L2CAP_CONF_REQ
:
3419 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3422 case L2CAP_CONF_RSP
:
3423 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3426 case L2CAP_DISCONN_REQ
:
3427 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3430 case L2CAP_DISCONN_RSP
:
3431 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3434 case L2CAP_ECHO_REQ
:
3435 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3438 case L2CAP_ECHO_RSP
:
3441 case L2CAP_INFO_REQ
:
3442 err
= l2cap_information_req(conn
, &cmd
, data
);
3445 case L2CAP_INFO_RSP
:
3446 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3450 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3456 struct l2cap_cmd_rej rej
;
3457 BT_DBG("error %d", err
);
3459 rej
.reason
= cpu_to_le16(0);
3460 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3470 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3472 u16 our_fcs
, rcv_fcs
;
3473 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3475 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3476 skb_trim(skb
, skb
->len
- 2);
3477 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3478 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3480 if (our_fcs
!= rcv_fcs
)
3486 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3488 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3491 pi
->frames_sent
= 0;
3493 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3495 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3496 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3497 l2cap_send_sframe(pi
, control
);
3498 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3501 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3502 l2cap_retransmit_frames(sk
);
3504 l2cap_ertm_send(sk
);
3506 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3507 pi
->frames_sent
== 0) {
3508 control
|= L2CAP_SUPER_RCV_READY
;
3509 l2cap_send_sframe(pi
, control
);
3513 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3515 struct sk_buff
*next_skb
;
3516 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3517 int tx_seq_offset
, next_tx_seq_offset
;
3519 bt_cb(skb
)->tx_seq
= tx_seq
;
3520 bt_cb(skb
)->sar
= sar
;
3522 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3524 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3528 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3529 if (tx_seq_offset
< 0)
3530 tx_seq_offset
+= 64;
3533 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3536 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3537 pi
->buffer_seq
) % 64;
3538 if (next_tx_seq_offset
< 0)
3539 next_tx_seq_offset
+= 64;
3541 if (next_tx_seq_offset
> tx_seq_offset
) {
3542 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3546 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3549 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3551 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3556 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3558 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3559 struct sk_buff
*_skb
;
3562 switch (control
& L2CAP_CTRL_SAR
) {
3563 case L2CAP_SDU_UNSEGMENTED
:
3564 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3567 err
= sock_queue_rcv_skb(sk
, skb
);
3573 case L2CAP_SDU_START
:
3574 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3577 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3579 if (pi
->sdu_len
> pi
->imtu
)
3582 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3586 /* pull sdu_len bytes only after alloc, because of Local Busy
3587 * condition we have to be sure that this will be executed
3588 * only once, i.e., when alloc does not fail */
3591 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3593 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3594 pi
->partial_sdu_len
= skb
->len
;
3597 case L2CAP_SDU_CONTINUE
:
3598 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3604 pi
->partial_sdu_len
+= skb
->len
;
3605 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3608 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3613 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3619 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3620 pi
->partial_sdu_len
+= skb
->len
;
3622 if (pi
->partial_sdu_len
> pi
->imtu
)
3625 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3628 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3631 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3633 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3637 err
= sock_queue_rcv_skb(sk
, _skb
);
3640 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3644 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3645 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3659 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3664 static int l2cap_try_push_rx_skb(struct sock
*sk
)
3666 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3667 struct sk_buff
*skb
;
3671 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3672 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3673 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3675 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3679 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3682 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3685 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3686 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3687 l2cap_send_sframe(pi
, control
);
3688 l2cap_pi(sk
)->retry_count
= 1;
3690 del_timer(&pi
->retrans_timer
);
3691 __mod_monitor_timer();
3693 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3696 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3697 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3699 BT_DBG("sk %p, Exit local busy", sk
);
3704 static void l2cap_busy_work(struct work_struct
*work
)
3706 DECLARE_WAITQUEUE(wait
, current
);
3707 struct l2cap_pinfo
*pi
=
3708 container_of(work
, struct l2cap_pinfo
, busy_work
);
3709 struct sock
*sk
= (struct sock
*)pi
;
3710 int n_tries
= 0, timeo
= HZ
/5, err
;
3711 struct sk_buff
*skb
;
3715 add_wait_queue(sk_sleep(sk
), &wait
);
3716 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3717 set_current_state(TASK_INTERRUPTIBLE
);
3719 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3721 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3728 if (signal_pending(current
)) {
3729 err
= sock_intr_errno(timeo
);
3734 timeo
= schedule_timeout(timeo
);
3737 err
= sock_error(sk
);
3741 if (l2cap_try_push_rx_skb(sk
) == 0)
3745 set_current_state(TASK_RUNNING
);
3746 remove_wait_queue(sk_sleep(sk
), &wait
);
3751 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3753 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3756 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3757 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3758 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3759 return l2cap_try_push_rx_skb(sk
);
3764 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3766 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3770 /* Busy Condition */
3771 BT_DBG("sk %p, Enter local busy", sk
);
3773 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3774 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3775 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3777 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3778 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3779 l2cap_send_sframe(pi
, sctrl
);
3781 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3783 del_timer(&pi
->ack_timer
);
3785 queue_work(_busy_wq
, &pi
->busy_work
);
3790 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3792 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3793 struct sk_buff
*_skb
;
3797 * TODO: We have to notify the userland if some data is lost with the
3801 switch (control
& L2CAP_CTRL_SAR
) {
3802 case L2CAP_SDU_UNSEGMENTED
:
3803 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3808 err
= sock_queue_rcv_skb(sk
, skb
);
3814 case L2CAP_SDU_START
:
3815 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3820 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3823 if (pi
->sdu_len
> pi
->imtu
) {
3828 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3834 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3836 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3837 pi
->partial_sdu_len
= skb
->len
;
3841 case L2CAP_SDU_CONTINUE
:
3842 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3845 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3847 pi
->partial_sdu_len
+= skb
->len
;
3848 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3856 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3859 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3861 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3862 pi
->partial_sdu_len
+= skb
->len
;
3864 if (pi
->partial_sdu_len
> pi
->imtu
)
3867 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3868 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3869 err
= sock_queue_rcv_skb(sk
, _skb
);
3884 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3886 struct sk_buff
*skb
;
3889 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3890 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3893 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3894 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3895 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3896 l2cap_pi(sk
)->buffer_seq_srej
=
3897 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3898 tx_seq
= (tx_seq
+ 1) % 64;
3902 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3904 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3905 struct srej_list
*l
, *tmp
;
3908 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3909 if (l
->tx_seq
== tx_seq
) {
3914 control
= L2CAP_SUPER_SELECT_REJECT
;
3915 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3916 l2cap_send_sframe(pi
, control
);
3918 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3922 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3924 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3925 struct srej_list
*new;
3928 while (tx_seq
!= pi
->expected_tx_seq
) {
3929 control
= L2CAP_SUPER_SELECT_REJECT
;
3930 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3931 l2cap_send_sframe(pi
, control
);
3933 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3934 new->tx_seq
= pi
->expected_tx_seq
;
3935 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3936 list_add_tail(&new->list
, SREJ_LIST(sk
));
3938 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3941 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3943 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3944 u8 tx_seq
= __get_txseq(rx_control
);
3945 u8 req_seq
= __get_reqseq(rx_control
);
3946 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3947 int tx_seq_offset
, expected_tx_seq_offset
;
3948 int num_to_ack
= (pi
->tx_win
/6) + 1;
3951 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3954 if (L2CAP_CTRL_FINAL
& rx_control
&&
3955 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3956 del_timer(&pi
->monitor_timer
);
3957 if (pi
->unacked_frames
> 0)
3958 __mod_retrans_timer();
3959 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3962 pi
->expected_ack_seq
= req_seq
;
3963 l2cap_drop_acked_frames(sk
);
3965 if (tx_seq
== pi
->expected_tx_seq
)
3968 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3969 if (tx_seq_offset
< 0)
3970 tx_seq_offset
+= 64;
3972 /* invalid tx_seq */
3973 if (tx_seq_offset
>= pi
->tx_win
) {
3974 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3978 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3981 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3982 struct srej_list
*first
;
3984 first
= list_first_entry(SREJ_LIST(sk
),
3985 struct srej_list
, list
);
3986 if (tx_seq
== first
->tx_seq
) {
3987 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3988 l2cap_check_srej_gap(sk
, tx_seq
);
3990 list_del(&first
->list
);
3993 if (list_empty(SREJ_LIST(sk
))) {
3994 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3995 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3997 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
4000 struct srej_list
*l
;
4002 /* duplicated tx_seq */
4003 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
4006 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
4007 if (l
->tx_seq
== tx_seq
) {
4008 l2cap_resend_srejframe(sk
, tx_seq
);
4012 l2cap_send_srejframe(sk
, tx_seq
);
4015 expected_tx_seq_offset
=
4016 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
4017 if (expected_tx_seq_offset
< 0)
4018 expected_tx_seq_offset
+= 64;
4020 /* duplicated tx_seq */
4021 if (tx_seq_offset
< expected_tx_seq_offset
)
4024 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
4026 BT_DBG("sk %p, Enter SREJ", sk
);
4028 INIT_LIST_HEAD(SREJ_LIST(sk
));
4029 pi
->buffer_seq_srej
= pi
->buffer_seq
;
4031 __skb_queue_head_init(SREJ_QUEUE(sk
));
4032 __skb_queue_head_init(BUSY_QUEUE(sk
));
4033 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
4035 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
4037 l2cap_send_srejframe(sk
, tx_seq
);
4039 del_timer(&pi
->ack_timer
);
4044 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4046 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4047 bt_cb(skb
)->tx_seq
= tx_seq
;
4048 bt_cb(skb
)->sar
= sar
;
4049 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
4053 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
4057 if (rx_control
& L2CAP_CTRL_FINAL
) {
4058 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4059 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4061 l2cap_retransmit_frames(sk
);
4066 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
4067 if (pi
->num_acked
== num_to_ack
- 1)
4077 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
4079 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4081 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
4084 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
4085 l2cap_drop_acked_frames(sk
);
4087 if (rx_control
& L2CAP_CTRL_POLL
) {
4088 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4089 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4090 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4091 (pi
->unacked_frames
> 0))
4092 __mod_retrans_timer();
4094 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4095 l2cap_send_srejtail(sk
);
4097 l2cap_send_i_or_rr_or_rnr(sk
);
4100 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4101 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4103 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4104 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4106 l2cap_retransmit_frames(sk
);
4109 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4110 (pi
->unacked_frames
> 0))
4111 __mod_retrans_timer();
4113 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4114 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4117 l2cap_ertm_send(sk
);
4122 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
4124 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4125 u8 tx_seq
= __get_reqseq(rx_control
);
4127 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4129 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4131 pi
->expected_ack_seq
= tx_seq
;
4132 l2cap_drop_acked_frames(sk
);
4134 if (rx_control
& L2CAP_CTRL_FINAL
) {
4135 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4136 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4138 l2cap_retransmit_frames(sk
);
4140 l2cap_retransmit_frames(sk
);
4142 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
4143 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
4146 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
4148 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4149 u8 tx_seq
= __get_reqseq(rx_control
);
4151 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4153 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4155 if (rx_control
& L2CAP_CTRL_POLL
) {
4156 pi
->expected_ack_seq
= tx_seq
;
4157 l2cap_drop_acked_frames(sk
);
4159 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4160 l2cap_retransmit_one_frame(sk
, tx_seq
);
4162 l2cap_ertm_send(sk
);
4164 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4165 pi
->srej_save_reqseq
= tx_seq
;
4166 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4168 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4169 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4170 pi
->srej_save_reqseq
== tx_seq
)
4171 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4173 l2cap_retransmit_one_frame(sk
, tx_seq
);
4175 l2cap_retransmit_one_frame(sk
, tx_seq
);
4176 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4177 pi
->srej_save_reqseq
= tx_seq
;
4178 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4183 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4185 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4186 u8 tx_seq
= __get_reqseq(rx_control
);
4188 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4190 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4191 pi
->expected_ack_seq
= tx_seq
;
4192 l2cap_drop_acked_frames(sk
);
4194 if (rx_control
& L2CAP_CTRL_POLL
)
4195 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4197 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4198 del_timer(&pi
->retrans_timer
);
4199 if (rx_control
& L2CAP_CTRL_POLL
)
4200 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4204 if (rx_control
& L2CAP_CTRL_POLL
)
4205 l2cap_send_srejtail(sk
);
4207 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4210 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4212 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4214 if (L2CAP_CTRL_FINAL
& rx_control
&&
4215 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4216 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4217 if (l2cap_pi(sk
)->unacked_frames
> 0)
4218 __mod_retrans_timer();
4219 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4222 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4223 case L2CAP_SUPER_RCV_READY
:
4224 l2cap_data_channel_rrframe(sk
, rx_control
);
4227 case L2CAP_SUPER_REJECT
:
4228 l2cap_data_channel_rejframe(sk
, rx_control
);
4231 case L2CAP_SUPER_SELECT_REJECT
:
4232 l2cap_data_channel_srejframe(sk
, rx_control
);
4235 case L2CAP_SUPER_RCV_NOT_READY
:
4236 l2cap_data_channel_rnrframe(sk
, rx_control
);
4244 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
4246 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4249 int len
, next_tx_seq_offset
, req_seq_offset
;
4251 control
= get_unaligned_le16(skb
->data
);
4256 * We can just drop the corrupted I-frame here.
4257 * Receiver will miss it and start proper recovery
4258 * procedures and ask retransmission.
4260 if (l2cap_check_fcs(pi
, skb
))
4263 if (__is_sar_start(control
) && __is_iframe(control
))
4266 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4269 if (len
> pi
->mps
) {
4270 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4274 req_seq
= __get_reqseq(control
);
4275 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4276 if (req_seq_offset
< 0)
4277 req_seq_offset
+= 64;
4279 next_tx_seq_offset
=
4280 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4281 if (next_tx_seq_offset
< 0)
4282 next_tx_seq_offset
+= 64;
4284 /* check for invalid req-seq */
4285 if (req_seq_offset
> next_tx_seq_offset
) {
4286 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4290 if (__is_iframe(control
)) {
4292 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4296 l2cap_data_channel_iframe(sk
, control
, skb
);
4300 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4304 l2cap_data_channel_sframe(sk
, control
, skb
);
4314 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4317 struct l2cap_pinfo
*pi
;
4322 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4324 BT_DBG("unknown cid 0x%4.4x", cid
);
4330 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4332 if (sk
->sk_state
!= BT_CONNECTED
)
4336 case L2CAP_MODE_BASIC
:
4337 /* If socket recv buffers overflows we drop data here
4338 * which is *bad* because L2CAP has to be reliable.
4339 * But we don't have any other choice. L2CAP doesn't
4340 * provide flow control mechanism. */
4342 if (pi
->imtu
< skb
->len
)
4345 if (!sock_queue_rcv_skb(sk
, skb
))
4349 case L2CAP_MODE_ERTM
:
4350 if (!sock_owned_by_user(sk
)) {
4351 l2cap_ertm_data_rcv(sk
, skb
);
4353 if (sk_add_backlog(sk
, skb
))
4359 case L2CAP_MODE_STREAMING
:
4360 control
= get_unaligned_le16(skb
->data
);
4364 if (l2cap_check_fcs(pi
, skb
))
4367 if (__is_sar_start(control
))
4370 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4373 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4376 tx_seq
= __get_txseq(control
);
4378 if (pi
->expected_tx_seq
== tx_seq
)
4379 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4381 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4383 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4388 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4402 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4406 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4410 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4412 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4415 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4418 if (!sock_queue_rcv_skb(sk
, skb
))
4430 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4432 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4436 skb_pull(skb
, L2CAP_HDR_SIZE
);
4437 cid
= __le16_to_cpu(lh
->cid
);
4438 len
= __le16_to_cpu(lh
->len
);
4440 if (len
!= skb
->len
) {
4445 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4448 case L2CAP_CID_SIGNALING
:
4449 l2cap_sig_channel(conn
, skb
);
4452 case L2CAP_CID_CONN_LESS
:
4453 psm
= get_unaligned_le16(skb
->data
);
4455 l2cap_conless_channel(conn
, psm
, skb
);
4459 l2cap_data_channel(conn
, cid
, skb
);
4464 /* ---- L2CAP interface with lower layer (HCI) ---- */
4466 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4468 int exact
= 0, lm1
= 0, lm2
= 0;
4469 register struct sock
*sk
;
4470 struct hlist_node
*node
;
4472 if (type
!= ACL_LINK
)
4475 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4477 /* Find listening sockets and check their link_mode */
4478 read_lock(&l2cap_sk_list
.lock
);
4479 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4480 if (sk
->sk_state
!= BT_LISTEN
)
4483 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4484 lm1
|= HCI_LM_ACCEPT
;
4485 if (l2cap_pi(sk
)->role_switch
)
4486 lm1
|= HCI_LM_MASTER
;
4488 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4489 lm2
|= HCI_LM_ACCEPT
;
4490 if (l2cap_pi(sk
)->role_switch
)
4491 lm2
|= HCI_LM_MASTER
;
4494 read_unlock(&l2cap_sk_list
.lock
);
4496 return exact
? lm1
: lm2
;
4499 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4501 struct l2cap_conn
*conn
;
4503 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4505 if (hcon
->type
!= ACL_LINK
)
4509 conn
= l2cap_conn_add(hcon
, status
);
4511 l2cap_conn_ready(conn
);
4513 l2cap_conn_del(hcon
, bt_err(status
));
4518 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4520 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4522 BT_DBG("hcon %p", hcon
);
4524 if (hcon
->type
!= ACL_LINK
|| !conn
)
4527 return conn
->disc_reason
;
4530 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4532 BT_DBG("hcon %p reason %d", hcon
, reason
);
4534 if (hcon
->type
!= ACL_LINK
)
4537 l2cap_conn_del(hcon
, bt_err(reason
));
4542 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4544 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4547 if (encrypt
== 0x00) {
4548 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4549 l2cap_sock_clear_timer(sk
);
4550 l2cap_sock_set_timer(sk
, HZ
* 5);
4551 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4552 __l2cap_sock_close(sk
, ECONNREFUSED
);
4554 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4555 l2cap_sock_clear_timer(sk
);
4559 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4561 struct l2cap_chan_list
*l
;
4562 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4568 l
= &conn
->chan_list
;
4570 BT_DBG("conn %p", conn
);
4572 read_lock(&l
->lock
);
4574 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4577 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4582 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4583 sk
->sk_state
== BT_CONFIG
)) {
4584 l2cap_check_encryption(sk
, encrypt
);
4589 if (sk
->sk_state
== BT_CONNECT
) {
4591 struct l2cap_conn_req req
;
4592 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4593 req
.psm
= l2cap_pi(sk
)->psm
;
4595 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4596 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4598 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4599 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4601 l2cap_sock_clear_timer(sk
);
4602 l2cap_sock_set_timer(sk
, HZ
/ 10);
4604 } else if (sk
->sk_state
== BT_CONNECT2
) {
4605 struct l2cap_conn_rsp rsp
;
4609 sk
->sk_state
= BT_CONFIG
;
4610 result
= L2CAP_CR_SUCCESS
;
4612 sk
->sk_state
= BT_DISCONN
;
4613 l2cap_sock_set_timer(sk
, HZ
/ 10);
4614 result
= L2CAP_CR_SEC_BLOCK
;
4617 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4618 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4619 rsp
.result
= cpu_to_le16(result
);
4620 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4621 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4622 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4628 read_unlock(&l
->lock
);
4633 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4635 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4637 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4640 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4642 if (flags
& ACL_START
) {
4643 struct l2cap_hdr
*hdr
;
4647 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4648 kfree_skb(conn
->rx_skb
);
4649 conn
->rx_skb
= NULL
;
4651 l2cap_conn_unreliable(conn
, ECOMM
);
4655 BT_ERR("Frame is too short (len %d)", skb
->len
);
4656 l2cap_conn_unreliable(conn
, ECOMM
);
4660 hdr
= (struct l2cap_hdr
*) skb
->data
;
4661 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4663 if (len
== skb
->len
) {
4664 /* Complete frame received */
4665 l2cap_recv_frame(conn
, skb
);
4669 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4671 if (skb
->len
> len
) {
4672 BT_ERR("Frame is too long (len %d, expected len %d)",
4674 l2cap_conn_unreliable(conn
, ECOMM
);
4678 /* Allocate skb for the complete frame (with header) */
4679 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4683 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4685 conn
->rx_len
= len
- skb
->len
;
4687 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4689 if (!conn
->rx_len
) {
4690 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4691 l2cap_conn_unreliable(conn
, ECOMM
);
4695 if (skb
->len
> conn
->rx_len
) {
4696 BT_ERR("Fragment is too long (len %d, expected %d)",
4697 skb
->len
, conn
->rx_len
);
4698 kfree_skb(conn
->rx_skb
);
4699 conn
->rx_skb
= NULL
;
4701 l2cap_conn_unreliable(conn
, ECOMM
);
4705 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4707 conn
->rx_len
-= skb
->len
;
4709 if (!conn
->rx_len
) {
4710 /* Complete frame received */
4711 l2cap_recv_frame(conn
, conn
->rx_skb
);
4712 conn
->rx_skb
= NULL
;
4721 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4724 struct hlist_node
*node
;
4726 read_lock_bh(&l2cap_sk_list
.lock
);
4728 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4729 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4731 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4732 batostr(&bt_sk(sk
)->src
),
4733 batostr(&bt_sk(sk
)->dst
),
4734 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4736 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4739 read_unlock_bh(&l2cap_sk_list
.lock
);
4744 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4746 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4749 static const struct file_operations l2cap_debugfs_fops
= {
4750 .open
= l2cap_debugfs_open
,
4752 .llseek
= seq_lseek
,
4753 .release
= single_release
,
4756 static struct dentry
*l2cap_debugfs
;
4758 static const struct proto_ops l2cap_sock_ops
= {
4759 .family
= PF_BLUETOOTH
,
4760 .owner
= THIS_MODULE
,
4761 .release
= l2cap_sock_release
,
4762 .bind
= l2cap_sock_bind
,
4763 .connect
= l2cap_sock_connect
,
4764 .listen
= l2cap_sock_listen
,
4765 .accept
= l2cap_sock_accept
,
4766 .getname
= l2cap_sock_getname
,
4767 .sendmsg
= l2cap_sock_sendmsg
,
4768 .recvmsg
= l2cap_sock_recvmsg
,
4769 .poll
= bt_sock_poll
,
4770 .ioctl
= bt_sock_ioctl
,
4771 .mmap
= sock_no_mmap
,
4772 .socketpair
= sock_no_socketpair
,
4773 .shutdown
= l2cap_sock_shutdown
,
4774 .setsockopt
= l2cap_sock_setsockopt
,
4775 .getsockopt
= l2cap_sock_getsockopt
4778 static const struct net_proto_family l2cap_sock_family_ops
= {
4779 .family
= PF_BLUETOOTH
,
4780 .owner
= THIS_MODULE
,
4781 .create
= l2cap_sock_create
,
4784 static struct hci_proto l2cap_hci_proto
= {
4786 .id
= HCI_PROTO_L2CAP
,
4787 .connect_ind
= l2cap_connect_ind
,
4788 .connect_cfm
= l2cap_connect_cfm
,
4789 .disconn_ind
= l2cap_disconn_ind
,
4790 .disconn_cfm
= l2cap_disconn_cfm
,
4791 .security_cfm
= l2cap_security_cfm
,
4792 .recv_acldata
= l2cap_recv_acldata
4795 static int __init
l2cap_init(void)
4799 err
= proto_register(&l2cap_proto
, 0);
4803 _busy_wq
= create_singlethread_workqueue("l2cap");
4807 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4809 BT_ERR("L2CAP socket registration failed");
4813 err
= hci_register_proto(&l2cap_hci_proto
);
4815 BT_ERR("L2CAP protocol registration failed");
4816 bt_sock_unregister(BTPROTO_L2CAP
);
4821 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4822 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4824 BT_ERR("Failed to create L2CAP debug file");
4827 BT_INFO("L2CAP ver %s", VERSION
);
4828 BT_INFO("L2CAP socket layer initialized");
4833 proto_unregister(&l2cap_proto
);
4837 static void __exit
l2cap_exit(void)
4839 debugfs_remove(l2cap_debugfs
);
4841 flush_workqueue(_busy_wq
);
4842 destroy_workqueue(_busy_wq
);
4844 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4845 BT_ERR("L2CAP socket unregistration failed");
4847 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4848 BT_ERR("L2CAP protocol unregistration failed");
4850 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4861 module_init(l2cap_init
);
4862 module_exit(l2cap_exit
);
4864 module_param(disable_ertm
, bool, 0644);
4865 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");
4867 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4868 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4869 MODULE_VERSION(VERSION
);
4870 MODULE_LICENSE("GPL");
4871 MODULE_ALIAS("bt-proto-0");