2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
56 static int enable_ertm
= 0;
57 static int max_transmit
= L2CAP_DEFAULT_MAX_TX
;
59 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
60 static u8 l2cap_fixed_chan
[8] = { 0x02, };
62 static const struct proto_ops l2cap_sock_ops
;
64 static struct bt_sock_list l2cap_sk_list
= {
65 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
68 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
69 static void l2cap_sock_close(struct sock
*sk
);
70 static void l2cap_sock_kill(struct sock
*sk
);
72 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
73 u8 code
, u8 ident
, u16 dlen
, void *data
);
75 /* ---- L2CAP timers ---- */
76 static void l2cap_sock_timeout(unsigned long arg
)
78 struct sock
*sk
= (struct sock
*) arg
;
81 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
85 if (sk
->sk_state
== BT_CONNECTED
|| sk
->sk_state
== BT_CONFIG
)
86 reason
= ECONNREFUSED
;
87 else if (sk
->sk_state
== BT_CONNECT
&&
88 l2cap_pi(sk
)->sec_level
!= BT_SECURITY_SDP
)
89 reason
= ECONNREFUSED
;
93 __l2cap_sock_close(sk
, reason
);
101 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
103 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
104 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
107 static void l2cap_sock_clear_timer(struct sock
*sk
)
109 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
110 sk_stop_timer(sk
, &sk
->sk_timer
);
113 /* ---- L2CAP channels ---- */
114 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
117 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
118 if (l2cap_pi(s
)->dcid
== cid
)
124 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
127 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
128 if (l2cap_pi(s
)->scid
== cid
)
134 /* Find channel with given SCID.
135 * Returns locked socket */
136 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
140 s
= __l2cap_get_chan_by_scid(l
, cid
);
143 read_unlock(&l
->lock
);
147 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
150 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
151 if (l2cap_pi(s
)->ident
== ident
)
157 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
161 s
= __l2cap_get_chan_by_ident(l
, ident
);
164 read_unlock(&l
->lock
);
168 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
170 u16 cid
= L2CAP_CID_DYN_START
;
172 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
173 if (!__l2cap_get_chan_by_scid(l
, cid
))
180 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
185 l2cap_pi(l
->head
)->prev_c
= sk
;
187 l2cap_pi(sk
)->next_c
= l
->head
;
188 l2cap_pi(sk
)->prev_c
= NULL
;
192 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
194 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
196 write_lock_bh(&l
->lock
);
201 l2cap_pi(next
)->prev_c
= prev
;
203 l2cap_pi(prev
)->next_c
= next
;
204 write_unlock_bh(&l
->lock
);
209 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
211 struct l2cap_chan_list
*l
= &conn
->chan_list
;
213 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
214 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
216 conn
->disc_reason
= 0x13;
218 l2cap_pi(sk
)->conn
= conn
;
220 if (sk
->sk_type
== SOCK_SEQPACKET
) {
221 /* Alloc CID for connection-oriented socket */
222 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
223 } else if (sk
->sk_type
== SOCK_DGRAM
) {
224 /* Connectionless socket */
225 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
226 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
227 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
229 /* Raw socket can send/recv signalling messages only */
230 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
231 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
232 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
235 __l2cap_chan_link(l
, sk
);
238 bt_accept_enqueue(parent
, sk
);
242 * Must be called on the locked socket. */
243 static void l2cap_chan_del(struct sock
*sk
, int err
)
245 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
246 struct sock
*parent
= bt_sk(sk
)->parent
;
248 l2cap_sock_clear_timer(sk
);
250 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
253 /* Unlink from channel list */
254 l2cap_chan_unlink(&conn
->chan_list
, sk
);
255 l2cap_pi(sk
)->conn
= NULL
;
256 hci_conn_put(conn
->hcon
);
259 sk
->sk_state
= BT_CLOSED
;
260 sock_set_flag(sk
, SOCK_ZAPPED
);
266 bt_accept_unlink(sk
);
267 parent
->sk_data_ready(parent
, 0);
269 sk
->sk_state_change(sk
);
272 /* Service level security */
273 static inline int l2cap_check_security(struct sock
*sk
)
275 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
278 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
279 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
280 auth_type
= HCI_AT_NO_BONDING_MITM
;
282 auth_type
= HCI_AT_NO_BONDING
;
284 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
285 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
287 switch (l2cap_pi(sk
)->sec_level
) {
288 case BT_SECURITY_HIGH
:
289 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
291 case BT_SECURITY_MEDIUM
:
292 auth_type
= HCI_AT_GENERAL_BONDING
;
295 auth_type
= HCI_AT_NO_BONDING
;
300 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
304 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
314 spin_lock_bh(&conn
->lock
);
316 if (++conn
->tx_ident
> 128)
321 spin_unlock_bh(&conn
->lock
);
326 static inline int l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
328 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
330 BT_DBG("code 0x%2.2x", code
);
335 return hci_send_acl(conn
->hcon
, skb
, 0);
338 static inline int l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
341 struct l2cap_hdr
*lh
;
342 struct l2cap_conn
*conn
= pi
->conn
;
343 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
345 if (pi
->fcs
== L2CAP_FCS_CRC16
)
348 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
350 count
= min_t(unsigned int, conn
->mtu
, hlen
);
351 control
|= L2CAP_CTRL_FRAME_TYPE
;
353 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
357 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
358 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
359 lh
->cid
= cpu_to_le16(pi
->dcid
);
360 put_unaligned_le16(control
, skb_put(skb
, 2));
362 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
363 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
364 put_unaligned_le16(fcs
, skb_put(skb
, 2));
367 return hci_send_acl(pi
->conn
->hcon
, skb
, 0);
370 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
372 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
)
373 control
|= L2CAP_SUPER_RCV_NOT_READY
;
375 control
|= L2CAP_SUPER_RCV_READY
;
377 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
379 return l2cap_send_sframe(pi
, control
);
382 static void l2cap_do_start(struct sock
*sk
)
384 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
386 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
387 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
390 if (l2cap_check_security(sk
)) {
391 struct l2cap_conn_req req
;
392 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
393 req
.psm
= l2cap_pi(sk
)->psm
;
395 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
397 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
398 L2CAP_CONN_REQ
, sizeof(req
), &req
);
401 struct l2cap_info_req req
;
402 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
404 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
405 conn
->info_ident
= l2cap_get_ident(conn
);
407 mod_timer(&conn
->info_timer
, jiffies
+
408 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
410 l2cap_send_cmd(conn
, conn
->info_ident
,
411 L2CAP_INFO_REQ
, sizeof(req
), &req
);
415 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
)
417 struct l2cap_disconn_req req
;
419 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
420 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
421 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
422 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
425 /* ---- L2CAP connections ---- */
426 static void l2cap_conn_start(struct l2cap_conn
*conn
)
428 struct l2cap_chan_list
*l
= &conn
->chan_list
;
431 BT_DBG("conn %p", conn
);
435 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
438 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
443 if (sk
->sk_state
== BT_CONNECT
) {
444 if (l2cap_check_security(sk
)) {
445 struct l2cap_conn_req req
;
446 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
447 req
.psm
= l2cap_pi(sk
)->psm
;
449 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
451 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
452 L2CAP_CONN_REQ
, sizeof(req
), &req
);
454 } else if (sk
->sk_state
== BT_CONNECT2
) {
455 struct l2cap_conn_rsp rsp
;
456 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
457 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
459 if (l2cap_check_security(sk
)) {
460 if (bt_sk(sk
)->defer_setup
) {
461 struct sock
*parent
= bt_sk(sk
)->parent
;
462 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
463 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
464 parent
->sk_data_ready(parent
, 0);
467 sk
->sk_state
= BT_CONFIG
;
468 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
469 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
472 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
473 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
476 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
477 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
483 read_unlock(&l
->lock
);
486 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
488 struct l2cap_chan_list
*l
= &conn
->chan_list
;
491 BT_DBG("conn %p", conn
);
495 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
498 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
499 l2cap_sock_clear_timer(sk
);
500 sk
->sk_state
= BT_CONNECTED
;
501 sk
->sk_state_change(sk
);
502 } else if (sk
->sk_state
== BT_CONNECT
)
508 read_unlock(&l
->lock
);
511 /* Notify sockets that we cannot guaranty reliability anymore */
512 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
514 struct l2cap_chan_list
*l
= &conn
->chan_list
;
517 BT_DBG("conn %p", conn
);
521 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
522 if (l2cap_pi(sk
)->force_reliable
)
526 read_unlock(&l
->lock
);
529 static void l2cap_info_timeout(unsigned long arg
)
531 struct l2cap_conn
*conn
= (void *) arg
;
533 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
534 conn
->info_ident
= 0;
536 l2cap_conn_start(conn
);
539 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
541 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
546 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
550 hcon
->l2cap_data
= conn
;
553 BT_DBG("hcon %p conn %p", hcon
, conn
);
555 conn
->mtu
= hcon
->hdev
->acl_mtu
;
556 conn
->src
= &hcon
->hdev
->bdaddr
;
557 conn
->dst
= &hcon
->dst
;
561 spin_lock_init(&conn
->lock
);
562 rwlock_init(&conn
->chan_list
.lock
);
564 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
565 (unsigned long) conn
);
567 conn
->disc_reason
= 0x13;
572 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
574 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
580 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
582 kfree_skb(conn
->rx_skb
);
585 while ((sk
= conn
->chan_list
.head
)) {
587 l2cap_chan_del(sk
, err
);
592 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
593 del_timer_sync(&conn
->info_timer
);
595 hcon
->l2cap_data
= NULL
;
599 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
601 struct l2cap_chan_list
*l
= &conn
->chan_list
;
602 write_lock_bh(&l
->lock
);
603 __l2cap_chan_add(conn
, sk
, parent
);
604 write_unlock_bh(&l
->lock
);
607 /* ---- Socket interface ---- */
608 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
611 struct hlist_node
*node
;
612 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
613 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
620 /* Find socket with psm and source bdaddr.
621 * Returns closest match.
623 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
625 struct sock
*sk
= NULL
, *sk1
= NULL
;
626 struct hlist_node
*node
;
628 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
629 if (state
&& sk
->sk_state
!= state
)
632 if (l2cap_pi(sk
)->psm
== psm
) {
634 if (!bacmp(&bt_sk(sk
)->src
, src
))
638 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
642 return node
? sk
: sk1
;
645 /* Find socket with given address (psm, src).
646 * Returns locked socket */
647 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
650 read_lock(&l2cap_sk_list
.lock
);
651 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
654 read_unlock(&l2cap_sk_list
.lock
);
658 static void l2cap_sock_destruct(struct sock
*sk
)
662 skb_queue_purge(&sk
->sk_receive_queue
);
663 skb_queue_purge(&sk
->sk_write_queue
);
666 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
670 BT_DBG("parent %p", parent
);
672 /* Close not yet accepted channels */
673 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
674 l2cap_sock_close(sk
);
676 parent
->sk_state
= BT_CLOSED
;
677 sock_set_flag(parent
, SOCK_ZAPPED
);
680 /* Kill socket (only if zapped and orphan)
681 * Must be called on unlocked socket.
683 static void l2cap_sock_kill(struct sock
*sk
)
685 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
688 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
690 /* Kill poor orphan */
691 bt_sock_unlink(&l2cap_sk_list
, sk
);
692 sock_set_flag(sk
, SOCK_DEAD
);
696 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
698 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
700 switch (sk
->sk_state
) {
702 l2cap_sock_cleanup_listen(sk
);
707 if (sk
->sk_type
== SOCK_SEQPACKET
) {
708 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
710 sk
->sk_state
= BT_DISCONN
;
711 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
712 l2cap_send_disconn_req(conn
, sk
);
714 l2cap_chan_del(sk
, reason
);
718 if (sk
->sk_type
== SOCK_SEQPACKET
) {
719 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
720 struct l2cap_conn_rsp rsp
;
723 if (bt_sk(sk
)->defer_setup
)
724 result
= L2CAP_CR_SEC_BLOCK
;
726 result
= L2CAP_CR_BAD_PSM
;
728 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
729 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
730 rsp
.result
= cpu_to_le16(result
);
731 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
732 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
733 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
735 l2cap_chan_del(sk
, reason
);
740 l2cap_chan_del(sk
, reason
);
744 sock_set_flag(sk
, SOCK_ZAPPED
);
749 /* Must be called on unlocked socket. */
750 static void l2cap_sock_close(struct sock
*sk
)
752 l2cap_sock_clear_timer(sk
);
754 __l2cap_sock_close(sk
, ECONNRESET
);
759 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
761 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
766 sk
->sk_type
= parent
->sk_type
;
767 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
769 pi
->imtu
= l2cap_pi(parent
)->imtu
;
770 pi
->omtu
= l2cap_pi(parent
)->omtu
;
771 pi
->mode
= l2cap_pi(parent
)->mode
;
772 pi
->fcs
= l2cap_pi(parent
)->fcs
;
773 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
774 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
775 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
777 pi
->imtu
= L2CAP_DEFAULT_MTU
;
779 pi
->mode
= L2CAP_MODE_BASIC
;
780 pi
->fcs
= L2CAP_FCS_CRC16
;
781 pi
->sec_level
= BT_SECURITY_LOW
;
783 pi
->force_reliable
= 0;
786 /* Default config options */
788 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
789 skb_queue_head_init(TX_QUEUE(sk
));
790 skb_queue_head_init(SREJ_QUEUE(sk
));
791 INIT_LIST_HEAD(SREJ_LIST(sk
));
794 static struct proto l2cap_proto
= {
796 .owner
= THIS_MODULE
,
797 .obj_size
= sizeof(struct l2cap_pinfo
)
800 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
804 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
808 sock_init_data(sock
, sk
);
809 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
811 sk
->sk_destruct
= l2cap_sock_destruct
;
812 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
814 sock_reset_flag(sk
, SOCK_ZAPPED
);
816 sk
->sk_protocol
= proto
;
817 sk
->sk_state
= BT_OPEN
;
819 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
821 bt_sock_link(&l2cap_sk_list
, sk
);
825 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
830 BT_DBG("sock %p", sock
);
832 sock
->state
= SS_UNCONNECTED
;
834 if (sock
->type
!= SOCK_SEQPACKET
&&
835 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
836 return -ESOCKTNOSUPPORT
;
838 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
841 sock
->ops
= &l2cap_sock_ops
;
843 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
847 l2cap_sock_init(sk
, NULL
);
851 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
853 struct sock
*sk
= sock
->sk
;
854 struct sockaddr_l2 la
;
859 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
862 memset(&la
, 0, sizeof(la
));
863 len
= min_t(unsigned int, sizeof(la
), alen
);
864 memcpy(&la
, addr
, len
);
871 if (sk
->sk_state
!= BT_OPEN
) {
876 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
877 !capable(CAP_NET_BIND_SERVICE
)) {
882 write_lock_bh(&l2cap_sk_list
.lock
);
884 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
887 /* Save source address */
888 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
889 l2cap_pi(sk
)->psm
= la
.l2_psm
;
890 l2cap_pi(sk
)->sport
= la
.l2_psm
;
891 sk
->sk_state
= BT_BOUND
;
893 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
894 __le16_to_cpu(la
.l2_psm
) == 0x0003)
895 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
898 write_unlock_bh(&l2cap_sk_list
.lock
);
905 static int l2cap_do_connect(struct sock
*sk
)
907 bdaddr_t
*src
= &bt_sk(sk
)->src
;
908 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
909 struct l2cap_conn
*conn
;
910 struct hci_conn
*hcon
;
911 struct hci_dev
*hdev
;
915 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
918 hdev
= hci_get_route(dst
, src
);
920 return -EHOSTUNREACH
;
922 hci_dev_lock_bh(hdev
);
926 if (sk
->sk_type
== SOCK_RAW
) {
927 switch (l2cap_pi(sk
)->sec_level
) {
928 case BT_SECURITY_HIGH
:
929 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
931 case BT_SECURITY_MEDIUM
:
932 auth_type
= HCI_AT_DEDICATED_BONDING
;
935 auth_type
= HCI_AT_NO_BONDING
;
938 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
939 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
940 auth_type
= HCI_AT_NO_BONDING_MITM
;
942 auth_type
= HCI_AT_NO_BONDING
;
944 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
945 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
947 switch (l2cap_pi(sk
)->sec_level
) {
948 case BT_SECURITY_HIGH
:
949 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
951 case BT_SECURITY_MEDIUM
:
952 auth_type
= HCI_AT_GENERAL_BONDING
;
955 auth_type
= HCI_AT_NO_BONDING
;
960 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
961 l2cap_pi(sk
)->sec_level
, auth_type
);
965 conn
= l2cap_conn_add(hcon
, 0);
973 /* Update source addr of the socket */
974 bacpy(src
, conn
->src
);
976 l2cap_chan_add(conn
, sk
, NULL
);
978 sk
->sk_state
= BT_CONNECT
;
979 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
981 if (hcon
->state
== BT_CONNECTED
) {
982 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
983 l2cap_sock_clear_timer(sk
);
984 sk
->sk_state
= BT_CONNECTED
;
990 hci_dev_unlock_bh(hdev
);
995 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
997 struct sock
*sk
= sock
->sk
;
998 struct sockaddr_l2 la
;
1001 BT_DBG("sk %p", sk
);
1003 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
1006 memset(&la
, 0, sizeof(la
));
1007 len
= min_t(unsigned int, sizeof(la
), alen
);
1008 memcpy(&la
, addr
, len
);
1015 if (sk
->sk_type
== SOCK_SEQPACKET
&& !la
.l2_psm
) {
1020 switch (l2cap_pi(sk
)->mode
) {
1021 case L2CAP_MODE_BASIC
:
1023 case L2CAP_MODE_ERTM
:
1024 case L2CAP_MODE_STREAMING
:
1033 switch (sk
->sk_state
) {
1037 /* Already connecting */
1041 /* Already connected */
1054 /* Set destination address and psm */
1055 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1056 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1058 err
= l2cap_do_connect(sk
);
1063 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1064 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1070 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1072 struct sock
*sk
= sock
->sk
;
1075 BT_DBG("sk %p backlog %d", sk
, backlog
);
1079 if (sk
->sk_state
!= BT_BOUND
|| sock
->type
!= SOCK_SEQPACKET
) {
1084 switch (l2cap_pi(sk
)->mode
) {
1085 case L2CAP_MODE_BASIC
:
1087 case L2CAP_MODE_ERTM
:
1088 case L2CAP_MODE_STREAMING
:
1097 if (!l2cap_pi(sk
)->psm
) {
1098 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1103 write_lock_bh(&l2cap_sk_list
.lock
);
1105 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1106 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1107 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1108 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1113 write_unlock_bh(&l2cap_sk_list
.lock
);
1119 sk
->sk_max_ack_backlog
= backlog
;
1120 sk
->sk_ack_backlog
= 0;
1121 sk
->sk_state
= BT_LISTEN
;
1128 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1130 DECLARE_WAITQUEUE(wait
, current
);
1131 struct sock
*sk
= sock
->sk
, *nsk
;
1135 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1137 if (sk
->sk_state
!= BT_LISTEN
) {
1142 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1144 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1146 /* Wait for an incoming connection. (wake-one). */
1147 add_wait_queue_exclusive(sk
->sk_sleep
, &wait
);
1148 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1149 set_current_state(TASK_INTERRUPTIBLE
);
1156 timeo
= schedule_timeout(timeo
);
1157 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1159 if (sk
->sk_state
!= BT_LISTEN
) {
1164 if (signal_pending(current
)) {
1165 err
= sock_intr_errno(timeo
);
1169 set_current_state(TASK_RUNNING
);
1170 remove_wait_queue(sk
->sk_sleep
, &wait
);
1175 newsock
->state
= SS_CONNECTED
;
1177 BT_DBG("new socket %p", nsk
);
1184 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1186 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1187 struct sock
*sk
= sock
->sk
;
1189 BT_DBG("sock %p, sk %p", sock
, sk
);
1191 addr
->sa_family
= AF_BLUETOOTH
;
1192 *len
= sizeof(struct sockaddr_l2
);
1195 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1196 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1197 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1199 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1200 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1201 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1207 static void l2cap_monitor_timeout(unsigned long arg
)
1209 struct sock
*sk
= (void *) arg
;
1213 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1214 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
);
1218 l2cap_pi(sk
)->retry_count
++;
1219 __mod_monitor_timer();
1221 control
= L2CAP_CTRL_POLL
;
1222 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1226 static void l2cap_retrans_timeout(unsigned long arg
)
1228 struct sock
*sk
= (void *) arg
;
1232 l2cap_pi(sk
)->retry_count
= 1;
1233 __mod_monitor_timer();
1235 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1237 control
= L2CAP_CTRL_POLL
;
1238 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1242 static void l2cap_drop_acked_frames(struct sock
*sk
)
1244 struct sk_buff
*skb
;
1246 while ((skb
= skb_peek(TX_QUEUE(sk
)))) {
1247 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1250 skb
= skb_dequeue(TX_QUEUE(sk
));
1253 l2cap_pi(sk
)->unacked_frames
--;
1256 if (!l2cap_pi(sk
)->unacked_frames
)
1257 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1262 static inline int l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1264 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1267 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1269 err
= hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1276 static int l2cap_streaming_send(struct sock
*sk
)
1278 struct sk_buff
*skb
, *tx_skb
;
1279 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1283 while ((skb
= sk
->sk_send_head
)) {
1284 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1286 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1287 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1288 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1290 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1291 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1292 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1295 err
= l2cap_do_send(sk
, tx_skb
);
1297 l2cap_send_disconn_req(pi
->conn
, sk
);
1301 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1303 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1304 sk
->sk_send_head
= NULL
;
1306 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1308 skb
= skb_dequeue(TX_QUEUE(sk
));
1314 static int l2cap_retransmit_frame(struct sock
*sk
, u8 tx_seq
)
1316 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1317 struct sk_buff
*skb
, *tx_skb
;
1321 skb
= skb_peek(TX_QUEUE(sk
));
1323 if (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1324 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1326 skb
= skb_queue_next(TX_QUEUE(sk
), skb
);
1330 if (pi
->remote_max_tx
&&
1331 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1332 l2cap_send_disconn_req(pi
->conn
, sk
);
1336 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1337 bt_cb(skb
)->retries
++;
1338 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1339 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1340 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1341 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1343 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1344 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1345 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1348 err
= l2cap_do_send(sk
, tx_skb
);
1350 l2cap_send_disconn_req(pi
->conn
, sk
);
1358 static int l2cap_ertm_send(struct sock
*sk
)
1360 struct sk_buff
*skb
, *tx_skb
;
1361 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1365 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
1368 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
)) &&
1369 !(pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)) {
1370 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1372 if (pi
->remote_max_tx
&&
1373 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1374 l2cap_send_disconn_req(pi
->conn
, sk
);
1378 bt_cb(skb
)->retries
++;
1380 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1381 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1382 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1383 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1386 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1387 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1388 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1391 err
= l2cap_do_send(sk
, tx_skb
);
1393 l2cap_send_disconn_req(pi
->conn
, sk
);
1396 __mod_retrans_timer();
1398 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1399 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1401 pi
->unacked_frames
++;
1403 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1404 sk
->sk_send_head
= NULL
;
1406 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1412 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1414 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1415 struct sk_buff
**frag
;
1418 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
)) {
1425 /* Continuation fragments (no L2CAP header) */
1426 frag
= &skb_shinfo(skb
)->frag_list
;
1428 count
= min_t(unsigned int, conn
->mtu
, len
);
1430 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1433 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1439 frag
= &(*frag
)->next
;
1445 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1447 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1448 struct sk_buff
*skb
;
1449 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1450 struct l2cap_hdr
*lh
;
1452 BT_DBG("sk %p len %d", sk
, (int)len
);
1454 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1455 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1456 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1458 return ERR_PTR(-ENOMEM
);
1460 /* Create L2CAP header */
1461 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1462 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1463 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1464 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1466 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1467 if (unlikely(err
< 0)) {
1469 return ERR_PTR(err
);
1474 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1476 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1477 struct sk_buff
*skb
;
1478 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1479 struct l2cap_hdr
*lh
;
1481 BT_DBG("sk %p len %d", sk
, (int)len
);
1483 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1484 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1485 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1487 return ERR_PTR(-ENOMEM
);
1489 /* Create L2CAP header */
1490 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1491 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1492 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1494 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1495 if (unlikely(err
< 0)) {
1497 return ERR_PTR(err
);
1502 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1504 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1505 struct sk_buff
*skb
;
1506 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1507 struct l2cap_hdr
*lh
;
1509 BT_DBG("sk %p len %d", sk
, (int)len
);
1514 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1517 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1518 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1519 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1521 return ERR_PTR(-ENOMEM
);
1523 /* Create L2CAP header */
1524 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1525 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1526 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1527 put_unaligned_le16(control
, skb_put(skb
, 2));
1529 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1531 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1532 if (unlikely(err
< 0)) {
1534 return ERR_PTR(err
);
1537 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1538 put_unaligned_le16(0, skb_put(skb
, 2));
1540 bt_cb(skb
)->retries
= 0;
1544 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1546 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1547 struct sk_buff
*skb
;
1548 struct sk_buff_head sar_queue
;
1552 __skb_queue_head_init(&sar_queue
);
1553 control
= L2CAP_SDU_START
;
1554 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->max_pdu_size
, control
, len
);
1556 return PTR_ERR(skb
);
1558 __skb_queue_tail(&sar_queue
, skb
);
1559 len
-= pi
->max_pdu_size
;
1560 size
+=pi
->max_pdu_size
;
1566 if (len
> pi
->max_pdu_size
) {
1567 control
|= L2CAP_SDU_CONTINUE
;
1568 buflen
= pi
->max_pdu_size
;
1570 control
|= L2CAP_SDU_END
;
1574 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1576 skb_queue_purge(&sar_queue
);
1577 return PTR_ERR(skb
);
1580 __skb_queue_tail(&sar_queue
, skb
);
1585 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1586 if (sk
->sk_send_head
== NULL
)
1587 sk
->sk_send_head
= sar_queue
.next
;
1592 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1594 struct sock
*sk
= sock
->sk
;
1595 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1596 struct sk_buff
*skb
;
1600 BT_DBG("sock %p, sk %p", sock
, sk
);
1602 err
= sock_error(sk
);
1606 if (msg
->msg_flags
& MSG_OOB
)
1609 /* Check outgoing MTU */
1610 if (sk
->sk_type
== SOCK_SEQPACKET
&& pi
->mode
== L2CAP_MODE_BASIC
&&
1616 if (sk
->sk_state
!= BT_CONNECTED
) {
1621 /* Connectionless channel */
1622 if (sk
->sk_type
== SOCK_DGRAM
) {
1623 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1624 err
= l2cap_do_send(sk
, skb
);
1629 case L2CAP_MODE_BASIC
:
1630 /* Create a basic PDU */
1631 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1637 err
= l2cap_do_send(sk
, skb
);
1642 case L2CAP_MODE_ERTM
:
1643 case L2CAP_MODE_STREAMING
:
1644 /* Entire SDU fits into one PDU */
1645 if (len
<= pi
->max_pdu_size
) {
1646 control
= L2CAP_SDU_UNSEGMENTED
;
1647 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1652 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1653 if (sk
->sk_send_head
== NULL
)
1654 sk
->sk_send_head
= skb
;
1656 /* Segment SDU into multiples PDUs */
1657 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1662 if (pi
->mode
== L2CAP_MODE_STREAMING
)
1663 err
= l2cap_streaming_send(sk
);
1665 err
= l2cap_ertm_send(sk
);
1672 BT_DBG("bad state %1.1x", pi
->mode
);
1681 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1683 struct sock
*sk
= sock
->sk
;
1687 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1688 struct l2cap_conn_rsp rsp
;
1690 sk
->sk_state
= BT_CONFIG
;
1692 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1693 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1694 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1695 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1696 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1697 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1705 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1708 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1710 struct sock
*sk
= sock
->sk
;
1711 struct l2cap_options opts
;
1715 BT_DBG("sk %p", sk
);
1721 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1722 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1723 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1724 opts
.mode
= l2cap_pi(sk
)->mode
;
1725 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1727 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1728 if (copy_from_user((char *) &opts
, optval
, len
)) {
1733 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1734 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1735 l2cap_pi(sk
)->mode
= opts
.mode
;
1736 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1740 if (get_user(opt
, (u32 __user
*) optval
)) {
1745 if (opt
& L2CAP_LM_AUTH
)
1746 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1747 if (opt
& L2CAP_LM_ENCRYPT
)
1748 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1749 if (opt
& L2CAP_LM_SECURE
)
1750 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1752 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1753 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1765 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1767 struct sock
*sk
= sock
->sk
;
1768 struct bt_security sec
;
1772 BT_DBG("sk %p", sk
);
1774 if (level
== SOL_L2CAP
)
1775 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1777 if (level
!= SOL_BLUETOOTH
)
1778 return -ENOPROTOOPT
;
1784 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1789 sec
.level
= BT_SECURITY_LOW
;
1791 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1792 if (copy_from_user((char *) &sec
, optval
, len
)) {
1797 if (sec
.level
< BT_SECURITY_LOW
||
1798 sec
.level
> BT_SECURITY_HIGH
) {
1803 l2cap_pi(sk
)->sec_level
= sec
.level
;
1806 case BT_DEFER_SETUP
:
1807 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1812 if (get_user(opt
, (u32 __user
*) optval
)) {
1817 bt_sk(sk
)->defer_setup
= opt
;
1829 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1831 struct sock
*sk
= sock
->sk
;
1832 struct l2cap_options opts
;
1833 struct l2cap_conninfo cinfo
;
1837 BT_DBG("sk %p", sk
);
1839 if (get_user(len
, optlen
))
1846 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1847 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1848 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1849 opts
.mode
= l2cap_pi(sk
)->mode
;
1850 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1852 len
= min_t(unsigned int, len
, sizeof(opts
));
1853 if (copy_to_user(optval
, (char *) &opts
, len
))
1859 switch (l2cap_pi(sk
)->sec_level
) {
1860 case BT_SECURITY_LOW
:
1861 opt
= L2CAP_LM_AUTH
;
1863 case BT_SECURITY_MEDIUM
:
1864 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
1866 case BT_SECURITY_HIGH
:
1867 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
1875 if (l2cap_pi(sk
)->role_switch
)
1876 opt
|= L2CAP_LM_MASTER
;
1878 if (l2cap_pi(sk
)->force_reliable
)
1879 opt
|= L2CAP_LM_RELIABLE
;
1881 if (put_user(opt
, (u32 __user
*) optval
))
1885 case L2CAP_CONNINFO
:
1886 if (sk
->sk_state
!= BT_CONNECTED
&&
1887 !(sk
->sk_state
== BT_CONNECT2
&&
1888 bt_sk(sk
)->defer_setup
)) {
1893 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
1894 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
1896 len
= min_t(unsigned int, len
, sizeof(cinfo
));
1897 if (copy_to_user(optval
, (char *) &cinfo
, len
))
1911 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1913 struct sock
*sk
= sock
->sk
;
1914 struct bt_security sec
;
1917 BT_DBG("sk %p", sk
);
1919 if (level
== SOL_L2CAP
)
1920 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
1922 if (level
!= SOL_BLUETOOTH
)
1923 return -ENOPROTOOPT
;
1925 if (get_user(len
, optlen
))
1932 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1937 sec
.level
= l2cap_pi(sk
)->sec_level
;
1939 len
= min_t(unsigned int, len
, sizeof(sec
));
1940 if (copy_to_user(optval
, (char *) &sec
, len
))
1945 case BT_DEFER_SETUP
:
1946 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1951 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
1965 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
1967 struct sock
*sk
= sock
->sk
;
1970 BT_DBG("sock %p, sk %p", sock
, sk
);
1976 if (!sk
->sk_shutdown
) {
1977 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1978 l2cap_sock_clear_timer(sk
);
1979 __l2cap_sock_close(sk
, 0);
1981 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
1982 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
1989 static int l2cap_sock_release(struct socket
*sock
)
1991 struct sock
*sk
= sock
->sk
;
1994 BT_DBG("sock %p, sk %p", sock
, sk
);
1999 err
= l2cap_sock_shutdown(sock
, 2);
2002 l2cap_sock_kill(sk
);
2006 static void l2cap_chan_ready(struct sock
*sk
)
2008 struct sock
*parent
= bt_sk(sk
)->parent
;
2010 BT_DBG("sk %p, parent %p", sk
, parent
);
2012 l2cap_pi(sk
)->conf_state
= 0;
2013 l2cap_sock_clear_timer(sk
);
2016 /* Outgoing channel.
2017 * Wake up socket sleeping on connect.
2019 sk
->sk_state
= BT_CONNECTED
;
2020 sk
->sk_state_change(sk
);
2022 /* Incoming channel.
2023 * Wake up socket sleeping on accept.
2025 parent
->sk_data_ready(parent
, 0);
2029 /* Copy frame to all raw sockets on that connection */
2030 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2032 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2033 struct sk_buff
*nskb
;
2036 BT_DBG("conn %p", conn
);
2038 read_lock(&l
->lock
);
2039 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2040 if (sk
->sk_type
!= SOCK_RAW
)
2043 /* Don't send frame to the socket it came from */
2046 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2050 if (sock_queue_rcv_skb(sk
, nskb
))
2053 read_unlock(&l
->lock
);
2056 /* ---- L2CAP signalling commands ---- */
2057 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2058 u8 code
, u8 ident
, u16 dlen
, void *data
)
2060 struct sk_buff
*skb
, **frag
;
2061 struct l2cap_cmd_hdr
*cmd
;
2062 struct l2cap_hdr
*lh
;
2065 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2066 conn
, code
, ident
, dlen
);
2068 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2069 count
= min_t(unsigned int, conn
->mtu
, len
);
2071 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2075 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2076 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2077 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2079 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2082 cmd
->len
= cpu_to_le16(dlen
);
2085 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2086 memcpy(skb_put(skb
, count
), data
, count
);
2092 /* Continuation fragments (no L2CAP header) */
2093 frag
= &skb_shinfo(skb
)->frag_list
;
2095 count
= min_t(unsigned int, conn
->mtu
, len
);
2097 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2101 memcpy(skb_put(*frag
, count
), data
, count
);
2106 frag
= &(*frag
)->next
;
2116 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2118 struct l2cap_conf_opt
*opt
= *ptr
;
2121 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2129 *val
= *((u8
*) opt
->val
);
2133 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2137 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2141 *val
= (unsigned long) opt
->val
;
2145 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2149 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2151 struct l2cap_conf_opt
*opt
= *ptr
;
2153 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2160 *((u8
*) opt
->val
) = val
;
2164 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2168 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2172 memcpy(opt
->val
, (void *) val
, len
);
2176 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2179 static inline void l2cap_ertm_init(struct sock
*sk
)
2181 l2cap_pi(sk
)->expected_ack_seq
= 0;
2182 l2cap_pi(sk
)->unacked_frames
= 0;
2183 l2cap_pi(sk
)->buffer_seq
= 0;
2184 l2cap_pi(sk
)->num_to_ack
= 0;
2186 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2187 l2cap_retrans_timeout
, (unsigned long) sk
);
2188 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2189 l2cap_monitor_timeout
, (unsigned long) sk
);
2191 __skb_queue_head_init(SREJ_QUEUE(sk
));
2194 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2196 u32 local_feat_mask
= l2cap_feat_mask
;
2198 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2201 case L2CAP_MODE_ERTM
:
2202 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2203 case L2CAP_MODE_STREAMING
:
2204 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2210 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2213 case L2CAP_MODE_STREAMING
:
2214 case L2CAP_MODE_ERTM
:
2215 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2219 return L2CAP_MODE_BASIC
;
2223 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2225 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2226 struct l2cap_conf_req
*req
= data
;
2227 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2228 void *ptr
= req
->data
;
2230 BT_DBG("sk %p", sk
);
2232 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2236 case L2CAP_MODE_STREAMING
:
2237 case L2CAP_MODE_ERTM
:
2238 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2239 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2240 l2cap_send_disconn_req(pi
->conn
, sk
);
2243 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2249 case L2CAP_MODE_BASIC
:
2250 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2251 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2254 case L2CAP_MODE_ERTM
:
2255 rfc
.mode
= L2CAP_MODE_ERTM
;
2256 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2257 rfc
.max_transmit
= max_transmit
;
2258 rfc
.retrans_timeout
= 0;
2259 rfc
.monitor_timeout
= 0;
2260 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2262 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2263 sizeof(rfc
), (unsigned long) &rfc
);
2265 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2268 if (pi
->fcs
== L2CAP_FCS_NONE
||
2269 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2270 pi
->fcs
= L2CAP_FCS_NONE
;
2271 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2275 case L2CAP_MODE_STREAMING
:
2276 rfc
.mode
= L2CAP_MODE_STREAMING
;
2278 rfc
.max_transmit
= 0;
2279 rfc
.retrans_timeout
= 0;
2280 rfc
.monitor_timeout
= 0;
2281 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2283 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2284 sizeof(rfc
), (unsigned long) &rfc
);
2286 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2289 if (pi
->fcs
== L2CAP_FCS_NONE
||
2290 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2291 pi
->fcs
= L2CAP_FCS_NONE
;
2292 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2297 /* FIXME: Need actual value of the flush timeout */
2298 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2299 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2301 req
->dcid
= cpu_to_le16(pi
->dcid
);
2302 req
->flags
= cpu_to_le16(0);
2307 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2309 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2310 struct l2cap_conf_rsp
*rsp
= data
;
2311 void *ptr
= rsp
->data
;
2312 void *req
= pi
->conf_req
;
2313 int len
= pi
->conf_len
;
2314 int type
, hint
, olen
;
2316 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2317 u16 mtu
= L2CAP_DEFAULT_MTU
;
2318 u16 result
= L2CAP_CONF_SUCCESS
;
2320 BT_DBG("sk %p", sk
);
2322 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2323 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2325 hint
= type
& L2CAP_CONF_HINT
;
2326 type
&= L2CAP_CONF_MASK
;
2329 case L2CAP_CONF_MTU
:
2333 case L2CAP_CONF_FLUSH_TO
:
2337 case L2CAP_CONF_QOS
:
2340 case L2CAP_CONF_RFC
:
2341 if (olen
== sizeof(rfc
))
2342 memcpy(&rfc
, (void *) val
, olen
);
2345 case L2CAP_CONF_FCS
:
2346 if (val
== L2CAP_FCS_NONE
)
2347 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2355 result
= L2CAP_CONF_UNKNOWN
;
2356 *((u8
*) ptr
++) = type
;
2361 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2365 case L2CAP_MODE_STREAMING
:
2366 case L2CAP_MODE_ERTM
:
2367 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2368 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2369 return -ECONNREFUSED
;
2372 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2377 if (pi
->mode
!= rfc
.mode
) {
2378 result
= L2CAP_CONF_UNACCEPT
;
2379 rfc
.mode
= pi
->mode
;
2381 if (pi
->num_conf_rsp
== 1)
2382 return -ECONNREFUSED
;
2384 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2385 sizeof(rfc
), (unsigned long) &rfc
);
2389 if (result
== L2CAP_CONF_SUCCESS
) {
2390 /* Configure output options and let the other side know
2391 * which ones we don't like. */
2393 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2394 result
= L2CAP_CONF_UNACCEPT
;
2397 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2399 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2402 case L2CAP_MODE_BASIC
:
2403 pi
->fcs
= L2CAP_FCS_NONE
;
2404 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2407 case L2CAP_MODE_ERTM
:
2408 pi
->remote_tx_win
= rfc
.txwin_size
;
2409 pi
->remote_max_tx
= rfc
.max_transmit
;
2410 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2412 rfc
.retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
2413 rfc
.monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
2415 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2417 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2418 sizeof(rfc
), (unsigned long) &rfc
);
2422 case L2CAP_MODE_STREAMING
:
2423 pi
->remote_tx_win
= rfc
.txwin_size
;
2424 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2426 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2428 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2429 sizeof(rfc
), (unsigned long) &rfc
);
2434 result
= L2CAP_CONF_UNACCEPT
;
2436 memset(&rfc
, 0, sizeof(rfc
));
2437 rfc
.mode
= pi
->mode
;
2440 if (result
== L2CAP_CONF_SUCCESS
)
2441 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2443 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2444 rsp
->result
= cpu_to_le16(result
);
2445 rsp
->flags
= cpu_to_le16(0x0000);
2450 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2452 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2453 struct l2cap_conf_req
*req
= data
;
2454 void *ptr
= req
->data
;
2457 struct l2cap_conf_rfc rfc
;
2459 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2461 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2462 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2465 case L2CAP_CONF_MTU
:
2466 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2467 *result
= L2CAP_CONF_UNACCEPT
;
2468 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2471 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2474 case L2CAP_CONF_FLUSH_TO
:
2476 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2480 case L2CAP_CONF_RFC
:
2481 if (olen
== sizeof(rfc
))
2482 memcpy(&rfc
, (void *)val
, olen
);
2484 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2485 rfc
.mode
!= pi
->mode
)
2486 return -ECONNREFUSED
;
2488 pi
->mode
= rfc
.mode
;
2491 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2492 sizeof(rfc
), (unsigned long) &rfc
);
2497 if (*result
== L2CAP_CONF_SUCCESS
) {
2499 case L2CAP_MODE_ERTM
:
2500 pi
->remote_tx_win
= rfc
.txwin_size
;
2501 pi
->retrans_timeout
= rfc
.retrans_timeout
;
2502 pi
->monitor_timeout
= rfc
.monitor_timeout
;
2503 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2505 case L2CAP_MODE_STREAMING
:
2506 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2511 req
->dcid
= cpu_to_le16(pi
->dcid
);
2512 req
->flags
= cpu_to_le16(0x0000);
2517 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2519 struct l2cap_conf_rsp
*rsp
= data
;
2520 void *ptr
= rsp
->data
;
2522 BT_DBG("sk %p", sk
);
2524 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2525 rsp
->result
= cpu_to_le16(result
);
2526 rsp
->flags
= cpu_to_le16(flags
);
2531 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2533 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2535 if (rej
->reason
!= 0x0000)
2538 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2539 cmd
->ident
== conn
->info_ident
) {
2540 del_timer(&conn
->info_timer
);
2542 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2543 conn
->info_ident
= 0;
2545 l2cap_conn_start(conn
);
2551 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2553 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2554 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2555 struct l2cap_conn_rsp rsp
;
2556 struct sock
*sk
, *parent
;
2557 int result
, status
= L2CAP_CS_NO_INFO
;
2559 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2560 __le16 psm
= req
->psm
;
2562 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2564 /* Check if we have socket listening on psm */
2565 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2567 result
= L2CAP_CR_BAD_PSM
;
2571 /* Check if the ACL is secure enough (if not SDP) */
2572 if (psm
!= cpu_to_le16(0x0001) &&
2573 !hci_conn_check_link_mode(conn
->hcon
)) {
2574 conn
->disc_reason
= 0x05;
2575 result
= L2CAP_CR_SEC_BLOCK
;
2579 result
= L2CAP_CR_NO_MEM
;
2581 /* Check for backlog size */
2582 if (sk_acceptq_is_full(parent
)) {
2583 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2587 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2591 write_lock_bh(&list
->lock
);
2593 /* Check if we already have channel with that dcid */
2594 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2595 write_unlock_bh(&list
->lock
);
2596 sock_set_flag(sk
, SOCK_ZAPPED
);
2597 l2cap_sock_kill(sk
);
2601 hci_conn_hold(conn
->hcon
);
2603 l2cap_sock_init(sk
, parent
);
2604 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2605 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2606 l2cap_pi(sk
)->psm
= psm
;
2607 l2cap_pi(sk
)->dcid
= scid
;
2609 __l2cap_chan_add(conn
, sk
, parent
);
2610 dcid
= l2cap_pi(sk
)->scid
;
2612 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2614 l2cap_pi(sk
)->ident
= cmd
->ident
;
2616 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2617 if (l2cap_check_security(sk
)) {
2618 if (bt_sk(sk
)->defer_setup
) {
2619 sk
->sk_state
= BT_CONNECT2
;
2620 result
= L2CAP_CR_PEND
;
2621 status
= L2CAP_CS_AUTHOR_PEND
;
2622 parent
->sk_data_ready(parent
, 0);
2624 sk
->sk_state
= BT_CONFIG
;
2625 result
= L2CAP_CR_SUCCESS
;
2626 status
= L2CAP_CS_NO_INFO
;
2629 sk
->sk_state
= BT_CONNECT2
;
2630 result
= L2CAP_CR_PEND
;
2631 status
= L2CAP_CS_AUTHEN_PEND
;
2634 sk
->sk_state
= BT_CONNECT2
;
2635 result
= L2CAP_CR_PEND
;
2636 status
= L2CAP_CS_NO_INFO
;
2639 write_unlock_bh(&list
->lock
);
2642 bh_unlock_sock(parent
);
2645 rsp
.scid
= cpu_to_le16(scid
);
2646 rsp
.dcid
= cpu_to_le16(dcid
);
2647 rsp
.result
= cpu_to_le16(result
);
2648 rsp
.status
= cpu_to_le16(status
);
2649 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2651 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2652 struct l2cap_info_req info
;
2653 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2655 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2656 conn
->info_ident
= l2cap_get_ident(conn
);
2658 mod_timer(&conn
->info_timer
, jiffies
+
2659 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2661 l2cap_send_cmd(conn
, conn
->info_ident
,
2662 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2668 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2670 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2671 u16 scid
, dcid
, result
, status
;
2675 scid
= __le16_to_cpu(rsp
->scid
);
2676 dcid
= __le16_to_cpu(rsp
->dcid
);
2677 result
= __le16_to_cpu(rsp
->result
);
2678 status
= __le16_to_cpu(rsp
->status
);
2680 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2683 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2687 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2693 case L2CAP_CR_SUCCESS
:
2694 sk
->sk_state
= BT_CONFIG
;
2695 l2cap_pi(sk
)->ident
= 0;
2696 l2cap_pi(sk
)->dcid
= dcid
;
2697 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2699 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2701 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2702 l2cap_build_conf_req(sk
, req
), req
);
2703 l2cap_pi(sk
)->num_conf_req
++;
2707 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2711 l2cap_chan_del(sk
, ECONNREFUSED
);
2719 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2721 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2727 dcid
= __le16_to_cpu(req
->dcid
);
2728 flags
= __le16_to_cpu(req
->flags
);
2730 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2732 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2736 if (sk
->sk_state
== BT_DISCONN
)
2739 /* Reject if config buffer is too small. */
2740 len
= cmd_len
- sizeof(*req
);
2741 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2742 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2743 l2cap_build_conf_rsp(sk
, rsp
,
2744 L2CAP_CONF_REJECT
, flags
), rsp
);
2749 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2750 l2cap_pi(sk
)->conf_len
+= len
;
2752 if (flags
& 0x0001) {
2753 /* Incomplete config. Send empty response. */
2754 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2755 l2cap_build_conf_rsp(sk
, rsp
,
2756 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2760 /* Complete config. */
2761 len
= l2cap_parse_conf_req(sk
, rsp
);
2763 l2cap_send_disconn_req(conn
, sk
);
2767 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2768 l2cap_pi(sk
)->num_conf_rsp
++;
2770 /* Reset config buffer. */
2771 l2cap_pi(sk
)->conf_len
= 0;
2773 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2776 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2777 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2778 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2779 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2781 sk
->sk_state
= BT_CONNECTED
;
2783 l2cap_pi(sk
)->next_tx_seq
= 0;
2784 l2cap_pi(sk
)->expected_tx_seq
= 0;
2785 __skb_queue_head_init(TX_QUEUE(sk
));
2786 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2787 l2cap_ertm_init(sk
);
2789 l2cap_chan_ready(sk
);
2793 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2795 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2796 l2cap_build_conf_req(sk
, buf
), buf
);
2797 l2cap_pi(sk
)->num_conf_req
++;
2805 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2807 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2808 u16 scid
, flags
, result
;
2811 scid
= __le16_to_cpu(rsp
->scid
);
2812 flags
= __le16_to_cpu(rsp
->flags
);
2813 result
= __le16_to_cpu(rsp
->result
);
2815 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2816 scid
, flags
, result
);
2818 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2823 case L2CAP_CONF_SUCCESS
:
2826 case L2CAP_CONF_UNACCEPT
:
2827 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2828 int len
= cmd
->len
- sizeof(*rsp
);
2831 /* throw out any old stored conf requests */
2832 result
= L2CAP_CONF_SUCCESS
;
2833 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2836 l2cap_send_disconn_req(conn
, sk
);
2840 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2841 L2CAP_CONF_REQ
, len
, req
);
2842 l2cap_pi(sk
)->num_conf_req
++;
2843 if (result
!= L2CAP_CONF_SUCCESS
)
2849 sk
->sk_state
= BT_DISCONN
;
2850 sk
->sk_err
= ECONNRESET
;
2851 l2cap_sock_set_timer(sk
, HZ
* 5);
2852 l2cap_send_disconn_req(conn
, sk
);
2859 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2861 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2862 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2863 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2864 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2866 sk
->sk_state
= BT_CONNECTED
;
2867 l2cap_pi(sk
)->next_tx_seq
= 0;
2868 l2cap_pi(sk
)->expected_tx_seq
= 0;
2869 __skb_queue_head_init(TX_QUEUE(sk
));
2870 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2871 l2cap_ertm_init(sk
);
2873 l2cap_chan_ready(sk
);
2881 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2883 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2884 struct l2cap_disconn_rsp rsp
;
2888 scid
= __le16_to_cpu(req
->scid
);
2889 dcid
= __le16_to_cpu(req
->dcid
);
2891 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2893 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2897 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2898 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2899 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2901 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2903 skb_queue_purge(TX_QUEUE(sk
));
2905 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
2906 skb_queue_purge(SREJ_QUEUE(sk
));
2907 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2908 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2911 l2cap_chan_del(sk
, ECONNRESET
);
2914 l2cap_sock_kill(sk
);
2918 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2920 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2924 scid
= __le16_to_cpu(rsp
->scid
);
2925 dcid
= __le16_to_cpu(rsp
->dcid
);
2927 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2929 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2933 skb_queue_purge(TX_QUEUE(sk
));
2935 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
2936 skb_queue_purge(SREJ_QUEUE(sk
));
2937 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2938 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2941 l2cap_chan_del(sk
, 0);
2944 l2cap_sock_kill(sk
);
2948 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2950 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2953 type
= __le16_to_cpu(req
->type
);
2955 BT_DBG("type 0x%4.4x", type
);
2957 if (type
== L2CAP_IT_FEAT_MASK
) {
2959 u32 feat_mask
= l2cap_feat_mask
;
2960 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2961 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2962 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2964 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2966 put_unaligned_le32(feat_mask
, rsp
->data
);
2967 l2cap_send_cmd(conn
, cmd
->ident
,
2968 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2969 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2971 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2972 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2973 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2974 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2975 l2cap_send_cmd(conn
, cmd
->ident
,
2976 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2978 struct l2cap_info_rsp rsp
;
2979 rsp
.type
= cpu_to_le16(type
);
2980 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2981 l2cap_send_cmd(conn
, cmd
->ident
,
2982 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
/*
 * l2cap_information_rsp - handle an incoming L2CAP Information Response.
 *
 * Records the remote feature mask; when the peer advertises fixed-channel
 * support, a follow-up Information Request for the fixed-channel map is
 * sent, otherwise the info exchange is marked done and pending channels
 * are started via l2cap_conn_start().
 *
 * NOTE(review): this chunk was recovered from a line-mangled extraction;
 * the declarations of `type`/`result`, closing braces and the return
 * statement appear to have been dropped — confirm against upstream
 * net/bluetooth/l2cap.c before building.
 */
2988 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2990 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
/* Both fields arrive little-endian on the wire. */
2993 type
= __le16_to_cpu(rsp
->type
)
;
2994 result
= __le16_to_cpu(rsp
->result
);
2996 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
/* Response arrived in time: stop the info-request timeout. */
2998 del_timer(&conn
->info_timer
);
3000 if (type
== L2CAP_IT_FEAT_MASK
) {
3001 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3003 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
/* Peer supports fixed channels: ask for its channel map next. */
3004 struct l2cap_info_req req
;
3005 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3007 conn
->info_ident
= l2cap_get_ident(conn
);
3009 l2cap_send_cmd(conn
, conn
->info_ident
,
3010 L2CAP_INFO_REQ
, sizeof(req
), &req
);
/* No fixed-channel support: info exchange is complete. */
3012 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3013 conn
->info_ident
= 0;
3015 l2cap_conn_start(conn
);
3017 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
/* Fixed-channel map received: exchange done, start channels. */
3018 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3019 conn
->info_ident
= 0;
3021 l2cap_conn_start(conn
);
/*
 * l2cap_sig_channel - dispatch commands on the L2CAP signalling channel
 * (CID 0x0001).
 *
 * Copies each command header out of the skb, validates it, and dispatches
 * to the per-command handler.  On a handler error a Command Reject is
 * returned to the peer.
 *
 * NOTE(review): extraction dropped physical lines here (the `switch`
 * statement, `break`s, length declarations, skb free) — comments describe
 * only what is visible; confirm against upstream before building.
 */
3027 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3029 u8
*data
= skb
->data
;
3031 struct l2cap_cmd_hdr cmd
;
/* Mirror raw signalling traffic to any raw sockets first. */
3034 l2cap_raw_recv(conn
, skb
);
/* Multiple commands may be packed into one C-frame. */
3036 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3038 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3039 data
+= L2CAP_CMD_HDR_SIZE
;
3040 len
-= L2CAP_CMD_HDR_SIZE
;
3042 cmd_len
= le16_to_cpu(cmd
.len
);
3044 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
/* A command longer than the remaining payload, or with the
 * reserved ident 0, is malformed — stop parsing this frame. */
3046 if (cmd_len
> len
|| !cmd
.ident
) {
3047 BT_DBG("corrupted command");
3052 case L2CAP_COMMAND_REJ
:
3053 l2cap_command_rej(conn
, &cmd
, data
);
3056 case L2CAP_CONN_REQ
:
3057 err
= l2cap_connect_req(conn
, &cmd
, data
);
3060 case L2CAP_CONN_RSP
:
3061 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3064 case L2CAP_CONF_REQ
:
3065 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3068 case L2CAP_CONF_RSP
:
3069 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3072 case L2CAP_DISCONN_REQ
:
3073 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3076 case L2CAP_DISCONN_RSP
:
3077 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3080 case L2CAP_ECHO_REQ
:
/* Echo is answered inline: same ident, same payload. */
3081 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3084 case L2CAP_ECHO_RSP
:
3087 case L2CAP_INFO_REQ
:
3088 err
= l2cap_information_req(conn
, &cmd
, data
);
3091 case L2CAP_INFO_RSP
:
3092 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3096 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
/* Handler failed: tell the peer with a Command Reject. */
3102 struct l2cap_cmd_rej rej
;
3103 BT_DBG("error %d", err
);
3105 /* FIXME: Map err to a valid reason */
3106 rej
.reason
= cpu_to_le16(0);
3107 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3117 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3119 u16 our_fcs
, rcv_fcs
;
3120 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3122 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3123 skb_trim(skb
, skb
->len
- 2);
3124 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3125 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3127 if (our_fcs
!= rcv_fcs
)
3133 static void l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3135 struct sk_buff
*next_skb
;
3137 bt_cb(skb
)->tx_seq
= tx_seq
;
3138 bt_cb(skb
)->sar
= sar
;
3140 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3142 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3147 if (bt_cb(next_skb
)->tx_seq
> tx_seq
) {
3148 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3152 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3155 } while((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3157 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
/*
 * l2cap_sar_reassembly_sdu - reassemble a segmented SDU from ERTM/streaming
 * I-frames according to the SAR bits in @control.
 *
 * Unsegmented frames are queued to the socket directly; START allocates a
 * reassembly buffer sized from the SDU-length field, CONTINUE/END append
 * into it, and on completion a clone of the full SDU is queued.
 *
 * NOTE(review): extraction dropped physical lines here (error returns,
 * kfree_skb calls, `break`s, the END case label and closing braces) —
 * comments describe only the visible fragments; confirm against upstream.
 */
3160 static int l2cap_sar_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3162 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3163 struct sk_buff
*_skb
;
3166 switch (control
& L2CAP_CTRL_SAR
) {
3167 case L2CAP_SDU_UNSEGMENTED
:
/* An unsegmented SDU while reassembly is in progress is a
 * protocol violation (visible check). */
3168 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3173 err
= sock_queue_rcv_skb(sk
, skb
);
3179 case L2CAP_SDU_START
:
3180 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
/* First two payload bytes of a START frame carry the total
 * SDU length. */
3185 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3188 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3194 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3196 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3197 pi
->partial_sdu_len
= skb
->len
;
3201 case L2CAP_SDU_CONTINUE
:
3202 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3205 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3207 pi
->partial_sdu_len
+= skb
->len
;
/* Overflow beyond the advertised SDU length is an error. */
3208 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3216 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3219 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3221 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3222 pi
->partial_sdu_len
+= skb
->len
;
3224 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
/* Complete SDU: hand a clone up to the socket. */
3225 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3226 err
= sock_queue_rcv_skb(sk
, _skb
);
/*
 * l2cap_check_srej_gap - flush the SREJ queue once the missing frame
 * @tx_seq has arrived, delivering now-contiguous frames in order.
 *
 * Dequeued frames are fed through SAR reassembly with their stored SAR
 * bits, and buffer_seq_srej advances modulo-64 per delivered frame.
 *
 * NOTE(review): extraction dropped lines here (the `break`, the
 * declaration of `control` and apparent per-iteration tx_seq advance) —
 * confirm against upstream.
 */
3240 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3242 struct sk_buff
*skb
;
3245 while((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
/* Stop at the first gap in sequence numbers. */
3246 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3249 skb
= skb_dequeue(SREJ_QUEUE(sk
));
/* Re-attach the stored SAR bits before reassembly. */
3250 control
|= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3251 l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3252 l2cap_pi(sk
)->buffer_seq_srej
=
3253 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
/*
 * l2cap_resend_srejframe - re-issue SELECT_REJECT S-frames for every
 * outstanding SREJ entry up to (and handling of) @tx_seq.
 *
 * Walks SREJ_LIST; entries other than the matching one get a fresh SREJ
 * S-frame sent and are rotated to the list tail.
 *
 * NOTE(review): extraction dropped lines here (the matching-entry branch
 * body — likely list_del/kfree/return — the `control` declaration and a
 * list_del before the re-add) — confirm against upstream.
 */
3258 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3260 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3261 struct srej_list
*l
, *tmp
;
3264 list_for_each_entry_safe(l
,tmp
, SREJ_LIST(sk
), list
) {
/* Entry for the frame that just arrived: handled here
 * (body not visible in this extraction). */
3265 if (l
->tx_seq
== tx_seq
) {
/* Still-missing frame: send another SREJ for it. */
3270 control
= L2CAP_SUPER_SELECT_REJECT
;
3271 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3272 l2cap_send_sframe(pi
, control
);
3274 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3278 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3280 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3281 struct srej_list
*new;
3284 while (tx_seq
!= pi
->expected_tx_seq
) {
3285 control
= L2CAP_SUPER_SELECT_REJECT
;
3286 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3287 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
3288 control
|= L2CAP_CTRL_POLL
;
3289 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
3291 l2cap_send_sframe(pi
, control
);
3293 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3294 new->tx_seq
= pi
->expected_tx_seq
++;
3295 list_add_tail(&new->list
, SREJ_LIST(sk
));
3297 pi
->expected_tx_seq
++;
/*
 * l2cap_data_channel_iframe - ERTM receive path for I-frames.
 *
 * Acknowledges the peer's ReqSeq, then either accepts an in-sequence
 * frame (delivering it through SAR reassembly and sending an RR ack every
 * L2CAP_DEFAULT_NUM_TO_ACK frames) or handles an out-of-sequence frame by
 * entering/continuing SREJ recovery.
 *
 * NOTE(review): extraction dropped physical lines (goto labels such as the
 * expected/drop paths, `break`s, `tx_control` declaration, closing braces,
 * return statements) — comments describe only visible fragments; this
 * function is too order-sensitive to reconstruct safely here.
 */
3300 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3302 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3303 u8 tx_seq
= __get_txseq(rx_control
);
3304 u8 req_seq
= __get_reqseq(rx_control
);
3306 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3309 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
/* The piggybacked ReqSeq acknowledges our transmitted frames. */
3311 pi
->expected_ack_seq
= req_seq
;
3312 l2cap_drop_acked_frames(sk
);
3314 if (tx_seq
== pi
->expected_tx_seq
)
/* Out-of-sequence frame while SREJ recovery is active. */
3317 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3318 struct srej_list
*first
;
3320 first
= list_first_entry(SREJ_LIST(sk
),
3321 struct srej_list
, list
);
3322 if (tx_seq
== first
->tx_seq
) {
/* This is the oldest missing frame: queue it and try to
 * flush the now-contiguous run. */
3323 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3324 l2cap_check_srej_gap(sk
, tx_seq
);
3326 list_del(&first
->list
);
3329 if (list_empty(SREJ_LIST(sk
))) {
/* All gaps filled: leave SREJ recovery. */
3330 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3331 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3334 struct srej_list
*l
;
3335 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3337 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3338 if (l
->tx_seq
== tx_seq
) {
/* Frame already SREJ'd once: resend the request set. */
3339 l2cap_resend_srejframe(sk
, tx_seq
);
3343 l2cap_send_srejframe(sk
, tx_seq
);
/* First out-of-sequence frame: enter SREJ recovery. */
3346 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3348 INIT_LIST_HEAD(SREJ_LIST(sk
));
3349 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3351 __skb_queue_head_init(SREJ_QUEUE(sk
));
3352 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3354 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3356 l2cap_send_srejframe(sk
, tx_seq
);
/* In-sequence frame accepted (expected path). */
3361 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3363 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3364 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3368 if (rx_control
& L2CAP_CTRL_FINAL
) {
3369 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3370 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
/* F-bit set: retransmit from the acknowledged point. */
3372 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3373 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3374 l2cap_ertm_send(sk
);
3378 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3380 err
= l2cap_sar_reassembly_sdu(sk
, skb
, rx_control
);
/* Ack every L2CAP_DEFAULT_NUM_TO_ACK received I-frames. */
3384 pi
->num_to_ack
= (pi
->num_to_ack
+ 1) % L2CAP_DEFAULT_NUM_TO_ACK
;
3385 if (pi
->num_to_ack
== L2CAP_DEFAULT_NUM_TO_ACK
- 1) {
3386 tx_control
|= L2CAP_SUPER_RCV_READY
;
3387 tx_control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3388 l2cap_send_sframe(pi
, tx_control
);
/*
 * l2cap_data_channel_sframe - ERTM receive path for supervisory frames
 * (RR, REJ, SREJ, RNR).
 *
 * NOTE(review): extraction dropped physical lines (`break`s, `else`
 * arms, goto/return statements, closing braces), so branch boundaries
 * below are partly implicit.  Note also that the local named `tx_seq`
 * actually holds the frame's ReqSeq (__get_reqseq) — confusing but
 * consistent with how it is used as an acknowledgement number.
 */
3393 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3395 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3396 u8 tx_seq
= __get_reqseq(rx_control
);
3398 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3400 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3401 case L2CAP_SUPER_RCV_READY
:
3402 if (rx_control
& L2CAP_CTRL_POLL
) {
/* RR with P-bit: answer with an RR carrying the F-bit and
 * our current receive sequence. */
3403 u16 control
= L2CAP_CTRL_FINAL
;
3404 control
|= L2CAP_SUPER_RCV_READY
|
3405 (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
);
3406 l2cap_send_sframe(l2cap_pi(sk
), control
);
3407 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3409 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
/* RR with F-bit: ack frames and resume transmission. */
3410 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3411 pi
->expected_ack_seq
= tx_seq
;
3412 l2cap_drop_acked_frames(sk
);
3414 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3415 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3417 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3418 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3419 l2cap_ertm_send(sk
);
3422 if (!(pi
->conn_state
& L2CAP_CONN_WAIT_F
))
3425 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3426 del_timer(&pi
->monitor_timer
);
3428 if (pi
->unacked_frames
> 0)
3429 __mod_retrans_timer();
/* Plain RR: pure acknowledgement. */
3431 pi
->expected_ack_seq
= tx_seq
;
3432 l2cap_drop_acked_frames(sk
);
3434 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3435 (pi
->unacked_frames
> 0))
3436 __mod_retrans_timer();
3438 l2cap_ertm_send(sk
);
3439 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3443 case L2CAP_SUPER_REJECT
:
/* REJ: peer requests go-back-N retransmission. */
3444 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3446 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3447 l2cap_drop_acked_frames(sk
);
3449 if (rx_control
& L2CAP_CTRL_FINAL
) {
3450 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3451 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3453 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3454 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3455 l2cap_ertm_send(sk
);
3458 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3459 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3460 l2cap_ertm_send(sk
);
3462 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3463 pi
->srej_save_reqseq
= tx_seq
;
3464 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3470 case L2CAP_SUPER_SELECT_REJECT
:
/* SREJ: peer requests a single-frame retransmission. */
3471 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3473 if (rx_control
& L2CAP_CTRL_POLL
) {
3474 l2cap_retransmit_frame(sk
, tx_seq
);
3475 pi
->expected_ack_seq
= tx_seq
;
3476 l2cap_drop_acked_frames(sk
);
3477 l2cap_ertm_send(sk
);
3478 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3479 pi
->srej_save_reqseq
= tx_seq
;
3480 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3482 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
/* F-bit SREJ matching a saved request clears the act flag
 * instead of retransmitting again. */
3483 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3484 pi
->srej_save_reqseq
== tx_seq
)
3485 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3487 l2cap_retransmit_frame(sk
, tx_seq
);
3490 l2cap_retransmit_frame(sk
, tx_seq
);
3491 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3492 pi
->srej_save_reqseq
= tx_seq
;
3493 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3498 case L2CAP_SUPER_RCV_NOT_READY
:
/* RNR: peer is busy — ack what it saw, stop retransmitting. */
3499 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3500 pi
->expected_ack_seq
= tx_seq
;
3501 l2cap_drop_acked_frames(sk
);
3503 del_timer(&l2cap_pi(sk
)->retrans_timer
);
3504 if (rx_control
& L2CAP_CTRL_POLL
) {
3505 u16 control
= L2CAP_CTRL_FINAL
;
3506 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
/*
 * l2cap_data_channel - entry point for data frames on a connection-
 * oriented channel, dispatching by the channel's negotiated mode.
 *
 * Basic mode queues the payload straight to the socket; ERTM strips the
 * 16-bit control field (and FCS) and dispatches I-/S-frames; streaming
 * mode tolerates loss by resynchronising expected_tx_seq.
 *
 * NOTE(review): extraction dropped lines (declarations of sk/control/
 * len/tx_seq, `switch (pi->mode)`, skb_pull calls, drop/done labels,
 * `break`s and returns) — comments describe only visible fragments.
 */
3514 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3517 struct l2cap_pinfo
*pi
;
3522 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3524 BT_DBG("unknown cid 0x%4.4x", cid
);
3530 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3532 if (sk
->sk_state
!= BT_CONNECTED
)
3536 case L2CAP_MODE_BASIC
:
3537 /* If socket recv buffers overflows we drop data here
3538 * which is *bad* because L2CAP has to be reliable.
3539 * But we don't have any other choice. L2CAP doesn't
3540 * provide flow control mechanism. */
3542 if (pi
->imtu
< skb
->len
)
3545 if (!sock_queue_rcv_skb(sk
, skb
))
3549 case L2CAP_MODE_ERTM
:
/* First two bytes of the payload are the ERTM control field. */
3550 control
= get_unaligned_le16(skb
->data
);
3554 if (__is_sar_start(control
))
3557 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3561 * We can just drop the corrupted I-frame here.
3562 * Receiver will miss it and start proper recovery
3563 * procedures and ask retransmission.
3565 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
)
3568 if (l2cap_check_fcs(pi
, skb
))
3571 if (__is_iframe(control
))
3572 err
= l2cap_data_channel_iframe(sk
, control
, skb
);
3574 err
= l2cap_data_channel_sframe(sk
, control
, skb
);
3580 case L2CAP_MODE_STREAMING
:
3581 control
= get_unaligned_le16(skb
->data
);
3585 if (__is_sar_start(control
))
3588 if (pi
->fcs
== L2CAP_FCS_CRC16
)
/* S-frames are not valid in streaming mode. */
3591 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
|| __is_sframe(control
))
3594 if (l2cap_check_fcs(pi
, skb
))
3597 tx_seq
= __get_txseq(control
);
/* Streaming tolerates loss: resync on any sequence jump. */
3599 if (pi
->expected_tx_seq
== tx_seq
)
3600 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3602 pi
->expected_tx_seq
= tx_seq
+ 1;
3604 err
= l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3609 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, l2cap_pi(sk
)->mode
);
/*
 * l2cap_conless_channel - deliver a connectionless-channel (CID 0x0002)
 * datagram to the socket bound to @psm.
 *
 * NOTE(review): extraction dropped lines (the `sk` declaration, the
 * not-found/drop paths, sock_put and returns) — comments describe only
 * the visible fragments.
 */
3623 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3627 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3631 BT_DBG("sk %p, len %d", sk
, skb
->len
);
/* Only bound or connected sockets may receive datagrams. */
3633 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
/* Respect the socket's incoming MTU. */
3636 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3639 if (!sock_queue_rcv_skb(sk
, skb
))
/*
 * l2cap_recv_frame - parse the basic L2CAP header of a complete frame
 * and route it by CID: signalling, connectionless, or data channel.
 *
 * NOTE(review): extraction dropped lines (cid/len/psm declarations, the
 * length-mismatch handling body, the `switch (cid)` statement, skb_pull
 * of the PSM, `break`s) — comments describe only the visible fragments.
 */
3651 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3653 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3657 skb_pull(skb
, L2CAP_HDR_SIZE
);
3658 cid
= __le16_to_cpu(lh
->cid
);
3659 len
= __le16_to_cpu(lh
->len
);
/* Header length must match the actual payload length. */
3661 if (len
!= skb
->len
) {
3666 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3669 case L2CAP_CID_SIGNALING
:
3670 l2cap_sig_channel(conn
, skb
);
3673 case L2CAP_CID_CONN_LESS
:
/* Connectionless frames carry a 16-bit PSM before the data. */
3674 psm
= get_unaligned_le16(skb
->data
);
3676 l2cap_conless_channel(conn
, psm
, skb
);
3680 l2cap_data_channel(conn
, cid
, skb
);
3685 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * l2cap_connect_ind - HCI callback: should an incoming ACL connection
 * from @bdaddr be accepted?
 *
 * Scans listening L2CAP sockets; lm1 accumulates the link-mode bits for
 * exact local-address matches, lm2 for wildcard (BDADDR_ANY) listeners.
 *
 * NOTE(review): `exact` is initialised but never visibly incremented in
 * this extraction — the `exact++` bookkeeping line on the exact-match
 * branch appears to have been dropped; as shown, the function would
 * always return lm2.  Confirm against upstream.  The `register` storage
 * class is archaic and has no effect on modern compilers.
 */
3687 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3689 int exact
= 0, lm1
= 0, lm2
= 0;
3690 register struct sock
*sk
;
3691 struct hlist_node
*node
;
/* L2CAP only runs over ACL links. */
3693 if (type
!= ACL_LINK
)
3696 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3698 /* Find listening sockets and check their link_mode */
3699 read_lock(&l2cap_sk_list
.lock
);
3700 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3701 if (sk
->sk_state
!= BT_LISTEN
)
/* Listener bound to this adapter's own address. */
3704 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3705 lm1
|= HCI_LM_ACCEPT
;
3706 if (l2cap_pi(sk
)->role_switch
)
3707 lm1
|= HCI_LM_MASTER
;
/* Wildcard listener. */
3709 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3710 lm2
|= HCI_LM_ACCEPT
;
3711 if (l2cap_pi(sk
)->role_switch
)
3712 lm2
|= HCI_LM_MASTER
;
3715 read_unlock(&l2cap_sk_list
.lock
);
/* Exact-address listeners take precedence over wildcards. */
3717 return exact
? lm1
: lm2
;
/*
 * l2cap_connect_cfm - HCI callback: an outgoing/incoming ACL connection
 * completed with @status.
 *
 * On success, an l2cap_conn is attached to the hci_conn and marked ready;
 * on failure the connection (and its channels) is torn down.
 *
 * NOTE(review): extraction dropped lines (the non-ACL return, the
 * `if (!status)`/`else` arms and the final return) — confirm against
 * upstream.
 */
3720 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3722 struct l2cap_conn
*conn
;
3724 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3726 if (hcon
->type
!= ACL_LINK
)
3730 conn
= l2cap_conn_add(hcon
, status
);
3732 l2cap_conn_ready(conn
);
/* Failure path: map the HCI status to an errno and clean up. */
3734 l2cap_conn_del(hcon
, bt_err(status
));
3739 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3741 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3743 BT_DBG("hcon %p", hcon
);
3745 if (hcon
->type
!= ACL_LINK
|| !conn
)
3748 return conn
->disc_reason
;
3751 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3753 BT_DBG("hcon %p reason %d", hcon
, reason
);
3755 if (hcon
->type
!= ACL_LINK
)
3758 l2cap_conn_del(hcon
, bt_err(reason
));
3763 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3765 if (sk
->sk_type
!= SOCK_SEQPACKET
)
3768 if (encrypt
== 0x00) {
3769 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3770 l2cap_sock_clear_timer(sk
);
3771 l2cap_sock_set_timer(sk
, HZ
* 5);
3772 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3773 __l2cap_sock_close(sk
, ECONNREFUSED
);
3775 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3776 l2cap_sock_clear_timer(sk
);
/*
 * l2cap_security_cfm - HCI callback: authentication/encryption procedure
 * finished with @status; walk every channel on the connection and move
 * it forward or reject it.
 *
 * BT_CONNECT channels send their pending Connection Request on success;
 * BT_CONNECT2 channels answer the peer's Connection Request with success
 * or L2CAP_CR_SEC_BLOCK; established channels get encryption re-checked.
 *
 * NOTE(review): extraction dropped physical lines (the `sk` declaration,
 * no-conn early return, bh_lock/unlock_sock pairs, `continue`s, the
 * `if (!status)`/`else` arms around the CONNECT/CONNECT2 handling and
 * the final return) — comments describe only the visible fragments.
 */
3780 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3782 struct l2cap_chan_list
*l
;
3783 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3789 l
= &conn
->chan_list
;
3791 BT_DBG("conn %p", conn
);
3793 read_lock(&l
->lock
);
/* Iterate the singly-linked per-connection channel list. */
3795 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
3798 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3803 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3804 sk
->sk_state
== BT_CONFIG
)) {
3805 l2cap_check_encryption(sk
, encrypt
);
3810 if (sk
->sk_state
== BT_CONNECT
) {
/* Security done for an outgoing channel: send the deferred
 * Connection Request now. */
3812 struct l2cap_conn_req req
;
3813 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3814 req
.psm
= l2cap_pi(sk
)->psm
;
3816 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
3818 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3819 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3821 l2cap_sock_clear_timer(sk
);
3822 l2cap_sock_set_timer(sk
, HZ
/ 10);
3824 } else if (sk
->sk_state
== BT_CONNECT2
) {
/* Incoming channel waiting on security: accept or block. */
3825 struct l2cap_conn_rsp rsp
;
3829 sk
->sk_state
= BT_CONFIG
;
3830 result
= L2CAP_CR_SUCCESS
;
3832 sk
->sk_state
= BT_DISCONN
;
3833 l2cap_sock_set_timer(sk
, HZ
/ 10);
3834 result
= L2CAP_CR_SEC_BLOCK
;
3837 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3838 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3839 rsp
.result
= cpu_to_le16(result
);
3840 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3841 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3842 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3848 read_unlock(&l
->lock
);
/*
 * l2cap_recv_acldata - HCI callback: reassemble L2CAP frames from ACL
 * packets, handling both complete frames and start/continuation
 * fragments accumulated in conn->rx_skb/rx_len.
 *
 * NOTE(review): extraction dropped physical lines (the `if (conn->rx_skb)`
 * guard before the unexpected-start error, the minimum-length check, the
 * allocation-failure branch, the trailing skb_copy length arguments,
 * goto drop/done labels, kfree_skb and returns) — comments describe only
 * the visible fragments; this function's ordering is too delicate to
 * reconstruct safely here.
 */
3853 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3855 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
/* Lazily attach an l2cap_conn the first time data arrives. */
3857 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
3860 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3862 if (flags
& ACL_START
) {
3863 struct l2cap_hdr
*hdr
;
/* A new start while a reassembly is pending: discard the
 * partial frame and mark the connection unreliable. */
3867 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3868 kfree_skb(conn
->rx_skb
);
3869 conn
->rx_skb
= NULL
;
3871 l2cap_conn_unreliable(conn
, ECOMM
);
3875 BT_ERR("Frame is too short (len %d)", skb
->len
);
3876 l2cap_conn_unreliable(conn
, ECOMM
);
3880 hdr
= (struct l2cap_hdr
*) skb
->data
;
3881 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3883 if (len
== skb
->len
) {
3884 /* Complete frame received */
3885 l2cap_recv_frame(conn
, skb
);
3889 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
3891 if (skb
->len
> len
) {
3892 BT_ERR("Frame is too long (len %d, expected len %d)",
3894 l2cap_conn_unreliable(conn
, ECOMM
);
3898 /* Allocate skb for the complete frame (with header) */
3899 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3903 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
/* rx_len tracks how many bytes are still outstanding. */
3905 conn
->rx_len
= len
- skb
->len
;
3907 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
/* Continuation fragment with no reassembly in progress. */
3909 if (!conn
->rx_len
) {
3910 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
3911 l2cap_conn_unreliable(conn
, ECOMM
);
3915 if (skb
->len
> conn
->rx_len
) {
3916 BT_ERR("Fragment is too long (len %d, expected %d)",
3917 skb
->len
, conn
->rx_len
);
3918 kfree_skb(conn
->rx_skb
);
3919 conn
->rx_skb
= NULL
;
3921 l2cap_conn_unreliable(conn
, ECOMM
);
3925 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3927 conn
->rx_len
-= skb
->len
;
3929 if (!conn
->rx_len
) {
3930 /* Complete frame received */
3931 l2cap_recv_frame(conn
, conn
->rx_skb
);
3932 conn
->rx_skb
= NULL
;
3941 static ssize_t
l2cap_sysfs_show(struct class *dev
, char *buf
)
3944 struct hlist_node
*node
;
3947 read_lock_bh(&l2cap_sk_list
.lock
);
3949 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3950 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3952 str
+= sprintf(str
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3953 batostr(&bt_sk(sk
)->src
), batostr(&bt_sk(sk
)->dst
),
3954 sk
->sk_state
, __le16_to_cpu(pi
->psm
), pi
->scid
,
3955 pi
->dcid
, pi
->imtu
, pi
->omtu
, pi
->sec_level
);
3958 read_unlock_bh(&l2cap_sk_list
.lock
);
/* Read-only sysfs class attribute "l2cap" backed by l2cap_sysfs_show. */
3963 static CLASS_ATTR(l2cap
, S_IRUGO
, l2cap_sysfs_show
, NULL
);
3965 static const struct proto_ops l2cap_sock_ops
= {
3966 .family
= PF_BLUETOOTH
,
3967 .owner
= THIS_MODULE
,
3968 .release
= l2cap_sock_release
,
3969 .bind
= l2cap_sock_bind
,
3970 .connect
= l2cap_sock_connect
,
3971 .listen
= l2cap_sock_listen
,
3972 .accept
= l2cap_sock_accept
,
3973 .getname
= l2cap_sock_getname
,
3974 .sendmsg
= l2cap_sock_sendmsg
,
3975 .recvmsg
= l2cap_sock_recvmsg
,
3976 .poll
= bt_sock_poll
,
3977 .ioctl
= bt_sock_ioctl
,
3978 .mmap
= sock_no_mmap
,
3979 .socketpair
= sock_no_socketpair
,
3980 .shutdown
= l2cap_sock_shutdown
,
3981 .setsockopt
= l2cap_sock_setsockopt
,
3982 .getsockopt
= l2cap_sock_getsockopt
3985 static const struct net_proto_family l2cap_sock_family_ops
= {
3986 .family
= PF_BLUETOOTH
,
3987 .owner
= THIS_MODULE
,
3988 .create
= l2cap_sock_create
,
/*
 * HCI protocol descriptor registering L2CAP's callbacks with the HCI
 * core (connection setup/teardown, security events, ACL data).
 * NOTE(review): a `.name = "L2CAP"` member may have been dropped by the
 * extraction — confirm against upstream before building.
 */
3991 static struct hci_proto l2cap_hci_proto
= {
3993 .id
= HCI_PROTO_L2CAP
,
3994 .connect_ind
= l2cap_connect_ind
,
3995 .connect_cfm
= l2cap_connect_cfm
,
3996 .disconn_ind
= l2cap_disconn_ind
,
3997 .disconn_cfm
= l2cap_disconn_cfm
,
3998 .security_cfm
= l2cap_security_cfm
,
3999 .recv_acldata
= l2cap_recv_acldata
/*
 * l2cap_init - module init: register the proto, the Bluetooth socket
 * family, and the HCI protocol hooks; expose the sysfs info file.
 * Earlier registrations are unwound when a later one fails.
 *
 * NOTE(review): extraction dropped lines (the `err` checks/gotos, the
 * error labels and the final returns) — confirm against upstream.
 */
4002 static int __init
l2cap_init(void)
4006 err
= proto_register(&l2cap_proto
, 0);
4010 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4012 BT_ERR("L2CAP socket registration failed");
4016 err
= hci_register_proto(&l2cap_hci_proto
);
4018 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket registration on HCI-proto failure. */
4019 bt_sock_unregister(BTPROTO_L2CAP
);
/* The sysfs file is best-effort: failure only logs. */
4023 if (class_create_file(bt_class
, &class_attr_l2cap
) < 0)
4024 BT_ERR("Failed to create L2CAP info file");
4026 BT_INFO("L2CAP ver %s", VERSION
);
4027 BT_INFO("L2CAP socket layer initialized");
/* Error path: undo proto_register. */
4032 proto_unregister(&l2cap_proto
);
4036 static void __exit
l2cap_exit(void)
4038 class_remove_file(bt_class
, &class_attr_l2cap
);
4040 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4041 BT_ERR("L2CAP socket unregistration failed");
4043 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4044 BT_ERR("L2CAP protocol unregistration failed");
4046 proto_unregister(&l2cap_proto
);
/*
 * l2cap_load - intentionally empty exported symbol.
 *
 * Modules that use L2CAP sockets but no other symbol from this module
 * reference it so that module dependency resolution pulls L2CAP in
 * automatically.
 */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit points. */
4058 module_init(l2cap_init
);
4059 module_exit(l2cap_exit
);
/* ERTM is opt-in at module load time (defaults off, see top of file). */
4061 module_param(enable_ertm
, bool, 0644);
4062 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
/* NOTE(review): max_transmit is declared `static int` at the top of this
 * file but registered here with the `uint` param type; likewise
 * enable_ertm is `int` but registered as `bool`.  Older kernels tolerate
 * these mismatches, newer ones reject them at build time — confirm the
 * declarations match the param types for the target kernel. */
4064 module_param(max_transmit
, uint
, 0644);
4065 MODULE_PARM_DESC(max_transmit
, "Max transmit value (default = 3)");
4067 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4068 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4069 MODULE_VERSION(VERSION
);
4070 MODULE_LICENSE("GPL");
/* Autoload when a BTPROTO_L2CAP (protocol 0) socket is requested. */
4071 MODULE_ALIAS("bt-proto-0");