2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
56 static int enable_ertm
= 0;
58 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
59 static u8 l2cap_fixed_chan
[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops
;
63 static struct bt_sock_list l2cap_sk_list
= {
64 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
67 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
68 static void l2cap_sock_close(struct sock
*sk
);
69 static void l2cap_sock_kill(struct sock
*sk
);
71 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
72 u8 code
, u8 ident
, u16 dlen
, void *data
);
74 /* ---- L2CAP timers ---- */
75 static void l2cap_sock_timeout(unsigned long arg
)
77 struct sock
*sk
= (struct sock
*) arg
;
80 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
84 if (sk
->sk_state
== BT_CONNECTED
|| sk
->sk_state
== BT_CONFIG
)
85 reason
= ECONNREFUSED
;
86 else if (sk
->sk_state
== BT_CONNECT
&&
87 l2cap_pi(sk
)->sec_level
!= BT_SECURITY_SDP
)
88 reason
= ECONNREFUSED
;
92 __l2cap_sock_close(sk
, reason
);
100 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
102 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
103 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
106 static void l2cap_sock_clear_timer(struct sock
*sk
)
108 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
109 sk_stop_timer(sk
, &sk
->sk_timer
);
112 /* ---- L2CAP channels ---- */
113 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
116 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
117 if (l2cap_pi(s
)->dcid
== cid
)
123 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
126 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
127 if (l2cap_pi(s
)->scid
== cid
)
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
139 s
= __l2cap_get_chan_by_scid(l
, cid
);
142 read_unlock(&l
->lock
);
146 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
149 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
150 if (l2cap_pi(s
)->ident
== ident
)
156 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
160 s
= __l2cap_get_chan_by_ident(l
, ident
);
163 read_unlock(&l
->lock
);
167 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
169 u16 cid
= L2CAP_CID_DYN_START
;
171 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
172 if (!__l2cap_get_chan_by_scid(l
, cid
))
179 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
184 l2cap_pi(l
->head
)->prev_c
= sk
;
186 l2cap_pi(sk
)->next_c
= l
->head
;
187 l2cap_pi(sk
)->prev_c
= NULL
;
191 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
193 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
195 write_lock_bh(&l
->lock
);
200 l2cap_pi(next
)->prev_c
= prev
;
202 l2cap_pi(prev
)->next_c
= next
;
203 write_unlock_bh(&l
->lock
);
208 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
210 struct l2cap_chan_list
*l
= &conn
->chan_list
;
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
213 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
215 conn
->disc_reason
= 0x13;
217 l2cap_pi(sk
)->conn
= conn
;
219 if (sk
->sk_type
== SOCK_SEQPACKET
) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
222 } else if (sk
->sk_type
== SOCK_DGRAM
) {
223 /* Connectionless socket */
224 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
225 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
226 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
230 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
231 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
234 __l2cap_chan_link(l
, sk
);
237 bt_accept_enqueue(parent
, sk
);
241 * Must be called on the locked socket. */
242 static void l2cap_chan_del(struct sock
*sk
, int err
)
244 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
245 struct sock
*parent
= bt_sk(sk
)->parent
;
247 l2cap_sock_clear_timer(sk
);
249 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
252 /* Unlink from channel list */
253 l2cap_chan_unlink(&conn
->chan_list
, sk
);
254 l2cap_pi(sk
)->conn
= NULL
;
255 hci_conn_put(conn
->hcon
);
258 sk
->sk_state
= BT_CLOSED
;
259 sock_set_flag(sk
, SOCK_ZAPPED
);
265 bt_accept_unlink(sk
);
266 parent
->sk_data_ready(parent
, 0);
268 sk
->sk_state_change(sk
);
271 /* Service level security */
272 static inline int l2cap_check_security(struct sock
*sk
)
274 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
277 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
279 auth_type
= HCI_AT_NO_BONDING_MITM
;
281 auth_type
= HCI_AT_NO_BONDING
;
283 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
284 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
286 switch (l2cap_pi(sk
)->sec_level
) {
287 case BT_SECURITY_HIGH
:
288 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
290 case BT_SECURITY_MEDIUM
:
291 auth_type
= HCI_AT_GENERAL_BONDING
;
294 auth_type
= HCI_AT_NO_BONDING
;
299 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
303 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn
->lock
);
315 if (++conn
->tx_ident
> 128)
320 spin_unlock_bh(&conn
->lock
);
325 static inline int l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
327 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
329 BT_DBG("code 0x%2.2x", code
);
334 return hci_send_acl(conn
->hcon
, skb
, 0);
337 static inline int l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
340 struct l2cap_hdr
*lh
;
341 struct l2cap_conn
*conn
= pi
->conn
;
342 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
344 if (pi
->fcs
== L2CAP_FCS_CRC16
)
347 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
349 count
= min_t(unsigned int, conn
->mtu
, hlen
);
350 control
|= L2CAP_CTRL_FRAME_TYPE
;
352 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
356 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
357 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
358 lh
->cid
= cpu_to_le16(pi
->dcid
);
359 put_unaligned_le16(control
, skb_put(skb
, 2));
361 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
362 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
363 put_unaligned_le16(fcs
, skb_put(skb
, 2));
366 return hci_send_acl(pi
->conn
->hcon
, skb
, 0);
369 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
371 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
)
372 control
|= L2CAP_SUPER_RCV_NOT_READY
;
374 control
|= L2CAP_SUPER_RCV_READY
;
376 return l2cap_send_sframe(pi
, control
);
379 static void l2cap_do_start(struct sock
*sk
)
381 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
383 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
384 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
387 if (l2cap_check_security(sk
)) {
388 struct l2cap_conn_req req
;
389 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
390 req
.psm
= l2cap_pi(sk
)->psm
;
392 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
394 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
395 L2CAP_CONN_REQ
, sizeof(req
), &req
);
398 struct l2cap_info_req req
;
399 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
401 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
402 conn
->info_ident
= l2cap_get_ident(conn
);
404 mod_timer(&conn
->info_timer
, jiffies
+
405 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
407 l2cap_send_cmd(conn
, conn
->info_ident
,
408 L2CAP_INFO_REQ
, sizeof(req
), &req
);
412 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
)
414 struct l2cap_disconn_req req
;
416 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
417 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
418 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
419 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
422 /* ---- L2CAP connections ---- */
423 static void l2cap_conn_start(struct l2cap_conn
*conn
)
425 struct l2cap_chan_list
*l
= &conn
->chan_list
;
428 BT_DBG("conn %p", conn
);
432 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
435 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
440 if (sk
->sk_state
== BT_CONNECT
) {
441 if (l2cap_check_security(sk
)) {
442 struct l2cap_conn_req req
;
443 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
444 req
.psm
= l2cap_pi(sk
)->psm
;
446 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
448 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
449 L2CAP_CONN_REQ
, sizeof(req
), &req
);
451 } else if (sk
->sk_state
== BT_CONNECT2
) {
452 struct l2cap_conn_rsp rsp
;
453 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
454 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
456 if (l2cap_check_security(sk
)) {
457 if (bt_sk(sk
)->defer_setup
) {
458 struct sock
*parent
= bt_sk(sk
)->parent
;
459 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
460 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
461 parent
->sk_data_ready(parent
, 0);
464 sk
->sk_state
= BT_CONFIG
;
465 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
466 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
469 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
470 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
473 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
474 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
480 read_unlock(&l
->lock
);
483 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
485 struct l2cap_chan_list
*l
= &conn
->chan_list
;
488 BT_DBG("conn %p", conn
);
492 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
495 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
496 l2cap_sock_clear_timer(sk
);
497 sk
->sk_state
= BT_CONNECTED
;
498 sk
->sk_state_change(sk
);
499 } else if (sk
->sk_state
== BT_CONNECT
)
505 read_unlock(&l
->lock
);
508 /* Notify sockets that we cannot guaranty reliability anymore */
509 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
511 struct l2cap_chan_list
*l
= &conn
->chan_list
;
514 BT_DBG("conn %p", conn
);
518 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
519 if (l2cap_pi(sk
)->force_reliable
)
523 read_unlock(&l
->lock
);
526 static void l2cap_info_timeout(unsigned long arg
)
528 struct l2cap_conn
*conn
= (void *) arg
;
530 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
531 conn
->info_ident
= 0;
533 l2cap_conn_start(conn
);
536 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
538 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
543 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
547 hcon
->l2cap_data
= conn
;
550 BT_DBG("hcon %p conn %p", hcon
, conn
);
552 conn
->mtu
= hcon
->hdev
->acl_mtu
;
553 conn
->src
= &hcon
->hdev
->bdaddr
;
554 conn
->dst
= &hcon
->dst
;
558 spin_lock_init(&conn
->lock
);
559 rwlock_init(&conn
->chan_list
.lock
);
561 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
562 (unsigned long) conn
);
564 conn
->disc_reason
= 0x13;
569 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
571 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
577 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
579 kfree_skb(conn
->rx_skb
);
582 while ((sk
= conn
->chan_list
.head
)) {
584 l2cap_chan_del(sk
, err
);
589 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
590 del_timer_sync(&conn
->info_timer
);
592 hcon
->l2cap_data
= NULL
;
596 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
598 struct l2cap_chan_list
*l
= &conn
->chan_list
;
599 write_lock_bh(&l
->lock
);
600 __l2cap_chan_add(conn
, sk
, parent
);
601 write_unlock_bh(&l
->lock
);
604 /* ---- Socket interface ---- */
605 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
608 struct hlist_node
*node
;
609 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
610 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
617 /* Find socket with psm and source bdaddr.
618 * Returns closest match.
620 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
622 struct sock
*sk
= NULL
, *sk1
= NULL
;
623 struct hlist_node
*node
;
625 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
626 if (state
&& sk
->sk_state
!= state
)
629 if (l2cap_pi(sk
)->psm
== psm
) {
631 if (!bacmp(&bt_sk(sk
)->src
, src
))
635 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
639 return node
? sk
: sk1
;
642 /* Find socket with given address (psm, src).
643 * Returns locked socket */
644 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
647 read_lock(&l2cap_sk_list
.lock
);
648 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
651 read_unlock(&l2cap_sk_list
.lock
);
655 static void l2cap_sock_destruct(struct sock
*sk
)
659 skb_queue_purge(&sk
->sk_receive_queue
);
660 skb_queue_purge(&sk
->sk_write_queue
);
663 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
667 BT_DBG("parent %p", parent
);
669 /* Close not yet accepted channels */
670 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
671 l2cap_sock_close(sk
);
673 parent
->sk_state
= BT_CLOSED
;
674 sock_set_flag(parent
, SOCK_ZAPPED
);
677 /* Kill socket (only if zapped and orphan)
678 * Must be called on unlocked socket.
680 static void l2cap_sock_kill(struct sock
*sk
)
682 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
685 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
687 /* Kill poor orphan */
688 bt_sock_unlink(&l2cap_sk_list
, sk
);
689 sock_set_flag(sk
, SOCK_DEAD
);
693 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
695 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
697 switch (sk
->sk_state
) {
699 l2cap_sock_cleanup_listen(sk
);
704 if (sk
->sk_type
== SOCK_SEQPACKET
) {
705 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
707 sk
->sk_state
= BT_DISCONN
;
708 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
709 l2cap_send_disconn_req(conn
, sk
);
711 l2cap_chan_del(sk
, reason
);
715 if (sk
->sk_type
== SOCK_SEQPACKET
) {
716 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
717 struct l2cap_conn_rsp rsp
;
720 if (bt_sk(sk
)->defer_setup
)
721 result
= L2CAP_CR_SEC_BLOCK
;
723 result
= L2CAP_CR_BAD_PSM
;
725 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
726 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
727 rsp
.result
= cpu_to_le16(result
);
728 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
729 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
730 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
732 l2cap_chan_del(sk
, reason
);
737 l2cap_chan_del(sk
, reason
);
741 sock_set_flag(sk
, SOCK_ZAPPED
);
746 /* Must be called on unlocked socket. */
747 static void l2cap_sock_close(struct sock
*sk
)
749 l2cap_sock_clear_timer(sk
);
751 __l2cap_sock_close(sk
, ECONNRESET
);
756 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
758 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
763 sk
->sk_type
= parent
->sk_type
;
764 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
766 pi
->imtu
= l2cap_pi(parent
)->imtu
;
767 pi
->omtu
= l2cap_pi(parent
)->omtu
;
768 pi
->mode
= l2cap_pi(parent
)->mode
;
769 pi
->fcs
= l2cap_pi(parent
)->fcs
;
770 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
771 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
772 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
774 pi
->imtu
= L2CAP_DEFAULT_MTU
;
776 pi
->mode
= L2CAP_MODE_BASIC
;
777 pi
->fcs
= L2CAP_FCS_CRC16
;
778 pi
->sec_level
= BT_SECURITY_LOW
;
780 pi
->force_reliable
= 0;
783 /* Default config options */
785 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
786 skb_queue_head_init(TX_QUEUE(sk
));
787 skb_queue_head_init(SREJ_QUEUE(sk
));
788 INIT_LIST_HEAD(SREJ_LIST(sk
));
791 static struct proto l2cap_proto
= {
793 .owner
= THIS_MODULE
,
794 .obj_size
= sizeof(struct l2cap_pinfo
)
797 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
801 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
805 sock_init_data(sock
, sk
);
806 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
808 sk
->sk_destruct
= l2cap_sock_destruct
;
809 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
811 sock_reset_flag(sk
, SOCK_ZAPPED
);
813 sk
->sk_protocol
= proto
;
814 sk
->sk_state
= BT_OPEN
;
816 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
818 bt_sock_link(&l2cap_sk_list
, sk
);
822 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
827 BT_DBG("sock %p", sock
);
829 sock
->state
= SS_UNCONNECTED
;
831 if (sock
->type
!= SOCK_SEQPACKET
&&
832 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
833 return -ESOCKTNOSUPPORT
;
835 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
838 sock
->ops
= &l2cap_sock_ops
;
840 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
844 l2cap_sock_init(sk
, NULL
);
848 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
850 struct sock
*sk
= sock
->sk
;
851 struct sockaddr_l2 la
;
856 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
859 memset(&la
, 0, sizeof(la
));
860 len
= min_t(unsigned int, sizeof(la
), alen
);
861 memcpy(&la
, addr
, len
);
868 if (sk
->sk_state
!= BT_OPEN
) {
873 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
874 !capable(CAP_NET_BIND_SERVICE
)) {
879 write_lock_bh(&l2cap_sk_list
.lock
);
881 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
884 /* Save source address */
885 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
886 l2cap_pi(sk
)->psm
= la
.l2_psm
;
887 l2cap_pi(sk
)->sport
= la
.l2_psm
;
888 sk
->sk_state
= BT_BOUND
;
890 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
891 __le16_to_cpu(la
.l2_psm
) == 0x0003)
892 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
895 write_unlock_bh(&l2cap_sk_list
.lock
);
902 static int l2cap_do_connect(struct sock
*sk
)
904 bdaddr_t
*src
= &bt_sk(sk
)->src
;
905 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
906 struct l2cap_conn
*conn
;
907 struct hci_conn
*hcon
;
908 struct hci_dev
*hdev
;
912 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
915 hdev
= hci_get_route(dst
, src
);
917 return -EHOSTUNREACH
;
919 hci_dev_lock_bh(hdev
);
923 if (sk
->sk_type
== SOCK_RAW
) {
924 switch (l2cap_pi(sk
)->sec_level
) {
925 case BT_SECURITY_HIGH
:
926 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
928 case BT_SECURITY_MEDIUM
:
929 auth_type
= HCI_AT_DEDICATED_BONDING
;
932 auth_type
= HCI_AT_NO_BONDING
;
935 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
936 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
937 auth_type
= HCI_AT_NO_BONDING_MITM
;
939 auth_type
= HCI_AT_NO_BONDING
;
941 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
942 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
944 switch (l2cap_pi(sk
)->sec_level
) {
945 case BT_SECURITY_HIGH
:
946 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
948 case BT_SECURITY_MEDIUM
:
949 auth_type
= HCI_AT_GENERAL_BONDING
;
952 auth_type
= HCI_AT_NO_BONDING
;
957 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
958 l2cap_pi(sk
)->sec_level
, auth_type
);
962 conn
= l2cap_conn_add(hcon
, 0);
970 /* Update source addr of the socket */
971 bacpy(src
, conn
->src
);
973 l2cap_chan_add(conn
, sk
, NULL
);
975 sk
->sk_state
= BT_CONNECT
;
976 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
978 if (hcon
->state
== BT_CONNECTED
) {
979 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
980 l2cap_sock_clear_timer(sk
);
981 sk
->sk_state
= BT_CONNECTED
;
987 hci_dev_unlock_bh(hdev
);
992 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
994 struct sock
*sk
= sock
->sk
;
995 struct sockaddr_l2 la
;
1000 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
1003 memset(&la
, 0, sizeof(la
));
1004 len
= min_t(unsigned int, sizeof(la
), alen
);
1005 memcpy(&la
, addr
, len
);
1012 if (sk
->sk_type
== SOCK_SEQPACKET
&& !la
.l2_psm
) {
1017 switch (l2cap_pi(sk
)->mode
) {
1018 case L2CAP_MODE_BASIC
:
1020 case L2CAP_MODE_ERTM
:
1021 case L2CAP_MODE_STREAMING
:
1030 switch (sk
->sk_state
) {
1034 /* Already connecting */
1038 /* Already connected */
1051 /* Set destination address and psm */
1052 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1053 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1055 err
= l2cap_do_connect(sk
);
1060 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1061 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1067 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1069 struct sock
*sk
= sock
->sk
;
1072 BT_DBG("sk %p backlog %d", sk
, backlog
);
1076 if (sk
->sk_state
!= BT_BOUND
|| sock
->type
!= SOCK_SEQPACKET
) {
1081 switch (l2cap_pi(sk
)->mode
) {
1082 case L2CAP_MODE_BASIC
:
1084 case L2CAP_MODE_ERTM
:
1085 case L2CAP_MODE_STREAMING
:
1094 if (!l2cap_pi(sk
)->psm
) {
1095 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1100 write_lock_bh(&l2cap_sk_list
.lock
);
1102 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1103 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1104 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1105 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1110 write_unlock_bh(&l2cap_sk_list
.lock
);
1116 sk
->sk_max_ack_backlog
= backlog
;
1117 sk
->sk_ack_backlog
= 0;
1118 sk
->sk_state
= BT_LISTEN
;
1125 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1127 DECLARE_WAITQUEUE(wait
, current
);
1128 struct sock
*sk
= sock
->sk
, *nsk
;
1132 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1134 if (sk
->sk_state
!= BT_LISTEN
) {
1139 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1141 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1143 /* Wait for an incoming connection. (wake-one). */
1144 add_wait_queue_exclusive(sk
->sk_sleep
, &wait
);
1145 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1146 set_current_state(TASK_INTERRUPTIBLE
);
1153 timeo
= schedule_timeout(timeo
);
1154 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1156 if (sk
->sk_state
!= BT_LISTEN
) {
1161 if (signal_pending(current
)) {
1162 err
= sock_intr_errno(timeo
);
1166 set_current_state(TASK_RUNNING
);
1167 remove_wait_queue(sk
->sk_sleep
, &wait
);
1172 newsock
->state
= SS_CONNECTED
;
1174 BT_DBG("new socket %p", nsk
);
1181 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1183 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1184 struct sock
*sk
= sock
->sk
;
1186 BT_DBG("sock %p, sk %p", sock
, sk
);
1188 addr
->sa_family
= AF_BLUETOOTH
;
1189 *len
= sizeof(struct sockaddr_l2
);
1192 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1193 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1194 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1196 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1197 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1198 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1204 static void l2cap_monitor_timeout(unsigned long arg
)
1206 struct sock
*sk
= (void *) arg
;
1210 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1211 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
);
1215 l2cap_pi(sk
)->retry_count
++;
1216 __mod_monitor_timer();
1218 control
= L2CAP_CTRL_POLL
;
1219 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1223 static void l2cap_retrans_timeout(unsigned long arg
)
1225 struct sock
*sk
= (void *) arg
;
1229 l2cap_pi(sk
)->retry_count
= 1;
1230 __mod_monitor_timer();
1232 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1234 control
= L2CAP_CTRL_POLL
;
1235 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1239 static void l2cap_drop_acked_frames(struct sock
*sk
)
1241 struct sk_buff
*skb
;
1243 while ((skb
= skb_peek(TX_QUEUE(sk
)))) {
1244 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1247 skb
= skb_dequeue(TX_QUEUE(sk
));
1250 l2cap_pi(sk
)->unacked_frames
--;
1253 if (!l2cap_pi(sk
)->unacked_frames
)
1254 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1259 static inline int l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1261 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1264 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1266 err
= hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1273 static int l2cap_streaming_send(struct sock
*sk
)
1275 struct sk_buff
*skb
, *tx_skb
;
1276 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1280 while ((skb
= sk
->sk_send_head
)) {
1281 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1283 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1284 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1285 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1287 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1288 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1289 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1292 err
= l2cap_do_send(sk
, tx_skb
);
1294 l2cap_send_disconn_req(pi
->conn
, sk
);
1298 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1300 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1301 sk
->sk_send_head
= NULL
;
1303 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1305 skb
= skb_dequeue(TX_QUEUE(sk
));
1311 static int l2cap_retransmit_frame(struct sock
*sk
, u8 tx_seq
)
1313 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1314 struct sk_buff
*skb
, *tx_skb
;
1318 skb
= skb_peek(TX_QUEUE(sk
));
1320 if (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1321 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1323 skb
= skb_queue_next(TX_QUEUE(sk
), skb
);
1327 if (pi
->remote_max_tx
&&
1328 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1329 l2cap_send_disconn_req(pi
->conn
, sk
);
1333 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1334 bt_cb(skb
)->retries
++;
1335 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1336 control
|= (pi
->req_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1337 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1338 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1340 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1341 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1342 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1345 err
= l2cap_do_send(sk
, tx_skb
);
1347 l2cap_send_disconn_req(pi
->conn
, sk
);
1355 static int l2cap_ertm_send(struct sock
*sk
)
1357 struct sk_buff
*skb
, *tx_skb
;
1358 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1362 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
1365 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))
1366 && !(pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)) {
1367 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1369 if (pi
->remote_max_tx
&&
1370 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1371 l2cap_send_disconn_req(pi
->conn
, sk
);
1375 bt_cb(skb
)->retries
++;
1377 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1378 control
|= (pi
->req_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1379 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1380 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1383 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1384 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1385 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1388 err
= l2cap_do_send(sk
, tx_skb
);
1390 l2cap_send_disconn_req(pi
->conn
, sk
);
1393 __mod_retrans_timer();
1395 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1396 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1398 pi
->unacked_frames
++;
1400 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1401 sk
->sk_send_head
= NULL
;
1403 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1409 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1411 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1412 struct sk_buff
**frag
;
1415 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
)) {
1422 /* Continuation fragments (no L2CAP header) */
1423 frag
= &skb_shinfo(skb
)->frag_list
;
1425 count
= min_t(unsigned int, conn
->mtu
, len
);
1427 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1430 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1436 frag
= &(*frag
)->next
;
1442 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1444 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1445 struct sk_buff
*skb
;
1446 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1447 struct l2cap_hdr
*lh
;
1449 BT_DBG("sk %p len %d", sk
, (int)len
);
1451 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1452 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1453 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1455 return ERR_PTR(-ENOMEM
);
1457 /* Create L2CAP header */
1458 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1459 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1460 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1461 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1463 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1464 if (unlikely(err
< 0)) {
1466 return ERR_PTR(err
);
1471 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1473 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1474 struct sk_buff
*skb
;
1475 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1476 struct l2cap_hdr
*lh
;
1478 BT_DBG("sk %p len %d", sk
, (int)len
);
1480 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1481 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1482 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1484 return ERR_PTR(-ENOMEM
);
1486 /* Create L2CAP header */
1487 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1488 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1489 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1491 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1492 if (unlikely(err
< 0)) {
1494 return ERR_PTR(err
);
1499 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1501 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1502 struct sk_buff
*skb
;
1503 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1504 struct l2cap_hdr
*lh
;
1506 BT_DBG("sk %p len %d", sk
, (int)len
);
1511 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1514 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1515 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1516 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1518 return ERR_PTR(-ENOMEM
);
1520 /* Create L2CAP header */
1521 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1522 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1523 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1524 put_unaligned_le16(control
, skb_put(skb
, 2));
1526 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1528 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1529 if (unlikely(err
< 0)) {
1531 return ERR_PTR(err
);
1534 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1535 put_unaligned_le16(0, skb_put(skb
, 2));
1537 bt_cb(skb
)->retries
= 0;
1541 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1543 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1544 struct sk_buff
*skb
;
1545 struct sk_buff_head sar_queue
;
1549 __skb_queue_head_init(&sar_queue
);
1550 control
= L2CAP_SDU_START
;
1551 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->max_pdu_size
, control
, len
);
1553 return PTR_ERR(skb
);
1555 __skb_queue_tail(&sar_queue
, skb
);
1556 len
-= pi
->max_pdu_size
;
1557 size
+=pi
->max_pdu_size
;
1563 if (len
> pi
->max_pdu_size
) {
1564 control
|= L2CAP_SDU_CONTINUE
;
1565 buflen
= pi
->max_pdu_size
;
1567 control
|= L2CAP_SDU_END
;
1571 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1573 skb_queue_purge(&sar_queue
);
1574 return PTR_ERR(skb
);
1577 __skb_queue_tail(&sar_queue
, skb
);
1582 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1583 if (sk
->sk_send_head
== NULL
)
1584 sk
->sk_send_head
= sar_queue
.next
;
1589 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1591 struct sock
*sk
= sock
->sk
;
1592 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1593 struct sk_buff
*skb
;
1597 BT_DBG("sock %p, sk %p", sock
, sk
);
1599 err
= sock_error(sk
);
1603 if (msg
->msg_flags
& MSG_OOB
)
1606 /* Check outgoing MTU */
1607 if (sk
->sk_type
== SOCK_SEQPACKET
&& pi
->mode
== L2CAP_MODE_BASIC
1613 if (sk
->sk_state
!= BT_CONNECTED
) {
1618 /* Connectionless channel */
1619 if (sk
->sk_type
== SOCK_DGRAM
) {
1620 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1621 err
= l2cap_do_send(sk
, skb
);
1626 case L2CAP_MODE_BASIC
:
1627 /* Create a basic PDU */
1628 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1634 err
= l2cap_do_send(sk
, skb
);
1639 case L2CAP_MODE_ERTM
:
1640 case L2CAP_MODE_STREAMING
:
1641 /* Entire SDU fits into one PDU */
1642 if (len
<= pi
->max_pdu_size
) {
1643 control
= L2CAP_SDU_UNSEGMENTED
;
1644 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1649 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1650 if (sk
->sk_send_head
== NULL
)
1651 sk
->sk_send_head
= skb
;
1653 /* Segment SDU into multiples PDUs */
1654 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1659 if (pi
->mode
== L2CAP_MODE_STREAMING
)
1660 err
= l2cap_streaming_send(sk
);
1662 err
= l2cap_ertm_send(sk
);
1669 BT_DBG("bad state %1.1x", pi
->mode
);
1678 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1680 struct sock
*sk
= sock
->sk
;
1684 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1685 struct l2cap_conn_rsp rsp
;
1687 sk
->sk_state
= BT_CONFIG
;
1689 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1690 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1691 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1692 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1693 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1694 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1702 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1705 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1707 struct sock
*sk
= sock
->sk
;
1708 struct l2cap_options opts
;
1712 BT_DBG("sk %p", sk
);
1718 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1719 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1720 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1721 opts
.mode
= l2cap_pi(sk
)->mode
;
1722 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1724 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1725 if (copy_from_user((char *) &opts
, optval
, len
)) {
1730 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1731 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1732 l2cap_pi(sk
)->mode
= opts
.mode
;
1733 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1737 if (get_user(opt
, (u32 __user
*) optval
)) {
1742 if (opt
& L2CAP_LM_AUTH
)
1743 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1744 if (opt
& L2CAP_LM_ENCRYPT
)
1745 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1746 if (opt
& L2CAP_LM_SECURE
)
1747 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1749 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1750 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1762 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1764 struct sock
*sk
= sock
->sk
;
1765 struct bt_security sec
;
1769 BT_DBG("sk %p", sk
);
1771 if (level
== SOL_L2CAP
)
1772 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1774 if (level
!= SOL_BLUETOOTH
)
1775 return -ENOPROTOOPT
;
1781 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1786 sec
.level
= BT_SECURITY_LOW
;
1788 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1789 if (copy_from_user((char *) &sec
, optval
, len
)) {
1794 if (sec
.level
< BT_SECURITY_LOW
||
1795 sec
.level
> BT_SECURITY_HIGH
) {
1800 l2cap_pi(sk
)->sec_level
= sec
.level
;
1803 case BT_DEFER_SETUP
:
1804 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1809 if (get_user(opt
, (u32 __user
*) optval
)) {
1814 bt_sk(sk
)->defer_setup
= opt
;
1826 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1828 struct sock
*sk
= sock
->sk
;
1829 struct l2cap_options opts
;
1830 struct l2cap_conninfo cinfo
;
1834 BT_DBG("sk %p", sk
);
1836 if (get_user(len
, optlen
))
1843 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1844 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1845 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1846 opts
.mode
= l2cap_pi(sk
)->mode
;
1847 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1849 len
= min_t(unsigned int, len
, sizeof(opts
));
1850 if (copy_to_user(optval
, (char *) &opts
, len
))
1856 switch (l2cap_pi(sk
)->sec_level
) {
1857 case BT_SECURITY_LOW
:
1858 opt
= L2CAP_LM_AUTH
;
1860 case BT_SECURITY_MEDIUM
:
1861 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
1863 case BT_SECURITY_HIGH
:
1864 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
1872 if (l2cap_pi(sk
)->role_switch
)
1873 opt
|= L2CAP_LM_MASTER
;
1875 if (l2cap_pi(sk
)->force_reliable
)
1876 opt
|= L2CAP_LM_RELIABLE
;
1878 if (put_user(opt
, (u32 __user
*) optval
))
1882 case L2CAP_CONNINFO
:
1883 if (sk
->sk_state
!= BT_CONNECTED
&&
1884 !(sk
->sk_state
== BT_CONNECT2
&&
1885 bt_sk(sk
)->defer_setup
)) {
1890 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
1891 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
1893 len
= min_t(unsigned int, len
, sizeof(cinfo
));
1894 if (copy_to_user(optval
, (char *) &cinfo
, len
))
1908 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1910 struct sock
*sk
= sock
->sk
;
1911 struct bt_security sec
;
1914 BT_DBG("sk %p", sk
);
1916 if (level
== SOL_L2CAP
)
1917 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
1919 if (level
!= SOL_BLUETOOTH
)
1920 return -ENOPROTOOPT
;
1922 if (get_user(len
, optlen
))
1929 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1934 sec
.level
= l2cap_pi(sk
)->sec_level
;
1936 len
= min_t(unsigned int, len
, sizeof(sec
));
1937 if (copy_to_user(optval
, (char *) &sec
, len
))
1942 case BT_DEFER_SETUP
:
1943 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1948 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
1962 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
1964 struct sock
*sk
= sock
->sk
;
1967 BT_DBG("sock %p, sk %p", sock
, sk
);
1973 if (!sk
->sk_shutdown
) {
1974 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1975 l2cap_sock_clear_timer(sk
);
1976 __l2cap_sock_close(sk
, 0);
1978 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
1979 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
1986 static int l2cap_sock_release(struct socket
*sock
)
1988 struct sock
*sk
= sock
->sk
;
1991 BT_DBG("sock %p, sk %p", sock
, sk
);
1996 err
= l2cap_sock_shutdown(sock
, 2);
1999 l2cap_sock_kill(sk
);
2003 static void l2cap_chan_ready(struct sock
*sk
)
2005 struct sock
*parent
= bt_sk(sk
)->parent
;
2007 BT_DBG("sk %p, parent %p", sk
, parent
);
2009 l2cap_pi(sk
)->conf_state
= 0;
2010 l2cap_sock_clear_timer(sk
);
2013 /* Outgoing channel.
2014 * Wake up socket sleeping on connect.
2016 sk
->sk_state
= BT_CONNECTED
;
2017 sk
->sk_state_change(sk
);
2019 /* Incoming channel.
2020 * Wake up socket sleeping on accept.
2022 parent
->sk_data_ready(parent
, 0);
2026 /* Copy frame to all raw sockets on that connection */
2027 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2029 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2030 struct sk_buff
*nskb
;
2033 BT_DBG("conn %p", conn
);
2035 read_lock(&l
->lock
);
2036 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2037 if (sk
->sk_type
!= SOCK_RAW
)
2040 /* Don't send frame to the socket it came from */
2043 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2047 if (sock_queue_rcv_skb(sk
, nskb
))
2050 read_unlock(&l
->lock
);
2053 /* ---- L2CAP signalling commands ---- */
2054 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2055 u8 code
, u8 ident
, u16 dlen
, void *data
)
2057 struct sk_buff
*skb
, **frag
;
2058 struct l2cap_cmd_hdr
*cmd
;
2059 struct l2cap_hdr
*lh
;
2062 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2063 conn
, code
, ident
, dlen
);
2065 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2066 count
= min_t(unsigned int, conn
->mtu
, len
);
2068 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2072 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2073 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2074 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2076 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2079 cmd
->len
= cpu_to_le16(dlen
);
2082 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2083 memcpy(skb_put(skb
, count
), data
, count
);
2089 /* Continuation fragments (no L2CAP header) */
2090 frag
= &skb_shinfo(skb
)->frag_list
;
2092 count
= min_t(unsigned int, conn
->mtu
, len
);
2094 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2098 memcpy(skb_put(*frag
, count
), data
, count
);
2103 frag
= &(*frag
)->next
;
2113 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2115 struct l2cap_conf_opt
*opt
= *ptr
;
2118 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2126 *val
= *((u8
*) opt
->val
);
2130 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2134 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2138 *val
= (unsigned long) opt
->val
;
2142 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2146 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2148 struct l2cap_conf_opt
*opt
= *ptr
;
2150 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2157 *((u8
*) opt
->val
) = val
;
2161 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2165 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2169 memcpy(opt
->val
, (void *) val
, len
);
2173 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2176 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2178 u32 local_feat_mask
= l2cap_feat_mask
;
2180 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2183 case L2CAP_MODE_ERTM
:
2184 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2185 case L2CAP_MODE_STREAMING
:
2186 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2192 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2195 case L2CAP_MODE_STREAMING
:
2196 case L2CAP_MODE_ERTM
:
2197 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2201 return L2CAP_MODE_BASIC
;
2205 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2207 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2208 struct l2cap_conf_req
*req
= data
;
2209 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_ERTM
};
2210 void *ptr
= req
->data
;
2212 BT_DBG("sk %p", sk
);
2214 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2218 case L2CAP_MODE_STREAMING
:
2219 case L2CAP_MODE_ERTM
:
2220 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2221 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2222 l2cap_send_disconn_req(pi
->conn
, sk
);
2225 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2231 case L2CAP_MODE_BASIC
:
2232 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2233 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2236 case L2CAP_MODE_ERTM
:
2237 rfc
.mode
= L2CAP_MODE_ERTM
;
2238 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2239 rfc
.max_transmit
= L2CAP_DEFAULT_MAX_TX
;
2240 rfc
.retrans_timeout
= 0;
2241 rfc
.monitor_timeout
= 0;
2242 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2244 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2245 sizeof(rfc
), (unsigned long) &rfc
);
2247 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2250 if (pi
->fcs
== L2CAP_FCS_NONE
||
2251 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2252 pi
->fcs
= L2CAP_FCS_NONE
;
2253 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2257 case L2CAP_MODE_STREAMING
:
2258 rfc
.mode
= L2CAP_MODE_STREAMING
;
2260 rfc
.max_transmit
= 0;
2261 rfc
.retrans_timeout
= 0;
2262 rfc
.monitor_timeout
= 0;
2263 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2265 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2266 sizeof(rfc
), (unsigned long) &rfc
);
2268 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2271 if (pi
->fcs
== L2CAP_FCS_NONE
||
2272 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2273 pi
->fcs
= L2CAP_FCS_NONE
;
2274 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2279 /* FIXME: Need actual value of the flush timeout */
2280 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2281 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2283 req
->dcid
= cpu_to_le16(pi
->dcid
);
2284 req
->flags
= cpu_to_le16(0);
2289 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2291 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2292 struct l2cap_conf_rsp
*rsp
= data
;
2293 void *ptr
= rsp
->data
;
2294 void *req
= pi
->conf_req
;
2295 int len
= pi
->conf_len
;
2296 int type
, hint
, olen
;
2298 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2299 u16 mtu
= L2CAP_DEFAULT_MTU
;
2300 u16 result
= L2CAP_CONF_SUCCESS
;
2302 BT_DBG("sk %p", sk
);
2304 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2305 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2307 hint
= type
& L2CAP_CONF_HINT
;
2308 type
&= L2CAP_CONF_MASK
;
2311 case L2CAP_CONF_MTU
:
2315 case L2CAP_CONF_FLUSH_TO
:
2319 case L2CAP_CONF_QOS
:
2322 case L2CAP_CONF_RFC
:
2323 if (olen
== sizeof(rfc
))
2324 memcpy(&rfc
, (void *) val
, olen
);
2327 case L2CAP_CONF_FCS
:
2328 if (val
== L2CAP_FCS_NONE
)
2329 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2337 result
= L2CAP_CONF_UNKNOWN
;
2338 *((u8
*) ptr
++) = type
;
2343 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2347 case L2CAP_MODE_STREAMING
:
2348 case L2CAP_MODE_ERTM
:
2349 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2350 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2351 return -ECONNREFUSED
;
2354 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2359 if (pi
->mode
!= rfc
.mode
) {
2360 result
= L2CAP_CONF_UNACCEPT
;
2361 rfc
.mode
= pi
->mode
;
2363 if (pi
->num_conf_rsp
== 1)
2364 return -ECONNREFUSED
;
2366 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2367 sizeof(rfc
), (unsigned long) &rfc
);
2371 if (result
== L2CAP_CONF_SUCCESS
) {
2372 /* Configure output options and let the other side know
2373 * which ones we don't like. */
2375 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2376 result
= L2CAP_CONF_UNACCEPT
;
2379 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2381 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2384 case L2CAP_MODE_BASIC
:
2385 pi
->fcs
= L2CAP_FCS_NONE
;
2386 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2389 case L2CAP_MODE_ERTM
:
2390 pi
->remote_tx_win
= rfc
.txwin_size
;
2391 pi
->remote_max_tx
= rfc
.max_transmit
;
2392 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2394 rfc
.retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
2395 rfc
.monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
2397 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2400 case L2CAP_MODE_STREAMING
:
2401 pi
->remote_tx_win
= rfc
.txwin_size
;
2402 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2404 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2408 result
= L2CAP_CONF_UNACCEPT
;
2410 memset(&rfc
, 0, sizeof(rfc
));
2411 rfc
.mode
= pi
->mode
;
2414 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2415 sizeof(rfc
), (unsigned long) &rfc
);
2417 if (result
== L2CAP_CONF_SUCCESS
)
2418 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2420 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2421 rsp
->result
= cpu_to_le16(result
);
2422 rsp
->flags
= cpu_to_le16(0x0000);
2427 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2429 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2430 struct l2cap_conf_req
*req
= data
;
2431 void *ptr
= req
->data
;
2434 struct l2cap_conf_rfc rfc
;
2436 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2438 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2439 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2442 case L2CAP_CONF_MTU
:
2443 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2444 *result
= L2CAP_CONF_UNACCEPT
;
2445 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2448 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2451 case L2CAP_CONF_FLUSH_TO
:
2453 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2457 case L2CAP_CONF_RFC
:
2458 if (olen
== sizeof(rfc
))
2459 memcpy(&rfc
, (void *)val
, olen
);
2461 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2462 rfc
.mode
!= pi
->mode
)
2463 return -ECONNREFUSED
;
2465 pi
->mode
= rfc
.mode
;
2468 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2469 sizeof(rfc
), (unsigned long) &rfc
);
2474 if (*result
== L2CAP_CONF_SUCCESS
) {
2476 case L2CAP_MODE_ERTM
:
2477 pi
->remote_tx_win
= rfc
.txwin_size
;
2478 pi
->retrans_timeout
= rfc
.retrans_timeout
;
2479 pi
->monitor_timeout
= rfc
.monitor_timeout
;
2480 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2482 case L2CAP_MODE_STREAMING
:
2483 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2488 req
->dcid
= cpu_to_le16(pi
->dcid
);
2489 req
->flags
= cpu_to_le16(0x0000);
2494 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2496 struct l2cap_conf_rsp
*rsp
= data
;
2497 void *ptr
= rsp
->data
;
2499 BT_DBG("sk %p", sk
);
2501 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2502 rsp
->result
= cpu_to_le16(result
);
2503 rsp
->flags
= cpu_to_le16(flags
);
2508 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2510 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2512 if (rej
->reason
!= 0x0000)
2515 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2516 cmd
->ident
== conn
->info_ident
) {
2517 del_timer(&conn
->info_timer
);
2519 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2520 conn
->info_ident
= 0;
2522 l2cap_conn_start(conn
);
2528 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2530 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2531 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2532 struct l2cap_conn_rsp rsp
;
2533 struct sock
*sk
, *parent
;
2534 int result
, status
= L2CAP_CS_NO_INFO
;
2536 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2537 __le16 psm
= req
->psm
;
2539 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2541 /* Check if we have socket listening on psm */
2542 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2544 result
= L2CAP_CR_BAD_PSM
;
2548 /* Check if the ACL is secure enough (if not SDP) */
2549 if (psm
!= cpu_to_le16(0x0001) &&
2550 !hci_conn_check_link_mode(conn
->hcon
)) {
2551 conn
->disc_reason
= 0x05;
2552 result
= L2CAP_CR_SEC_BLOCK
;
2556 result
= L2CAP_CR_NO_MEM
;
2558 /* Check for backlog size */
2559 if (sk_acceptq_is_full(parent
)) {
2560 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2564 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2568 write_lock_bh(&list
->lock
);
2570 /* Check if we already have channel with that dcid */
2571 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2572 write_unlock_bh(&list
->lock
);
2573 sock_set_flag(sk
, SOCK_ZAPPED
);
2574 l2cap_sock_kill(sk
);
2578 hci_conn_hold(conn
->hcon
);
2580 l2cap_sock_init(sk
, parent
);
2581 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2582 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2583 l2cap_pi(sk
)->psm
= psm
;
2584 l2cap_pi(sk
)->dcid
= scid
;
2586 __l2cap_chan_add(conn
, sk
, parent
);
2587 dcid
= l2cap_pi(sk
)->scid
;
2589 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2591 l2cap_pi(sk
)->ident
= cmd
->ident
;
2593 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2594 if (l2cap_check_security(sk
)) {
2595 if (bt_sk(sk
)->defer_setup
) {
2596 sk
->sk_state
= BT_CONNECT2
;
2597 result
= L2CAP_CR_PEND
;
2598 status
= L2CAP_CS_AUTHOR_PEND
;
2599 parent
->sk_data_ready(parent
, 0);
2601 sk
->sk_state
= BT_CONFIG
;
2602 result
= L2CAP_CR_SUCCESS
;
2603 status
= L2CAP_CS_NO_INFO
;
2606 sk
->sk_state
= BT_CONNECT2
;
2607 result
= L2CAP_CR_PEND
;
2608 status
= L2CAP_CS_AUTHEN_PEND
;
2611 sk
->sk_state
= BT_CONNECT2
;
2612 result
= L2CAP_CR_PEND
;
2613 status
= L2CAP_CS_NO_INFO
;
2616 write_unlock_bh(&list
->lock
);
2619 bh_unlock_sock(parent
);
2622 rsp
.scid
= cpu_to_le16(scid
);
2623 rsp
.dcid
= cpu_to_le16(dcid
);
2624 rsp
.result
= cpu_to_le16(result
);
2625 rsp
.status
= cpu_to_le16(status
);
2626 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2628 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2629 struct l2cap_info_req info
;
2630 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2632 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2633 conn
->info_ident
= l2cap_get_ident(conn
);
2635 mod_timer(&conn
->info_timer
, jiffies
+
2636 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2638 l2cap_send_cmd(conn
, conn
->info_ident
,
2639 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2645 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2647 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2648 u16 scid
, dcid
, result
, status
;
2652 scid
= __le16_to_cpu(rsp
->scid
);
2653 dcid
= __le16_to_cpu(rsp
->dcid
);
2654 result
= __le16_to_cpu(rsp
->result
);
2655 status
= __le16_to_cpu(rsp
->status
);
2657 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2660 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2664 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2670 case L2CAP_CR_SUCCESS
:
2671 sk
->sk_state
= BT_CONFIG
;
2672 l2cap_pi(sk
)->ident
= 0;
2673 l2cap_pi(sk
)->dcid
= dcid
;
2674 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2676 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2678 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2679 l2cap_build_conf_req(sk
, req
), req
);
2680 l2cap_pi(sk
)->num_conf_req
++;
2684 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2688 l2cap_chan_del(sk
, ECONNREFUSED
);
2696 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2698 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2704 dcid
= __le16_to_cpu(req
->dcid
);
2705 flags
= __le16_to_cpu(req
->flags
);
2707 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2709 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2713 if (sk
->sk_state
== BT_DISCONN
)
2716 /* Reject if config buffer is too small. */
2717 len
= cmd_len
- sizeof(*req
);
2718 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2719 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2720 l2cap_build_conf_rsp(sk
, rsp
,
2721 L2CAP_CONF_REJECT
, flags
), rsp
);
2726 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2727 l2cap_pi(sk
)->conf_len
+= len
;
2729 if (flags
& 0x0001) {
2730 /* Incomplete config. Send empty response. */
2731 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2732 l2cap_build_conf_rsp(sk
, rsp
,
2733 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2737 /* Complete config. */
2738 len
= l2cap_parse_conf_req(sk
, rsp
);
2740 l2cap_send_disconn_req(conn
, sk
);
2744 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2745 l2cap_pi(sk
)->num_conf_rsp
++;
2747 /* Reset config buffer. */
2748 l2cap_pi(sk
)->conf_len
= 0;
2750 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2753 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2754 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
)
2755 || l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2756 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2758 sk
->sk_state
= BT_CONNECTED
;
2759 l2cap_pi(sk
)->next_tx_seq
= 0;
2760 l2cap_pi(sk
)->expected_ack_seq
= 0;
2761 l2cap_pi(sk
)->unacked_frames
= 0;
2763 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2764 l2cap_retrans_timeout
, (unsigned long) sk
);
2765 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2766 l2cap_monitor_timeout
, (unsigned long) sk
);
2768 __skb_queue_head_init(TX_QUEUE(sk
));
2769 __skb_queue_head_init(SREJ_QUEUE(sk
));
2770 l2cap_chan_ready(sk
);
2774 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2776 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2777 l2cap_build_conf_req(sk
, buf
), buf
);
2778 l2cap_pi(sk
)->num_conf_req
++;
2786 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2788 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2789 u16 scid
, flags
, result
;
2792 scid
= __le16_to_cpu(rsp
->scid
);
2793 flags
= __le16_to_cpu(rsp
->flags
);
2794 result
= __le16_to_cpu(rsp
->result
);
2796 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2797 scid
, flags
, result
);
2799 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2804 case L2CAP_CONF_SUCCESS
:
2807 case L2CAP_CONF_UNACCEPT
:
2808 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2809 int len
= cmd
->len
- sizeof(*rsp
);
2812 /* throw out any old stored conf requests */
2813 result
= L2CAP_CONF_SUCCESS
;
2814 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2817 l2cap_send_disconn_req(conn
, sk
);
2821 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2822 L2CAP_CONF_REQ
, len
, req
);
2823 l2cap_pi(sk
)->num_conf_req
++;
2824 if (result
!= L2CAP_CONF_SUCCESS
)
2830 sk
->sk_state
= BT_DISCONN
;
2831 sk
->sk_err
= ECONNRESET
;
2832 l2cap_sock_set_timer(sk
, HZ
* 5);
2833 l2cap_send_disconn_req(conn
, sk
);
2840 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2842 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2843 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
)
2844 || l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2845 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2847 sk
->sk_state
= BT_CONNECTED
;
2848 l2cap_pi(sk
)->expected_tx_seq
= 0;
2849 l2cap_pi(sk
)->buffer_seq
= 0;
2850 l2cap_pi(sk
)->num_to_ack
= 0;
2851 __skb_queue_head_init(TX_QUEUE(sk
));
2852 __skb_queue_head_init(SREJ_QUEUE(sk
));
2853 l2cap_chan_ready(sk
);
2861 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2863 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2864 struct l2cap_disconn_rsp rsp
;
2868 scid
= __le16_to_cpu(req
->scid
);
2869 dcid
= __le16_to_cpu(req
->dcid
);
2871 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2873 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2877 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2878 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2879 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2881 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2883 skb_queue_purge(TX_QUEUE(sk
));
2884 skb_queue_purge(SREJ_QUEUE(sk
));
2885 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2886 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2888 l2cap_chan_del(sk
, ECONNRESET
);
2891 l2cap_sock_kill(sk
);
2895 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2897 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2901 scid
= __le16_to_cpu(rsp
->scid
);
2902 dcid
= __le16_to_cpu(rsp
->dcid
);
2904 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2906 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2910 skb_queue_purge(TX_QUEUE(sk
));
2911 skb_queue_purge(SREJ_QUEUE(sk
));
2912 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2913 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2915 l2cap_chan_del(sk
, 0);
2918 l2cap_sock_kill(sk
);
2922 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2924 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2927 type
= __le16_to_cpu(req
->type
);
2929 BT_DBG("type 0x%4.4x", type
);
2931 if (type
== L2CAP_IT_FEAT_MASK
) {
2933 u32 feat_mask
= l2cap_feat_mask
;
2934 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2935 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2936 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2938 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2940 put_unaligned_le32(feat_mask
, rsp
->data
);
2941 l2cap_send_cmd(conn
, cmd
->ident
,
2942 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2943 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2945 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2946 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2947 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2948 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2949 l2cap_send_cmd(conn
, cmd
->ident
,
2950 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2952 struct l2cap_info_rsp rsp
;
2953 rsp
.type
= cpu_to_le16(type
);
2954 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2955 l2cap_send_cmd(conn
, cmd
->ident
,
2956 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2962 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2964 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2967 type
= __le16_to_cpu(rsp
->type
);
2968 result
= __le16_to_cpu(rsp
->result
);
2970 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2972 del_timer(&conn
->info_timer
);
2974 if (type
== L2CAP_IT_FEAT_MASK
) {
2975 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2977 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2978 struct l2cap_info_req req
;
2979 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2981 conn
->info_ident
= l2cap_get_ident(conn
);
2983 l2cap_send_cmd(conn
, conn
->info_ident
,
2984 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2986 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2987 conn
->info_ident
= 0;
2989 l2cap_conn_start(conn
);
2991 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2992 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2993 conn
->info_ident
= 0;
2995 l2cap_conn_start(conn
);
3001 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3003 u8
*data
= skb
->data
;
3005 struct l2cap_cmd_hdr cmd
;
3008 l2cap_raw_recv(conn
, skb
);
3010 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3012 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3013 data
+= L2CAP_CMD_HDR_SIZE
;
3014 len
-= L2CAP_CMD_HDR_SIZE
;
3016 cmd_len
= le16_to_cpu(cmd
.len
);
3018 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3020 if (cmd_len
> len
|| !cmd
.ident
) {
3021 BT_DBG("corrupted command");
3026 case L2CAP_COMMAND_REJ
:
3027 l2cap_command_rej(conn
, &cmd
, data
);
3030 case L2CAP_CONN_REQ
:
3031 err
= l2cap_connect_req(conn
, &cmd
, data
);
3034 case L2CAP_CONN_RSP
:
3035 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3038 case L2CAP_CONF_REQ
:
3039 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3042 case L2CAP_CONF_RSP
:
3043 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3046 case L2CAP_DISCONN_REQ
:
3047 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3050 case L2CAP_DISCONN_RSP
:
3051 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3054 case L2CAP_ECHO_REQ
:
3055 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3058 case L2CAP_ECHO_RSP
:
3061 case L2CAP_INFO_REQ
:
3062 err
= l2cap_information_req(conn
, &cmd
, data
);
3065 case L2CAP_INFO_RSP
:
3066 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3070 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3076 struct l2cap_cmd_rej rej
;
3077 BT_DBG("error %d", err
);
3079 /* FIXME: Map err to a valid reason */
3080 rej
.reason
= cpu_to_le16(0);
3081 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3091 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3093 u16 our_fcs
, rcv_fcs
;
3094 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3096 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3097 skb_trim(skb
, skb
->len
- 2);
3098 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3099 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3101 if (our_fcs
!= rcv_fcs
)
3107 static void l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3109 struct sk_buff
*next_skb
;
3111 bt_cb(skb
)->tx_seq
= tx_seq
;
3112 bt_cb(skb
)->sar
= sar
;
3114 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3116 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3121 if (bt_cb(next_skb
)->tx_seq
> tx_seq
) {
3122 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3126 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3129 } while((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3131 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3134 static int l2cap_sar_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3136 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3137 struct sk_buff
*_skb
;
3140 switch (control
& L2CAP_CTRL_SAR
) {
3141 case L2CAP_SDU_UNSEGMENTED
:
3142 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3147 err
= sock_queue_rcv_skb(sk
, skb
);
3153 case L2CAP_SDU_START
:
3154 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3159 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3162 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3168 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3170 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3171 pi
->partial_sdu_len
= skb
->len
;
3175 case L2CAP_SDU_CONTINUE
:
3176 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3179 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3181 pi
->partial_sdu_len
+= skb
->len
;
3182 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3190 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3193 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3195 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3196 pi
->partial_sdu_len
+= skb
->len
;
3198 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3199 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3200 err
= sock_queue_rcv_skb(sk
, _skb
);
3214 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3216 struct sk_buff
*skb
;
3219 while((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3220 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3223 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3224 control
|= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3225 l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3226 l2cap_pi(sk
)->buffer_seq_srej
=
3227 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3232 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3234 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3235 struct srej_list
*l
, *tmp
;
3238 list_for_each_entry_safe(l
,tmp
, SREJ_LIST(sk
), list
) {
3239 if (l
->tx_seq
== tx_seq
) {
3244 control
= L2CAP_SUPER_SELECT_REJECT
;
3245 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3246 l2cap_send_sframe(pi
, control
);
3248 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3252 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3254 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3255 struct srej_list
*new;
3258 while (tx_seq
!= pi
->expected_tx_seq
) {
3259 control
= L2CAP_SUPER_SELECT_REJECT
;
3260 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3261 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
3262 control
|= L2CAP_CTRL_POLL
;
3263 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
3265 l2cap_send_sframe(pi
, control
);
3267 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3268 new->tx_seq
= pi
->expected_tx_seq
++;
3269 list_add_tail(&new->list
, SREJ_LIST(sk
));
3271 pi
->expected_tx_seq
++;
3274 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3276 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3277 u8 tx_seq
= __get_txseq(rx_control
);
3279 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3282 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3284 if (tx_seq
== pi
->expected_tx_seq
)
3287 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3288 struct srej_list
*first
;
3290 first
= list_first_entry(SREJ_LIST(sk
),
3291 struct srej_list
, list
);
3292 if (tx_seq
== first
->tx_seq
) {
3293 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3294 l2cap_check_srej_gap(sk
, tx_seq
);
3296 list_del(&first
->list
);
3299 if (list_empty(SREJ_LIST(sk
))) {
3300 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3301 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3304 struct srej_list
*l
;
3305 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3307 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3308 if (l
->tx_seq
== tx_seq
) {
3309 l2cap_resend_srejframe(sk
, tx_seq
);
3313 l2cap_send_srejframe(sk
, tx_seq
);
3316 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3318 INIT_LIST_HEAD(SREJ_LIST(sk
));
3319 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3321 __skb_queue_head_init(SREJ_QUEUE(sk
));
3322 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3324 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3326 l2cap_send_srejframe(sk
, tx_seq
);
3331 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3333 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3334 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3338 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3340 err
= l2cap_sar_reassembly_sdu(sk
, skb
, rx_control
);
3344 pi
->num_to_ack
= (pi
->num_to_ack
+ 1) % L2CAP_DEFAULT_NUM_TO_ACK
;
3345 if (pi
->num_to_ack
== L2CAP_DEFAULT_NUM_TO_ACK
- 1) {
3346 tx_control
|= L2CAP_SUPER_RCV_READY
;
3347 tx_control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3348 l2cap_send_sframe(pi
, tx_control
);
3353 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3355 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3356 u8 tx_seq
= __get_reqseq(rx_control
);
3358 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3360 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3361 case L2CAP_SUPER_RCV_READY
:
3362 if (rx_control
& L2CAP_CTRL_POLL
) {
3363 u16 control
= L2CAP_CTRL_FINAL
;
3364 control
|= L2CAP_SUPER_RCV_READY
|
3365 (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
);
3366 l2cap_send_sframe(l2cap_pi(sk
), control
);
3367 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3369 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3370 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3371 pi
->expected_ack_seq
= tx_seq
;
3372 l2cap_drop_acked_frames(sk
);
3374 if (!(pi
->conn_state
& L2CAP_CONN_WAIT_F
))
3377 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3378 del_timer(&pi
->monitor_timer
);
3380 if (pi
->unacked_frames
> 0)
3381 __mod_retrans_timer();
3383 pi
->expected_ack_seq
= tx_seq
;
3384 l2cap_drop_acked_frames(sk
);
3386 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3387 && (pi
->unacked_frames
> 0))
3388 __mod_retrans_timer();
3390 l2cap_ertm_send(sk
);
3391 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3395 case L2CAP_SUPER_REJECT
:
3396 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3398 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3399 l2cap_drop_acked_frames(sk
);
3401 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3402 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3404 l2cap_ertm_send(sk
);
3408 case L2CAP_SUPER_SELECT_REJECT
:
3409 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3411 if (rx_control
& L2CAP_CTRL_POLL
) {
3412 l2cap_retransmit_frame(sk
, tx_seq
);
3413 pi
->expected_ack_seq
= tx_seq
;
3414 l2cap_drop_acked_frames(sk
);
3415 l2cap_ertm_send(sk
);
3416 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3417 pi
->srej_save_reqseq
= tx_seq
;
3418 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3420 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3421 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3422 pi
->srej_save_reqseq
== tx_seq
)
3423 pi
->srej_save_reqseq
&= ~L2CAP_CONN_SREJ_ACT
;
3425 l2cap_retransmit_frame(sk
, tx_seq
);
3428 l2cap_retransmit_frame(sk
, tx_seq
);
3429 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3430 pi
->srej_save_reqseq
= tx_seq
;
3431 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3436 case L2CAP_SUPER_RCV_NOT_READY
:
3437 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3438 pi
->expected_ack_seq
= tx_seq
;
3439 l2cap_drop_acked_frames(sk
);
3441 del_timer(&l2cap_pi(sk
)->retrans_timer
);
3442 if (rx_control
& L2CAP_CTRL_POLL
) {
3443 u16 control
= L2CAP_CTRL_FINAL
;
3444 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
3452 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3455 struct l2cap_pinfo
*pi
;
3460 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3462 BT_DBG("unknown cid 0x%4.4x", cid
);
3468 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3470 if (sk
->sk_state
!= BT_CONNECTED
)
3474 case L2CAP_MODE_BASIC
:
3475 /* If socket recv buffers overflows we drop data here
3476 * which is *bad* because L2CAP has to be reliable.
3477 * But we don't have any other choice. L2CAP doesn't
3478 * provide flow control mechanism. */
3480 if (pi
->imtu
< skb
->len
)
3483 if (!sock_queue_rcv_skb(sk
, skb
))
3487 case L2CAP_MODE_ERTM
:
3488 control
= get_unaligned_le16(skb
->data
);
3492 if (__is_sar_start(control
))
3495 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3499 * We can just drop the corrupted I-frame here.
3500 * Receiver will miss it and start proper recovery
3501 * procedures and ask retransmission.
3503 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
)
3506 if (l2cap_check_fcs(pi
, skb
))
3509 if (__is_iframe(control
))
3510 err
= l2cap_data_channel_iframe(sk
, control
, skb
);
3512 err
= l2cap_data_channel_sframe(sk
, control
, skb
);
3518 case L2CAP_MODE_STREAMING
:
3519 control
= get_unaligned_le16(skb
->data
);
3523 if (__is_sar_start(control
))
3526 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3529 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
|| __is_sframe(control
))
3532 if (l2cap_check_fcs(pi
, skb
))
3535 tx_seq
= __get_txseq(control
);
3537 if (pi
->expected_tx_seq
== tx_seq
)
3538 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3540 pi
->expected_tx_seq
= tx_seq
+ 1;
3542 err
= l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3547 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, l2cap_pi(sk
)->mode
);
3561 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3565 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3569 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3571 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3574 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3577 if (!sock_queue_rcv_skb(sk
, skb
))
3589 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3591 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3595 skb_pull(skb
, L2CAP_HDR_SIZE
);
3596 cid
= __le16_to_cpu(lh
->cid
);
3597 len
= __le16_to_cpu(lh
->len
);
3599 if (len
!= skb
->len
) {
3604 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3607 case L2CAP_CID_SIGNALING
:
3608 l2cap_sig_channel(conn
, skb
);
3611 case L2CAP_CID_CONN_LESS
:
3612 psm
= get_unaligned_le16(skb
->data
);
3614 l2cap_conless_channel(conn
, psm
, skb
);
3618 l2cap_data_channel(conn
, cid
, skb
);
3623 /* ---- L2CAP interface with lower layer (HCI) ---- */
3625 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3627 int exact
= 0, lm1
= 0, lm2
= 0;
3628 register struct sock
*sk
;
3629 struct hlist_node
*node
;
3631 if (type
!= ACL_LINK
)
3634 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3636 /* Find listening sockets and check their link_mode */
3637 read_lock(&l2cap_sk_list
.lock
);
3638 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3639 if (sk
->sk_state
!= BT_LISTEN
)
3642 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3643 lm1
|= HCI_LM_ACCEPT
;
3644 if (l2cap_pi(sk
)->role_switch
)
3645 lm1
|= HCI_LM_MASTER
;
3647 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3648 lm2
|= HCI_LM_ACCEPT
;
3649 if (l2cap_pi(sk
)->role_switch
)
3650 lm2
|= HCI_LM_MASTER
;
3653 read_unlock(&l2cap_sk_list
.lock
);
3655 return exact
? lm1
: lm2
;
3658 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3660 struct l2cap_conn
*conn
;
3662 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3664 if (hcon
->type
!= ACL_LINK
)
3668 conn
= l2cap_conn_add(hcon
, status
);
3670 l2cap_conn_ready(conn
);
3672 l2cap_conn_del(hcon
, bt_err(status
));
3677 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3679 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3681 BT_DBG("hcon %p", hcon
);
3683 if (hcon
->type
!= ACL_LINK
|| !conn
)
3686 return conn
->disc_reason
;
3689 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3691 BT_DBG("hcon %p reason %d", hcon
, reason
);
3693 if (hcon
->type
!= ACL_LINK
)
3696 l2cap_conn_del(hcon
, bt_err(reason
));
3701 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3703 if (sk
->sk_type
!= SOCK_SEQPACKET
)
3706 if (encrypt
== 0x00) {
3707 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3708 l2cap_sock_clear_timer(sk
);
3709 l2cap_sock_set_timer(sk
, HZ
* 5);
3710 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3711 __l2cap_sock_close(sk
, ECONNREFUSED
);
3713 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3714 l2cap_sock_clear_timer(sk
);
3718 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3720 struct l2cap_chan_list
*l
;
3721 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3727 l
= &conn
->chan_list
;
3729 BT_DBG("conn %p", conn
);
3731 read_lock(&l
->lock
);
3733 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
3736 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3741 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3742 sk
->sk_state
== BT_CONFIG
)) {
3743 l2cap_check_encryption(sk
, encrypt
);
3748 if (sk
->sk_state
== BT_CONNECT
) {
3750 struct l2cap_conn_req req
;
3751 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3752 req
.psm
= l2cap_pi(sk
)->psm
;
3754 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
3756 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3757 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3759 l2cap_sock_clear_timer(sk
);
3760 l2cap_sock_set_timer(sk
, HZ
/ 10);
3762 } else if (sk
->sk_state
== BT_CONNECT2
) {
3763 struct l2cap_conn_rsp rsp
;
3767 sk
->sk_state
= BT_CONFIG
;
3768 result
= L2CAP_CR_SUCCESS
;
3770 sk
->sk_state
= BT_DISCONN
;
3771 l2cap_sock_set_timer(sk
, HZ
/ 10);
3772 result
= L2CAP_CR_SEC_BLOCK
;
3775 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3776 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3777 rsp
.result
= cpu_to_le16(result
);
3778 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3779 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3780 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3786 read_unlock(&l
->lock
);
3791 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3793 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3795 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
3798 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3800 if (flags
& ACL_START
) {
3801 struct l2cap_hdr
*hdr
;
3805 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3806 kfree_skb(conn
->rx_skb
);
3807 conn
->rx_skb
= NULL
;
3809 l2cap_conn_unreliable(conn
, ECOMM
);
3813 BT_ERR("Frame is too short (len %d)", skb
->len
);
3814 l2cap_conn_unreliable(conn
, ECOMM
);
3818 hdr
= (struct l2cap_hdr
*) skb
->data
;
3819 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3821 if (len
== skb
->len
) {
3822 /* Complete frame received */
3823 l2cap_recv_frame(conn
, skb
);
3827 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
3829 if (skb
->len
> len
) {
3830 BT_ERR("Frame is too long (len %d, expected len %d)",
3832 l2cap_conn_unreliable(conn
, ECOMM
);
3836 /* Allocate skb for the complete frame (with header) */
3837 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3841 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3843 conn
->rx_len
= len
- skb
->len
;
3845 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
3847 if (!conn
->rx_len
) {
3848 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
3849 l2cap_conn_unreliable(conn
, ECOMM
);
3853 if (skb
->len
> conn
->rx_len
) {
3854 BT_ERR("Fragment is too long (len %d, expected %d)",
3855 skb
->len
, conn
->rx_len
);
3856 kfree_skb(conn
->rx_skb
);
3857 conn
->rx_skb
= NULL
;
3859 l2cap_conn_unreliable(conn
, ECOMM
);
3863 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3865 conn
->rx_len
-= skb
->len
;
3867 if (!conn
->rx_len
) {
3868 /* Complete frame received */
3869 l2cap_recv_frame(conn
, conn
->rx_skb
);
3870 conn
->rx_skb
= NULL
;
3879 static ssize_t
l2cap_sysfs_show(struct class *dev
, char *buf
)
3882 struct hlist_node
*node
;
3885 read_lock_bh(&l2cap_sk_list
.lock
);
3887 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3888 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3890 str
+= sprintf(str
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3891 batostr(&bt_sk(sk
)->src
), batostr(&bt_sk(sk
)->dst
),
3892 sk
->sk_state
, __le16_to_cpu(pi
->psm
), pi
->scid
,
3893 pi
->dcid
, pi
->imtu
, pi
->omtu
, pi
->sec_level
);
3896 read_unlock_bh(&l2cap_sk_list
.lock
);
/* Read-only /sys/class/bluetooth/l2cap attribute backed by l2cap_sysfs_show. */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3903 static const struct proto_ops l2cap_sock_ops
= {
3904 .family
= PF_BLUETOOTH
,
3905 .owner
= THIS_MODULE
,
3906 .release
= l2cap_sock_release
,
3907 .bind
= l2cap_sock_bind
,
3908 .connect
= l2cap_sock_connect
,
3909 .listen
= l2cap_sock_listen
,
3910 .accept
= l2cap_sock_accept
,
3911 .getname
= l2cap_sock_getname
,
3912 .sendmsg
= l2cap_sock_sendmsg
,
3913 .recvmsg
= l2cap_sock_recvmsg
,
3914 .poll
= bt_sock_poll
,
3915 .ioctl
= bt_sock_ioctl
,
3916 .mmap
= sock_no_mmap
,
3917 .socketpair
= sock_no_socketpair
,
3918 .shutdown
= l2cap_sock_shutdown
,
3919 .setsockopt
= l2cap_sock_setsockopt
,
3920 .getsockopt
= l2cap_sock_getsockopt
3923 static const struct net_proto_family l2cap_sock_family_ops
= {
3924 .family
= PF_BLUETOOTH
,
3925 .owner
= THIS_MODULE
,
3926 .create
= l2cap_sock_create
,
3929 static struct hci_proto l2cap_hci_proto
= {
3931 .id
= HCI_PROTO_L2CAP
,
3932 .connect_ind
= l2cap_connect_ind
,
3933 .connect_cfm
= l2cap_connect_cfm
,
3934 .disconn_ind
= l2cap_disconn_ind
,
3935 .disconn_cfm
= l2cap_disconn_cfm
,
3936 .security_cfm
= l2cap_security_cfm
,
3937 .recv_acldata
= l2cap_recv_acldata
3940 static int __init
l2cap_init(void)
3944 err
= proto_register(&l2cap_proto
, 0);
3948 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
3950 BT_ERR("L2CAP socket registration failed");
3954 err
= hci_register_proto(&l2cap_hci_proto
);
3956 BT_ERR("L2CAP protocol registration failed");
3957 bt_sock_unregister(BTPROTO_L2CAP
);
3961 if (class_create_file(bt_class
, &class_attr_l2cap
) < 0)
3962 BT_ERR("Failed to create L2CAP info file");
3964 BT_INFO("L2CAP ver %s", VERSION
);
3965 BT_INFO("L2CAP socket layer initialized");
3970 proto_unregister(&l2cap_proto
);
3974 static void __exit
l2cap_exit(void)
3976 class_remove_file(bt_class
, &class_attr_l2cap
);
3978 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
3979 BT_ERR("L2CAP socket unregistration failed");
3981 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
3982 BT_ERR("L2CAP protocol unregistration failed");
3984 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
3996 module_init(l2cap_init
);
3997 module_exit(l2cap_exit
);
3999 module_param(enable_ertm
, bool, 0644);
4000 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4002 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4003 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4004 MODULE_VERSION(VERSION
);
4005 MODULE_LICENSE("GPL");
4006 MODULE_ALIAS("bt-proto-0");