/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm
= 1;
61 static int enable_ertm
= 0;
63 static int max_transmit
= L2CAP_DEFAULT_MAX_TX
;
64 static int tx_window
= L2CAP_DEFAULT_TX_WINDOW
;
66 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
67 static u8 l2cap_fixed_chan
[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops
;
71 static struct workqueue_struct
*_busy_wq
;
73 static struct bt_sock_list l2cap_sk_list
= {
74 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
77 static void l2cap_busy_work(struct work_struct
*work
);
79 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
80 static void l2cap_sock_close(struct sock
*sk
);
81 static void l2cap_sock_kill(struct sock
*sk
);
83 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
84 u8 code
, u8 ident
, u16 dlen
, void *data
);
86 /* ---- L2CAP timers ---- */
/* Socket timer expiry: close the channel with an error that matches the
 * phase the connection was in when the timer fired. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;	/* assumed default path -- elided in source, TODO confirm */

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
112 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
114 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
115 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
118 static void l2cap_sock_clear_timer(struct sock
*sk
)
120 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
121 sk_stop_timer(sk
, &sk
->sk_timer
);
124 /* ---- L2CAP channels ---- */
125 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
128 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
129 if (l2cap_pi(s
)->dcid
== cid
)
135 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
138 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
139 if (l2cap_pi(s
)->scid
== cid
)
145 /* Find channel with given SCID.
146 * Returns locked socket */
147 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
151 s
= __l2cap_get_chan_by_scid(l
, cid
);
154 read_unlock(&l
->lock
);
158 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
161 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
162 if (l2cap_pi(s
)->ident
== ident
)
168 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
172 s
= __l2cap_get_chan_by_ident(l
, ident
);
175 read_unlock(&l
->lock
);
179 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
181 u16 cid
= L2CAP_CID_DYN_START
;
183 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
184 if (!__l2cap_get_chan_by_scid(l
, cid
))
191 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
196 l2cap_pi(l
->head
)->prev_c
= sk
;
198 l2cap_pi(sk
)->next_c
= l
->head
;
199 l2cap_pi(sk
)->prev_c
= NULL
;
203 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
205 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
207 write_lock_bh(&l
->lock
);
212 l2cap_pi(next
)->prev_c
= prev
;
214 l2cap_pi(prev
)->next_c
= next
;
215 write_unlock_bh(&l
->lock
);
220 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
222 struct l2cap_chan_list
*l
= &conn
->chan_list
;
224 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
225 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
227 conn
->disc_reason
= 0x13;
229 l2cap_pi(sk
)->conn
= conn
;
231 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
232 /* Alloc CID for connection-oriented socket */
233 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
234 } else if (sk
->sk_type
== SOCK_DGRAM
) {
235 /* Connectionless socket */
236 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
237 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
238 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
240 /* Raw socket can send/recv signalling messages only */
241 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
242 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
243 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
246 __l2cap_chan_link(l
, sk
);
249 bt_accept_enqueue(parent
, sk
);
253 * Must be called on the locked socket. */
254 static void l2cap_chan_del(struct sock
*sk
, int err
)
256 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
257 struct sock
*parent
= bt_sk(sk
)->parent
;
259 l2cap_sock_clear_timer(sk
);
261 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
264 /* Unlink from channel list */
265 l2cap_chan_unlink(&conn
->chan_list
, sk
);
266 l2cap_pi(sk
)->conn
= NULL
;
267 hci_conn_put(conn
->hcon
);
270 sk
->sk_state
= BT_CLOSED
;
271 sock_set_flag(sk
, SOCK_ZAPPED
);
277 bt_accept_unlink(sk
);
278 parent
->sk_data_ready(parent
, 0);
280 sk
->sk_state_change(sk
);
283 /* Service level security */
284 static inline int l2cap_check_security(struct sock
*sk
)
286 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
289 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
290 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
291 auth_type
= HCI_AT_NO_BONDING_MITM
;
293 auth_type
= HCI_AT_NO_BONDING
;
295 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
296 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
298 switch (l2cap_pi(sk
)->sec_level
) {
299 case BT_SECURITY_HIGH
:
300 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
302 case BT_SECURITY_MEDIUM
:
303 auth_type
= HCI_AT_GENERAL_BONDING
;
306 auth_type
= HCI_AT_NO_BONDING
;
311 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
315 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
319 /* Get next available identificator.
320 * 1 - 128 are used by kernel.
321 * 129 - 199 are reserved.
322 * 200 - 254 are used by utilities like l2ping, etc.
325 spin_lock_bh(&conn
->lock
);
327 if (++conn
->tx_ident
> 128)
332 spin_unlock_bh(&conn
->lock
);
337 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
339 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
341 BT_DBG("code 0x%2.2x", code
);
346 hci_send_acl(conn
->hcon
, skb
, 0);
349 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
352 struct l2cap_hdr
*lh
;
353 struct l2cap_conn
*conn
= pi
->conn
;
354 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
356 if (pi
->fcs
== L2CAP_FCS_CRC16
)
359 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
361 count
= min_t(unsigned int, conn
->mtu
, hlen
);
362 control
|= L2CAP_CTRL_FRAME_TYPE
;
364 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
365 control
|= L2CAP_CTRL_FINAL
;
366 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
369 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
370 control
|= L2CAP_CTRL_POLL
;
371 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
374 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
378 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
379 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
380 lh
->cid
= cpu_to_le16(pi
->dcid
);
381 put_unaligned_le16(control
, skb_put(skb
, 2));
383 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
384 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
385 put_unaligned_le16(fcs
, skb_put(skb
, 2));
388 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
391 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
393 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
394 control
|= L2CAP_SUPER_RCV_NOT_READY
;
395 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
397 control
|= L2CAP_SUPER_RCV_READY
;
399 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
401 l2cap_send_sframe(pi
, control
);
404 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
406 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
409 static void l2cap_do_start(struct sock
*sk
)
411 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
413 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
414 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
417 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
418 struct l2cap_conn_req req
;
419 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
420 req
.psm
= l2cap_pi(sk
)->psm
;
422 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
423 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
425 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
426 L2CAP_CONN_REQ
, sizeof(req
), &req
);
429 struct l2cap_info_req req
;
430 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
432 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
433 conn
->info_ident
= l2cap_get_ident(conn
);
435 mod_timer(&conn
->info_timer
, jiffies
+
436 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
438 l2cap_send_cmd(conn
, conn
->info_ident
,
439 L2CAP_INFO_REQ
, sizeof(req
), &req
);
443 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
)
445 struct l2cap_disconn_req req
;
447 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
448 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
449 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
450 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
453 /* ---- L2CAP connections ---- */
454 static void l2cap_conn_start(struct l2cap_conn
*conn
)
456 struct l2cap_chan_list
*l
= &conn
->chan_list
;
459 BT_DBG("conn %p", conn
);
463 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
466 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
467 sk
->sk_type
!= SOCK_STREAM
) {
472 if (sk
->sk_state
== BT_CONNECT
) {
473 if (l2cap_check_security(sk
) &&
474 __l2cap_no_conn_pending(sk
)) {
475 struct l2cap_conn_req req
;
476 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
477 req
.psm
= l2cap_pi(sk
)->psm
;
479 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
480 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
482 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
483 L2CAP_CONN_REQ
, sizeof(req
), &req
);
485 } else if (sk
->sk_state
== BT_CONNECT2
) {
486 struct l2cap_conn_rsp rsp
;
487 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
488 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
490 if (l2cap_check_security(sk
)) {
491 if (bt_sk(sk
)->defer_setup
) {
492 struct sock
*parent
= bt_sk(sk
)->parent
;
493 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
494 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
495 parent
->sk_data_ready(parent
, 0);
498 sk
->sk_state
= BT_CONFIG
;
499 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
500 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
503 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
504 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
507 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
508 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
514 read_unlock(&l
->lock
);
517 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
519 struct l2cap_chan_list
*l
= &conn
->chan_list
;
522 BT_DBG("conn %p", conn
);
526 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
529 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
530 sk
->sk_type
!= SOCK_STREAM
) {
531 l2cap_sock_clear_timer(sk
);
532 sk
->sk_state
= BT_CONNECTED
;
533 sk
->sk_state_change(sk
);
534 } else if (sk
->sk_state
== BT_CONNECT
)
540 read_unlock(&l
->lock
);
543 /* Notify sockets that we cannot guaranty reliability anymore */
544 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
546 struct l2cap_chan_list
*l
= &conn
->chan_list
;
549 BT_DBG("conn %p", conn
);
553 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
554 if (l2cap_pi(sk
)->force_reliable
)
558 read_unlock(&l
->lock
);
561 static void l2cap_info_timeout(unsigned long arg
)
563 struct l2cap_conn
*conn
= (void *) arg
;
565 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
566 conn
->info_ident
= 0;
568 l2cap_conn_start(conn
);
571 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
573 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
578 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
582 hcon
->l2cap_data
= conn
;
585 BT_DBG("hcon %p conn %p", hcon
, conn
);
587 conn
->mtu
= hcon
->hdev
->acl_mtu
;
588 conn
->src
= &hcon
->hdev
->bdaddr
;
589 conn
->dst
= &hcon
->dst
;
593 spin_lock_init(&conn
->lock
);
594 rwlock_init(&conn
->chan_list
.lock
);
596 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
597 (unsigned long) conn
);
599 conn
->disc_reason
= 0x13;
604 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
606 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
612 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
614 kfree_skb(conn
->rx_skb
);
617 while ((sk
= conn
->chan_list
.head
)) {
619 l2cap_chan_del(sk
, err
);
624 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
625 del_timer_sync(&conn
->info_timer
);
627 hcon
->l2cap_data
= NULL
;
631 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
633 struct l2cap_chan_list
*l
= &conn
->chan_list
;
634 write_lock_bh(&l
->lock
);
635 __l2cap_chan_add(conn
, sk
, parent
);
636 write_unlock_bh(&l
->lock
);
639 /* ---- Socket interface ---- */
640 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
643 struct hlist_node
*node
;
644 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
645 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
652 /* Find socket with psm and source bdaddr.
653 * Returns closest match.
655 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
657 struct sock
*sk
= NULL
, *sk1
= NULL
;
658 struct hlist_node
*node
;
660 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
661 if (state
&& sk
->sk_state
!= state
)
664 if (l2cap_pi(sk
)->psm
== psm
) {
666 if (!bacmp(&bt_sk(sk
)->src
, src
))
670 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
674 return node
? sk
: sk1
;
677 /* Find socket with given address (psm, src).
678 * Returns locked socket */
679 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
682 read_lock(&l2cap_sk_list
.lock
);
683 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
686 read_unlock(&l2cap_sk_list
.lock
);
690 static void l2cap_sock_destruct(struct sock
*sk
)
694 skb_queue_purge(&sk
->sk_receive_queue
);
695 skb_queue_purge(&sk
->sk_write_queue
);
698 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
702 BT_DBG("parent %p", parent
);
704 /* Close not yet accepted channels */
705 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
706 l2cap_sock_close(sk
);
708 parent
->sk_state
= BT_CLOSED
;
709 sock_set_flag(parent
, SOCK_ZAPPED
);
712 /* Kill socket (only if zapped and orphan)
713 * Must be called on unlocked socket.
715 static void l2cap_sock_kill(struct sock
*sk
)
717 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
720 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
722 /* Kill poor orphan */
723 bt_sock_unlink(&l2cap_sk_list
, sk
);
724 sock_set_flag(sk
, SOCK_DEAD
);
728 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
730 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
732 switch (sk
->sk_state
) {
734 l2cap_sock_cleanup_listen(sk
);
739 if (sk
->sk_type
== SOCK_SEQPACKET
||
740 sk
->sk_type
== SOCK_STREAM
) {
741 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
743 sk
->sk_state
= BT_DISCONN
;
744 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
745 l2cap_send_disconn_req(conn
, sk
);
747 l2cap_chan_del(sk
, reason
);
751 if (sk
->sk_type
== SOCK_SEQPACKET
||
752 sk
->sk_type
== SOCK_STREAM
) {
753 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
754 struct l2cap_conn_rsp rsp
;
757 if (bt_sk(sk
)->defer_setup
)
758 result
= L2CAP_CR_SEC_BLOCK
;
760 result
= L2CAP_CR_BAD_PSM
;
762 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
763 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
764 rsp
.result
= cpu_to_le16(result
);
765 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
766 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
767 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
769 l2cap_chan_del(sk
, reason
);
774 l2cap_chan_del(sk
, reason
);
778 sock_set_flag(sk
, SOCK_ZAPPED
);
783 /* Must be called on unlocked socket. */
784 static void l2cap_sock_close(struct sock
*sk
)
786 l2cap_sock_clear_timer(sk
);
788 __l2cap_sock_close(sk
, ECONNRESET
);
793 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
795 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
800 sk
->sk_type
= parent
->sk_type
;
801 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
803 pi
->imtu
= l2cap_pi(parent
)->imtu
;
804 pi
->omtu
= l2cap_pi(parent
)->omtu
;
805 pi
->mode
= l2cap_pi(parent
)->mode
;
806 pi
->fcs
= l2cap_pi(parent
)->fcs
;
807 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
808 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
809 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
810 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
811 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
813 pi
->imtu
= L2CAP_DEFAULT_MTU
;
815 if (enable_ertm
&& sk
->sk_type
== SOCK_STREAM
)
816 pi
->mode
= L2CAP_MODE_ERTM
;
818 pi
->mode
= L2CAP_MODE_BASIC
;
819 pi
->max_tx
= max_transmit
;
820 pi
->fcs
= L2CAP_FCS_CRC16
;
821 pi
->tx_win
= tx_window
;
822 pi
->sec_level
= BT_SECURITY_LOW
;
824 pi
->force_reliable
= 0;
827 /* Default config options */
829 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
830 skb_queue_head_init(TX_QUEUE(sk
));
831 skb_queue_head_init(SREJ_QUEUE(sk
));
832 skb_queue_head_init(BUSY_QUEUE(sk
));
833 INIT_LIST_HEAD(SREJ_LIST(sk
));
836 static struct proto l2cap_proto
= {
838 .owner
= THIS_MODULE
,
839 .obj_size
= sizeof(struct l2cap_pinfo
)
842 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
846 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
850 sock_init_data(sock
, sk
);
851 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
853 sk
->sk_destruct
= l2cap_sock_destruct
;
854 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
856 sock_reset_flag(sk
, SOCK_ZAPPED
);
858 sk
->sk_protocol
= proto
;
859 sk
->sk_state
= BT_OPEN
;
861 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
863 bt_sock_link(&l2cap_sk_list
, sk
);
867 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
872 BT_DBG("sock %p", sock
);
874 sock
->state
= SS_UNCONNECTED
;
876 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
877 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
878 return -ESOCKTNOSUPPORT
;
880 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
883 sock
->ops
= &l2cap_sock_ops
;
885 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
889 l2cap_sock_init(sk
, NULL
);
893 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
895 struct sock
*sk
= sock
->sk
;
896 struct sockaddr_l2 la
;
901 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
904 memset(&la
, 0, sizeof(la
));
905 len
= min_t(unsigned int, sizeof(la
), alen
);
906 memcpy(&la
, addr
, len
);
913 if (sk
->sk_state
!= BT_OPEN
) {
918 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
919 !capable(CAP_NET_BIND_SERVICE
)) {
924 write_lock_bh(&l2cap_sk_list
.lock
);
926 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
929 /* Save source address */
930 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
931 l2cap_pi(sk
)->psm
= la
.l2_psm
;
932 l2cap_pi(sk
)->sport
= la
.l2_psm
;
933 sk
->sk_state
= BT_BOUND
;
935 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
936 __le16_to_cpu(la
.l2_psm
) == 0x0003)
937 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
940 write_unlock_bh(&l2cap_sk_list
.lock
);
947 static int l2cap_do_connect(struct sock
*sk
)
949 bdaddr_t
*src
= &bt_sk(sk
)->src
;
950 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
951 struct l2cap_conn
*conn
;
952 struct hci_conn
*hcon
;
953 struct hci_dev
*hdev
;
957 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
960 hdev
= hci_get_route(dst
, src
);
962 return -EHOSTUNREACH
;
964 hci_dev_lock_bh(hdev
);
968 if (sk
->sk_type
== SOCK_RAW
) {
969 switch (l2cap_pi(sk
)->sec_level
) {
970 case BT_SECURITY_HIGH
:
971 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
973 case BT_SECURITY_MEDIUM
:
974 auth_type
= HCI_AT_DEDICATED_BONDING
;
977 auth_type
= HCI_AT_NO_BONDING
;
980 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
981 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
982 auth_type
= HCI_AT_NO_BONDING_MITM
;
984 auth_type
= HCI_AT_NO_BONDING
;
986 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
987 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
989 switch (l2cap_pi(sk
)->sec_level
) {
990 case BT_SECURITY_HIGH
:
991 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
993 case BT_SECURITY_MEDIUM
:
994 auth_type
= HCI_AT_GENERAL_BONDING
;
997 auth_type
= HCI_AT_NO_BONDING
;
1002 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1003 l2cap_pi(sk
)->sec_level
, auth_type
);
1007 conn
= l2cap_conn_add(hcon
, 0);
1015 /* Update source addr of the socket */
1016 bacpy(src
, conn
->src
);
1018 l2cap_chan_add(conn
, sk
, NULL
);
1020 sk
->sk_state
= BT_CONNECT
;
1021 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1023 if (hcon
->state
== BT_CONNECTED
) {
1024 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1025 sk
->sk_type
!= SOCK_STREAM
) {
1026 l2cap_sock_clear_timer(sk
);
1027 sk
->sk_state
= BT_CONNECTED
;
1033 hci_dev_unlock_bh(hdev
);
1038 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1040 struct sock
*sk
= sock
->sk
;
1041 struct sockaddr_l2 la
;
1044 BT_DBG("sk %p", sk
);
1046 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1047 addr
->sa_family
!= AF_BLUETOOTH
)
1050 memset(&la
, 0, sizeof(la
));
1051 len
= min_t(unsigned int, sizeof(la
), alen
);
1052 memcpy(&la
, addr
, len
);
1059 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1065 switch (l2cap_pi(sk
)->mode
) {
1066 case L2CAP_MODE_BASIC
:
1068 case L2CAP_MODE_ERTM
:
1069 case L2CAP_MODE_STREAMING
:
1078 switch (sk
->sk_state
) {
1082 /* Already connecting */
1086 /* Already connected */
1099 /* Set destination address and psm */
1100 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1101 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1103 err
= l2cap_do_connect(sk
);
1108 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1109 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1115 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1117 struct sock
*sk
= sock
->sk
;
1120 BT_DBG("sk %p backlog %d", sk
, backlog
);
1124 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1125 || sk
->sk_state
!= BT_BOUND
) {
1130 switch (l2cap_pi(sk
)->mode
) {
1131 case L2CAP_MODE_BASIC
:
1133 case L2CAP_MODE_ERTM
:
1134 case L2CAP_MODE_STREAMING
:
1143 if (!l2cap_pi(sk
)->psm
) {
1144 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1149 write_lock_bh(&l2cap_sk_list
.lock
);
1151 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1152 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1153 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1154 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1159 write_unlock_bh(&l2cap_sk_list
.lock
);
1165 sk
->sk_max_ack_backlog
= backlog
;
1166 sk
->sk_ack_backlog
= 0;
1167 sk
->sk_state
= BT_LISTEN
;
1174 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1176 DECLARE_WAITQUEUE(wait
, current
);
1177 struct sock
*sk
= sock
->sk
, *nsk
;
1181 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1183 if (sk
->sk_state
!= BT_LISTEN
) {
1188 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1190 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1192 /* Wait for an incoming connection. (wake-one). */
1193 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1194 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1195 set_current_state(TASK_INTERRUPTIBLE
);
1202 timeo
= schedule_timeout(timeo
);
1203 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1205 if (sk
->sk_state
!= BT_LISTEN
) {
1210 if (signal_pending(current
)) {
1211 err
= sock_intr_errno(timeo
);
1215 set_current_state(TASK_RUNNING
);
1216 remove_wait_queue(sk_sleep(sk
), &wait
);
1221 newsock
->state
= SS_CONNECTED
;
1223 BT_DBG("new socket %p", nsk
);
1230 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1232 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1233 struct sock
*sk
= sock
->sk
;
1235 BT_DBG("sock %p, sk %p", sock
, sk
);
1237 addr
->sa_family
= AF_BLUETOOTH
;
1238 *len
= sizeof(struct sockaddr_l2
);
1241 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1242 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1243 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1245 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1246 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1247 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1253 static int __l2cap_wait_ack(struct sock
*sk
)
1255 DECLARE_WAITQUEUE(wait
, current
);
1259 add_wait_queue(sk_sleep(sk
), &wait
);
1260 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1261 set_current_state(TASK_INTERRUPTIBLE
);
1266 if (signal_pending(current
)) {
1267 err
= sock_intr_errno(timeo
);
1272 timeo
= schedule_timeout(timeo
);
1275 err
= sock_error(sk
);
1279 set_current_state(TASK_RUNNING
);
1280 remove_wait_queue(sk_sleep(sk
), &wait
);
1284 static void l2cap_monitor_timeout(unsigned long arg
)
1286 struct sock
*sk
= (void *) arg
;
1289 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1290 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
);
1295 l2cap_pi(sk
)->retry_count
++;
1296 __mod_monitor_timer();
1298 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1302 static void l2cap_retrans_timeout(unsigned long arg
)
1304 struct sock
*sk
= (void *) arg
;
1307 l2cap_pi(sk
)->retry_count
= 1;
1308 __mod_monitor_timer();
1310 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1312 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1316 static void l2cap_drop_acked_frames(struct sock
*sk
)
1318 struct sk_buff
*skb
;
1320 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1321 l2cap_pi(sk
)->unacked_frames
) {
1322 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1325 skb
= skb_dequeue(TX_QUEUE(sk
));
1328 l2cap_pi(sk
)->unacked_frames
--;
1331 if (!l2cap_pi(sk
)->unacked_frames
)
1332 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1335 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1337 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1339 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1341 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1344 static int l2cap_streaming_send(struct sock
*sk
)
1346 struct sk_buff
*skb
, *tx_skb
;
1347 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1350 while ((skb
= sk
->sk_send_head
)) {
1351 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1353 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1354 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1355 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1357 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1358 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1359 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1362 l2cap_do_send(sk
, tx_skb
);
1364 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1366 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1367 sk
->sk_send_head
= NULL
;
1369 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1371 skb
= skb_dequeue(TX_QUEUE(sk
));
1377 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1379 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1380 struct sk_buff
*skb
, *tx_skb
;
1383 skb
= skb_peek(TX_QUEUE(sk
));
1388 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1391 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1394 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1396 if (pi
->remote_max_tx
&&
1397 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1398 l2cap_send_disconn_req(pi
->conn
, sk
);
1402 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1403 bt_cb(skb
)->retries
++;
1404 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1405 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1406 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1407 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1409 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1410 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1411 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1414 l2cap_do_send(sk
, tx_skb
);
1417 static int l2cap_ertm_send(struct sock
*sk
)
1419 struct sk_buff
*skb
, *tx_skb
;
1420 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1424 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
1427 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
)) &&
1428 !(pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)) {
1430 if (pi
->remote_max_tx
&&
1431 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1432 l2cap_send_disconn_req(pi
->conn
, sk
);
1436 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1438 bt_cb(skb
)->retries
++;
1440 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1441 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1442 control
|= L2CAP_CTRL_FINAL
;
1443 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1445 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1446 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1447 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1450 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1451 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1452 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1455 l2cap_do_send(sk
, tx_skb
);
1457 __mod_retrans_timer();
1459 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1460 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1462 pi
->unacked_frames
++;
1465 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1466 sk
->sk_send_head
= NULL
;
1468 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1476 static int l2cap_retransmit_frames(struct sock
*sk
)
1478 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1481 spin_lock_bh(&pi
->send_lock
);
1483 if (!skb_queue_empty(TX_QUEUE(sk
)))
1484 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1486 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1487 ret
= l2cap_ertm_send(sk
);
1489 spin_unlock_bh(&pi
->send_lock
);
1494 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1496 struct sock
*sk
= (struct sock
*)pi
;
1500 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1502 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1503 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1504 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1505 l2cap_send_sframe(pi
, control
);
1509 spin_lock_bh(&pi
->send_lock
);
1510 nframes
= l2cap_ertm_send(sk
);
1511 spin_unlock_bh(&pi
->send_lock
);
1516 control
|= L2CAP_SUPER_RCV_READY
;
1517 l2cap_send_sframe(pi
, control
);
1520 static void l2cap_send_srejtail(struct sock
*sk
)
1522 struct srej_list
*tail
;
1525 control
= L2CAP_SUPER_SELECT_REJECT
;
1526 control
|= L2CAP_CTRL_FINAL
;
1528 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1529 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1531 l2cap_send_sframe(l2cap_pi(sk
), control
);
1534 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1536 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1537 struct sk_buff
**frag
;
1540 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1546 /* Continuation fragments (no L2CAP header) */
1547 frag
= &skb_shinfo(skb
)->frag_list
;
1549 count
= min_t(unsigned int, conn
->mtu
, len
);
1551 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1554 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1560 frag
= &(*frag
)->next
;
1566 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1568 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1569 struct sk_buff
*skb
;
1570 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1571 struct l2cap_hdr
*lh
;
1573 BT_DBG("sk %p len %d", sk
, (int)len
);
1575 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1576 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1577 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1579 return ERR_PTR(-ENOMEM
);
1581 /* Create L2CAP header */
1582 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1583 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1584 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1585 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1587 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1588 if (unlikely(err
< 0)) {
1590 return ERR_PTR(err
);
1595 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1597 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1598 struct sk_buff
*skb
;
1599 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1600 struct l2cap_hdr
*lh
;
1602 BT_DBG("sk %p len %d", sk
, (int)len
);
1604 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1605 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1606 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1608 return ERR_PTR(-ENOMEM
);
1610 /* Create L2CAP header */
1611 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1612 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1613 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1615 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1616 if (unlikely(err
< 0)) {
1618 return ERR_PTR(err
);
1623 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1625 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1626 struct sk_buff
*skb
;
1627 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1628 struct l2cap_hdr
*lh
;
1630 BT_DBG("sk %p len %d", sk
, (int)len
);
1633 return ERR_PTR(-ENOTCONN
);
1638 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1641 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1642 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1643 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1645 return ERR_PTR(-ENOMEM
);
1647 /* Create L2CAP header */
1648 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1649 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1650 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1651 put_unaligned_le16(control
, skb_put(skb
, 2));
1653 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1655 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1656 if (unlikely(err
< 0)) {
1658 return ERR_PTR(err
);
1661 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1662 put_unaligned_le16(0, skb_put(skb
, 2));
1664 bt_cb(skb
)->retries
= 0;
1668 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1670 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1671 struct sk_buff
*skb
;
1672 struct sk_buff_head sar_queue
;
1676 skb_queue_head_init(&sar_queue
);
1677 control
= L2CAP_SDU_START
;
1678 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1680 return PTR_ERR(skb
);
1682 __skb_queue_tail(&sar_queue
, skb
);
1683 len
-= pi
->remote_mps
;
1684 size
+= pi
->remote_mps
;
1689 if (len
> pi
->remote_mps
) {
1690 control
= L2CAP_SDU_CONTINUE
;
1691 buflen
= pi
->remote_mps
;
1693 control
= L2CAP_SDU_END
;
1697 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1699 skb_queue_purge(&sar_queue
);
1700 return PTR_ERR(skb
);
1703 __skb_queue_tail(&sar_queue
, skb
);
1707 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1708 spin_lock_bh(&pi
->send_lock
);
1709 if (sk
->sk_send_head
== NULL
)
1710 sk
->sk_send_head
= sar_queue
.next
;
1711 spin_unlock_bh(&pi
->send_lock
);
1716 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1718 struct sock
*sk
= sock
->sk
;
1719 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1720 struct sk_buff
*skb
;
1724 BT_DBG("sock %p, sk %p", sock
, sk
);
1726 err
= sock_error(sk
);
1730 if (msg
->msg_flags
& MSG_OOB
)
1735 if (sk
->sk_state
!= BT_CONNECTED
) {
1740 /* Connectionless channel */
1741 if (sk
->sk_type
== SOCK_DGRAM
) {
1742 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1746 l2cap_do_send(sk
, skb
);
1753 case L2CAP_MODE_BASIC
:
1754 /* Check outgoing MTU */
1755 if (len
> pi
->omtu
) {
1760 /* Create a basic PDU */
1761 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1767 l2cap_do_send(sk
, skb
);
1771 case L2CAP_MODE_ERTM
:
1772 case L2CAP_MODE_STREAMING
:
1773 /* Entire SDU fits into one PDU */
1774 if (len
<= pi
->remote_mps
) {
1775 control
= L2CAP_SDU_UNSEGMENTED
;
1776 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1781 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1783 if (pi
->mode
== L2CAP_MODE_ERTM
)
1784 spin_lock_bh(&pi
->send_lock
);
1786 if (sk
->sk_send_head
== NULL
)
1787 sk
->sk_send_head
= skb
;
1789 if (pi
->mode
== L2CAP_MODE_ERTM
)
1790 spin_unlock_bh(&pi
->send_lock
);
1792 /* Segment SDU into multiples PDUs */
1793 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1798 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1799 err
= l2cap_streaming_send(sk
);
1801 spin_lock_bh(&pi
->send_lock
);
1802 err
= l2cap_ertm_send(sk
);
1803 spin_unlock_bh(&pi
->send_lock
);
1811 BT_DBG("bad state %1.1x", pi
->mode
);
1820 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1822 struct sock
*sk
= sock
->sk
;
1826 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1827 struct l2cap_conn_rsp rsp
;
1829 sk
->sk_state
= BT_CONFIG
;
1831 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1832 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1833 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1834 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1835 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1836 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1844 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1847 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1849 struct sock
*sk
= sock
->sk
;
1850 struct l2cap_options opts
;
1854 BT_DBG("sk %p", sk
);
1860 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1861 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1862 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1863 opts
.mode
= l2cap_pi(sk
)->mode
;
1864 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1865 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1866 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1868 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1869 if (copy_from_user((char *) &opts
, optval
, len
)) {
1874 l2cap_pi(sk
)->mode
= opts
.mode
;
1875 switch (l2cap_pi(sk
)->mode
) {
1876 case L2CAP_MODE_BASIC
:
1878 case L2CAP_MODE_ERTM
:
1879 case L2CAP_MODE_STREAMING
:
1888 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1889 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1890 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1891 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1892 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
1896 if (get_user(opt
, (u32 __user
*) optval
)) {
1901 if (opt
& L2CAP_LM_AUTH
)
1902 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1903 if (opt
& L2CAP_LM_ENCRYPT
)
1904 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1905 if (opt
& L2CAP_LM_SECURE
)
1906 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1908 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1909 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1921 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1923 struct sock
*sk
= sock
->sk
;
1924 struct bt_security sec
;
1928 BT_DBG("sk %p", sk
);
1930 if (level
== SOL_L2CAP
)
1931 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1933 if (level
!= SOL_BLUETOOTH
)
1934 return -ENOPROTOOPT
;
1940 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
1941 && sk
->sk_type
!= SOCK_RAW
) {
1946 sec
.level
= BT_SECURITY_LOW
;
1948 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1949 if (copy_from_user((char *) &sec
, optval
, len
)) {
1954 if (sec
.level
< BT_SECURITY_LOW
||
1955 sec
.level
> BT_SECURITY_HIGH
) {
1960 l2cap_pi(sk
)->sec_level
= sec
.level
;
1963 case BT_DEFER_SETUP
:
1964 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1969 if (get_user(opt
, (u32 __user
*) optval
)) {
1974 bt_sk(sk
)->defer_setup
= opt
;
1986 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1988 struct sock
*sk
= sock
->sk
;
1989 struct l2cap_options opts
;
1990 struct l2cap_conninfo cinfo
;
1994 BT_DBG("sk %p", sk
);
1996 if (get_user(len
, optlen
))
2003 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2004 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2005 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2006 opts
.mode
= l2cap_pi(sk
)->mode
;
2007 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2008 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2009 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2011 len
= min_t(unsigned int, len
, sizeof(opts
));
2012 if (copy_to_user(optval
, (char *) &opts
, len
))
2018 switch (l2cap_pi(sk
)->sec_level
) {
2019 case BT_SECURITY_LOW
:
2020 opt
= L2CAP_LM_AUTH
;
2022 case BT_SECURITY_MEDIUM
:
2023 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2025 case BT_SECURITY_HIGH
:
2026 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2034 if (l2cap_pi(sk
)->role_switch
)
2035 opt
|= L2CAP_LM_MASTER
;
2037 if (l2cap_pi(sk
)->force_reliable
)
2038 opt
|= L2CAP_LM_RELIABLE
;
2040 if (put_user(opt
, (u32 __user
*) optval
))
2044 case L2CAP_CONNINFO
:
2045 if (sk
->sk_state
!= BT_CONNECTED
&&
2046 !(sk
->sk_state
== BT_CONNECT2
&&
2047 bt_sk(sk
)->defer_setup
)) {
2052 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2053 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2055 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2056 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2070 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2072 struct sock
*sk
= sock
->sk
;
2073 struct bt_security sec
;
2076 BT_DBG("sk %p", sk
);
2078 if (level
== SOL_L2CAP
)
2079 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2081 if (level
!= SOL_BLUETOOTH
)
2082 return -ENOPROTOOPT
;
2084 if (get_user(len
, optlen
))
2091 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2092 && sk
->sk_type
!= SOCK_RAW
) {
2097 sec
.level
= l2cap_pi(sk
)->sec_level
;
2099 len
= min_t(unsigned int, len
, sizeof(sec
));
2100 if (copy_to_user(optval
, (char *) &sec
, len
))
2105 case BT_DEFER_SETUP
:
2106 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2111 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2125 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2127 struct sock
*sk
= sock
->sk
;
2130 BT_DBG("sock %p, sk %p", sock
, sk
);
2136 if (!sk
->sk_shutdown
) {
2137 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2138 err
= __l2cap_wait_ack(sk
);
2140 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2141 l2cap_sock_clear_timer(sk
);
2142 __l2cap_sock_close(sk
, 0);
2144 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2145 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2152 static int l2cap_sock_release(struct socket
*sock
)
2154 struct sock
*sk
= sock
->sk
;
2157 BT_DBG("sock %p, sk %p", sock
, sk
);
2162 err
= l2cap_sock_shutdown(sock
, 2);
2165 l2cap_sock_kill(sk
);
2169 static void l2cap_chan_ready(struct sock
*sk
)
2171 struct sock
*parent
= bt_sk(sk
)->parent
;
2173 BT_DBG("sk %p, parent %p", sk
, parent
);
2175 l2cap_pi(sk
)->conf_state
= 0;
2176 l2cap_sock_clear_timer(sk
);
2179 /* Outgoing channel.
2180 * Wake up socket sleeping on connect.
2182 sk
->sk_state
= BT_CONNECTED
;
2183 sk
->sk_state_change(sk
);
2185 /* Incoming channel.
2186 * Wake up socket sleeping on accept.
2188 parent
->sk_data_ready(parent
, 0);
2192 /* Copy frame to all raw sockets on that connection */
2193 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2195 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2196 struct sk_buff
*nskb
;
2199 BT_DBG("conn %p", conn
);
2201 read_lock(&l
->lock
);
2202 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2203 if (sk
->sk_type
!= SOCK_RAW
)
2206 /* Don't send frame to the socket it came from */
2209 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2213 if (sock_queue_rcv_skb(sk
, nskb
))
2216 read_unlock(&l
->lock
);
2219 /* ---- L2CAP signalling commands ---- */
2220 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2221 u8 code
, u8 ident
, u16 dlen
, void *data
)
2223 struct sk_buff
*skb
, **frag
;
2224 struct l2cap_cmd_hdr
*cmd
;
2225 struct l2cap_hdr
*lh
;
2228 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2229 conn
, code
, ident
, dlen
);
2231 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2232 count
= min_t(unsigned int, conn
->mtu
, len
);
2234 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2238 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2239 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2240 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2242 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2245 cmd
->len
= cpu_to_le16(dlen
);
2248 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2249 memcpy(skb_put(skb
, count
), data
, count
);
2255 /* Continuation fragments (no L2CAP header) */
2256 frag
= &skb_shinfo(skb
)->frag_list
;
2258 count
= min_t(unsigned int, conn
->mtu
, len
);
2260 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2264 memcpy(skb_put(*frag
, count
), data
, count
);
2269 frag
= &(*frag
)->next
;
2279 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2281 struct l2cap_conf_opt
*opt
= *ptr
;
2284 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2292 *val
= *((u8
*) opt
->val
);
2296 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2300 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2304 *val
= (unsigned long) opt
->val
;
2308 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2312 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2314 struct l2cap_conf_opt
*opt
= *ptr
;
2316 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2323 *((u8
*) opt
->val
) = val
;
2327 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2331 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2335 memcpy(opt
->val
, (void *) val
, len
);
2339 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2342 static void l2cap_ack_timeout(unsigned long arg
)
2344 struct sock
*sk
= (void *) arg
;
2347 l2cap_send_ack(l2cap_pi(sk
));
2351 static inline void l2cap_ertm_init(struct sock
*sk
)
2353 l2cap_pi(sk
)->expected_ack_seq
= 0;
2354 l2cap_pi(sk
)->unacked_frames
= 0;
2355 l2cap_pi(sk
)->buffer_seq
= 0;
2356 l2cap_pi(sk
)->num_acked
= 0;
2357 l2cap_pi(sk
)->frames_sent
= 0;
2359 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2360 l2cap_retrans_timeout
, (unsigned long) sk
);
2361 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2362 l2cap_monitor_timeout
, (unsigned long) sk
);
2363 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2364 l2cap_ack_timeout
, (unsigned long) sk
);
2366 __skb_queue_head_init(SREJ_QUEUE(sk
));
2367 __skb_queue_head_init(BUSY_QUEUE(sk
));
2368 spin_lock_init(&l2cap_pi(sk
)->send_lock
);
2370 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2373 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2375 u32 local_feat_mask
= l2cap_feat_mask
;
2377 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2380 case L2CAP_MODE_ERTM
:
2381 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2382 case L2CAP_MODE_STREAMING
:
2383 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2389 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2392 case L2CAP_MODE_STREAMING
:
2393 case L2CAP_MODE_ERTM
:
2394 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2398 return L2CAP_MODE_BASIC
;
2402 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2404 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2405 struct l2cap_conf_req
*req
= data
;
2406 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2407 void *ptr
= req
->data
;
2409 BT_DBG("sk %p", sk
);
2411 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2415 case L2CAP_MODE_STREAMING
:
2416 case L2CAP_MODE_ERTM
:
2417 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2418 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2419 l2cap_send_disconn_req(pi
->conn
, sk
);
2422 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2428 case L2CAP_MODE_BASIC
:
2429 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2430 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2433 case L2CAP_MODE_ERTM
:
2434 rfc
.mode
= L2CAP_MODE_ERTM
;
2435 rfc
.txwin_size
= pi
->tx_win
;
2436 rfc
.max_transmit
= pi
->max_tx
;
2437 rfc
.retrans_timeout
= 0;
2438 rfc
.monitor_timeout
= 0;
2439 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2440 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2441 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2443 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2444 sizeof(rfc
), (unsigned long) &rfc
);
2446 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2449 if (pi
->fcs
== L2CAP_FCS_NONE
||
2450 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2451 pi
->fcs
= L2CAP_FCS_NONE
;
2452 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2456 case L2CAP_MODE_STREAMING
:
2457 rfc
.mode
= L2CAP_MODE_STREAMING
;
2459 rfc
.max_transmit
= 0;
2460 rfc
.retrans_timeout
= 0;
2461 rfc
.monitor_timeout
= 0;
2462 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2463 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2464 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2466 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2467 sizeof(rfc
), (unsigned long) &rfc
);
2469 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2472 if (pi
->fcs
== L2CAP_FCS_NONE
||
2473 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2474 pi
->fcs
= L2CAP_FCS_NONE
;
2475 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2480 /* FIXME: Need actual value of the flush timeout */
2481 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2482 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2484 req
->dcid
= cpu_to_le16(pi
->dcid
);
2485 req
->flags
= cpu_to_le16(0);
2490 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2492 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2493 struct l2cap_conf_rsp
*rsp
= data
;
2494 void *ptr
= rsp
->data
;
2495 void *req
= pi
->conf_req
;
2496 int len
= pi
->conf_len
;
2497 int type
, hint
, olen
;
2499 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2500 u16 mtu
= L2CAP_DEFAULT_MTU
;
2501 u16 result
= L2CAP_CONF_SUCCESS
;
2503 BT_DBG("sk %p", sk
);
2505 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2506 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2508 hint
= type
& L2CAP_CONF_HINT
;
2509 type
&= L2CAP_CONF_MASK
;
2512 case L2CAP_CONF_MTU
:
2516 case L2CAP_CONF_FLUSH_TO
:
2520 case L2CAP_CONF_QOS
:
2523 case L2CAP_CONF_RFC
:
2524 if (olen
== sizeof(rfc
))
2525 memcpy(&rfc
, (void *) val
, olen
);
2528 case L2CAP_CONF_FCS
:
2529 if (val
== L2CAP_FCS_NONE
)
2530 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2538 result
= L2CAP_CONF_UNKNOWN
;
2539 *((u8
*) ptr
++) = type
;
2544 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2548 case L2CAP_MODE_STREAMING
:
2549 case L2CAP_MODE_ERTM
:
2550 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2551 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2552 return -ECONNREFUSED
;
2555 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2560 if (pi
->mode
!= rfc
.mode
) {
2561 result
= L2CAP_CONF_UNACCEPT
;
2562 rfc
.mode
= pi
->mode
;
2564 if (pi
->num_conf_rsp
== 1)
2565 return -ECONNREFUSED
;
2567 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2568 sizeof(rfc
), (unsigned long) &rfc
);
2572 if (result
== L2CAP_CONF_SUCCESS
) {
2573 /* Configure output options and let the other side know
2574 * which ones we don't like. */
2576 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2577 result
= L2CAP_CONF_UNACCEPT
;
2580 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2582 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2585 case L2CAP_MODE_BASIC
:
2586 pi
->fcs
= L2CAP_FCS_NONE
;
2587 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2590 case L2CAP_MODE_ERTM
:
2591 pi
->remote_tx_win
= rfc
.txwin_size
;
2592 pi
->remote_max_tx
= rfc
.max_transmit
;
2593 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2594 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2596 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2598 rfc
.retrans_timeout
=
2599 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2600 rfc
.monitor_timeout
=
2601 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2603 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2605 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2606 sizeof(rfc
), (unsigned long) &rfc
);
2610 case L2CAP_MODE_STREAMING
:
2611 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2612 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2614 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2616 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2618 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2619 sizeof(rfc
), (unsigned long) &rfc
);
2624 result
= L2CAP_CONF_UNACCEPT
;
2626 memset(&rfc
, 0, sizeof(rfc
));
2627 rfc
.mode
= pi
->mode
;
2630 if (result
== L2CAP_CONF_SUCCESS
)
2631 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2633 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2634 rsp
->result
= cpu_to_le16(result
);
2635 rsp
->flags
= cpu_to_le16(0x0000);
2640 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2642 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2643 struct l2cap_conf_req
*req
= data
;
2644 void *ptr
= req
->data
;
2647 struct l2cap_conf_rfc rfc
;
2649 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2651 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2652 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2655 case L2CAP_CONF_MTU
:
2656 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2657 *result
= L2CAP_CONF_UNACCEPT
;
2658 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2661 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2664 case L2CAP_CONF_FLUSH_TO
:
2666 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2670 case L2CAP_CONF_RFC
:
2671 if (olen
== sizeof(rfc
))
2672 memcpy(&rfc
, (void *)val
, olen
);
2674 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2675 rfc
.mode
!= pi
->mode
)
2676 return -ECONNREFUSED
;
2678 pi
->mode
= rfc
.mode
;
2681 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2682 sizeof(rfc
), (unsigned long) &rfc
);
2687 if (*result
== L2CAP_CONF_SUCCESS
) {
2689 case L2CAP_MODE_ERTM
:
2690 pi
->remote_tx_win
= rfc
.txwin_size
;
2691 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2692 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2693 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2695 case L2CAP_MODE_STREAMING
:
2696 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2700 req
->dcid
= cpu_to_le16(pi
->dcid
);
2701 req
->flags
= cpu_to_le16(0x0000);
2706 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2708 struct l2cap_conf_rsp
*rsp
= data
;
2709 void *ptr
= rsp
->data
;
2711 BT_DBG("sk %p", sk
);
2713 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2714 rsp
->result
= cpu_to_le16(result
);
2715 rsp
->flags
= cpu_to_le16(flags
);
2720 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2722 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2725 struct l2cap_conf_rfc rfc
;
2727 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2729 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2732 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2733 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2736 case L2CAP_CONF_RFC
:
2737 if (olen
== sizeof(rfc
))
2738 memcpy(&rfc
, (void *)val
, olen
);
2745 case L2CAP_MODE_ERTM
:
2746 pi
->remote_tx_win
= rfc
.txwin_size
;
2747 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2748 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2749 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2751 case L2CAP_MODE_STREAMING
:
2752 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2756 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2758 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2760 if (rej
->reason
!= 0x0000)
2763 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2764 cmd
->ident
== conn
->info_ident
) {
2765 del_timer(&conn
->info_timer
);
2767 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2768 conn
->info_ident
= 0;
2770 l2cap_conn_start(conn
);
2776 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2778 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2779 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2780 struct l2cap_conn_rsp rsp
;
2781 struct sock
*sk
, *parent
;
2782 int result
, status
= L2CAP_CS_NO_INFO
;
2784 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2785 __le16 psm
= req
->psm
;
2787 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2789 /* Check if we have socket listening on psm */
2790 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2792 result
= L2CAP_CR_BAD_PSM
;
2796 /* Check if the ACL is secure enough (if not SDP) */
2797 if (psm
!= cpu_to_le16(0x0001) &&
2798 !hci_conn_check_link_mode(conn
->hcon
)) {
2799 conn
->disc_reason
= 0x05;
2800 result
= L2CAP_CR_SEC_BLOCK
;
2804 result
= L2CAP_CR_NO_MEM
;
2806 /* Check for backlog size */
2807 if (sk_acceptq_is_full(parent
)) {
2808 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2812 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2816 write_lock_bh(&list
->lock
);
2818 /* Check if we already have channel with that dcid */
2819 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2820 write_unlock_bh(&list
->lock
);
2821 sock_set_flag(sk
, SOCK_ZAPPED
);
2822 l2cap_sock_kill(sk
);
2826 hci_conn_hold(conn
->hcon
);
2828 l2cap_sock_init(sk
, parent
);
2829 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2830 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2831 l2cap_pi(sk
)->psm
= psm
;
2832 l2cap_pi(sk
)->dcid
= scid
;
2834 __l2cap_chan_add(conn
, sk
, parent
);
2835 dcid
= l2cap_pi(sk
)->scid
;
2837 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2839 l2cap_pi(sk
)->ident
= cmd
->ident
;
2841 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2842 if (l2cap_check_security(sk
)) {
2843 if (bt_sk(sk
)->defer_setup
) {
2844 sk
->sk_state
= BT_CONNECT2
;
2845 result
= L2CAP_CR_PEND
;
2846 status
= L2CAP_CS_AUTHOR_PEND
;
2847 parent
->sk_data_ready(parent
, 0);
2849 sk
->sk_state
= BT_CONFIG
;
2850 result
= L2CAP_CR_SUCCESS
;
2851 status
= L2CAP_CS_NO_INFO
;
2854 sk
->sk_state
= BT_CONNECT2
;
2855 result
= L2CAP_CR_PEND
;
2856 status
= L2CAP_CS_AUTHEN_PEND
;
2859 sk
->sk_state
= BT_CONNECT2
;
2860 result
= L2CAP_CR_PEND
;
2861 status
= L2CAP_CS_NO_INFO
;
2864 write_unlock_bh(&list
->lock
);
2867 bh_unlock_sock(parent
);
2870 rsp
.scid
= cpu_to_le16(scid
);
2871 rsp
.dcid
= cpu_to_le16(dcid
);
2872 rsp
.result
= cpu_to_le16(result
);
2873 rsp
.status
= cpu_to_le16(status
);
2874 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2876 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2877 struct l2cap_info_req info
;
2878 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2880 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2881 conn
->info_ident
= l2cap_get_ident(conn
);
2883 mod_timer(&conn
->info_timer
, jiffies
+
2884 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2886 l2cap_send_cmd(conn
, conn
->info_ident
,
2887 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2893 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2895 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2896 u16 scid
, dcid
, result
, status
;
2900 scid
= __le16_to_cpu(rsp
->scid
);
2901 dcid
= __le16_to_cpu(rsp
->dcid
);
2902 result
= __le16_to_cpu(rsp
->result
);
2903 status
= __le16_to_cpu(rsp
->status
);
2905 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2908 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2912 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2918 case L2CAP_CR_SUCCESS
:
2919 sk
->sk_state
= BT_CONFIG
;
2920 l2cap_pi(sk
)->ident
= 0;
2921 l2cap_pi(sk
)->dcid
= dcid
;
2922 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2923 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2925 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2926 l2cap_build_conf_req(sk
, req
), req
);
2927 l2cap_pi(sk
)->num_conf_req
++;
2931 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2935 l2cap_chan_del(sk
, ECONNREFUSED
);
2943 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2945 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2951 dcid
= __le16_to_cpu(req
->dcid
);
2952 flags
= __le16_to_cpu(req
->flags
);
2954 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2956 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2960 if (sk
->sk_state
== BT_DISCONN
)
2963 /* Reject if config buffer is too small. */
2964 len
= cmd_len
- sizeof(*req
);
2965 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2966 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2967 l2cap_build_conf_rsp(sk
, rsp
,
2968 L2CAP_CONF_REJECT
, flags
), rsp
);
2973 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2974 l2cap_pi(sk
)->conf_len
+= len
;
2976 if (flags
& 0x0001) {
2977 /* Incomplete config. Send empty response. */
2978 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2979 l2cap_build_conf_rsp(sk
, rsp
,
2980 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2984 /* Complete config. */
2985 len
= l2cap_parse_conf_req(sk
, rsp
);
2987 l2cap_send_disconn_req(conn
, sk
);
2991 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2992 l2cap_pi(sk
)->num_conf_rsp
++;
2994 /* Reset config buffer. */
2995 l2cap_pi(sk
)->conf_len
= 0;
2997 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3000 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3001 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3002 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3003 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3005 sk
->sk_state
= BT_CONNECTED
;
3007 l2cap_pi(sk
)->next_tx_seq
= 0;
3008 l2cap_pi(sk
)->expected_tx_seq
= 0;
3009 __skb_queue_head_init(TX_QUEUE(sk
));
3010 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3011 l2cap_ertm_init(sk
);
3013 l2cap_chan_ready(sk
);
3017 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3019 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3020 l2cap_build_conf_req(sk
, buf
), buf
);
3021 l2cap_pi(sk
)->num_conf_req
++;
3029 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3031 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3032 u16 scid
, flags
, result
;
3034 int len
= cmd
->len
- sizeof(*rsp
);
3036 scid
= __le16_to_cpu(rsp
->scid
);
3037 flags
= __le16_to_cpu(rsp
->flags
);
3038 result
= __le16_to_cpu(rsp
->result
);
3040 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3041 scid
, flags
, result
);
3043 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3048 case L2CAP_CONF_SUCCESS
:
3049 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3052 case L2CAP_CONF_UNACCEPT
:
3053 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3056 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3057 l2cap_send_disconn_req(conn
, sk
);
3061 /* throw out any old stored conf requests */
3062 result
= L2CAP_CONF_SUCCESS
;
3063 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3066 l2cap_send_disconn_req(conn
, sk
);
3070 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3071 L2CAP_CONF_REQ
, len
, req
);
3072 l2cap_pi(sk
)->num_conf_req
++;
3073 if (result
!= L2CAP_CONF_SUCCESS
)
3079 sk
->sk_state
= BT_DISCONN
;
3080 sk
->sk_err
= ECONNRESET
;
3081 l2cap_sock_set_timer(sk
, HZ
* 5);
3082 l2cap_send_disconn_req(conn
, sk
);
3089 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3091 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3092 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3093 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3094 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3096 sk
->sk_state
= BT_CONNECTED
;
3097 l2cap_pi(sk
)->next_tx_seq
= 0;
3098 l2cap_pi(sk
)->expected_tx_seq
= 0;
3099 __skb_queue_head_init(TX_QUEUE(sk
));
3100 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3101 l2cap_ertm_init(sk
);
3103 l2cap_chan_ready(sk
);
3111 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3113 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3114 struct l2cap_disconn_rsp rsp
;
3118 scid
= __le16_to_cpu(req
->scid
);
3119 dcid
= __le16_to_cpu(req
->dcid
);
3121 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3123 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3127 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3128 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3129 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3131 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3133 skb_queue_purge(TX_QUEUE(sk
));
3135 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
3136 skb_queue_purge(SREJ_QUEUE(sk
));
3137 skb_queue_purge(BUSY_QUEUE(sk
));
3138 del_timer(&l2cap_pi(sk
)->retrans_timer
);
3139 del_timer(&l2cap_pi(sk
)->monitor_timer
);
3140 del_timer(&l2cap_pi(sk
)->ack_timer
);
3143 l2cap_chan_del(sk
, ECONNRESET
);
3146 l2cap_sock_kill(sk
);
3150 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3152 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3156 scid
= __le16_to_cpu(rsp
->scid
);
3157 dcid
= __le16_to_cpu(rsp
->dcid
);
3159 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3161 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3165 skb_queue_purge(TX_QUEUE(sk
));
3167 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
3168 skb_queue_purge(SREJ_QUEUE(sk
));
3169 skb_queue_purge(BUSY_QUEUE(sk
));
3170 del_timer(&l2cap_pi(sk
)->retrans_timer
);
3171 del_timer(&l2cap_pi(sk
)->monitor_timer
);
3172 del_timer(&l2cap_pi(sk
)->ack_timer
);
3175 l2cap_chan_del(sk
, 0);
3178 l2cap_sock_kill(sk
);
3182 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3184 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3187 type
= __le16_to_cpu(req
->type
);
3189 BT_DBG("type 0x%4.4x", type
);
3191 if (type
== L2CAP_IT_FEAT_MASK
) {
3193 u32 feat_mask
= l2cap_feat_mask
;
3194 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3195 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3196 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3198 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3200 put_unaligned_le32(feat_mask
, rsp
->data
);
3201 l2cap_send_cmd(conn
, cmd
->ident
,
3202 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3203 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3205 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3206 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3207 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3208 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3209 l2cap_send_cmd(conn
, cmd
->ident
,
3210 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3212 struct l2cap_info_rsp rsp
;
3213 rsp
.type
= cpu_to_le16(type
);
3214 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3215 l2cap_send_cmd(conn
, cmd
->ident
,
3216 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3222 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3224 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3227 type
= __le16_to_cpu(rsp
->type
);
3228 result
= __le16_to_cpu(rsp
->result
);
3230 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3232 del_timer(&conn
->info_timer
);
3234 if (type
== L2CAP_IT_FEAT_MASK
) {
3235 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3237 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3238 struct l2cap_info_req req
;
3239 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3241 conn
->info_ident
= l2cap_get_ident(conn
);
3243 l2cap_send_cmd(conn
, conn
->info_ident
,
3244 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3246 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3247 conn
->info_ident
= 0;
3249 l2cap_conn_start(conn
);
3251 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3252 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3253 conn
->info_ident
= 0;
3255 l2cap_conn_start(conn
);
3261 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3263 u8
*data
= skb
->data
;
3265 struct l2cap_cmd_hdr cmd
;
3268 l2cap_raw_recv(conn
, skb
);
3270 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3272 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3273 data
+= L2CAP_CMD_HDR_SIZE
;
3274 len
-= L2CAP_CMD_HDR_SIZE
;
3276 cmd_len
= le16_to_cpu(cmd
.len
);
3278 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3280 if (cmd_len
> len
|| !cmd
.ident
) {
3281 BT_DBG("corrupted command");
3286 case L2CAP_COMMAND_REJ
:
3287 l2cap_command_rej(conn
, &cmd
, data
);
3290 case L2CAP_CONN_REQ
:
3291 err
= l2cap_connect_req(conn
, &cmd
, data
);
3294 case L2CAP_CONN_RSP
:
3295 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3298 case L2CAP_CONF_REQ
:
3299 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3302 case L2CAP_CONF_RSP
:
3303 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3306 case L2CAP_DISCONN_REQ
:
3307 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3310 case L2CAP_DISCONN_RSP
:
3311 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3314 case L2CAP_ECHO_REQ
:
3315 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3318 case L2CAP_ECHO_RSP
:
3321 case L2CAP_INFO_REQ
:
3322 err
= l2cap_information_req(conn
, &cmd
, data
);
3325 case L2CAP_INFO_RSP
:
3326 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3330 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3336 struct l2cap_cmd_rej rej
;
3337 BT_DBG("error %d", err
);
3339 /* FIXME: Map err to a valid reason */
3340 rej
.reason
= cpu_to_le16(0);
3341 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3351 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3353 u16 our_fcs
, rcv_fcs
;
3354 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3356 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3357 skb_trim(skb
, skb
->len
- 2);
3358 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3359 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3361 if (our_fcs
!= rcv_fcs
)
3367 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3369 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3372 pi
->frames_sent
= 0;
3373 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3375 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3377 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3378 control
|= L2CAP_SUPER_RCV_NOT_READY
| L2CAP_CTRL_FINAL
;
3379 l2cap_send_sframe(pi
, control
);
3380 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3381 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
3384 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&& pi
->unacked_frames
> 0)
3385 __mod_retrans_timer();
3387 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3389 spin_lock_bh(&pi
->send_lock
);
3390 l2cap_ertm_send(sk
);
3391 spin_unlock_bh(&pi
->send_lock
);
3393 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3394 pi
->frames_sent
== 0) {
3395 control
|= L2CAP_SUPER_RCV_READY
;
3396 l2cap_send_sframe(pi
, control
);
3400 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3402 struct sk_buff
*next_skb
;
3404 bt_cb(skb
)->tx_seq
= tx_seq
;
3405 bt_cb(skb
)->sar
= sar
;
3407 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3409 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3414 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3417 if (bt_cb(next_skb
)->tx_seq
> tx_seq
) {
3418 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3422 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3425 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3427 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3432 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3434 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3435 struct sk_buff
*_skb
;
3438 switch (control
& L2CAP_CTRL_SAR
) {
3439 case L2CAP_SDU_UNSEGMENTED
:
3440 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3443 err
= sock_queue_rcv_skb(sk
, skb
);
3449 case L2CAP_SDU_START
:
3450 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3453 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3455 if (pi
->sdu_len
> pi
->imtu
)
3458 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3462 /* pull sdu_len bytes only after alloc, because of Local Busy
3463 * condition we have to be sure that this will be executed
3464 * only once, i.e., when alloc does not fail */
3467 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3469 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3470 pi
->partial_sdu_len
= skb
->len
;
3473 case L2CAP_SDU_CONTINUE
:
3474 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3480 pi
->partial_sdu_len
+= skb
->len
;
3481 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3484 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3489 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3495 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3496 pi
->partial_sdu_len
+= skb
->len
;
3498 if (pi
->partial_sdu_len
> pi
->imtu
)
3501 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3504 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3507 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3509 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3513 err
= sock_queue_rcv_skb(sk
, _skb
);
3516 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3520 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3521 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3535 l2cap_send_disconn_req(pi
->conn
, sk
);
3540 static void l2cap_busy_work(struct work_struct
*work
)
3542 DECLARE_WAITQUEUE(wait
, current
);
3543 struct l2cap_pinfo
*pi
=
3544 container_of(work
, struct l2cap_pinfo
, busy_work
);
3545 struct sock
*sk
= (struct sock
*)pi
;
3546 int n_tries
= 0, timeo
= HZ
/5, err
;
3547 struct sk_buff
*skb
;
3552 add_wait_queue(sk_sleep(sk
), &wait
);
3553 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3554 set_current_state(TASK_INTERRUPTIBLE
);
3556 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3558 l2cap_send_disconn_req(pi
->conn
, sk
);
3565 if (signal_pending(current
)) {
3566 err
= sock_intr_errno(timeo
);
3571 timeo
= schedule_timeout(timeo
);
3574 err
= sock_error(sk
);
3578 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3579 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3580 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3582 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3586 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3593 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3596 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3597 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3598 l2cap_send_sframe(pi
, control
);
3599 l2cap_pi(sk
)->retry_count
= 1;
3601 del_timer(&pi
->retrans_timer
);
3602 __mod_monitor_timer();
3604 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3607 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3608 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3610 set_current_state(TASK_RUNNING
);
3611 remove_wait_queue(sk_sleep(sk
), &wait
);
3616 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3618 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3621 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3622 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3623 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3627 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3629 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3633 /* Busy Condition */
3634 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3635 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3636 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3638 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3639 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3640 l2cap_send_sframe(pi
, sctrl
);
3642 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3644 queue_work(_busy_wq
, &pi
->busy_work
);
3649 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3651 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3652 struct sk_buff
*_skb
;
3656 * TODO: We have to notify the userland if some data is lost with the
3660 switch (control
& L2CAP_CTRL_SAR
) {
3661 case L2CAP_SDU_UNSEGMENTED
:
3662 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3667 err
= sock_queue_rcv_skb(sk
, skb
);
3673 case L2CAP_SDU_START
:
3674 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3679 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3682 if (pi
->sdu_len
> pi
->imtu
) {
3687 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3693 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3695 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3696 pi
->partial_sdu_len
= skb
->len
;
3700 case L2CAP_SDU_CONTINUE
:
3701 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3704 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3706 pi
->partial_sdu_len
+= skb
->len
;
3707 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3715 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3718 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3720 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3721 pi
->partial_sdu_len
+= skb
->len
;
3723 if (pi
->partial_sdu_len
> pi
->imtu
)
3726 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3727 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3728 err
= sock_queue_rcv_skb(sk
, _skb
);
3743 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3745 struct sk_buff
*skb
;
3748 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3749 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3752 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3753 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3754 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3755 l2cap_pi(sk
)->buffer_seq_srej
=
3756 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3761 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3763 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3764 struct srej_list
*l
, *tmp
;
3767 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3768 if (l
->tx_seq
== tx_seq
) {
3773 control
= L2CAP_SUPER_SELECT_REJECT
;
3774 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3775 l2cap_send_sframe(pi
, control
);
3777 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3781 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3783 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3784 struct srej_list
*new;
3787 while (tx_seq
!= pi
->expected_tx_seq
) {
3788 control
= L2CAP_SUPER_SELECT_REJECT
;
3789 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3790 l2cap_send_sframe(pi
, control
);
3792 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3793 new->tx_seq
= pi
->expected_tx_seq
++;
3794 list_add_tail(&new->list
, SREJ_LIST(sk
));
3796 pi
->expected_tx_seq
++;
3799 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3801 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3802 u8 tx_seq
= __get_txseq(rx_control
);
3803 u8 req_seq
= __get_reqseq(rx_control
);
3804 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3805 u8 tx_seq_offset
, expected_tx_seq_offset
;
3806 int num_to_ack
= (pi
->tx_win
/6) + 1;
3809 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3811 if (L2CAP_CTRL_FINAL
& rx_control
&&
3812 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3813 del_timer(&pi
->monitor_timer
);
3814 if (pi
->unacked_frames
> 0)
3815 __mod_retrans_timer();
3816 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3819 pi
->expected_ack_seq
= req_seq
;
3820 l2cap_drop_acked_frames(sk
);
3822 if (tx_seq
== pi
->expected_tx_seq
)
3825 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3826 if (tx_seq_offset
< 0)
3827 tx_seq_offset
+= 64;
3829 /* invalid tx_seq */
3830 if (tx_seq_offset
>= pi
->tx_win
) {
3831 l2cap_send_disconn_req(pi
->conn
, sk
);
3835 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3838 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3839 struct srej_list
*first
;
3841 first
= list_first_entry(SREJ_LIST(sk
),
3842 struct srej_list
, list
);
3843 if (tx_seq
== first
->tx_seq
) {
3844 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3845 l2cap_check_srej_gap(sk
, tx_seq
);
3847 list_del(&first
->list
);
3850 if (list_empty(SREJ_LIST(sk
))) {
3851 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3852 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3856 struct srej_list
*l
;
3858 /* duplicated tx_seq */
3859 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3862 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3863 if (l
->tx_seq
== tx_seq
) {
3864 l2cap_resend_srejframe(sk
, tx_seq
);
3868 l2cap_send_srejframe(sk
, tx_seq
);
3871 expected_tx_seq_offset
=
3872 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3873 if (expected_tx_seq_offset
< 0)
3874 expected_tx_seq_offset
+= 64;
3876 /* duplicated tx_seq */
3877 if (tx_seq_offset
< expected_tx_seq_offset
)
3880 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3882 INIT_LIST_HEAD(SREJ_LIST(sk
));
3883 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3885 __skb_queue_head_init(SREJ_QUEUE(sk
));
3886 __skb_queue_head_init(BUSY_QUEUE(sk
));
3887 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3889 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3891 l2cap_send_srejframe(sk
, tx_seq
);
3896 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3898 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3899 bt_cb(skb
)->tx_seq
= tx_seq
;
3900 bt_cb(skb
)->sar
= sar
;
3901 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3905 if (rx_control
& L2CAP_CTRL_FINAL
) {
3906 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3907 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3909 l2cap_retransmit_frames(sk
);
3912 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
3918 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
3919 if (pi
->num_acked
== num_to_ack
- 1)
3929 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
3931 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3933 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3934 l2cap_drop_acked_frames(sk
);
3936 if (rx_control
& L2CAP_CTRL_POLL
) {
3937 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3938 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3939 (pi
->unacked_frames
> 0))
3940 __mod_retrans_timer();
3942 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3943 l2cap_send_srejtail(sk
);
3945 l2cap_send_i_or_rr_or_rnr(sk
);
3948 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3949 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3951 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3952 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3954 l2cap_retransmit_frames(sk
);
3957 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3958 (pi
->unacked_frames
> 0))
3959 __mod_retrans_timer();
3961 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3962 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3965 spin_lock_bh(&pi
->send_lock
);
3966 l2cap_ertm_send(sk
);
3967 spin_unlock_bh(&pi
->send_lock
);
3972 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
3974 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3975 u8 tx_seq
= __get_reqseq(rx_control
);
3977 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3979 pi
->expected_ack_seq
= tx_seq
;
3980 l2cap_drop_acked_frames(sk
);
3982 if (rx_control
& L2CAP_CTRL_FINAL
) {
3983 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3984 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3986 l2cap_retransmit_frames(sk
);
3988 l2cap_retransmit_frames(sk
);
3990 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
3991 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3994 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
3996 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3997 u8 tx_seq
= __get_reqseq(rx_control
);
3999 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4001 if (rx_control
& L2CAP_CTRL_POLL
) {
4002 pi
->expected_ack_seq
= tx_seq
;
4003 l2cap_drop_acked_frames(sk
);
4004 l2cap_retransmit_one_frame(sk
, tx_seq
);
4006 spin_lock_bh(&pi
->send_lock
);
4007 l2cap_ertm_send(sk
);
4008 spin_unlock_bh(&pi
->send_lock
);
4010 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4011 pi
->srej_save_reqseq
= tx_seq
;
4012 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4014 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4015 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4016 pi
->srej_save_reqseq
== tx_seq
)
4017 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4019 l2cap_retransmit_one_frame(sk
, tx_seq
);
4021 l2cap_retransmit_one_frame(sk
, tx_seq
);
4022 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4023 pi
->srej_save_reqseq
= tx_seq
;
4024 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4029 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4031 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4032 u8 tx_seq
= __get_reqseq(rx_control
);
4034 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4035 pi
->expected_ack_seq
= tx_seq
;
4036 l2cap_drop_acked_frames(sk
);
4038 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4039 del_timer(&pi
->retrans_timer
);
4040 if (rx_control
& L2CAP_CTRL_POLL
)
4041 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4045 if (rx_control
& L2CAP_CTRL_POLL
)
4046 l2cap_send_srejtail(sk
);
4048 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4051 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4053 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4055 if (L2CAP_CTRL_FINAL
& rx_control
&&
4056 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4057 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4058 if (l2cap_pi(sk
)->unacked_frames
> 0)
4059 __mod_retrans_timer();
4060 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4063 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4064 case L2CAP_SUPER_RCV_READY
:
4065 l2cap_data_channel_rrframe(sk
, rx_control
);
4068 case L2CAP_SUPER_REJECT
:
4069 l2cap_data_channel_rejframe(sk
, rx_control
);
4072 case L2CAP_SUPER_SELECT_REJECT
:
4073 l2cap_data_channel_srejframe(sk
, rx_control
);
4076 case L2CAP_SUPER_RCV_NOT_READY
:
4077 l2cap_data_channel_rnrframe(sk
, rx_control
);
4085 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4088 struct l2cap_pinfo
*pi
;
4090 u8 tx_seq
, req_seq
, next_tx_seq_offset
, req_seq_offset
;
4092 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4094 BT_DBG("unknown cid 0x%4.4x", cid
);
4100 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4102 if (sk
->sk_state
!= BT_CONNECTED
)
4106 case L2CAP_MODE_BASIC
:
4107 /* If socket recv buffers overflows we drop data here
4108 * which is *bad* because L2CAP has to be reliable.
4109 * But we don't have any other choice. L2CAP doesn't
4110 * provide flow control mechanism. */
4112 if (pi
->imtu
< skb
->len
)
4115 if (!sock_queue_rcv_skb(sk
, skb
))
4119 case L2CAP_MODE_ERTM
:
4120 control
= get_unaligned_le16(skb
->data
);
4124 if (__is_sar_start(control
))
4127 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4131 * We can just drop the corrupted I-frame here.
4132 * Receiver will miss it and start proper recovery
4133 * procedures and ask retransmission.
4135 if (len
> pi
->mps
) {
4136 l2cap_send_disconn_req(pi
->conn
, sk
);
4140 if (l2cap_check_fcs(pi
, skb
))
4143 req_seq
= __get_reqseq(control
);
4144 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4145 if (req_seq_offset
< 0)
4146 req_seq_offset
+= 64;
4148 next_tx_seq_offset
=
4149 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4150 if (next_tx_seq_offset
< 0)
4151 next_tx_seq_offset
+= 64;
4153 /* check for invalid req-seq */
4154 if (req_seq_offset
> next_tx_seq_offset
) {
4155 l2cap_send_disconn_req(pi
->conn
, sk
);
4159 if (__is_iframe(control
)) {
4161 l2cap_send_disconn_req(pi
->conn
, sk
);
4165 l2cap_data_channel_iframe(sk
, control
, skb
);
4168 l2cap_send_disconn_req(pi
->conn
, sk
);
4172 l2cap_data_channel_sframe(sk
, control
, skb
);
4177 case L2CAP_MODE_STREAMING
:
4178 control
= get_unaligned_le16(skb
->data
);
4182 if (__is_sar_start(control
))
4185 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4188 if (len
> pi
->mps
|| len
< 4 || __is_sframe(control
))
4191 if (l2cap_check_fcs(pi
, skb
))
4194 tx_seq
= __get_txseq(control
);
4196 if (pi
->expected_tx_seq
== tx_seq
)
4197 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4199 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4201 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4206 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4220 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4224 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4228 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4230 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4233 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4236 if (!sock_queue_rcv_skb(sk
, skb
))
4248 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4250 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4254 skb_pull(skb
, L2CAP_HDR_SIZE
);
4255 cid
= __le16_to_cpu(lh
->cid
);
4256 len
= __le16_to_cpu(lh
->len
);
4258 if (len
!= skb
->len
) {
4263 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4266 case L2CAP_CID_SIGNALING
:
4267 l2cap_sig_channel(conn
, skb
);
4270 case L2CAP_CID_CONN_LESS
:
4271 psm
= get_unaligned_le16(skb
->data
);
4273 l2cap_conless_channel(conn
, psm
, skb
);
4277 l2cap_data_channel(conn
, cid
, skb
);
4282 /* ---- L2CAP interface with lower layer (HCI) ---- */
4284 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4286 int exact
= 0, lm1
= 0, lm2
= 0;
4287 register struct sock
*sk
;
4288 struct hlist_node
*node
;
4290 if (type
!= ACL_LINK
)
4293 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4295 /* Find listening sockets and check their link_mode */
4296 read_lock(&l2cap_sk_list
.lock
);
4297 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4298 if (sk
->sk_state
!= BT_LISTEN
)
4301 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4302 lm1
|= HCI_LM_ACCEPT
;
4303 if (l2cap_pi(sk
)->role_switch
)
4304 lm1
|= HCI_LM_MASTER
;
4306 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4307 lm2
|= HCI_LM_ACCEPT
;
4308 if (l2cap_pi(sk
)->role_switch
)
4309 lm2
|= HCI_LM_MASTER
;
4312 read_unlock(&l2cap_sk_list
.lock
);
4314 return exact
? lm1
: lm2
;
4317 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4319 struct l2cap_conn
*conn
;
4321 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4323 if (hcon
->type
!= ACL_LINK
)
4327 conn
= l2cap_conn_add(hcon
, status
);
4329 l2cap_conn_ready(conn
);
4331 l2cap_conn_del(hcon
, bt_err(status
));
4336 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4338 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4340 BT_DBG("hcon %p", hcon
);
4342 if (hcon
->type
!= ACL_LINK
|| !conn
)
4345 return conn
->disc_reason
;
4348 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4350 BT_DBG("hcon %p reason %d", hcon
, reason
);
4352 if (hcon
->type
!= ACL_LINK
)
4355 l2cap_conn_del(hcon
, bt_err(reason
));
4360 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4362 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4365 if (encrypt
== 0x00) {
4366 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4367 l2cap_sock_clear_timer(sk
);
4368 l2cap_sock_set_timer(sk
, HZ
* 5);
4369 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4370 __l2cap_sock_close(sk
, ECONNREFUSED
);
4372 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4373 l2cap_sock_clear_timer(sk
);
4377 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4379 struct l2cap_chan_list
*l
;
4380 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4386 l
= &conn
->chan_list
;
4388 BT_DBG("conn %p", conn
);
4390 read_lock(&l
->lock
);
4392 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4395 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4400 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4401 sk
->sk_state
== BT_CONFIG
)) {
4402 l2cap_check_encryption(sk
, encrypt
);
4407 if (sk
->sk_state
== BT_CONNECT
) {
4409 struct l2cap_conn_req req
;
4410 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4411 req
.psm
= l2cap_pi(sk
)->psm
;
4413 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4414 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4416 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4417 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4419 l2cap_sock_clear_timer(sk
);
4420 l2cap_sock_set_timer(sk
, HZ
/ 10);
4422 } else if (sk
->sk_state
== BT_CONNECT2
) {
4423 struct l2cap_conn_rsp rsp
;
4427 sk
->sk_state
= BT_CONFIG
;
4428 result
= L2CAP_CR_SUCCESS
;
4430 sk
->sk_state
= BT_DISCONN
;
4431 l2cap_sock_set_timer(sk
, HZ
/ 10);
4432 result
= L2CAP_CR_SEC_BLOCK
;
4435 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4436 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4437 rsp
.result
= cpu_to_le16(result
);
4438 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4439 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4440 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4446 read_unlock(&l
->lock
);
4451 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4453 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4455 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4458 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4460 if (flags
& ACL_START
) {
4461 struct l2cap_hdr
*hdr
;
4465 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4466 kfree_skb(conn
->rx_skb
);
4467 conn
->rx_skb
= NULL
;
4469 l2cap_conn_unreliable(conn
, ECOMM
);
4473 BT_ERR("Frame is too short (len %d)", skb
->len
);
4474 l2cap_conn_unreliable(conn
, ECOMM
);
4478 hdr
= (struct l2cap_hdr
*) skb
->data
;
4479 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4481 if (len
== skb
->len
) {
4482 /* Complete frame received */
4483 l2cap_recv_frame(conn
, skb
);
4487 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4489 if (skb
->len
> len
) {
4490 BT_ERR("Frame is too long (len %d, expected len %d)",
4492 l2cap_conn_unreliable(conn
, ECOMM
);
4496 /* Allocate skb for the complete frame (with header) */
4497 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4501 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4503 conn
->rx_len
= len
- skb
->len
;
4505 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4507 if (!conn
->rx_len
) {
4508 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4509 l2cap_conn_unreliable(conn
, ECOMM
);
4513 if (skb
->len
> conn
->rx_len
) {
4514 BT_ERR("Fragment is too long (len %d, expected %d)",
4515 skb
->len
, conn
->rx_len
);
4516 kfree_skb(conn
->rx_skb
);
4517 conn
->rx_skb
= NULL
;
4519 l2cap_conn_unreliable(conn
, ECOMM
);
4523 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4525 conn
->rx_len
-= skb
->len
;
4527 if (!conn
->rx_len
) {
4528 /* Complete frame received */
4529 l2cap_recv_frame(conn
, conn
->rx_skb
);
4530 conn
->rx_skb
= NULL
;
4539 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4542 struct hlist_node
*node
;
4544 read_lock_bh(&l2cap_sk_list
.lock
);
4546 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4547 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4549 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4550 batostr(&bt_sk(sk
)->src
),
4551 batostr(&bt_sk(sk
)->dst
),
4552 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4554 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4557 read_unlock_bh(&l2cap_sk_list
.lock
);
4562 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4564 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4567 static const struct file_operations l2cap_debugfs_fops
= {
4568 .open
= l2cap_debugfs_open
,
4570 .llseek
= seq_lseek
,
4571 .release
= single_release
,
/* Handle of the "l2cap" debugfs file; created in l2cap_init() and removed
 * again in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
4576 static const struct proto_ops l2cap_sock_ops
= {
4577 .family
= PF_BLUETOOTH
,
4578 .owner
= THIS_MODULE
,
4579 .release
= l2cap_sock_release
,
4580 .bind
= l2cap_sock_bind
,
4581 .connect
= l2cap_sock_connect
,
4582 .listen
= l2cap_sock_listen
,
4583 .accept
= l2cap_sock_accept
,
4584 .getname
= l2cap_sock_getname
,
4585 .sendmsg
= l2cap_sock_sendmsg
,
4586 .recvmsg
= l2cap_sock_recvmsg
,
4587 .poll
= bt_sock_poll
,
4588 .ioctl
= bt_sock_ioctl
,
4589 .mmap
= sock_no_mmap
,
4590 .socketpair
= sock_no_socketpair
,
4591 .shutdown
= l2cap_sock_shutdown
,
4592 .setsockopt
= l2cap_sock_setsockopt
,
4593 .getsockopt
= l2cap_sock_getsockopt
4596 static const struct net_proto_family l2cap_sock_family_ops
= {
4597 .family
= PF_BLUETOOTH
,
4598 .owner
= THIS_MODULE
,
4599 .create
= l2cap_sock_create
,
4602 static struct hci_proto l2cap_hci_proto
= {
4604 .id
= HCI_PROTO_L2CAP
,
4605 .connect_ind
= l2cap_connect_ind
,
4606 .connect_cfm
= l2cap_connect_cfm
,
4607 .disconn_ind
= l2cap_disconn_ind
,
4608 .disconn_cfm
= l2cap_disconn_cfm
,
4609 .security_cfm
= l2cap_security_cfm
,
4610 .recv_acldata
= l2cap_recv_acldata
4613 static int __init
l2cap_init(void)
4617 err
= proto_register(&l2cap_proto
, 0);
4621 _busy_wq
= create_singlethread_workqueue("l2cap");
4625 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4627 BT_ERR("L2CAP socket registration failed");
4631 err
= hci_register_proto(&l2cap_hci_proto
);
4633 BT_ERR("L2CAP protocol registration failed");
4634 bt_sock_unregister(BTPROTO_L2CAP
);
4639 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4640 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4642 BT_ERR("Failed to create L2CAP debug file");
4645 BT_INFO("L2CAP ver %s", VERSION
);
4646 BT_INFO("L2CAP socket layer initialized");
4651 proto_unregister(&l2cap_proto
);
4655 static void __exit
l2cap_exit(void)
4657 debugfs_remove(l2cap_debugfs
);
4659 flush_workqueue(_busy_wq
);
4660 destroy_workqueue(_busy_wq
);
4662 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4663 BT_ERR("L2CAP socket unregistration failed");
4665 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4666 BT_ERR("L2CAP protocol unregistration failed");
4668 proto_unregister(&l2cap_proto
);
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
/* Module entry and exit points. */
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Tunable module parameters, writable at runtime via sysfs (mode 0644). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

module_param(tx_window, uint, 0644);
MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Auto-load this module when a socket for BT protocol number 0 (L2CAP)
 * is requested. */
MODULE_ALIAS("bt-proto-0");