2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm
= 1;
61 static int enable_ertm
= 0;
63 static int max_transmit
= L2CAP_DEFAULT_MAX_TX
;
64 static int tx_window
= L2CAP_DEFAULT_TX_WINDOW
;
66 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
67 static u8 l2cap_fixed_chan
[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops
;
71 static struct bt_sock_list l2cap_sk_list
= {
72 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
75 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
76 static void l2cap_sock_close(struct sock
*sk
);
77 static void l2cap_sock_kill(struct sock
*sk
);
79 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
80 u8 code
, u8 ident
, u16 dlen
, void *data
);
/* ---- L2CAP timers ---- */

/* sk_timer expiry: pick a close reason from the socket state and tear the
 * channel down. Runs in timer (BH) context, hence bh_lock_sock(). */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
108 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
110 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
111 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
114 static void l2cap_sock_clear_timer(struct sock
*sk
)
116 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
117 sk_stop_timer(sk
, &sk
->sk_timer
);
120 /* ---- L2CAP channels ---- */
121 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
124 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
125 if (l2cap_pi(s
)->dcid
== cid
)
131 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
134 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
135 if (l2cap_pi(s
)->scid
== cid
)
141 /* Find channel with given SCID.
142 * Returns locked socket */
143 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
147 s
= __l2cap_get_chan_by_scid(l
, cid
);
150 read_unlock(&l
->lock
);
154 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
157 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
158 if (l2cap_pi(s
)->ident
== ident
)
164 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
168 s
= __l2cap_get_chan_by_ident(l
, ident
);
171 read_unlock(&l
->lock
);
175 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
177 u16 cid
= L2CAP_CID_DYN_START
;
179 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
180 if (!__l2cap_get_chan_by_scid(l
, cid
))
187 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
192 l2cap_pi(l
->head
)->prev_c
= sk
;
194 l2cap_pi(sk
)->next_c
= l
->head
;
195 l2cap_pi(sk
)->prev_c
= NULL
;
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
201 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
203 write_lock_bh(&l
->lock
);
208 l2cap_pi(next
)->prev_c
= prev
;
210 l2cap_pi(prev
)->next_c
= next
;
211 write_unlock_bh(&l
->lock
);
216 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
218 struct l2cap_chan_list
*l
= &conn
->chan_list
;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
221 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
223 conn
->disc_reason
= 0x13;
225 l2cap_pi(sk
)->conn
= conn
;
227 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
230 } else if (sk
->sk_type
== SOCK_DGRAM
) {
231 /* Connectionless socket */
232 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
233 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
234 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
238 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
239 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
242 __l2cap_chan_link(l
, sk
);
245 bt_accept_enqueue(parent
, sk
);
249 * Must be called on the locked socket. */
250 static void l2cap_chan_del(struct sock
*sk
, int err
)
252 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
253 struct sock
*parent
= bt_sk(sk
)->parent
;
255 l2cap_sock_clear_timer(sk
);
257 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn
->chan_list
, sk
);
262 l2cap_pi(sk
)->conn
= NULL
;
263 hci_conn_put(conn
->hcon
);
266 sk
->sk_state
= BT_CLOSED
;
267 sock_set_flag(sk
, SOCK_ZAPPED
);
273 bt_accept_unlink(sk
);
274 parent
->sk_data_ready(parent
, 0);
276 sk
->sk_state_change(sk
);
279 /* Service level security */
280 static inline int l2cap_check_security(struct sock
*sk
)
282 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
285 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
286 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
287 auth_type
= HCI_AT_NO_BONDING_MITM
;
289 auth_type
= HCI_AT_NO_BONDING
;
291 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
292 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
294 switch (l2cap_pi(sk
)->sec_level
) {
295 case BT_SECURITY_HIGH
:
296 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
298 case BT_SECURITY_MEDIUM
:
299 auth_type
= HCI_AT_GENERAL_BONDING
;
302 auth_type
= HCI_AT_NO_BONDING
;
307 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
311 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
315 /* Get next available identificator.
316 * 1 - 128 are used by kernel.
317 * 129 - 199 are reserved.
318 * 200 - 254 are used by utilities like l2ping, etc.
321 spin_lock_bh(&conn
->lock
);
323 if (++conn
->tx_ident
> 128)
328 spin_unlock_bh(&conn
->lock
);
333 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
335 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
337 BT_DBG("code 0x%2.2x", code
);
342 hci_send_acl(conn
->hcon
, skb
, 0);
345 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
348 struct l2cap_hdr
*lh
;
349 struct l2cap_conn
*conn
= pi
->conn
;
350 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
352 if (pi
->fcs
== L2CAP_FCS_CRC16
)
355 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
357 count
= min_t(unsigned int, conn
->mtu
, hlen
);
358 control
|= L2CAP_CTRL_FRAME_TYPE
;
360 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
361 control
|= L2CAP_CTRL_FINAL
;
362 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
365 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
366 control
|= L2CAP_CTRL_POLL
;
367 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
370 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
374 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
375 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
376 lh
->cid
= cpu_to_le16(pi
->dcid
);
377 put_unaligned_le16(control
, skb_put(skb
, 2));
379 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
380 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
381 put_unaligned_le16(fcs
, skb_put(skb
, 2));
384 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
387 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
389 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
)
390 control
|= L2CAP_SUPER_RCV_NOT_READY
;
392 control
|= L2CAP_SUPER_RCV_READY
;
394 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
396 l2cap_send_sframe(pi
, control
);
399 static void l2cap_do_start(struct sock
*sk
)
401 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
403 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
404 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
407 if (l2cap_check_security(sk
)) {
408 struct l2cap_conn_req req
;
409 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
410 req
.psm
= l2cap_pi(sk
)->psm
;
412 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
414 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
415 L2CAP_CONN_REQ
, sizeof(req
), &req
);
418 struct l2cap_info_req req
;
419 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
421 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
422 conn
->info_ident
= l2cap_get_ident(conn
);
424 mod_timer(&conn
->info_timer
, jiffies
+
425 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
427 l2cap_send_cmd(conn
, conn
->info_ident
,
428 L2CAP_INFO_REQ
, sizeof(req
), &req
);
432 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
)
434 struct l2cap_disconn_req req
;
436 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
437 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
438 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
439 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
442 /* ---- L2CAP connections ---- */
443 static void l2cap_conn_start(struct l2cap_conn
*conn
)
445 struct l2cap_chan_list
*l
= &conn
->chan_list
;
448 BT_DBG("conn %p", conn
);
452 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
455 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
456 sk
->sk_type
!= SOCK_STREAM
) {
461 if (sk
->sk_state
== BT_CONNECT
) {
462 if (l2cap_check_security(sk
)) {
463 struct l2cap_conn_req req
;
464 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
465 req
.psm
= l2cap_pi(sk
)->psm
;
467 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
469 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
470 L2CAP_CONN_REQ
, sizeof(req
), &req
);
472 } else if (sk
->sk_state
== BT_CONNECT2
) {
473 struct l2cap_conn_rsp rsp
;
474 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
475 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
477 if (l2cap_check_security(sk
)) {
478 if (bt_sk(sk
)->defer_setup
) {
479 struct sock
*parent
= bt_sk(sk
)->parent
;
480 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
481 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
482 parent
->sk_data_ready(parent
, 0);
485 sk
->sk_state
= BT_CONFIG
;
486 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
487 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
490 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
491 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
494 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
495 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
501 read_unlock(&l
->lock
);
504 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
506 struct l2cap_chan_list
*l
= &conn
->chan_list
;
509 BT_DBG("conn %p", conn
);
513 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
516 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
517 sk
->sk_type
!= SOCK_STREAM
) {
518 l2cap_sock_clear_timer(sk
);
519 sk
->sk_state
= BT_CONNECTED
;
520 sk
->sk_state_change(sk
);
521 } else if (sk
->sk_state
== BT_CONNECT
)
527 read_unlock(&l
->lock
);
530 /* Notify sockets that we cannot guaranty reliability anymore */
531 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
533 struct l2cap_chan_list
*l
= &conn
->chan_list
;
536 BT_DBG("conn %p", conn
);
540 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
541 if (l2cap_pi(sk
)->force_reliable
)
545 read_unlock(&l
->lock
);
548 static void l2cap_info_timeout(unsigned long arg
)
550 struct l2cap_conn
*conn
= (void *) arg
;
552 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
553 conn
->info_ident
= 0;
555 l2cap_conn_start(conn
);
558 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
560 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
565 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
569 hcon
->l2cap_data
= conn
;
572 BT_DBG("hcon %p conn %p", hcon
, conn
);
574 conn
->mtu
= hcon
->hdev
->acl_mtu
;
575 conn
->src
= &hcon
->hdev
->bdaddr
;
576 conn
->dst
= &hcon
->dst
;
580 spin_lock_init(&conn
->lock
);
581 rwlock_init(&conn
->chan_list
.lock
);
583 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
584 (unsigned long) conn
);
586 conn
->disc_reason
= 0x13;
591 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
593 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
599 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
601 kfree_skb(conn
->rx_skb
);
604 while ((sk
= conn
->chan_list
.head
)) {
606 l2cap_chan_del(sk
, err
);
611 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
612 del_timer_sync(&conn
->info_timer
);
614 hcon
->l2cap_data
= NULL
;
618 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
620 struct l2cap_chan_list
*l
= &conn
->chan_list
;
621 write_lock_bh(&l
->lock
);
622 __l2cap_chan_add(conn
, sk
, parent
);
623 write_unlock_bh(&l
->lock
);
626 /* ---- Socket interface ---- */
627 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
630 struct hlist_node
*node
;
631 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
632 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
639 /* Find socket with psm and source bdaddr.
640 * Returns closest match.
642 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
644 struct sock
*sk
= NULL
, *sk1
= NULL
;
645 struct hlist_node
*node
;
647 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
648 if (state
&& sk
->sk_state
!= state
)
651 if (l2cap_pi(sk
)->psm
== psm
) {
653 if (!bacmp(&bt_sk(sk
)->src
, src
))
657 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
661 return node
? sk
: sk1
;
664 /* Find socket with given address (psm, src).
665 * Returns locked socket */
666 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
669 read_lock(&l2cap_sk_list
.lock
);
670 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
673 read_unlock(&l2cap_sk_list
.lock
);
677 static void l2cap_sock_destruct(struct sock
*sk
)
681 skb_queue_purge(&sk
->sk_receive_queue
);
682 skb_queue_purge(&sk
->sk_write_queue
);
685 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
689 BT_DBG("parent %p", parent
);
691 /* Close not yet accepted channels */
692 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
693 l2cap_sock_close(sk
);
695 parent
->sk_state
= BT_CLOSED
;
696 sock_set_flag(parent
, SOCK_ZAPPED
);
699 /* Kill socket (only if zapped and orphan)
700 * Must be called on unlocked socket.
702 static void l2cap_sock_kill(struct sock
*sk
)
704 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
707 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
709 /* Kill poor orphan */
710 bt_sock_unlink(&l2cap_sk_list
, sk
);
711 sock_set_flag(sk
, SOCK_DEAD
);
715 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
717 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
719 switch (sk
->sk_state
) {
721 l2cap_sock_cleanup_listen(sk
);
726 if (sk
->sk_type
== SOCK_SEQPACKET
||
727 sk
->sk_type
== SOCK_STREAM
) {
728 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
730 sk
->sk_state
= BT_DISCONN
;
731 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
732 l2cap_send_disconn_req(conn
, sk
);
734 l2cap_chan_del(sk
, reason
);
738 if (sk
->sk_type
== SOCK_SEQPACKET
||
739 sk
->sk_type
== SOCK_STREAM
) {
740 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
741 struct l2cap_conn_rsp rsp
;
744 if (bt_sk(sk
)->defer_setup
)
745 result
= L2CAP_CR_SEC_BLOCK
;
747 result
= L2CAP_CR_BAD_PSM
;
749 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
750 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
751 rsp
.result
= cpu_to_le16(result
);
752 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
753 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
754 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
756 l2cap_chan_del(sk
, reason
);
761 l2cap_chan_del(sk
, reason
);
765 sock_set_flag(sk
, SOCK_ZAPPED
);
770 /* Must be called on unlocked socket. */
771 static void l2cap_sock_close(struct sock
*sk
)
773 l2cap_sock_clear_timer(sk
);
775 __l2cap_sock_close(sk
, ECONNRESET
);
780 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
782 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
787 sk
->sk_type
= parent
->sk_type
;
788 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
790 pi
->imtu
= l2cap_pi(parent
)->imtu
;
791 pi
->omtu
= l2cap_pi(parent
)->omtu
;
792 pi
->mode
= l2cap_pi(parent
)->mode
;
793 pi
->fcs
= l2cap_pi(parent
)->fcs
;
794 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
795 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
796 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
797 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
798 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
800 pi
->imtu
= L2CAP_DEFAULT_MTU
;
802 if (enable_ertm
&& sk
->sk_type
== SOCK_STREAM
)
803 pi
->mode
= L2CAP_MODE_ERTM
;
805 pi
->mode
= L2CAP_MODE_BASIC
;
806 pi
->max_tx
= max_transmit
;
807 pi
->fcs
= L2CAP_FCS_CRC16
;
808 pi
->tx_win
= tx_window
;
809 pi
->sec_level
= BT_SECURITY_LOW
;
811 pi
->force_reliable
= 0;
814 /* Default config options */
816 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
817 skb_queue_head_init(TX_QUEUE(sk
));
818 skb_queue_head_init(SREJ_QUEUE(sk
));
819 INIT_LIST_HEAD(SREJ_LIST(sk
));
822 static struct proto l2cap_proto
= {
824 .owner
= THIS_MODULE
,
825 .obj_size
= sizeof(struct l2cap_pinfo
)
828 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
832 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
836 sock_init_data(sock
, sk
);
837 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
839 sk
->sk_destruct
= l2cap_sock_destruct
;
840 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
842 sock_reset_flag(sk
, SOCK_ZAPPED
);
844 sk
->sk_protocol
= proto
;
845 sk
->sk_state
= BT_OPEN
;
847 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
849 bt_sock_link(&l2cap_sk_list
, sk
);
853 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
858 BT_DBG("sock %p", sock
);
860 sock
->state
= SS_UNCONNECTED
;
862 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
863 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
864 return -ESOCKTNOSUPPORT
;
866 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
869 sock
->ops
= &l2cap_sock_ops
;
871 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
875 l2cap_sock_init(sk
, NULL
);
879 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
881 struct sock
*sk
= sock
->sk
;
882 struct sockaddr_l2 la
;
887 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
890 memset(&la
, 0, sizeof(la
));
891 len
= min_t(unsigned int, sizeof(la
), alen
);
892 memcpy(&la
, addr
, len
);
899 if (sk
->sk_state
!= BT_OPEN
) {
904 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
905 !capable(CAP_NET_BIND_SERVICE
)) {
910 write_lock_bh(&l2cap_sk_list
.lock
);
912 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
915 /* Save source address */
916 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
917 l2cap_pi(sk
)->psm
= la
.l2_psm
;
918 l2cap_pi(sk
)->sport
= la
.l2_psm
;
919 sk
->sk_state
= BT_BOUND
;
921 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
922 __le16_to_cpu(la
.l2_psm
) == 0x0003)
923 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
926 write_unlock_bh(&l2cap_sk_list
.lock
);
933 static int l2cap_do_connect(struct sock
*sk
)
935 bdaddr_t
*src
= &bt_sk(sk
)->src
;
936 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
937 struct l2cap_conn
*conn
;
938 struct hci_conn
*hcon
;
939 struct hci_dev
*hdev
;
943 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
946 hdev
= hci_get_route(dst
, src
);
948 return -EHOSTUNREACH
;
950 hci_dev_lock_bh(hdev
);
954 if (sk
->sk_type
== SOCK_RAW
) {
955 switch (l2cap_pi(sk
)->sec_level
) {
956 case BT_SECURITY_HIGH
:
957 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
959 case BT_SECURITY_MEDIUM
:
960 auth_type
= HCI_AT_DEDICATED_BONDING
;
963 auth_type
= HCI_AT_NO_BONDING
;
966 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
967 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
968 auth_type
= HCI_AT_NO_BONDING_MITM
;
970 auth_type
= HCI_AT_NO_BONDING
;
972 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
973 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
975 switch (l2cap_pi(sk
)->sec_level
) {
976 case BT_SECURITY_HIGH
:
977 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
979 case BT_SECURITY_MEDIUM
:
980 auth_type
= HCI_AT_GENERAL_BONDING
;
983 auth_type
= HCI_AT_NO_BONDING
;
988 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
989 l2cap_pi(sk
)->sec_level
, auth_type
);
993 conn
= l2cap_conn_add(hcon
, 0);
1001 /* Update source addr of the socket */
1002 bacpy(src
, conn
->src
);
1004 l2cap_chan_add(conn
, sk
, NULL
);
1006 sk
->sk_state
= BT_CONNECT
;
1007 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1009 if (hcon
->state
== BT_CONNECTED
) {
1010 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1011 sk
->sk_type
!= SOCK_STREAM
) {
1012 l2cap_sock_clear_timer(sk
);
1013 sk
->sk_state
= BT_CONNECTED
;
1019 hci_dev_unlock_bh(hdev
);
1024 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1026 struct sock
*sk
= sock
->sk
;
1027 struct sockaddr_l2 la
;
1030 BT_DBG("sk %p", sk
);
1032 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1033 addr
->sa_family
!= AF_BLUETOOTH
)
1036 memset(&la
, 0, sizeof(la
));
1037 len
= min_t(unsigned int, sizeof(la
), alen
);
1038 memcpy(&la
, addr
, len
);
1045 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1051 switch (l2cap_pi(sk
)->mode
) {
1052 case L2CAP_MODE_BASIC
:
1054 case L2CAP_MODE_ERTM
:
1055 case L2CAP_MODE_STREAMING
:
1064 switch (sk
->sk_state
) {
1068 /* Already connecting */
1072 /* Already connected */
1085 /* Set destination address and psm */
1086 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1087 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1089 err
= l2cap_do_connect(sk
);
1094 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1095 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1101 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1103 struct sock
*sk
= sock
->sk
;
1106 BT_DBG("sk %p backlog %d", sk
, backlog
);
1110 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1111 || sk
->sk_state
!= BT_BOUND
) {
1116 switch (l2cap_pi(sk
)->mode
) {
1117 case L2CAP_MODE_BASIC
:
1119 case L2CAP_MODE_ERTM
:
1120 case L2CAP_MODE_STREAMING
:
1129 if (!l2cap_pi(sk
)->psm
) {
1130 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1135 write_lock_bh(&l2cap_sk_list
.lock
);
1137 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1138 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1139 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1140 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1145 write_unlock_bh(&l2cap_sk_list
.lock
);
1151 sk
->sk_max_ack_backlog
= backlog
;
1152 sk
->sk_ack_backlog
= 0;
1153 sk
->sk_state
= BT_LISTEN
;
1160 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1162 DECLARE_WAITQUEUE(wait
, current
);
1163 struct sock
*sk
= sock
->sk
, *nsk
;
1167 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1169 if (sk
->sk_state
!= BT_LISTEN
) {
1174 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1176 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1178 /* Wait for an incoming connection. (wake-one). */
1179 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1180 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1181 set_current_state(TASK_INTERRUPTIBLE
);
1188 timeo
= schedule_timeout(timeo
);
1189 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1191 if (sk
->sk_state
!= BT_LISTEN
) {
1196 if (signal_pending(current
)) {
1197 err
= sock_intr_errno(timeo
);
1201 set_current_state(TASK_RUNNING
);
1202 remove_wait_queue(sk_sleep(sk
), &wait
);
1207 newsock
->state
= SS_CONNECTED
;
1209 BT_DBG("new socket %p", nsk
);
1216 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1218 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1219 struct sock
*sk
= sock
->sk
;
1221 BT_DBG("sock %p, sk %p", sock
, sk
);
1223 addr
->sa_family
= AF_BLUETOOTH
;
1224 *len
= sizeof(struct sockaddr_l2
);
1227 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1228 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1229 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1231 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1232 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1233 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1239 static void l2cap_monitor_timeout(unsigned long arg
)
1241 struct sock
*sk
= (void *) arg
;
1244 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1245 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
);
1250 l2cap_pi(sk
)->retry_count
++;
1251 __mod_monitor_timer();
1253 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1257 static void l2cap_retrans_timeout(unsigned long arg
)
1259 struct sock
*sk
= (void *) arg
;
1262 l2cap_pi(sk
)->retry_count
= 1;
1263 __mod_monitor_timer();
1265 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1267 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1271 static void l2cap_drop_acked_frames(struct sock
*sk
)
1273 struct sk_buff
*skb
;
1275 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1276 l2cap_pi(sk
)->unacked_frames
) {
1277 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1280 skb
= skb_dequeue(TX_QUEUE(sk
));
1283 l2cap_pi(sk
)->unacked_frames
--;
1286 if (!l2cap_pi(sk
)->unacked_frames
)
1287 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1292 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1294 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1296 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1298 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1301 static int l2cap_streaming_send(struct sock
*sk
)
1303 struct sk_buff
*skb
, *tx_skb
;
1304 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1307 while ((skb
= sk
->sk_send_head
)) {
1308 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1310 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1311 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1312 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1314 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1315 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1316 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1319 l2cap_do_send(sk
, tx_skb
);
1321 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1323 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1324 sk
->sk_send_head
= NULL
;
1326 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1328 skb
= skb_dequeue(TX_QUEUE(sk
));
1334 static void l2cap_retransmit_frame(struct sock
*sk
, u8 tx_seq
)
1336 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1337 struct sk_buff
*skb
, *tx_skb
;
1340 skb
= skb_peek(TX_QUEUE(sk
));
1345 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1348 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1351 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1353 if (pi
->remote_max_tx
&&
1354 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1355 l2cap_send_disconn_req(pi
->conn
, sk
);
1359 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1360 bt_cb(skb
)->retries
++;
1361 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1362 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1363 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1364 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1366 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1367 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1368 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1371 l2cap_do_send(sk
, tx_skb
);
1374 static int l2cap_ertm_send(struct sock
*sk
)
1376 struct sk_buff
*skb
, *tx_skb
;
1377 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1381 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
1384 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
)) &&
1385 !(pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)) {
1387 if (pi
->remote_max_tx
&&
1388 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1389 l2cap_send_disconn_req(pi
->conn
, sk
);
1393 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1395 bt_cb(skb
)->retries
++;
1397 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1398 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1399 control
|= L2CAP_CTRL_FINAL
;
1400 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1402 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1403 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1404 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1407 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1408 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1409 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1412 l2cap_do_send(sk
, tx_skb
);
1414 __mod_retrans_timer();
1416 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1417 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1419 pi
->unacked_frames
++;
1422 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1423 sk
->sk_send_head
= NULL
;
1425 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1433 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1435 struct sock
*sk
= (struct sock
*)pi
;
1438 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1440 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1441 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1442 l2cap_send_sframe(pi
, control
);
1444 } else if (l2cap_ertm_send(sk
) == 0) {
1445 control
|= L2CAP_SUPER_RCV_READY
;
1446 l2cap_send_sframe(pi
, control
);
1450 static void l2cap_send_srejtail(struct sock
*sk
)
1452 struct srej_list
*tail
;
1455 control
= L2CAP_SUPER_SELECT_REJECT
;
1456 control
|= L2CAP_CTRL_FINAL
;
1458 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1459 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1461 l2cap_send_sframe(l2cap_pi(sk
), control
);
1464 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1466 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1467 struct sk_buff
**frag
;
1470 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1476 /* Continuation fragments (no L2CAP header) */
1477 frag
= &skb_shinfo(skb
)->frag_list
;
1479 count
= min_t(unsigned int, conn
->mtu
, len
);
1481 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1484 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1490 frag
= &(*frag
)->next
;
1496 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1498 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1499 struct sk_buff
*skb
;
1500 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1501 struct l2cap_hdr
*lh
;
1503 BT_DBG("sk %p len %d", sk
, (int)len
);
1505 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1506 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1507 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1509 return ERR_PTR(-ENOMEM
);
1511 /* Create L2CAP header */
1512 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1513 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1514 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1515 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1517 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1518 if (unlikely(err
< 0)) {
1520 return ERR_PTR(err
);
1525 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1527 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1528 struct sk_buff
*skb
;
1529 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1530 struct l2cap_hdr
*lh
;
1532 BT_DBG("sk %p len %d", sk
, (int)len
);
1534 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1535 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1536 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1538 return ERR_PTR(-ENOMEM
);
1540 /* Create L2CAP header */
1541 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1542 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1543 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1545 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1546 if (unlikely(err
< 0)) {
1548 return ERR_PTR(err
);
1553 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1555 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1556 struct sk_buff
*skb
;
1557 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1558 struct l2cap_hdr
*lh
;
1560 BT_DBG("sk %p len %d", sk
, (int)len
);
1563 return ERR_PTR(-ENOTCONN
);
1568 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1571 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1572 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1573 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1575 return ERR_PTR(-ENOMEM
);
1577 /* Create L2CAP header */
1578 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1579 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1580 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1581 put_unaligned_le16(control
, skb_put(skb
, 2));
1583 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1585 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1586 if (unlikely(err
< 0)) {
1588 return ERR_PTR(err
);
1591 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1592 put_unaligned_le16(0, skb_put(skb
, 2));
1594 bt_cb(skb
)->retries
= 0;
1598 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1600 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1601 struct sk_buff
*skb
;
1602 struct sk_buff_head sar_queue
;
1606 skb_queue_head_init(&sar_queue
);
1607 control
= L2CAP_SDU_START
;
1608 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1610 return PTR_ERR(skb
);
1612 __skb_queue_tail(&sar_queue
, skb
);
1613 len
-= pi
->remote_mps
;
1614 size
+= pi
->remote_mps
;
1619 if (len
> pi
->remote_mps
) {
1620 control
= L2CAP_SDU_CONTINUE
;
1621 buflen
= pi
->remote_mps
;
1623 control
= L2CAP_SDU_END
;
1627 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1629 skb_queue_purge(&sar_queue
);
1630 return PTR_ERR(skb
);
1633 __skb_queue_tail(&sar_queue
, skb
);
1637 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1638 if (sk
->sk_send_head
== NULL
)
1639 sk
->sk_send_head
= sar_queue
.next
;
1644 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1646 struct sock
*sk
= sock
->sk
;
1647 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1648 struct sk_buff
*skb
;
1652 BT_DBG("sock %p, sk %p", sock
, sk
);
1654 err
= sock_error(sk
);
1658 if (msg
->msg_flags
& MSG_OOB
)
1663 if (sk
->sk_state
!= BT_CONNECTED
) {
1668 /* Connectionless channel */
1669 if (sk
->sk_type
== SOCK_DGRAM
) {
1670 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1674 l2cap_do_send(sk
, skb
);
1681 case L2CAP_MODE_BASIC
:
1682 /* Check outgoing MTU */
1683 if (len
> pi
->omtu
) {
1688 /* Create a basic PDU */
1689 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1695 l2cap_do_send(sk
, skb
);
1699 case L2CAP_MODE_ERTM
:
1700 case L2CAP_MODE_STREAMING
:
1701 /* Entire SDU fits into one PDU */
1702 if (len
<= pi
->remote_mps
) {
1703 control
= L2CAP_SDU_UNSEGMENTED
;
1704 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1709 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1710 if (sk
->sk_send_head
== NULL
)
1711 sk
->sk_send_head
= skb
;
1713 /* Segment SDU into multiples PDUs */
1714 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1719 if (pi
->mode
== L2CAP_MODE_STREAMING
)
1720 err
= l2cap_streaming_send(sk
);
1722 err
= l2cap_ertm_send(sk
);
1729 BT_DBG("bad state %1.1x", pi
->mode
);
1738 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1740 struct sock
*sk
= sock
->sk
;
1744 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1745 struct l2cap_conn_rsp rsp
;
1747 sk
->sk_state
= BT_CONFIG
;
1749 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1750 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1751 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1752 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1753 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1754 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1762 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1765 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1767 struct sock
*sk
= sock
->sk
;
1768 struct l2cap_options opts
;
1772 BT_DBG("sk %p", sk
);
1778 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1779 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1780 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1781 opts
.mode
= l2cap_pi(sk
)->mode
;
1782 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1783 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1784 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1786 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1787 if (copy_from_user((char *) &opts
, optval
, len
)) {
1792 l2cap_pi(sk
)->mode
= opts
.mode
;
1793 switch (l2cap_pi(sk
)->mode
) {
1794 case L2CAP_MODE_BASIC
:
1796 case L2CAP_MODE_ERTM
:
1797 case L2CAP_MODE_STREAMING
:
1806 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1807 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1808 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1809 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1810 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
1814 if (get_user(opt
, (u32 __user
*) optval
)) {
1819 if (opt
& L2CAP_LM_AUTH
)
1820 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1821 if (opt
& L2CAP_LM_ENCRYPT
)
1822 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1823 if (opt
& L2CAP_LM_SECURE
)
1824 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1826 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1827 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1839 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1841 struct sock
*sk
= sock
->sk
;
1842 struct bt_security sec
;
1846 BT_DBG("sk %p", sk
);
1848 if (level
== SOL_L2CAP
)
1849 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1851 if (level
!= SOL_BLUETOOTH
)
1852 return -ENOPROTOOPT
;
1858 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
1859 && sk
->sk_type
!= SOCK_RAW
) {
1864 sec
.level
= BT_SECURITY_LOW
;
1866 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1867 if (copy_from_user((char *) &sec
, optval
, len
)) {
1872 if (sec
.level
< BT_SECURITY_LOW
||
1873 sec
.level
> BT_SECURITY_HIGH
) {
1878 l2cap_pi(sk
)->sec_level
= sec
.level
;
1881 case BT_DEFER_SETUP
:
1882 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1887 if (get_user(opt
, (u32 __user
*) optval
)) {
1892 bt_sk(sk
)->defer_setup
= opt
;
1904 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1906 struct sock
*sk
= sock
->sk
;
1907 struct l2cap_options opts
;
1908 struct l2cap_conninfo cinfo
;
1912 BT_DBG("sk %p", sk
);
1914 if (get_user(len
, optlen
))
1921 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1922 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1923 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1924 opts
.mode
= l2cap_pi(sk
)->mode
;
1925 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1926 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1927 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1929 len
= min_t(unsigned int, len
, sizeof(opts
));
1930 if (copy_to_user(optval
, (char *) &opts
, len
))
1936 switch (l2cap_pi(sk
)->sec_level
) {
1937 case BT_SECURITY_LOW
:
1938 opt
= L2CAP_LM_AUTH
;
1940 case BT_SECURITY_MEDIUM
:
1941 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
1943 case BT_SECURITY_HIGH
:
1944 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
1952 if (l2cap_pi(sk
)->role_switch
)
1953 opt
|= L2CAP_LM_MASTER
;
1955 if (l2cap_pi(sk
)->force_reliable
)
1956 opt
|= L2CAP_LM_RELIABLE
;
1958 if (put_user(opt
, (u32 __user
*) optval
))
1962 case L2CAP_CONNINFO
:
1963 if (sk
->sk_state
!= BT_CONNECTED
&&
1964 !(sk
->sk_state
== BT_CONNECT2
&&
1965 bt_sk(sk
)->defer_setup
)) {
1970 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
1971 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
1973 len
= min_t(unsigned int, len
, sizeof(cinfo
));
1974 if (copy_to_user(optval
, (char *) &cinfo
, len
))
1988 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1990 struct sock
*sk
= sock
->sk
;
1991 struct bt_security sec
;
1994 BT_DBG("sk %p", sk
);
1996 if (level
== SOL_L2CAP
)
1997 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
1999 if (level
!= SOL_BLUETOOTH
)
2000 return -ENOPROTOOPT
;
2002 if (get_user(len
, optlen
))
2009 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2010 && sk
->sk_type
!= SOCK_RAW
) {
2015 sec
.level
= l2cap_pi(sk
)->sec_level
;
2017 len
= min_t(unsigned int, len
, sizeof(sec
));
2018 if (copy_to_user(optval
, (char *) &sec
, len
))
2023 case BT_DEFER_SETUP
:
2024 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2029 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2043 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2045 struct sock
*sk
= sock
->sk
;
2048 BT_DBG("sock %p, sk %p", sock
, sk
);
2054 if (!sk
->sk_shutdown
) {
2055 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2056 l2cap_sock_clear_timer(sk
);
2057 __l2cap_sock_close(sk
, 0);
2059 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2060 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2067 static int l2cap_sock_release(struct socket
*sock
)
2069 struct sock
*sk
= sock
->sk
;
2072 BT_DBG("sock %p, sk %p", sock
, sk
);
2077 err
= l2cap_sock_shutdown(sock
, 2);
2080 l2cap_sock_kill(sk
);
2084 static void l2cap_chan_ready(struct sock
*sk
)
2086 struct sock
*parent
= bt_sk(sk
)->parent
;
2088 BT_DBG("sk %p, parent %p", sk
, parent
);
2090 l2cap_pi(sk
)->conf_state
= 0;
2091 l2cap_sock_clear_timer(sk
);
2094 /* Outgoing channel.
2095 * Wake up socket sleeping on connect.
2097 sk
->sk_state
= BT_CONNECTED
;
2098 sk
->sk_state_change(sk
);
2100 /* Incoming channel.
2101 * Wake up socket sleeping on accept.
2103 parent
->sk_data_ready(parent
, 0);
2107 /* Copy frame to all raw sockets on that connection */
2108 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2110 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2111 struct sk_buff
*nskb
;
2114 BT_DBG("conn %p", conn
);
2116 read_lock(&l
->lock
);
2117 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2118 if (sk
->sk_type
!= SOCK_RAW
)
2121 /* Don't send frame to the socket it came from */
2124 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2128 if (sock_queue_rcv_skb(sk
, nskb
))
2131 read_unlock(&l
->lock
);
2134 /* ---- L2CAP signalling commands ---- */
2135 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2136 u8 code
, u8 ident
, u16 dlen
, void *data
)
2138 struct sk_buff
*skb
, **frag
;
2139 struct l2cap_cmd_hdr
*cmd
;
2140 struct l2cap_hdr
*lh
;
2143 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2144 conn
, code
, ident
, dlen
);
2146 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2147 count
= min_t(unsigned int, conn
->mtu
, len
);
2149 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2153 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2154 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2155 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2157 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2160 cmd
->len
= cpu_to_le16(dlen
);
2163 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2164 memcpy(skb_put(skb
, count
), data
, count
);
2170 /* Continuation fragments (no L2CAP header) */
2171 frag
= &skb_shinfo(skb
)->frag_list
;
2173 count
= min_t(unsigned int, conn
->mtu
, len
);
2175 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2179 memcpy(skb_put(*frag
, count
), data
, count
);
2184 frag
= &(*frag
)->next
;
2194 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2196 struct l2cap_conf_opt
*opt
= *ptr
;
2199 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2207 *val
= *((u8
*) opt
->val
);
2211 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2215 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2219 *val
= (unsigned long) opt
->val
;
2223 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2227 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2229 struct l2cap_conf_opt
*opt
= *ptr
;
2231 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2238 *((u8
*) opt
->val
) = val
;
2242 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2246 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2250 memcpy(opt
->val
, (void *) val
, len
);
2254 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2257 static void l2cap_ack_timeout(unsigned long arg
)
2259 struct sock
*sk
= (void *) arg
;
2262 l2cap_send_ack(l2cap_pi(sk
));
2266 static inline void l2cap_ertm_init(struct sock
*sk
)
2268 l2cap_pi(sk
)->expected_ack_seq
= 0;
2269 l2cap_pi(sk
)->unacked_frames
= 0;
2270 l2cap_pi(sk
)->buffer_seq
= 0;
2271 l2cap_pi(sk
)->num_acked
= 0;
2272 l2cap_pi(sk
)->frames_sent
= 0;
2274 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2275 l2cap_retrans_timeout
, (unsigned long) sk
);
2276 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2277 l2cap_monitor_timeout
, (unsigned long) sk
);
2278 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2279 l2cap_ack_timeout
, (unsigned long) sk
);
2281 __skb_queue_head_init(SREJ_QUEUE(sk
));
2284 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2286 u32 local_feat_mask
= l2cap_feat_mask
;
2288 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2291 case L2CAP_MODE_ERTM
:
2292 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2293 case L2CAP_MODE_STREAMING
:
2294 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2300 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2303 case L2CAP_MODE_STREAMING
:
2304 case L2CAP_MODE_ERTM
:
2305 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2309 return L2CAP_MODE_BASIC
;
2313 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2315 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2316 struct l2cap_conf_req
*req
= data
;
2317 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2318 void *ptr
= req
->data
;
2320 BT_DBG("sk %p", sk
);
2322 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2326 case L2CAP_MODE_STREAMING
:
2327 case L2CAP_MODE_ERTM
:
2328 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2329 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2330 l2cap_send_disconn_req(pi
->conn
, sk
);
2333 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2339 case L2CAP_MODE_BASIC
:
2340 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2341 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2344 case L2CAP_MODE_ERTM
:
2345 rfc
.mode
= L2CAP_MODE_ERTM
;
2346 rfc
.txwin_size
= pi
->tx_win
;
2347 rfc
.max_transmit
= pi
->max_tx
;
2348 rfc
.retrans_timeout
= 0;
2349 rfc
.monitor_timeout
= 0;
2350 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2351 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2352 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2354 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2355 sizeof(rfc
), (unsigned long) &rfc
);
2357 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2360 if (pi
->fcs
== L2CAP_FCS_NONE
||
2361 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2362 pi
->fcs
= L2CAP_FCS_NONE
;
2363 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2367 case L2CAP_MODE_STREAMING
:
2368 rfc
.mode
= L2CAP_MODE_STREAMING
;
2370 rfc
.max_transmit
= 0;
2371 rfc
.retrans_timeout
= 0;
2372 rfc
.monitor_timeout
= 0;
2373 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2374 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2375 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2377 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2378 sizeof(rfc
), (unsigned long) &rfc
);
2380 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2383 if (pi
->fcs
== L2CAP_FCS_NONE
||
2384 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2385 pi
->fcs
= L2CAP_FCS_NONE
;
2386 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2391 /* FIXME: Need actual value of the flush timeout */
2392 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2393 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2395 req
->dcid
= cpu_to_le16(pi
->dcid
);
2396 req
->flags
= cpu_to_le16(0);
2401 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2403 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2404 struct l2cap_conf_rsp
*rsp
= data
;
2405 void *ptr
= rsp
->data
;
2406 void *req
= pi
->conf_req
;
2407 int len
= pi
->conf_len
;
2408 int type
, hint
, olen
;
2410 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2411 u16 mtu
= L2CAP_DEFAULT_MTU
;
2412 u16 result
= L2CAP_CONF_SUCCESS
;
2414 BT_DBG("sk %p", sk
);
2416 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2417 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2419 hint
= type
& L2CAP_CONF_HINT
;
2420 type
&= L2CAP_CONF_MASK
;
2423 case L2CAP_CONF_MTU
:
2427 case L2CAP_CONF_FLUSH_TO
:
2431 case L2CAP_CONF_QOS
:
2434 case L2CAP_CONF_RFC
:
2435 if (olen
== sizeof(rfc
))
2436 memcpy(&rfc
, (void *) val
, olen
);
2439 case L2CAP_CONF_FCS
:
2440 if (val
== L2CAP_FCS_NONE
)
2441 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2449 result
= L2CAP_CONF_UNKNOWN
;
2450 *((u8
*) ptr
++) = type
;
2455 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2459 case L2CAP_MODE_STREAMING
:
2460 case L2CAP_MODE_ERTM
:
2461 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2462 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2463 return -ECONNREFUSED
;
2466 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2471 if (pi
->mode
!= rfc
.mode
) {
2472 result
= L2CAP_CONF_UNACCEPT
;
2473 rfc
.mode
= pi
->mode
;
2475 if (pi
->num_conf_rsp
== 1)
2476 return -ECONNREFUSED
;
2478 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2479 sizeof(rfc
), (unsigned long) &rfc
);
2483 if (result
== L2CAP_CONF_SUCCESS
) {
2484 /* Configure output options and let the other side know
2485 * which ones we don't like. */
2487 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2488 result
= L2CAP_CONF_UNACCEPT
;
2491 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2493 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2496 case L2CAP_MODE_BASIC
:
2497 pi
->fcs
= L2CAP_FCS_NONE
;
2498 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2501 case L2CAP_MODE_ERTM
:
2502 pi
->remote_tx_win
= rfc
.txwin_size
;
2503 pi
->remote_max_tx
= rfc
.max_transmit
;
2504 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2505 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2507 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2509 rfc
.retrans_timeout
=
2510 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2511 rfc
.monitor_timeout
=
2512 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2514 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2516 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2517 sizeof(rfc
), (unsigned long) &rfc
);
2521 case L2CAP_MODE_STREAMING
:
2522 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2523 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2525 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2527 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2529 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2530 sizeof(rfc
), (unsigned long) &rfc
);
2535 result
= L2CAP_CONF_UNACCEPT
;
2537 memset(&rfc
, 0, sizeof(rfc
));
2538 rfc
.mode
= pi
->mode
;
2541 if (result
== L2CAP_CONF_SUCCESS
)
2542 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2544 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2545 rsp
->result
= cpu_to_le16(result
);
2546 rsp
->flags
= cpu_to_le16(0x0000);
2551 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2553 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2554 struct l2cap_conf_req
*req
= data
;
2555 void *ptr
= req
->data
;
2558 struct l2cap_conf_rfc rfc
;
2560 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2562 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2563 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2566 case L2CAP_CONF_MTU
:
2567 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2568 *result
= L2CAP_CONF_UNACCEPT
;
2569 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2572 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2575 case L2CAP_CONF_FLUSH_TO
:
2577 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2581 case L2CAP_CONF_RFC
:
2582 if (olen
== sizeof(rfc
))
2583 memcpy(&rfc
, (void *)val
, olen
);
2585 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2586 rfc
.mode
!= pi
->mode
)
2587 return -ECONNREFUSED
;
2589 pi
->mode
= rfc
.mode
;
2592 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2593 sizeof(rfc
), (unsigned long) &rfc
);
2598 if (*result
== L2CAP_CONF_SUCCESS
) {
2600 case L2CAP_MODE_ERTM
:
2601 pi
->remote_tx_win
= rfc
.txwin_size
;
2602 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2603 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2604 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2606 case L2CAP_MODE_STREAMING
:
2607 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2611 req
->dcid
= cpu_to_le16(pi
->dcid
);
2612 req
->flags
= cpu_to_le16(0x0000);
2617 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2619 struct l2cap_conf_rsp
*rsp
= data
;
2620 void *ptr
= rsp
->data
;
2622 BT_DBG("sk %p", sk
);
2624 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2625 rsp
->result
= cpu_to_le16(result
);
2626 rsp
->flags
= cpu_to_le16(flags
);
2631 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2633 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2636 struct l2cap_conf_rfc rfc
;
2638 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2640 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2643 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2644 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2647 case L2CAP_CONF_RFC
:
2648 if (olen
== sizeof(rfc
))
2649 memcpy(&rfc
, (void *)val
, olen
);
2656 case L2CAP_MODE_ERTM
:
2657 pi
->remote_tx_win
= rfc
.txwin_size
;
2658 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2659 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2660 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2662 case L2CAP_MODE_STREAMING
:
2663 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2667 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2669 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2671 if (rej
->reason
!= 0x0000)
2674 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2675 cmd
->ident
== conn
->info_ident
) {
2676 del_timer(&conn
->info_timer
);
2678 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2679 conn
->info_ident
= 0;
2681 l2cap_conn_start(conn
);
2687 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2689 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2690 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2691 struct l2cap_conn_rsp rsp
;
2692 struct sock
*sk
, *parent
;
2693 int result
, status
= L2CAP_CS_NO_INFO
;
2695 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2696 __le16 psm
= req
->psm
;
2698 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2700 /* Check if we have socket listening on psm */
2701 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2703 result
= L2CAP_CR_BAD_PSM
;
2707 /* Check if the ACL is secure enough (if not SDP) */
2708 if (psm
!= cpu_to_le16(0x0001) &&
2709 !hci_conn_check_link_mode(conn
->hcon
)) {
2710 conn
->disc_reason
= 0x05;
2711 result
= L2CAP_CR_SEC_BLOCK
;
2715 result
= L2CAP_CR_NO_MEM
;
2717 /* Check for backlog size */
2718 if (sk_acceptq_is_full(parent
)) {
2719 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2723 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2727 write_lock_bh(&list
->lock
);
2729 /* Check if we already have channel with that dcid */
2730 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2731 write_unlock_bh(&list
->lock
);
2732 sock_set_flag(sk
, SOCK_ZAPPED
);
2733 l2cap_sock_kill(sk
);
2737 hci_conn_hold(conn
->hcon
);
2739 l2cap_sock_init(sk
, parent
);
2740 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2741 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2742 l2cap_pi(sk
)->psm
= psm
;
2743 l2cap_pi(sk
)->dcid
= scid
;
2745 __l2cap_chan_add(conn
, sk
, parent
);
2746 dcid
= l2cap_pi(sk
)->scid
;
2748 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2750 l2cap_pi(sk
)->ident
= cmd
->ident
;
2752 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2753 if (l2cap_check_security(sk
)) {
2754 if (bt_sk(sk
)->defer_setup
) {
2755 sk
->sk_state
= BT_CONNECT2
;
2756 result
= L2CAP_CR_PEND
;
2757 status
= L2CAP_CS_AUTHOR_PEND
;
2758 parent
->sk_data_ready(parent
, 0);
2760 sk
->sk_state
= BT_CONFIG
;
2761 result
= L2CAP_CR_SUCCESS
;
2762 status
= L2CAP_CS_NO_INFO
;
2765 sk
->sk_state
= BT_CONNECT2
;
2766 result
= L2CAP_CR_PEND
;
2767 status
= L2CAP_CS_AUTHEN_PEND
;
2770 sk
->sk_state
= BT_CONNECT2
;
2771 result
= L2CAP_CR_PEND
;
2772 status
= L2CAP_CS_NO_INFO
;
2775 write_unlock_bh(&list
->lock
);
2778 bh_unlock_sock(parent
);
2781 rsp
.scid
= cpu_to_le16(scid
);
2782 rsp
.dcid
= cpu_to_le16(dcid
);
2783 rsp
.result
= cpu_to_le16(result
);
2784 rsp
.status
= cpu_to_le16(status
);
2785 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2787 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2788 struct l2cap_info_req info
;
2789 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2791 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2792 conn
->info_ident
= l2cap_get_ident(conn
);
2794 mod_timer(&conn
->info_timer
, jiffies
+
2795 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2797 l2cap_send_cmd(conn
, conn
->info_ident
,
2798 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2804 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2806 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2807 u16 scid
, dcid
, result
, status
;
2811 scid
= __le16_to_cpu(rsp
->scid
);
2812 dcid
= __le16_to_cpu(rsp
->dcid
);
2813 result
= __le16_to_cpu(rsp
->result
);
2814 status
= __le16_to_cpu(rsp
->status
);
2816 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2819 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2823 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2829 case L2CAP_CR_SUCCESS
:
2830 sk
->sk_state
= BT_CONFIG
;
2831 l2cap_pi(sk
)->ident
= 0;
2832 l2cap_pi(sk
)->dcid
= dcid
;
2833 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2835 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2837 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2838 l2cap_build_conf_req(sk
, req
), req
);
2839 l2cap_pi(sk
)->num_conf_req
++;
2843 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2847 l2cap_chan_del(sk
, ECONNREFUSED
);
2855 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2857 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2863 dcid
= __le16_to_cpu(req
->dcid
);
2864 flags
= __le16_to_cpu(req
->flags
);
2866 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2868 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2872 if (sk
->sk_state
== BT_DISCONN
)
2875 /* Reject if config buffer is too small. */
2876 len
= cmd_len
- sizeof(*req
);
2877 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2878 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2879 l2cap_build_conf_rsp(sk
, rsp
,
2880 L2CAP_CONF_REJECT
, flags
), rsp
);
2885 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2886 l2cap_pi(sk
)->conf_len
+= len
;
2888 if (flags
& 0x0001) {
2889 /* Incomplete config. Send empty response. */
2890 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2891 l2cap_build_conf_rsp(sk
, rsp
,
2892 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2896 /* Complete config. */
2897 len
= l2cap_parse_conf_req(sk
, rsp
);
2899 l2cap_send_disconn_req(conn
, sk
);
2903 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2904 l2cap_pi(sk
)->num_conf_rsp
++;
2906 /* Reset config buffer. */
2907 l2cap_pi(sk
)->conf_len
= 0;
2909 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2912 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2913 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2914 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2915 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2917 sk
->sk_state
= BT_CONNECTED
;
2919 l2cap_pi(sk
)->next_tx_seq
= 0;
2920 l2cap_pi(sk
)->expected_tx_seq
= 0;
2921 __skb_queue_head_init(TX_QUEUE(sk
));
2922 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2923 l2cap_ertm_init(sk
);
2925 l2cap_chan_ready(sk
);
2929 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2931 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2932 l2cap_build_conf_req(sk
, buf
), buf
);
2933 l2cap_pi(sk
)->num_conf_req
++;
/* Handle an incoming L2CAP Configure Response on the signalling channel.
 * On UNACCEPT, re-negotiate a bounded number of times; on failure paths
 * the channel is torn down with a Disconnect Request.
 * NOTE(review): this excerpt elides several original lines (enclosing
 * braces, 'sk'/'req' declarations, the switch header, lock/unlock and
 * return paths); visible code tokens are kept verbatim. */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	/* Length of the option payload following the fixed response header. */
	int len = cmd->len - sizeof(*rsp);

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	/* The response's scid names our local channel endpoint. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);

	case L2CAP_CONF_SUCCESS:
		/* Peer accepted our options: record the agreed RFC values. */
		l2cap_conf_rfc_get(sk, rsp->data, len);

	case L2CAP_CONF_UNACCEPT:
		/* Peer rejected options: retry, but only up to the cap. */
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				/* Rejected options won't fit in our buffer. */
				l2cap_send_disconn_req(conn, sk);
			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
			l2cap_send_disconn_req(conn, sk);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
	/* Unrecoverable result: mark disconnecting and tear down. */
	sk->sk_state = BT_DISCONN;
	sk->sk_err = ECONNRESET;
	l2cap_sock_set_timer(sk, HZ * 5);
	l2cap_send_disconn_req(conn, sk);

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		/* Both directions configured: enable CRC16 FCS unless the
		 * peer requested none and we agreed. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
/* Handle an incoming L2CAP Disconnect Request: acknowledge with a
 * Disconnect Response, stop ERTM machinery, and kill the channel.
 * NOTE(review): this excerpt elides some lines (scid/dcid declarations,
 * 'sk' lookup failure handling, lock/unlock, return). */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The request's dcid is our local channel id. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* Drop anything still pending for transmission. */
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Stop all ERTM retransmission/monitor/ack machinery. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

	l2cap_chan_del(sk, ECONNRESET);

	l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Disconnect Response: our earlier Disconnect
 * Request was accepted, so finish tearing the channel down (error 0).
 * NOTE(review): this excerpt elides some lines (scid/dcid declarations,
 * 'sk' lookup failure handling, lock/unlock, return). */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Stop all ERTM retransmission/monitor/ack machinery. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

	/* Clean shutdown: no error reported to the socket. */
	l2cap_chan_del(sk, 0);

	l2cap_sock_kill(sk);
3092 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3094 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3097 type
= __le16_to_cpu(req
->type
);
3099 BT_DBG("type 0x%4.4x", type
);
3101 if (type
== L2CAP_IT_FEAT_MASK
) {
3103 u32 feat_mask
= l2cap_feat_mask
;
3104 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3105 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3106 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3108 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3110 put_unaligned_le32(feat_mask
, rsp
->data
);
3111 l2cap_send_cmd(conn
, cmd
->ident
,
3112 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3113 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3115 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3116 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3117 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3118 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3119 l2cap_send_cmd(conn
, cmd
->ident
,
3120 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3122 struct l2cap_info_rsp rsp
;
3123 rsp
.type
= cpu_to_le16(type
);
3124 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3125 l2cap_send_cmd(conn
, cmd
->ident
,
3126 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
/* Handle an incoming L2CAP Information Response: cache the peer's
 * feature mask, chain a FIXED_CHAN query if advertised, then let any
 * pending channels proceed via l2cap_conn_start().
 * NOTE(review): this excerpt elides some lines (type/result declarations,
 * else branches, return paths). */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* Response arrived: the info-request guard timer is obsolete. */
	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask which ones. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);

			/* No fixed-channel support: info exchange is done. */
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);

	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): iterate over
 * the concatenated commands in the skb and dispatch each by opcode.
 * Any handler error is answered with a Command Reject.
 * NOTE(review): this excerpt elides many lines (len/cmd_len/err
 * declarations, the switch header, break statements, skb_pull/kfree_skb
 * and the loop/exit structure); visible tokens kept verbatim. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command longer than the remaining data, or with the
		 * reserved ident 0, is malformed. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");

		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);

		case L2CAP_ECHO_RSP:

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);

		BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);

			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3261 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3263 u16 our_fcs
, rcv_fcs
;
3264 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3266 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3267 skb_trim(skb
, skb
->len
- 2);
3268 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3269 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3271 if (our_fcs
!= rcv_fcs
)
/* After local-busy/poll handling, answer the peer with pending I-frames
 * if any, otherwise an RR (or RNR when locally busy), carrying the F-bit.
 * NOTE(review): this excerpt elides the 'u16 control' declaration and
 * some closing braces. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	pi->frames_sent = 0;
	/* Request that the next outgoing frame carries the F-bit. */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Locally busy: tell the peer to stop with RNR+F. */
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
		__mod_retrans_timer();

	/* Try to flush queued I-frames (they can carry the F-bit). */
	l2cap_ertm_send(sk);

	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		/* Nothing was sent: acknowledge explicitly with RR. */
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
3305 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3307 struct sk_buff
*next_skb
;
3309 bt_cb(skb
)->tx_seq
= tx_seq
;
3310 bt_cb(skb
)->sar
= sar
;
3312 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3314 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3319 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3322 if (bt_cb(next_skb
)->tx_seq
> tx_seq
) {
3323 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3327 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3330 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3332 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits and
 * deliver it to the socket receive queue when complete.
 * NOTE(review): this excerpt elides many lines (err declaration, state
 * validation 'goto drop' paths, allocation failure checks, breaks, the
 * SDU_END case label and the drop/disconnect epilogue). */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* A plain SDU while a segmented one is in flight is a
		 * protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)

		err = sock_queue_rcv_skb(sk, skb);

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)

		/* First two payload octets carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)

		/* SDU_END handling below (case label elided in excerpt). */
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len > pi->imtu)

		/* Length mismatch at SDU end is a protocol error. */
		if (pi->partial_sdu_len != pi->sdu_len)

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		err = sock_queue_rcv_skb(sk, _skb);

	/* Unrecoverable reassembly failure: drop the channel. */
	l2cap_send_disconn_req(pi->conn, sk);
/* Reassemble an SDU from streaming-mode frames.  Unlike ERTM, errors
 * drop data silently (streaming mode is unreliable by design).
 * NOTE(review): this excerpt elides many lines (err declaration, the
 * kfree_skb/state-reset recovery paths, breaks and case labels). */
static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;

	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 */

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* A plain SDU aborts any partial reassembly in progress. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {

		err = sock_queue_rcv_skb(sk, skb);

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {

		/* First two payload octets carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu) {

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)

		/* SDU_END handling below (case label elided in excerpt). */
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len > pi->imtu)

		if (pi->partial_sdu_len == pi->sdu_len) {
			/* Complete SDU: hand a clone to the socket. */
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
3525 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3527 struct sk_buff
*skb
;
3530 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3531 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3534 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3535 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3536 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3537 l2cap_pi(sk
)->buffer_seq_srej
=
3538 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
/* Re-send SREJ S-frames for every outstanding entry up to (and
 * including the removal of) the entry matching tx_seq; entries that are
 * re-requested are rotated to the tail of the SREJ list.
 * NOTE(review): this excerpt elides the body of the match branch (the
 * entry's removal/free and early return) and the 'u16 control'
 * declaration. */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;

	/* _safe variant: entries may be unlinked while iterating. */
	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {

		/* Still missing: ask the peer for it again. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);

		/* Rotate to the tail so list order tracks request order. */
		list_add_tail(&l->list, SREJ_LIST(sk));
3563 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3565 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3566 struct srej_list
*new;
3569 while (tx_seq
!= pi
->expected_tx_seq
) {
3570 control
= L2CAP_SUPER_SELECT_REJECT
;
3571 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3572 l2cap_send_sframe(pi
, control
);
3574 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3575 new->tx_seq
= pi
->expected_tx_seq
++;
3576 list_add_tail(&new->list
, SREJ_LIST(sk
));
3578 pi
->expected_tx_seq
++;
/* Core ERTM receive path for I-frames: acknowledge, detect duplicated
 * or out-of-window sequence numbers, run the SREJ recovery state
 * machine, and pass in-order frames to SDU reassembly.
 * NOTE(review): this excerpt elides many lines (goto labels and targets,
 * else branches, err declaration, return paths, closing braces);
 * visible tokens kept verbatim. */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	u8 tx_seq_offset, expected_tx_seq_offset;
	/* Acknowledge after roughly a sixth of the transmit window. */
	int num_to_ack = (pi->tx_win/6) + 1;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		/* F-bit answers our poll: stop the monitor timer. */
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;

	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= pi->tx_win) {
		l2cap_send_disconn_req(pi->conn, sk);

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: queue it and flush
			 * any in-order run that follows. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete: resume normal state. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;

			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)

			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);

			l2cap_send_srejframe(sk, tx_seq);

		expected_tx_seq_offset =
			(pi->expected_tx_seq - pi->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)

		/* First gap detected: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);

	/* Expected in-order frame path. */
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(SREJ_QUEUE(sk), skb);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;

		if (!skb_queue_empty(TX_QUEUE(sk)))
			sk->sk_send_head = TX_QUEUE(sk)->next;
		pi->next_tx_seq = pi->expected_ack_seq;
		l2cap_ertm_send(sk);

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_ertm_reassembly_sdu(sk, skb, rx_control);

	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
	if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready (RR) S-frame: acknowledge frames, and react
 * to the P/F bits by answering polls or restarting transmission.
 * NOTE(review): this excerpt elides some else branches and closing
 * braces; visible tokens kept verbatim. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* In SREJ recovery: answer the poll with SREJ
			 * carrying the F-bit. */
			l2cap_send_srejtail(sk);

			/* Normal state: answer with I-frames or RR/RNR. */
			l2cap_send_i_or_rr_or_rnr(sk);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;

		/* F-bit without pending REJ: retransmit from the last
		 * acknowledged sequence number. */
		if (!skb_queue_empty(TX_QUEUE(sk)))
			sk->sk_send_head = TX_QUEUE(sk)->next;
		pi->next_tx_seq = pi->expected_ack_seq;
		l2cap_ertm_send(sk);

		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)

			l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: the peer asks us to retransmit from
 * the given reqseq.  The F-bit variant is suppressed when a REJ action
 * is already pending (avoids retransmitting twice).
 * NOTE(review): this excerpt elides the else keywords/braces separating
 * the two retransmit branches; visible tokens kept verbatim. */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;

		/* Rewind the send queue to the rejected frame and resend. */
		if (!skb_queue_empty(TX_QUEUE(sk)))
			sk->sk_send_head = TX_QUEUE(sk)->next;
		pi->next_tx_seq = pi->expected_ack_seq;
		l2cap_ertm_send(sk);

		/* Non-final REJ: same rewind-and-resend. */
		if (!skb_queue_empty(TX_QUEUE(sk)))
			sk->sk_send_head = TX_QUEUE(sk)->next;
		pi->next_tx_seq = pi->expected_ack_seq;
		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			/* Remember we acted on the REJ while waiting for
			 * the F-bit answer to our poll. */
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame, tracking the P/F bits to avoid duplicate resends.
 * NOTE(review): this excerpt elides else keywords/braces between the
 * final-bit sub-branches; visible tokens kept verbatim. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);
		l2cap_retransmit_frame(sk, tx_seq);
		l2cap_ertm_send(sk);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			/* Remember which reqseq we answered, so the later
			 * F-bit copy is not retransmitted again. */
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;

			/* Not a duplicate of the saved request: resend. */
			l2cap_retransmit_frame(sk, tx_seq);

		/* Plain SREJ: resend the requested frame. */
		l2cap_retransmit_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, stop
 * retransmissions, and answer a poll appropriately for the current
 * recovery state.
 * NOTE(review): this excerpt elides a 'return'/'else' between branches
 * and closing braces; visible tokens kept verbatim. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer can't receive: no point retransmitting. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);

	/* In SREJ recovery: answer a poll with the SREJ tail,
	 * otherwise acknowledge with a plain RR. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);

		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received supervisory (S-) frame to the RR/REJ/SREJ/RNR
 * handler after common F-bit processing.
 * NOTE(review): this excerpt elides break statements, the kfree_skb of
 * the consumed S-frame and the return; visible tokens kept verbatim. */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		/* F-bit answers our poll: stop the monitor timer. */
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
/* Receive entry point for a connection-oriented data channel: look up
 * the socket by CID and process the frame according to the channel
 * mode (basic / ERTM / streaming).
 * NOTE(review): this excerpt elides many lines (control/len/err
 * declarations, the mode switch header, goto drop/done paths, FCS 'len'
 * adjustments, breaks, lock/unlock and returns). */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
	struct l2cap_pinfo *pi;
	u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
		BT_DBG("unknown cid 0x%4.4x", cid);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)

		if (!sock_queue_rcv_skb(sk, skb))

	case L2CAP_MODE_ERTM:
		/* First two octets are the ERTM control field. */
		control = get_unaligned_le16(skb->data);

		if (__is_sar_start(control))

		if (pi->fcs == L2CAP_FCS_CRC16)

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk);

		if (l2cap_check_fcs(pi, skb))

		req_seq = __get_reqseq(control);
		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
		if (req_seq_offset < 0)
			req_seq_offset += 64;

		next_tx_seq_offset =
			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* check for invalid req-seq */
		if (req_seq_offset > next_tx_seq_offset) {
			l2cap_send_disconn_req(pi->conn, sk);

		if (__is_iframe(control)) {
			/* Undersized I-frame: protocol violation. */
			l2cap_send_disconn_req(pi->conn, sk);

			l2cap_data_channel_iframe(sk, control, skb);

			/* Wrong-sized S-frame: protocol violation. */
			l2cap_send_disconn_req(pi->conn, sk);

			l2cap_data_channel_sframe(sk, control, skb);

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);

		if (__is_sar_start(control))

		if (pi->fcs == L2CAP_FCS_CRC16)

		/* Streaming only carries I-frames of bounded size. */
		if (len > pi->mps || len < 4 || __is_sframe(control))

		if (l2cap_check_fcs(pi, skb))

		tx_seq = __get_txseq(control);

		/* Gaps are tolerated: just resynchronise the counter. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless-channel (CID 0x0002) frame to the socket
 * bound to the given PSM on this adapter.
 * NOTE(review): this excerpt elides the 'sk' declaration, the lookup
 * failure path, the drop/kfree_skb path and the return. */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);

	BT_DBG("sk %p, len %d", sk, skb->len);

	/* Connectionless data may go to bound or connected sockets. */
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)

	if (l2cap_pi(sk)->imtu < skb->len)

	if (!sock_queue_rcv_skb(sk, skb))
/* Demultiplex a complete L2CAP frame by CID: signalling, connectionless
 * or a connection-oriented data channel.
 * NOTE(review): this excerpt elides the cid/len/psm declarations, the
 * length-mismatch drop path, the switch header, skb_pull before the
 * connectionless dispatch and break statements. */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_hdr *lh = (void *) skb->data;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length. */
	if (len != skb->len) {

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the 16-bit PSM. */
		psm = get_unaligned_le16(skb->data);
		l2cap_conless_channel(conn, psm, skb);

		/* All other CIDs are dynamic data channels. */
		l2cap_data_channel(conn, cid, skb);
4071 /* ---- L2CAP interface with lower layer (HCI) ---- */
4073 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4075 int exact
= 0, lm1
= 0, lm2
= 0;
4076 register struct sock
*sk
;
4077 struct hlist_node
*node
;
4079 if (type
!= ACL_LINK
)
4082 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4084 /* Find listening sockets and check their link_mode */
4085 read_lock(&l2cap_sk_list
.lock
);
4086 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4087 if (sk
->sk_state
!= BT_LISTEN
)
4090 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4091 lm1
|= HCI_LM_ACCEPT
;
4092 if (l2cap_pi(sk
)->role_switch
)
4093 lm1
|= HCI_LM_MASTER
;
4095 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4096 lm2
|= HCI_LM_ACCEPT
;
4097 if (l2cap_pi(sk
)->role_switch
)
4098 lm2
|= HCI_LM_MASTER
;
4101 read_unlock(&l2cap_sk_list
.lock
);
4103 return exact
? lm1
: lm2
;
/* HCI callback: an ACL connection attempt completed.  On success create
 * the L2CAP connection object and kick it; on failure clean up.
 * NOTE(review): this excerpt elides the non-ACL return value, the
 * if (!status)/else structure around the two outcomes and the return. */
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK)

		conn = l2cap_conn_add(hcon, status);
			l2cap_conn_ready(conn);

		/* Connection failed: tear down with a mapped errno. */
		l2cap_conn_del(hcon, bt_err(status));
4125 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4127 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4129 BT_DBG("hcon %p", hcon
);
4131 if (hcon
->type
!= ACL_LINK
|| !conn
)
4134 return conn
->disc_reason
;
4137 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4139 BT_DBG("hcon %p reason %d", hcon
, reason
);
4141 if (hcon
->type
!= ACL_LINK
)
4144 l2cap_conn_del(hcon
, bt_err(reason
));
4149 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4151 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4154 if (encrypt
== 0x00) {
4155 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4156 l2cap_sock_clear_timer(sk
);
4157 l2cap_sock_set_timer(sk
, HZ
* 5);
4158 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4159 __l2cap_sock_close(sk
, ECONNREFUSED
);
4161 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4162 l2cap_sock_clear_timer(sk
);
/* HCI callback: an authentication/encryption procedure finished.  Walk
 * every channel on the connection and advance those that were waiting
 * on security (send pending Connect Requests/Responses), or close them.
 * NOTE(review): this excerpt elides several lines ('sk'/'result'
 * declarations, bh_lock/unlock per socket, continue/else structure,
 * the !conn early return and the final return). */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {

		/* Channels still connecting are handled further below. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {

		if (!status && (sk->sk_state == BT_CONNECTED ||
				sk->sk_state == BT_CONFIG)) {
			/* Established channel: re-evaluate encryption. */
			l2cap_check_encryption(sk, encrypt);

		if (sk->sk_state == BT_CONNECT) {
				/* Security done: send the deferred
				 * Connect Request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);

				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection was waiting on security:
			 * answer with success or a security block. */
			struct l2cap_conn_rsp rsp;

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;

				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	read_unlock(&l->lock);
/* HCI callback: an ACL data fragment arrived.  Reassemble fragments
 * (ACL_START begins a frame, continuations are appended) into a
 * complete L2CAP frame, then hand it to l2cap_recv_frame().
 * NOTE(review): this excerpt elides several lines ('len' declaration,
 * the if/else fragment-state checks, goto drop/done structure,
 * allocation-failure handling, kfree_skb and the return). */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;

			/* A start frame while reassembly is pending means
			 * the previous frame was truncated. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			l2cap_conn_unreliable(conn, ECOMM);

			/* Start fragment must at least hold the header. */
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len = len - skb->len;

		/* Continuation fragment path. */
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			l2cap_conn_unreliable(conn, ECOMM);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
/* debugfs: dump one line per L2CAP socket (addresses, state, psm,
 * cids, mtus, security level).
 * NOTE(review): this excerpt elides the 'sk' declaration, the
 * scid/dcid columns of the printf and the 'return 0'. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src),
				batostr(&bt_sk(sk)->dst),
				sk->sk_state, __le16_to_cpu(pi->psm),
				pi->imtu, pi->omtu, pi->sec_level);

	read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show helper to our dump. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
	return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap.
 * NOTE(review): the '.read = seq_read' member is elided in this excerpt. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.llseek		= seq_lseek,
	.release	= single_release,

/* Dentry of the debugfs file, kept for removal at module exit. */
static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets;
 * generic bt_sock_* helpers cover poll/ioctl. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
/* Registered with bt_sock_register(): creates L2CAP sockets. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
/* HCI protocol hooks: how the HCI core calls into L2CAP.
 * NOTE(review): the '.name' member line is elided in this excerpt. */
static struct hci_proto l2cap_hci_proto = {
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
/* Module init: register the proto, the socket family, the HCI protocol
 * hooks and the debugfs file, unwinding on failure.
 * NOTE(review): this excerpt elides the 'int err' declaration, the
 * error-check if-statements, goto labels and returns. */
static int __init l2cap_init(void)
	err = proto_register(&l2cap_proto, 0);

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
		BT_ERR("L2CAP socket registration failed");

	err = hci_register_proto(&l2cap_hci_proto);
		BT_ERR("L2CAP protocol registration failed");
		/* Unwind the socket registration on failure. */
		bt_sock_unregister(BTPROTO_L2CAP);

	/* debugfs is best-effort: failure only logs an error. */
	l2cap_debugfs = debugfs_create_file("l2cap", 0444,
				bt_debugfs, NULL, &l2cap_debugfs_fops);
		BT_ERR("Failed to create L2CAP debug file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	/* Error path: undo proto_register(). */
	proto_unregister(&l2cap_proto);
/* Module exit: unregister everything registered by l2cap_init(),
 * in reverse order. */
static void __exit l2cap_exit(void)
	debugfs_remove(l2cap_debugfs);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol from another module causes
 * the module loader to pull in l2cap automatically. */
void l2cap_load(void)
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Module parameters: runtime-tunable ERTM knobs (0644: root-writable
 * via /sys/module/l2cap/parameters). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

module_param(tx_window, uint, 0644);
MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Auto-load when the Bluetooth core requests protocol 0 (L2CAP). */
MODULE_ALIAS("bt-proto-0");