2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm
= 0;
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops
;
65 static struct workqueue_struct
*_busy_wq
;
67 static struct bt_sock_list l2cap_sk_list
= {
68 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
71 static void l2cap_busy_work(struct work_struct
*work
);
73 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
74 static void l2cap_sock_close(struct sock
*sk
);
75 static void l2cap_sock_kill(struct sock
*sk
);
77 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
78 u8 code
, u8 ident
, u16 dlen
, void *data
);
80 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
82 /* ---- L2CAP timers ---- */
/* Socket timer expiry: close the channel with an error that reflects the
 * connection phase we were in when the timer fired.
 * NOTE(review): reconstructed from a mangled extraction — the lock/unlock
 * and cleanup tail were missing from the fragment; confirm against the
 * canonical source.
 */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
108 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
110 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
111 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
114 static void l2cap_sock_clear_timer(struct sock
*sk
)
116 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
117 sk_stop_timer(sk
, &sk
->sk_timer
);
120 /* ---- L2CAP channels ---- */
121 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
124 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
125 if (l2cap_pi(s
)->dcid
== cid
)
131 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
134 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
135 if (l2cap_pi(s
)->scid
== cid
)
141 /* Find channel with given SCID.
142 * Returns locked socket */
143 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
147 s
= __l2cap_get_chan_by_scid(l
, cid
);
150 read_unlock(&l
->lock
);
154 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
157 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
158 if (l2cap_pi(s
)->ident
== ident
)
164 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
168 s
= __l2cap_get_chan_by_ident(l
, ident
);
171 read_unlock(&l
->lock
);
175 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
177 u16 cid
= L2CAP_CID_DYN_START
;
179 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
180 if (!__l2cap_get_chan_by_scid(l
, cid
))
187 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
192 l2cap_pi(l
->head
)->prev_c
= sk
;
194 l2cap_pi(sk
)->next_c
= l
->head
;
195 l2cap_pi(sk
)->prev_c
= NULL
;
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
201 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
203 write_lock_bh(&l
->lock
);
208 l2cap_pi(next
)->prev_c
= prev
;
210 l2cap_pi(prev
)->next_c
= next
;
211 write_unlock_bh(&l
->lock
);
216 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
218 struct l2cap_chan_list
*l
= &conn
->chan_list
;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
221 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
223 conn
->disc_reason
= 0x13;
225 l2cap_pi(sk
)->conn
= conn
;
227 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
230 } else if (sk
->sk_type
== SOCK_DGRAM
) {
231 /* Connectionless socket */
232 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
233 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
234 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
238 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
239 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
242 __l2cap_chan_link(l
, sk
);
245 bt_accept_enqueue(parent
, sk
);
249 * Must be called on the locked socket. */
250 static void l2cap_chan_del(struct sock
*sk
, int err
)
252 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
253 struct sock
*parent
= bt_sk(sk
)->parent
;
255 l2cap_sock_clear_timer(sk
);
257 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn
->chan_list
, sk
);
262 l2cap_pi(sk
)->conn
= NULL
;
263 hci_conn_put(conn
->hcon
);
266 sk
->sk_state
= BT_CLOSED
;
267 sock_set_flag(sk
, SOCK_ZAPPED
);
273 bt_accept_unlink(sk
);
274 parent
->sk_data_ready(parent
, 0);
276 sk
->sk_state_change(sk
);
278 skb_queue_purge(TX_QUEUE(sk
));
280 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
281 struct srej_list
*l
, *tmp
;
283 del_timer(&l2cap_pi(sk
)->retrans_timer
);
284 del_timer(&l2cap_pi(sk
)->monitor_timer
);
285 del_timer(&l2cap_pi(sk
)->ack_timer
);
287 skb_queue_purge(SREJ_QUEUE(sk
));
288 skb_queue_purge(BUSY_QUEUE(sk
));
290 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
297 /* Service level security */
298 static inline int l2cap_check_security(struct sock
*sk
)
300 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
303 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
304 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
305 auth_type
= HCI_AT_NO_BONDING_MITM
;
307 auth_type
= HCI_AT_NO_BONDING
;
309 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
310 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
312 switch (l2cap_pi(sk
)->sec_level
) {
313 case BT_SECURITY_HIGH
:
314 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
316 case BT_SECURITY_MEDIUM
:
317 auth_type
= HCI_AT_GENERAL_BONDING
;
320 auth_type
= HCI_AT_NO_BONDING
;
325 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
329 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
333 /* Get next available identificator.
334 * 1 - 128 are used by kernel.
335 * 129 - 199 are reserved.
336 * 200 - 254 are used by utilities like l2ping, etc.
339 spin_lock_bh(&conn
->lock
);
341 if (++conn
->tx_ident
> 128)
346 spin_unlock_bh(&conn
->lock
);
351 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
353 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
355 BT_DBG("code 0x%2.2x", code
);
360 hci_send_acl(conn
->hcon
, skb
, 0);
363 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
366 struct l2cap_hdr
*lh
;
367 struct l2cap_conn
*conn
= pi
->conn
;
368 struct sock
*sk
= (struct sock
*)pi
;
369 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
371 if (sk
->sk_state
!= BT_CONNECTED
)
374 if (pi
->fcs
== L2CAP_FCS_CRC16
)
377 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
379 count
= min_t(unsigned int, conn
->mtu
, hlen
);
380 control
|= L2CAP_CTRL_FRAME_TYPE
;
382 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
383 control
|= L2CAP_CTRL_FINAL
;
384 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
387 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
388 control
|= L2CAP_CTRL_POLL
;
389 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
392 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
396 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
397 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
398 lh
->cid
= cpu_to_le16(pi
->dcid
);
399 put_unaligned_le16(control
, skb_put(skb
, 2));
401 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
402 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
403 put_unaligned_le16(fcs
, skb_put(skb
, 2));
406 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
409 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
411 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
412 control
|= L2CAP_SUPER_RCV_NOT_READY
;
413 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
415 control
|= L2CAP_SUPER_RCV_READY
;
417 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
419 l2cap_send_sframe(pi
, control
);
422 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
424 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
427 static void l2cap_do_start(struct sock
*sk
)
429 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
431 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
432 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
435 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
436 struct l2cap_conn_req req
;
437 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
438 req
.psm
= l2cap_pi(sk
)->psm
;
440 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
441 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
443 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
444 L2CAP_CONN_REQ
, sizeof(req
), &req
);
447 struct l2cap_info_req req
;
448 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
450 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
451 conn
->info_ident
= l2cap_get_ident(conn
);
453 mod_timer(&conn
->info_timer
, jiffies
+
454 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
456 l2cap_send_cmd(conn
, conn
->info_ident
,
457 L2CAP_INFO_REQ
, sizeof(req
), &req
);
461 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
463 u32 local_feat_mask
= l2cap_feat_mask
;
465 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
468 case L2CAP_MODE_ERTM
:
469 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
470 case L2CAP_MODE_STREAMING
:
471 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
477 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
479 struct l2cap_disconn_req req
;
484 skb_queue_purge(TX_QUEUE(sk
));
486 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
487 del_timer(&l2cap_pi(sk
)->retrans_timer
);
488 del_timer(&l2cap_pi(sk
)->monitor_timer
);
489 del_timer(&l2cap_pi(sk
)->ack_timer
);
492 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
493 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
494 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
495 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
497 sk
->sk_state
= BT_DISCONN
;
501 /* ---- L2CAP connections ---- */
502 static void l2cap_conn_start(struct l2cap_conn
*conn
)
504 struct l2cap_chan_list
*l
= &conn
->chan_list
;
505 struct sock_del_list del
, *tmp1
, *tmp2
;
508 BT_DBG("conn %p", conn
);
510 INIT_LIST_HEAD(&del
.list
);
514 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
517 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
518 sk
->sk_type
!= SOCK_STREAM
) {
523 if (sk
->sk_state
== BT_CONNECT
) {
524 if (l2cap_check_security(sk
) &&
525 __l2cap_no_conn_pending(sk
)) {
526 struct l2cap_conn_req req
;
528 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
530 && l2cap_pi(sk
)->conf_state
&
531 L2CAP_CONF_STATE2_DEVICE
) {
532 tmp1
= kzalloc(sizeof(struct srej_list
),
535 list_add_tail(&tmp1
->list
, &del
.list
);
540 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
541 req
.psm
= l2cap_pi(sk
)->psm
;
543 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
544 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
546 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
547 L2CAP_CONN_REQ
, sizeof(req
), &req
);
549 } else if (sk
->sk_state
== BT_CONNECT2
) {
550 struct l2cap_conn_rsp rsp
;
551 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
552 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
554 if (l2cap_check_security(sk
)) {
555 if (bt_sk(sk
)->defer_setup
) {
556 struct sock
*parent
= bt_sk(sk
)->parent
;
557 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
558 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
559 parent
->sk_data_ready(parent
, 0);
562 sk
->sk_state
= BT_CONFIG
;
563 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
564 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
567 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
568 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
571 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
572 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
578 read_unlock(&l
->lock
);
580 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
581 bh_lock_sock(tmp1
->sk
);
582 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
583 bh_unlock_sock(tmp1
->sk
);
584 list_del(&tmp1
->list
);
589 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
591 struct l2cap_chan_list
*l
= &conn
->chan_list
;
594 BT_DBG("conn %p", conn
);
598 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
601 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
602 sk
->sk_type
!= SOCK_STREAM
) {
603 l2cap_sock_clear_timer(sk
);
604 sk
->sk_state
= BT_CONNECTED
;
605 sk
->sk_state_change(sk
);
606 } else if (sk
->sk_state
== BT_CONNECT
)
612 read_unlock(&l
->lock
);
615 /* Notify sockets that we cannot guaranty reliability anymore */
616 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
618 struct l2cap_chan_list
*l
= &conn
->chan_list
;
621 BT_DBG("conn %p", conn
);
625 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
626 if (l2cap_pi(sk
)->force_reliable
)
630 read_unlock(&l
->lock
);
633 static void l2cap_info_timeout(unsigned long arg
)
635 struct l2cap_conn
*conn
= (void *) arg
;
637 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
638 conn
->info_ident
= 0;
640 l2cap_conn_start(conn
);
643 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
645 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
650 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
654 hcon
->l2cap_data
= conn
;
657 BT_DBG("hcon %p conn %p", hcon
, conn
);
659 conn
->mtu
= hcon
->hdev
->acl_mtu
;
660 conn
->src
= &hcon
->hdev
->bdaddr
;
661 conn
->dst
= &hcon
->dst
;
665 spin_lock_init(&conn
->lock
);
666 rwlock_init(&conn
->chan_list
.lock
);
668 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
669 (unsigned long) conn
);
671 conn
->disc_reason
= 0x13;
676 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
678 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
684 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
686 kfree_skb(conn
->rx_skb
);
689 while ((sk
= conn
->chan_list
.head
)) {
691 l2cap_chan_del(sk
, err
);
696 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
697 del_timer_sync(&conn
->info_timer
);
699 hcon
->l2cap_data
= NULL
;
703 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
705 struct l2cap_chan_list
*l
= &conn
->chan_list
;
706 write_lock_bh(&l
->lock
);
707 __l2cap_chan_add(conn
, sk
, parent
);
708 write_unlock_bh(&l
->lock
);
711 /* ---- Socket interface ---- */
712 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
715 struct hlist_node
*node
;
716 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
717 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
724 /* Find socket with psm and source bdaddr.
725 * Returns closest match.
727 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
729 struct sock
*sk
= NULL
, *sk1
= NULL
;
730 struct hlist_node
*node
;
732 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
733 if (state
&& sk
->sk_state
!= state
)
736 if (l2cap_pi(sk
)->psm
== psm
) {
738 if (!bacmp(&bt_sk(sk
)->src
, src
))
742 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
746 return node
? sk
: sk1
;
749 /* Find socket with given address (psm, src).
750 * Returns locked socket */
751 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
754 read_lock(&l2cap_sk_list
.lock
);
755 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
758 read_unlock(&l2cap_sk_list
.lock
);
762 static void l2cap_sock_destruct(struct sock
*sk
)
766 skb_queue_purge(&sk
->sk_receive_queue
);
767 skb_queue_purge(&sk
->sk_write_queue
);
770 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
774 BT_DBG("parent %p", parent
);
776 /* Close not yet accepted channels */
777 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
778 l2cap_sock_close(sk
);
780 parent
->sk_state
= BT_CLOSED
;
781 sock_set_flag(parent
, SOCK_ZAPPED
);
784 /* Kill socket (only if zapped and orphan)
785 * Must be called on unlocked socket.
787 static void l2cap_sock_kill(struct sock
*sk
)
789 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
792 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
794 /* Kill poor orphan */
795 bt_sock_unlink(&l2cap_sk_list
, sk
);
796 sock_set_flag(sk
, SOCK_DEAD
);
800 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
802 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
804 switch (sk
->sk_state
) {
806 l2cap_sock_cleanup_listen(sk
);
811 if (sk
->sk_type
== SOCK_SEQPACKET
||
812 sk
->sk_type
== SOCK_STREAM
) {
813 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
815 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
816 l2cap_send_disconn_req(conn
, sk
, reason
);
818 l2cap_chan_del(sk
, reason
);
822 if (sk
->sk_type
== SOCK_SEQPACKET
||
823 sk
->sk_type
== SOCK_STREAM
) {
824 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
825 struct l2cap_conn_rsp rsp
;
828 if (bt_sk(sk
)->defer_setup
)
829 result
= L2CAP_CR_SEC_BLOCK
;
831 result
= L2CAP_CR_BAD_PSM
;
833 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
834 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
835 rsp
.result
= cpu_to_le16(result
);
836 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
837 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
838 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
840 l2cap_chan_del(sk
, reason
);
845 l2cap_chan_del(sk
, reason
);
849 sock_set_flag(sk
, SOCK_ZAPPED
);
854 /* Must be called on unlocked socket. */
855 static void l2cap_sock_close(struct sock
*sk
)
857 l2cap_sock_clear_timer(sk
);
859 __l2cap_sock_close(sk
, ECONNRESET
);
864 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
866 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
871 sk
->sk_type
= parent
->sk_type
;
872 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
874 pi
->imtu
= l2cap_pi(parent
)->imtu
;
875 pi
->omtu
= l2cap_pi(parent
)->omtu
;
876 pi
->conf_state
= l2cap_pi(parent
)->conf_state
;
877 pi
->mode
= l2cap_pi(parent
)->mode
;
878 pi
->fcs
= l2cap_pi(parent
)->fcs
;
879 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
880 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
881 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
882 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
883 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
885 pi
->imtu
= L2CAP_DEFAULT_MTU
;
887 if (enable_ertm
&& sk
->sk_type
== SOCK_STREAM
) {
888 pi
->mode
= L2CAP_MODE_ERTM
;
889 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
891 pi
->mode
= L2CAP_MODE_BASIC
;
893 pi
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
894 pi
->fcs
= L2CAP_FCS_CRC16
;
895 pi
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
896 pi
->sec_level
= BT_SECURITY_LOW
;
898 pi
->force_reliable
= 0;
901 /* Default config options */
903 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
904 skb_queue_head_init(TX_QUEUE(sk
));
905 skb_queue_head_init(SREJ_QUEUE(sk
));
906 skb_queue_head_init(BUSY_QUEUE(sk
));
907 INIT_LIST_HEAD(SREJ_LIST(sk
));
910 static struct proto l2cap_proto
= {
912 .owner
= THIS_MODULE
,
913 .obj_size
= sizeof(struct l2cap_pinfo
)
916 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
920 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
924 sock_init_data(sock
, sk
);
925 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
927 sk
->sk_destruct
= l2cap_sock_destruct
;
928 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
930 sock_reset_flag(sk
, SOCK_ZAPPED
);
932 sk
->sk_protocol
= proto
;
933 sk
->sk_state
= BT_OPEN
;
935 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
937 bt_sock_link(&l2cap_sk_list
, sk
);
941 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
946 BT_DBG("sock %p", sock
);
948 sock
->state
= SS_UNCONNECTED
;
950 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
951 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
952 return -ESOCKTNOSUPPORT
;
954 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
957 sock
->ops
= &l2cap_sock_ops
;
959 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
963 l2cap_sock_init(sk
, NULL
);
967 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
969 struct sock
*sk
= sock
->sk
;
970 struct sockaddr_l2 la
;
975 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
978 memset(&la
, 0, sizeof(la
));
979 len
= min_t(unsigned int, sizeof(la
), alen
);
980 memcpy(&la
, addr
, len
);
987 if (sk
->sk_state
!= BT_OPEN
) {
992 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
993 !capable(CAP_NET_BIND_SERVICE
)) {
998 write_lock_bh(&l2cap_sk_list
.lock
);
1000 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
1003 /* Save source address */
1004 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
1005 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1006 l2cap_pi(sk
)->sport
= la
.l2_psm
;
1007 sk
->sk_state
= BT_BOUND
;
1009 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
1010 __le16_to_cpu(la
.l2_psm
) == 0x0003)
1011 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1014 write_unlock_bh(&l2cap_sk_list
.lock
);
1021 static int l2cap_do_connect(struct sock
*sk
)
1023 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1024 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1025 struct l2cap_conn
*conn
;
1026 struct hci_conn
*hcon
;
1027 struct hci_dev
*hdev
;
1031 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1034 hdev
= hci_get_route(dst
, src
);
1036 return -EHOSTUNREACH
;
1038 hci_dev_lock_bh(hdev
);
1042 if (sk
->sk_type
== SOCK_RAW
) {
1043 switch (l2cap_pi(sk
)->sec_level
) {
1044 case BT_SECURITY_HIGH
:
1045 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
1047 case BT_SECURITY_MEDIUM
:
1048 auth_type
= HCI_AT_DEDICATED_BONDING
;
1051 auth_type
= HCI_AT_NO_BONDING
;
1054 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
1055 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
1056 auth_type
= HCI_AT_NO_BONDING_MITM
;
1058 auth_type
= HCI_AT_NO_BONDING
;
1060 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
1061 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1063 switch (l2cap_pi(sk
)->sec_level
) {
1064 case BT_SECURITY_HIGH
:
1065 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
1067 case BT_SECURITY_MEDIUM
:
1068 auth_type
= HCI_AT_GENERAL_BONDING
;
1071 auth_type
= HCI_AT_NO_BONDING
;
1076 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1077 l2cap_pi(sk
)->sec_level
, auth_type
);
1081 conn
= l2cap_conn_add(hcon
, 0);
1089 /* Update source addr of the socket */
1090 bacpy(src
, conn
->src
);
1092 l2cap_chan_add(conn
, sk
, NULL
);
1094 sk
->sk_state
= BT_CONNECT
;
1095 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1097 if (hcon
->state
== BT_CONNECTED
) {
1098 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1099 sk
->sk_type
!= SOCK_STREAM
) {
1100 l2cap_sock_clear_timer(sk
);
1101 sk
->sk_state
= BT_CONNECTED
;
1107 hci_dev_unlock_bh(hdev
);
1112 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1114 struct sock
*sk
= sock
->sk
;
1115 struct sockaddr_l2 la
;
1118 BT_DBG("sk %p", sk
);
1120 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1121 addr
->sa_family
!= AF_BLUETOOTH
)
1124 memset(&la
, 0, sizeof(la
));
1125 len
= min_t(unsigned int, sizeof(la
), alen
);
1126 memcpy(&la
, addr
, len
);
1133 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1139 switch (l2cap_pi(sk
)->mode
) {
1140 case L2CAP_MODE_BASIC
:
1142 case L2CAP_MODE_ERTM
:
1143 case L2CAP_MODE_STREAMING
:
1152 switch (sk
->sk_state
) {
1156 /* Already connecting */
1160 /* Already connected */
1173 /* Set destination address and psm */
1174 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1175 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1177 err
= l2cap_do_connect(sk
);
1182 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1183 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1189 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1191 struct sock
*sk
= sock
->sk
;
1194 BT_DBG("sk %p backlog %d", sk
, backlog
);
1198 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1199 || sk
->sk_state
!= BT_BOUND
) {
1204 switch (l2cap_pi(sk
)->mode
) {
1205 case L2CAP_MODE_BASIC
:
1207 case L2CAP_MODE_ERTM
:
1208 case L2CAP_MODE_STREAMING
:
1217 if (!l2cap_pi(sk
)->psm
) {
1218 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1223 write_lock_bh(&l2cap_sk_list
.lock
);
1225 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1226 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1227 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1228 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1233 write_unlock_bh(&l2cap_sk_list
.lock
);
1239 sk
->sk_max_ack_backlog
= backlog
;
1240 sk
->sk_ack_backlog
= 0;
1241 sk
->sk_state
= BT_LISTEN
;
1248 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1250 DECLARE_WAITQUEUE(wait
, current
);
1251 struct sock
*sk
= sock
->sk
, *nsk
;
1255 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1257 if (sk
->sk_state
!= BT_LISTEN
) {
1262 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1264 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1266 /* Wait for an incoming connection. (wake-one). */
1267 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1268 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1269 set_current_state(TASK_INTERRUPTIBLE
);
1276 timeo
= schedule_timeout(timeo
);
1277 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1279 if (sk
->sk_state
!= BT_LISTEN
) {
1284 if (signal_pending(current
)) {
1285 err
= sock_intr_errno(timeo
);
1289 set_current_state(TASK_RUNNING
);
1290 remove_wait_queue(sk_sleep(sk
), &wait
);
1295 newsock
->state
= SS_CONNECTED
;
1297 BT_DBG("new socket %p", nsk
);
1304 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1306 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1307 struct sock
*sk
= sock
->sk
;
1309 BT_DBG("sock %p, sk %p", sock
, sk
);
1311 addr
->sa_family
= AF_BLUETOOTH
;
1312 *len
= sizeof(struct sockaddr_l2
);
1315 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1316 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1317 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1319 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1320 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1321 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1327 static int __l2cap_wait_ack(struct sock
*sk
)
1329 DECLARE_WAITQUEUE(wait
, current
);
1333 add_wait_queue(sk_sleep(sk
), &wait
);
1334 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1335 set_current_state(TASK_INTERRUPTIBLE
);
1340 if (signal_pending(current
)) {
1341 err
= sock_intr_errno(timeo
);
1346 timeo
= schedule_timeout(timeo
);
1349 err
= sock_error(sk
);
1353 set_current_state(TASK_RUNNING
);
1354 remove_wait_queue(sk_sleep(sk
), &wait
);
1358 static void l2cap_monitor_timeout(unsigned long arg
)
1360 struct sock
*sk
= (void *) arg
;
1362 BT_DBG("sk %p", sk
);
1365 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1366 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
1371 l2cap_pi(sk
)->retry_count
++;
1372 __mod_monitor_timer();
1374 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1378 static void l2cap_retrans_timeout(unsigned long arg
)
1380 struct sock
*sk
= (void *) arg
;
1382 BT_DBG("sk %p", sk
);
1385 l2cap_pi(sk
)->retry_count
= 1;
1386 __mod_monitor_timer();
1388 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1390 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1394 static void l2cap_drop_acked_frames(struct sock
*sk
)
1396 struct sk_buff
*skb
;
1398 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1399 l2cap_pi(sk
)->unacked_frames
) {
1400 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1403 skb
= skb_dequeue(TX_QUEUE(sk
));
1406 l2cap_pi(sk
)->unacked_frames
--;
1409 if (!l2cap_pi(sk
)->unacked_frames
)
1410 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1413 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1415 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1417 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1419 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1422 static int l2cap_streaming_send(struct sock
*sk
)
1424 struct sk_buff
*skb
, *tx_skb
;
1425 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1428 while ((skb
= sk
->sk_send_head
)) {
1429 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1431 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1432 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1433 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1435 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1436 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1437 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1440 l2cap_do_send(sk
, tx_skb
);
1442 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1444 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1445 sk
->sk_send_head
= NULL
;
1447 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1449 skb
= skb_dequeue(TX_QUEUE(sk
));
1455 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1457 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1458 struct sk_buff
*skb
, *tx_skb
;
1461 skb
= skb_peek(TX_QUEUE(sk
));
1466 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1469 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1472 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1474 if (pi
->remote_max_tx
&&
1475 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1476 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1480 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1481 bt_cb(skb
)->retries
++;
1482 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1484 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1485 control
|= L2CAP_CTRL_FINAL
;
1486 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1489 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1490 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1492 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1494 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1495 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1496 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1499 l2cap_do_send(sk
, tx_skb
);
1502 static int l2cap_ertm_send(struct sock
*sk
)
1504 struct sk_buff
*skb
, *tx_skb
;
1505 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1509 if (sk
->sk_state
!= BT_CONNECTED
)
1512 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1514 if (pi
->remote_max_tx
&&
1515 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1516 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1520 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1522 bt_cb(skb
)->retries
++;
1524 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1525 control
&= L2CAP_CTRL_SAR
;
1527 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1528 control
|= L2CAP_CTRL_FINAL
;
1529 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1531 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1532 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1533 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1536 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1537 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1538 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1541 l2cap_do_send(sk
, tx_skb
);
1543 __mod_retrans_timer();
1545 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1546 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1548 pi
->unacked_frames
++;
1551 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1552 sk
->sk_send_head
= NULL
;
1554 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1562 static int l2cap_retransmit_frames(struct sock
*sk
)
1564 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1567 if (!skb_queue_empty(TX_QUEUE(sk
)))
1568 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1570 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1571 ret
= l2cap_ertm_send(sk
);
1575 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1577 struct sock
*sk
= (struct sock
*)pi
;
1580 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1582 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1583 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1584 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1585 l2cap_send_sframe(pi
, control
);
1589 if (l2cap_ertm_send(sk
) > 0)
1592 control
|= L2CAP_SUPER_RCV_READY
;
1593 l2cap_send_sframe(pi
, control
);
1596 static void l2cap_send_srejtail(struct sock
*sk
)
1598 struct srej_list
*tail
;
1601 control
= L2CAP_SUPER_SELECT_REJECT
;
1602 control
|= L2CAP_CTRL_FINAL
;
1604 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1605 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1607 l2cap_send_sframe(l2cap_pi(sk
), control
);
1610 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1612 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1613 struct sk_buff
**frag
;
1616 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1622 /* Continuation fragments (no L2CAP header) */
1623 frag
= &skb_shinfo(skb
)->frag_list
;
1625 count
= min_t(unsigned int, conn
->mtu
, len
);
1627 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1630 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1636 frag
= &(*frag
)->next
;
1642 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1644 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1645 struct sk_buff
*skb
;
1646 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1647 struct l2cap_hdr
*lh
;
1649 BT_DBG("sk %p len %d", sk
, (int)len
);
1651 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1652 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1653 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1655 return ERR_PTR(-ENOMEM
);
1657 /* Create L2CAP header */
1658 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1659 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1660 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1661 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1663 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1664 if (unlikely(err
< 0)) {
1666 return ERR_PTR(err
);
1671 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1673 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1674 struct sk_buff
*skb
;
1675 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1676 struct l2cap_hdr
*lh
;
1678 BT_DBG("sk %p len %d", sk
, (int)len
);
1680 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1681 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1682 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1684 return ERR_PTR(-ENOMEM
);
1686 /* Create L2CAP header */
1687 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1688 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1689 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1691 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1692 if (unlikely(err
< 0)) {
1694 return ERR_PTR(err
);
1699 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1701 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1702 struct sk_buff
*skb
;
1703 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1704 struct l2cap_hdr
*lh
;
1706 BT_DBG("sk %p len %d", sk
, (int)len
);
1709 return ERR_PTR(-ENOTCONN
);
1714 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1717 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1718 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1719 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1721 return ERR_PTR(-ENOMEM
);
1723 /* Create L2CAP header */
1724 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1725 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1726 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1727 put_unaligned_le16(control
, skb_put(skb
, 2));
1729 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1731 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1732 if (unlikely(err
< 0)) {
1734 return ERR_PTR(err
);
1737 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1738 put_unaligned_le16(0, skb_put(skb
, 2));
1740 bt_cb(skb
)->retries
= 0;
1744 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1746 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1747 struct sk_buff
*skb
;
1748 struct sk_buff_head sar_queue
;
1752 skb_queue_head_init(&sar_queue
);
1753 control
= L2CAP_SDU_START
;
1754 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1756 return PTR_ERR(skb
);
1758 __skb_queue_tail(&sar_queue
, skb
);
1759 len
-= pi
->remote_mps
;
1760 size
+= pi
->remote_mps
;
1765 if (len
> pi
->remote_mps
) {
1766 control
= L2CAP_SDU_CONTINUE
;
1767 buflen
= pi
->remote_mps
;
1769 control
= L2CAP_SDU_END
;
1773 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1775 skb_queue_purge(&sar_queue
);
1776 return PTR_ERR(skb
);
1779 __skb_queue_tail(&sar_queue
, skb
);
1783 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1784 if (sk
->sk_send_head
== NULL
)
1785 sk
->sk_send_head
= sar_queue
.next
;
1790 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1792 struct sock
*sk
= sock
->sk
;
1793 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1794 struct sk_buff
*skb
;
1798 BT_DBG("sock %p, sk %p", sock
, sk
);
1800 err
= sock_error(sk
);
1804 if (msg
->msg_flags
& MSG_OOB
)
1809 if (sk
->sk_state
!= BT_CONNECTED
) {
1814 /* Connectionless channel */
1815 if (sk
->sk_type
== SOCK_DGRAM
) {
1816 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1820 l2cap_do_send(sk
, skb
);
1827 case L2CAP_MODE_BASIC
:
1828 /* Check outgoing MTU */
1829 if (len
> pi
->omtu
) {
1834 /* Create a basic PDU */
1835 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1841 l2cap_do_send(sk
, skb
);
1845 case L2CAP_MODE_ERTM
:
1846 case L2CAP_MODE_STREAMING
:
1847 /* Entire SDU fits into one PDU */
1848 if (len
<= pi
->remote_mps
) {
1849 control
= L2CAP_SDU_UNSEGMENTED
;
1850 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1855 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1857 if (sk
->sk_send_head
== NULL
)
1858 sk
->sk_send_head
= skb
;
1861 /* Segment SDU into multiples PDUs */
1862 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1867 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1868 err
= l2cap_streaming_send(sk
);
1870 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&&
1871 pi
->conn_state
&& L2CAP_CONN_WAIT_F
) {
1875 err
= l2cap_ertm_send(sk
);
1883 BT_DBG("bad state %1.1x", pi
->mode
);
1892 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1894 struct sock
*sk
= sock
->sk
;
1898 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1899 struct l2cap_conn_rsp rsp
;
1901 sk
->sk_state
= BT_CONFIG
;
1903 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1904 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1905 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1906 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1907 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1908 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1916 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1919 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1921 struct sock
*sk
= sock
->sk
;
1922 struct l2cap_options opts
;
1926 BT_DBG("sk %p", sk
);
1932 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1933 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1934 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1935 opts
.mode
= l2cap_pi(sk
)->mode
;
1936 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1937 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1938 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1940 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1941 if (copy_from_user((char *) &opts
, optval
, len
)) {
1946 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1951 l2cap_pi(sk
)->mode
= opts
.mode
;
1952 switch (l2cap_pi(sk
)->mode
) {
1953 case L2CAP_MODE_BASIC
:
1954 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_STATE2_DEVICE
;
1956 case L2CAP_MODE_ERTM
:
1957 case L2CAP_MODE_STREAMING
:
1966 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1967 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1968 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1969 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1970 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
1974 if (get_user(opt
, (u32 __user
*) optval
)) {
1979 if (opt
& L2CAP_LM_AUTH
)
1980 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1981 if (opt
& L2CAP_LM_ENCRYPT
)
1982 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1983 if (opt
& L2CAP_LM_SECURE
)
1984 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1986 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1987 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1999 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
2001 struct sock
*sk
= sock
->sk
;
2002 struct bt_security sec
;
2006 BT_DBG("sk %p", sk
);
2008 if (level
== SOL_L2CAP
)
2009 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
2011 if (level
!= SOL_BLUETOOTH
)
2012 return -ENOPROTOOPT
;
2018 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2019 && sk
->sk_type
!= SOCK_RAW
) {
2024 sec
.level
= BT_SECURITY_LOW
;
2026 len
= min_t(unsigned int, sizeof(sec
), optlen
);
2027 if (copy_from_user((char *) &sec
, optval
, len
)) {
2032 if (sec
.level
< BT_SECURITY_LOW
||
2033 sec
.level
> BT_SECURITY_HIGH
) {
2038 l2cap_pi(sk
)->sec_level
= sec
.level
;
2041 case BT_DEFER_SETUP
:
2042 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2047 if (get_user(opt
, (u32 __user
*) optval
)) {
2052 bt_sk(sk
)->defer_setup
= opt
;
2064 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
2066 struct sock
*sk
= sock
->sk
;
2067 struct l2cap_options opts
;
2068 struct l2cap_conninfo cinfo
;
2072 BT_DBG("sk %p", sk
);
2074 if (get_user(len
, optlen
))
2081 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2082 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2083 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2084 opts
.mode
= l2cap_pi(sk
)->mode
;
2085 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2086 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2087 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2089 len
= min_t(unsigned int, len
, sizeof(opts
));
2090 if (copy_to_user(optval
, (char *) &opts
, len
))
2096 switch (l2cap_pi(sk
)->sec_level
) {
2097 case BT_SECURITY_LOW
:
2098 opt
= L2CAP_LM_AUTH
;
2100 case BT_SECURITY_MEDIUM
:
2101 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2103 case BT_SECURITY_HIGH
:
2104 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2112 if (l2cap_pi(sk
)->role_switch
)
2113 opt
|= L2CAP_LM_MASTER
;
2115 if (l2cap_pi(sk
)->force_reliable
)
2116 opt
|= L2CAP_LM_RELIABLE
;
2118 if (put_user(opt
, (u32 __user
*) optval
))
2122 case L2CAP_CONNINFO
:
2123 if (sk
->sk_state
!= BT_CONNECTED
&&
2124 !(sk
->sk_state
== BT_CONNECT2
&&
2125 bt_sk(sk
)->defer_setup
)) {
2130 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2131 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2133 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2134 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2148 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2150 struct sock
*sk
= sock
->sk
;
2151 struct bt_security sec
;
2154 BT_DBG("sk %p", sk
);
2156 if (level
== SOL_L2CAP
)
2157 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2159 if (level
!= SOL_BLUETOOTH
)
2160 return -ENOPROTOOPT
;
2162 if (get_user(len
, optlen
))
2169 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2170 && sk
->sk_type
!= SOCK_RAW
) {
2175 sec
.level
= l2cap_pi(sk
)->sec_level
;
2177 len
= min_t(unsigned int, len
, sizeof(sec
));
2178 if (copy_to_user(optval
, (char *) &sec
, len
))
2183 case BT_DEFER_SETUP
:
2184 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2189 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2203 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2205 struct sock
*sk
= sock
->sk
;
2208 BT_DBG("sock %p, sk %p", sock
, sk
);
2214 if (!sk
->sk_shutdown
) {
2215 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2216 err
= __l2cap_wait_ack(sk
);
2218 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2219 l2cap_sock_clear_timer(sk
);
2220 __l2cap_sock_close(sk
, 0);
2222 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2223 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2227 if (!err
&& sk
->sk_err
)
2234 static int l2cap_sock_release(struct socket
*sock
)
2236 struct sock
*sk
= sock
->sk
;
2239 BT_DBG("sock %p, sk %p", sock
, sk
);
2244 err
= l2cap_sock_shutdown(sock
, 2);
2247 l2cap_sock_kill(sk
);
2251 static void l2cap_chan_ready(struct sock
*sk
)
2253 struct sock
*parent
= bt_sk(sk
)->parent
;
2255 BT_DBG("sk %p, parent %p", sk
, parent
);
2257 l2cap_pi(sk
)->conf_state
= 0;
2258 l2cap_sock_clear_timer(sk
);
2261 /* Outgoing channel.
2262 * Wake up socket sleeping on connect.
2264 sk
->sk_state
= BT_CONNECTED
;
2265 sk
->sk_state_change(sk
);
2267 /* Incoming channel.
2268 * Wake up socket sleeping on accept.
2270 parent
->sk_data_ready(parent
, 0);
2274 /* Copy frame to all raw sockets on that connection */
2275 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2277 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2278 struct sk_buff
*nskb
;
2281 BT_DBG("conn %p", conn
);
2283 read_lock(&l
->lock
);
2284 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2285 if (sk
->sk_type
!= SOCK_RAW
)
2288 /* Don't send frame to the socket it came from */
2291 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2295 if (sock_queue_rcv_skb(sk
, nskb
))
2298 read_unlock(&l
->lock
);
2301 /* ---- L2CAP signalling commands ---- */
2302 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2303 u8 code
, u8 ident
, u16 dlen
, void *data
)
2305 struct sk_buff
*skb
, **frag
;
2306 struct l2cap_cmd_hdr
*cmd
;
2307 struct l2cap_hdr
*lh
;
2310 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2311 conn
, code
, ident
, dlen
);
2313 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2314 count
= min_t(unsigned int, conn
->mtu
, len
);
2316 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2320 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2321 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2322 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2324 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2327 cmd
->len
= cpu_to_le16(dlen
);
2330 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2331 memcpy(skb_put(skb
, count
), data
, count
);
2337 /* Continuation fragments (no L2CAP header) */
2338 frag
= &skb_shinfo(skb
)->frag_list
;
2340 count
= min_t(unsigned int, conn
->mtu
, len
);
2342 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2346 memcpy(skb_put(*frag
, count
), data
, count
);
2351 frag
= &(*frag
)->next
;
2361 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2363 struct l2cap_conf_opt
*opt
= *ptr
;
2366 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2374 *val
= *((u8
*) opt
->val
);
2378 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2382 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2386 *val
= (unsigned long) opt
->val
;
2390 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2394 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2396 struct l2cap_conf_opt
*opt
= *ptr
;
2398 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2405 *((u8
*) opt
->val
) = val
;
2409 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2413 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2417 memcpy(opt
->val
, (void *) val
, len
);
2421 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2424 static void l2cap_ack_timeout(unsigned long arg
)
2426 struct sock
*sk
= (void *) arg
;
2429 l2cap_send_ack(l2cap_pi(sk
));
2433 static inline void l2cap_ertm_init(struct sock
*sk
)
2435 l2cap_pi(sk
)->expected_ack_seq
= 0;
2436 l2cap_pi(sk
)->unacked_frames
= 0;
2437 l2cap_pi(sk
)->buffer_seq
= 0;
2438 l2cap_pi(sk
)->num_acked
= 0;
2439 l2cap_pi(sk
)->frames_sent
= 0;
2441 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2442 l2cap_retrans_timeout
, (unsigned long) sk
);
2443 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2444 l2cap_monitor_timeout
, (unsigned long) sk
);
2445 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2446 l2cap_ack_timeout
, (unsigned long) sk
);
2448 __skb_queue_head_init(SREJ_QUEUE(sk
));
2449 __skb_queue_head_init(BUSY_QUEUE(sk
));
2451 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2453 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
2456 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2459 case L2CAP_MODE_STREAMING
:
2460 case L2CAP_MODE_ERTM
:
2461 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2465 return L2CAP_MODE_BASIC
;
2469 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2471 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2472 struct l2cap_conf_req
*req
= data
;
2473 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2474 void *ptr
= req
->data
;
2476 BT_DBG("sk %p", sk
);
2478 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2482 case L2CAP_MODE_STREAMING
:
2483 case L2CAP_MODE_ERTM
:
2484 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
2489 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2495 case L2CAP_MODE_BASIC
:
2496 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2497 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2499 rfc
.mode
= L2CAP_MODE_BASIC
;
2501 rfc
.max_transmit
= 0;
2502 rfc
.retrans_timeout
= 0;
2503 rfc
.monitor_timeout
= 0;
2504 rfc
.max_pdu_size
= 0;
2508 case L2CAP_MODE_ERTM
:
2509 rfc
.mode
= L2CAP_MODE_ERTM
;
2510 rfc
.txwin_size
= pi
->tx_win
;
2511 rfc
.max_transmit
= pi
->max_tx
;
2512 rfc
.retrans_timeout
= 0;
2513 rfc
.monitor_timeout
= 0;
2514 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2515 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2516 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2518 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2521 if (pi
->fcs
== L2CAP_FCS_NONE
||
2522 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2523 pi
->fcs
= L2CAP_FCS_NONE
;
2524 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2528 case L2CAP_MODE_STREAMING
:
2529 rfc
.mode
= L2CAP_MODE_STREAMING
;
2531 rfc
.max_transmit
= 0;
2532 rfc
.retrans_timeout
= 0;
2533 rfc
.monitor_timeout
= 0;
2534 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2535 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2536 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2538 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2541 if (pi
->fcs
== L2CAP_FCS_NONE
||
2542 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2543 pi
->fcs
= L2CAP_FCS_NONE
;
2544 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2549 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2550 (unsigned long) &rfc
);
2552 /* FIXME: Need actual value of the flush timeout */
2553 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2554 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2556 req
->dcid
= cpu_to_le16(pi
->dcid
);
2557 req
->flags
= cpu_to_le16(0);
2562 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2564 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2565 struct l2cap_conf_rsp
*rsp
= data
;
2566 void *ptr
= rsp
->data
;
2567 void *req
= pi
->conf_req
;
2568 int len
= pi
->conf_len
;
2569 int type
, hint
, olen
;
2571 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2572 u16 mtu
= L2CAP_DEFAULT_MTU
;
2573 u16 result
= L2CAP_CONF_SUCCESS
;
2575 BT_DBG("sk %p", sk
);
2577 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2578 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2580 hint
= type
& L2CAP_CONF_HINT
;
2581 type
&= L2CAP_CONF_MASK
;
2584 case L2CAP_CONF_MTU
:
2588 case L2CAP_CONF_FLUSH_TO
:
2592 case L2CAP_CONF_QOS
:
2595 case L2CAP_CONF_RFC
:
2596 if (olen
== sizeof(rfc
))
2597 memcpy(&rfc
, (void *) val
, olen
);
2600 case L2CAP_CONF_FCS
:
2601 if (val
== L2CAP_FCS_NONE
)
2602 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2610 result
= L2CAP_CONF_UNKNOWN
;
2611 *((u8
*) ptr
++) = type
;
2616 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2620 case L2CAP_MODE_STREAMING
:
2621 case L2CAP_MODE_ERTM
:
2622 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2623 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2624 pi
->conn
->feat_mask
);
2628 if (pi
->mode
!= rfc
.mode
)
2629 return -ECONNREFUSED
;
2635 if (pi
->mode
!= rfc
.mode
) {
2636 result
= L2CAP_CONF_UNACCEPT
;
2637 rfc
.mode
= pi
->mode
;
2639 if (pi
->num_conf_rsp
== 1)
2640 return -ECONNREFUSED
;
2642 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2643 sizeof(rfc
), (unsigned long) &rfc
);
2647 if (result
== L2CAP_CONF_SUCCESS
) {
2648 /* Configure output options and let the other side know
2649 * which ones we don't like. */
2651 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2652 result
= L2CAP_CONF_UNACCEPT
;
2655 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2657 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2660 case L2CAP_MODE_BASIC
:
2661 pi
->fcs
= L2CAP_FCS_NONE
;
2662 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2665 case L2CAP_MODE_ERTM
:
2666 pi
->remote_tx_win
= rfc
.txwin_size
;
2667 pi
->remote_max_tx
= rfc
.max_transmit
;
2668 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2669 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2671 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2673 rfc
.retrans_timeout
=
2674 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2675 rfc
.monitor_timeout
=
2676 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2678 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2680 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2681 sizeof(rfc
), (unsigned long) &rfc
);
2685 case L2CAP_MODE_STREAMING
:
2686 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2687 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2689 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2691 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2693 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2694 sizeof(rfc
), (unsigned long) &rfc
);
2699 result
= L2CAP_CONF_UNACCEPT
;
2701 memset(&rfc
, 0, sizeof(rfc
));
2702 rfc
.mode
= pi
->mode
;
2705 if (result
== L2CAP_CONF_SUCCESS
)
2706 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2708 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2709 rsp
->result
= cpu_to_le16(result
);
2710 rsp
->flags
= cpu_to_le16(0x0000);
2715 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2717 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2718 struct l2cap_conf_req
*req
= data
;
2719 void *ptr
= req
->data
;
2722 struct l2cap_conf_rfc rfc
;
2724 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2726 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2727 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2730 case L2CAP_CONF_MTU
:
2731 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2732 *result
= L2CAP_CONF_UNACCEPT
;
2733 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2736 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2739 case L2CAP_CONF_FLUSH_TO
:
2741 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2745 case L2CAP_CONF_RFC
:
2746 if (olen
== sizeof(rfc
))
2747 memcpy(&rfc
, (void *)val
, olen
);
2749 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2750 rfc
.mode
!= pi
->mode
)
2751 return -ECONNREFUSED
;
2755 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2756 sizeof(rfc
), (unsigned long) &rfc
);
2761 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
2762 return -ECONNREFUSED
;
2764 pi
->mode
= rfc
.mode
;
2766 if (*result
== L2CAP_CONF_SUCCESS
) {
2768 case L2CAP_MODE_ERTM
:
2769 pi
->remote_tx_win
= rfc
.txwin_size
;
2770 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2771 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2772 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2774 case L2CAP_MODE_STREAMING
:
2775 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2779 req
->dcid
= cpu_to_le16(pi
->dcid
);
2780 req
->flags
= cpu_to_le16(0x0000);
2785 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2787 struct l2cap_conf_rsp
*rsp
= data
;
2788 void *ptr
= rsp
->data
;
2790 BT_DBG("sk %p", sk
);
2792 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2793 rsp
->result
= cpu_to_le16(result
);
2794 rsp
->flags
= cpu_to_le16(flags
);
2799 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2801 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2804 struct l2cap_conf_rfc rfc
;
2806 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2808 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2811 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2812 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2815 case L2CAP_CONF_RFC
:
2816 if (olen
== sizeof(rfc
))
2817 memcpy(&rfc
, (void *)val
, olen
);
2824 case L2CAP_MODE_ERTM
:
2825 pi
->remote_tx_win
= rfc
.txwin_size
;
2826 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2827 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2828 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2830 case L2CAP_MODE_STREAMING
:
2831 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2835 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2837 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2839 if (rej
->reason
!= 0x0000)
2842 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2843 cmd
->ident
== conn
->info_ident
) {
2844 del_timer(&conn
->info_timer
);
2846 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2847 conn
->info_ident
= 0;
2849 l2cap_conn_start(conn
);
2855 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2857 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2858 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2859 struct l2cap_conn_rsp rsp
;
2860 struct sock
*sk
, *parent
;
2861 int result
, status
= L2CAP_CS_NO_INFO
;
2863 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2864 __le16 psm
= req
->psm
;
2866 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2868 /* Check if we have socket listening on psm */
2869 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2871 result
= L2CAP_CR_BAD_PSM
;
2875 /* Check if the ACL is secure enough (if not SDP) */
2876 if (psm
!= cpu_to_le16(0x0001) &&
2877 !hci_conn_check_link_mode(conn
->hcon
)) {
2878 conn
->disc_reason
= 0x05;
2879 result
= L2CAP_CR_SEC_BLOCK
;
2883 result
= L2CAP_CR_NO_MEM
;
2885 /* Check for backlog size */
2886 if (sk_acceptq_is_full(parent
)) {
2887 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2891 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2895 write_lock_bh(&list
->lock
);
2897 /* Check if we already have channel with that dcid */
2898 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2899 write_unlock_bh(&list
->lock
);
2900 sock_set_flag(sk
, SOCK_ZAPPED
);
2901 l2cap_sock_kill(sk
);
2905 hci_conn_hold(conn
->hcon
);
2907 l2cap_sock_init(sk
, parent
);
2908 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2909 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2910 l2cap_pi(sk
)->psm
= psm
;
2911 l2cap_pi(sk
)->dcid
= scid
;
2913 __l2cap_chan_add(conn
, sk
, parent
);
2914 dcid
= l2cap_pi(sk
)->scid
;
2916 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2918 l2cap_pi(sk
)->ident
= cmd
->ident
;
2920 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2921 if (l2cap_check_security(sk
)) {
2922 if (bt_sk(sk
)->defer_setup
) {
2923 sk
->sk_state
= BT_CONNECT2
;
2924 result
= L2CAP_CR_PEND
;
2925 status
= L2CAP_CS_AUTHOR_PEND
;
2926 parent
->sk_data_ready(parent
, 0);
2928 sk
->sk_state
= BT_CONFIG
;
2929 result
= L2CAP_CR_SUCCESS
;
2930 status
= L2CAP_CS_NO_INFO
;
2933 sk
->sk_state
= BT_CONNECT2
;
2934 result
= L2CAP_CR_PEND
;
2935 status
= L2CAP_CS_AUTHEN_PEND
;
2938 sk
->sk_state
= BT_CONNECT2
;
2939 result
= L2CAP_CR_PEND
;
2940 status
= L2CAP_CS_NO_INFO
;
2943 write_unlock_bh(&list
->lock
);
2946 bh_unlock_sock(parent
);
2949 rsp
.scid
= cpu_to_le16(scid
);
2950 rsp
.dcid
= cpu_to_le16(dcid
);
2951 rsp
.result
= cpu_to_le16(result
);
2952 rsp
.status
= cpu_to_le16(status
);
2953 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2955 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2956 struct l2cap_info_req info
;
2957 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2959 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2960 conn
->info_ident
= l2cap_get_ident(conn
);
2962 mod_timer(&conn
->info_timer
, jiffies
+
2963 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2965 l2cap_send_cmd(conn
, conn
->info_ident
,
2966 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2972 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2974 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2975 u16 scid
, dcid
, result
, status
;
2979 scid
= __le16_to_cpu(rsp
->scid
);
2980 dcid
= __le16_to_cpu(rsp
->dcid
);
2981 result
= __le16_to_cpu(rsp
->result
);
2982 status
= __le16_to_cpu(rsp
->status
);
2984 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2987 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2991 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2997 case L2CAP_CR_SUCCESS
:
2998 sk
->sk_state
= BT_CONFIG
;
2999 l2cap_pi(sk
)->ident
= 0;
3000 l2cap_pi(sk
)->dcid
= dcid
;
3001 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3002 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
3004 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3005 l2cap_build_conf_req(sk
, req
), req
);
3006 l2cap_pi(sk
)->num_conf_req
++;
3010 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3014 l2cap_chan_del(sk
, ECONNREFUSED
);
3022 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3024 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3030 dcid
= __le16_to_cpu(req
->dcid
);
3031 flags
= __le16_to_cpu(req
->flags
);
3033 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3035 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3039 if (sk
->sk_state
!= BT_CONFIG
) {
3040 struct l2cap_cmd_rej rej
;
3042 rej
.reason
= cpu_to_le16(0x0002);
3043 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3048 /* Reject if config buffer is too small. */
3049 len
= cmd_len
- sizeof(*req
);
3050 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
3051 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3052 l2cap_build_conf_rsp(sk
, rsp
,
3053 L2CAP_CONF_REJECT
, flags
), rsp
);
3058 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
3059 l2cap_pi(sk
)->conf_len
+= len
;
3061 if (flags
& 0x0001) {
3062 /* Incomplete config. Send empty response. */
3063 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3064 l2cap_build_conf_rsp(sk
, rsp
,
3065 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3069 /* Complete config. */
3070 len
= l2cap_parse_conf_req(sk
, rsp
);
3072 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3076 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3077 l2cap_pi(sk
)->num_conf_rsp
++;
3079 /* Reset config buffer. */
3080 l2cap_pi(sk
)->conf_len
= 0;
3082 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3085 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3086 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3087 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3088 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3090 sk
->sk_state
= BT_CONNECTED
;
3092 l2cap_pi(sk
)->next_tx_seq
= 0;
3093 l2cap_pi(sk
)->expected_tx_seq
= 0;
3094 __skb_queue_head_init(TX_QUEUE(sk
));
3095 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3096 l2cap_ertm_init(sk
);
3098 l2cap_chan_ready(sk
);
3102 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3104 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3105 l2cap_build_conf_req(sk
, buf
), buf
);
3106 l2cap_pi(sk
)->num_conf_req
++;
3114 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3116 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3117 u16 scid
, flags
, result
;
3119 int len
= cmd
->len
- sizeof(*rsp
);
3121 scid
= __le16_to_cpu(rsp
->scid
);
3122 flags
= __le16_to_cpu(rsp
->flags
);
3123 result
= __le16_to_cpu(rsp
->result
);
3125 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3126 scid
, flags
, result
);
3128 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3133 case L2CAP_CONF_SUCCESS
:
3134 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3137 case L2CAP_CONF_UNACCEPT
:
3138 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3141 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3142 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3146 /* throw out any old stored conf requests */
3147 result
= L2CAP_CONF_SUCCESS
;
3148 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3151 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3155 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3156 L2CAP_CONF_REQ
, len
, req
);
3157 l2cap_pi(sk
)->num_conf_req
++;
3158 if (result
!= L2CAP_CONF_SUCCESS
)
3164 sk
->sk_err
= ECONNRESET
;
3165 l2cap_sock_set_timer(sk
, HZ
* 5);
3166 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3173 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3175 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3176 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3177 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3178 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3180 sk
->sk_state
= BT_CONNECTED
;
3181 l2cap_pi(sk
)->next_tx_seq
= 0;
3182 l2cap_pi(sk
)->expected_tx_seq
= 0;
3183 __skb_queue_head_init(TX_QUEUE(sk
));
3184 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3185 l2cap_ertm_init(sk
);
3187 l2cap_chan_ready(sk
);
3195 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3197 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3198 struct l2cap_disconn_rsp rsp
;
3202 scid
= __le16_to_cpu(req
->scid
);
3203 dcid
= __le16_to_cpu(req
->dcid
);
3205 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3207 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3211 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3212 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3213 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3215 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3217 l2cap_chan_del(sk
, ECONNRESET
);
3220 l2cap_sock_kill(sk
);
3224 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3226 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3230 scid
= __le16_to_cpu(rsp
->scid
);
3231 dcid
= __le16_to_cpu(rsp
->dcid
);
3233 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3235 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3239 l2cap_chan_del(sk
, 0);
3242 l2cap_sock_kill(sk
);
3246 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3248 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3251 type
= __le16_to_cpu(req
->type
);
3253 BT_DBG("type 0x%4.4x", type
);
3255 if (type
== L2CAP_IT_FEAT_MASK
) {
3257 u32 feat_mask
= l2cap_feat_mask
;
3258 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3259 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3260 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3262 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3264 put_unaligned_le32(feat_mask
, rsp
->data
);
3265 l2cap_send_cmd(conn
, cmd
->ident
,
3266 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3267 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3269 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3270 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3271 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3272 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3273 l2cap_send_cmd(conn
, cmd
->ident
,
3274 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3276 struct l2cap_info_rsp rsp
;
3277 rsp
.type
= cpu_to_le16(type
);
3278 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3279 l2cap_send_cmd(conn
, cmd
->ident
,
3280 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3286 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3288 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3291 type
= __le16_to_cpu(rsp
->type
);
3292 result
= __le16_to_cpu(rsp
->result
);
3294 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3296 del_timer(&conn
->info_timer
);
3298 if (type
== L2CAP_IT_FEAT_MASK
) {
3299 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3301 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3302 struct l2cap_info_req req
;
3303 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3305 conn
->info_ident
= l2cap_get_ident(conn
);
3307 l2cap_send_cmd(conn
, conn
->info_ident
,
3308 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3310 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3311 conn
->info_ident
= 0;
3313 l2cap_conn_start(conn
);
3315 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3316 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3317 conn
->info_ident
= 0;
3319 l2cap_conn_start(conn
);
3325 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3327 u8
*data
= skb
->data
;
3329 struct l2cap_cmd_hdr cmd
;
3332 l2cap_raw_recv(conn
, skb
);
3334 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3336 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3337 data
+= L2CAP_CMD_HDR_SIZE
;
3338 len
-= L2CAP_CMD_HDR_SIZE
;
3340 cmd_len
= le16_to_cpu(cmd
.len
);
3342 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3344 if (cmd_len
> len
|| !cmd
.ident
) {
3345 BT_DBG("corrupted command");
3350 case L2CAP_COMMAND_REJ
:
3351 l2cap_command_rej(conn
, &cmd
, data
);
3354 case L2CAP_CONN_REQ
:
3355 err
= l2cap_connect_req(conn
, &cmd
, data
);
3358 case L2CAP_CONN_RSP
:
3359 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3362 case L2CAP_CONF_REQ
:
3363 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3366 case L2CAP_CONF_RSP
:
3367 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3370 case L2CAP_DISCONN_REQ
:
3371 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3374 case L2CAP_DISCONN_RSP
:
3375 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3378 case L2CAP_ECHO_REQ
:
3379 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3382 case L2CAP_ECHO_RSP
:
3385 case L2CAP_INFO_REQ
:
3386 err
= l2cap_information_req(conn
, &cmd
, data
);
3389 case L2CAP_INFO_RSP
:
3390 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3394 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3400 struct l2cap_cmd_rej rej
;
3401 BT_DBG("error %d", err
);
3403 /* FIXME: Map err to a valid reason */
3404 rej
.reason
= cpu_to_le16(0);
3405 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3415 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3417 u16 our_fcs
, rcv_fcs
;
3418 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3420 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3421 skb_trim(skb
, skb
->len
- 2);
3422 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3423 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3425 if (our_fcs
!= rcv_fcs
)
3431 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3433 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3436 pi
->frames_sent
= 0;
3438 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3440 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3441 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3442 l2cap_send_sframe(pi
, control
);
3443 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3446 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3447 l2cap_retransmit_frames(sk
);
3449 l2cap_ertm_send(sk
);
3451 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3452 pi
->frames_sent
== 0) {
3453 control
|= L2CAP_SUPER_RCV_READY
;
3454 l2cap_send_sframe(pi
, control
);
3458 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3460 struct sk_buff
*next_skb
;
3461 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3462 int tx_seq_offset
, next_tx_seq_offset
;
3464 bt_cb(skb
)->tx_seq
= tx_seq
;
3465 bt_cb(skb
)->sar
= sar
;
3467 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3469 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3473 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3474 if (tx_seq_offset
< 0)
3475 tx_seq_offset
+= 64;
3478 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3481 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3482 pi
->buffer_seq
) % 64;
3483 if (next_tx_seq_offset
< 0)
3484 next_tx_seq_offset
+= 64;
3486 if (next_tx_seq_offset
> tx_seq_offset
) {
3487 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3491 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3494 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3496 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3501 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3503 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3504 struct sk_buff
*_skb
;
3507 switch (control
& L2CAP_CTRL_SAR
) {
3508 case L2CAP_SDU_UNSEGMENTED
:
3509 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3512 err
= sock_queue_rcv_skb(sk
, skb
);
3518 case L2CAP_SDU_START
:
3519 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3522 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3524 if (pi
->sdu_len
> pi
->imtu
)
3527 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3531 /* pull sdu_len bytes only after alloc, because of Local Busy
3532 * condition we have to be sure that this will be executed
3533 * only once, i.e., when alloc does not fail */
3536 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3538 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3539 pi
->partial_sdu_len
= skb
->len
;
3542 case L2CAP_SDU_CONTINUE
:
3543 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3549 pi
->partial_sdu_len
+= skb
->len
;
3550 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3553 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3558 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3564 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3565 pi
->partial_sdu_len
+= skb
->len
;
3567 if (pi
->partial_sdu_len
> pi
->imtu
)
3570 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3573 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3576 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3578 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3582 err
= sock_queue_rcv_skb(sk
, _skb
);
3585 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3589 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3590 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3604 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3609 static int l2cap_try_push_rx_skb(struct sock
*sk
)
3611 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3612 struct sk_buff
*skb
;
3616 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3617 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3618 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3620 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3624 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3627 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3630 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3631 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3632 l2cap_send_sframe(pi
, control
);
3633 l2cap_pi(sk
)->retry_count
= 1;
3635 del_timer(&pi
->retrans_timer
);
3636 __mod_monitor_timer();
3638 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3641 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3642 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3644 BT_DBG("sk %p, Exit local busy", sk
);
3649 static void l2cap_busy_work(struct work_struct
*work
)
3651 DECLARE_WAITQUEUE(wait
, current
);
3652 struct l2cap_pinfo
*pi
=
3653 container_of(work
, struct l2cap_pinfo
, busy_work
);
3654 struct sock
*sk
= (struct sock
*)pi
;
3655 int n_tries
= 0, timeo
= HZ
/5, err
;
3656 struct sk_buff
*skb
;
3660 add_wait_queue(sk_sleep(sk
), &wait
);
3661 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3662 set_current_state(TASK_INTERRUPTIBLE
);
3664 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3666 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3673 if (signal_pending(current
)) {
3674 err
= sock_intr_errno(timeo
);
3679 timeo
= schedule_timeout(timeo
);
3682 err
= sock_error(sk
);
3686 if (l2cap_try_push_rx_skb(sk
) == 0)
3690 set_current_state(TASK_RUNNING
);
3691 remove_wait_queue(sk_sleep(sk
), &wait
);
3696 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3698 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3701 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3702 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3703 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3704 return l2cap_try_push_rx_skb(sk
);
3709 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3711 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3715 /* Busy Condition */
3716 BT_DBG("sk %p, Enter local busy", sk
);
3718 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3719 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3720 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3722 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3723 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3724 l2cap_send_sframe(pi
, sctrl
);
3726 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3728 del_timer(&pi
->ack_timer
);
3730 queue_work(_busy_wq
, &pi
->busy_work
);
3735 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3737 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3738 struct sk_buff
*_skb
;
3742 * TODO: We have to notify the userland if some data is lost with the
3746 switch (control
& L2CAP_CTRL_SAR
) {
3747 case L2CAP_SDU_UNSEGMENTED
:
3748 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3753 err
= sock_queue_rcv_skb(sk
, skb
);
3759 case L2CAP_SDU_START
:
3760 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3765 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3768 if (pi
->sdu_len
> pi
->imtu
) {
3773 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3779 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3781 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3782 pi
->partial_sdu_len
= skb
->len
;
3786 case L2CAP_SDU_CONTINUE
:
3787 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3790 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3792 pi
->partial_sdu_len
+= skb
->len
;
3793 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3801 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3804 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3806 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3807 pi
->partial_sdu_len
+= skb
->len
;
3809 if (pi
->partial_sdu_len
> pi
->imtu
)
3812 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3813 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3814 err
= sock_queue_rcv_skb(sk
, _skb
);
3829 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3831 struct sk_buff
*skb
;
3834 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3835 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3838 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3839 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3840 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3841 l2cap_pi(sk
)->buffer_seq_srej
=
3842 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3843 tx_seq
= (tx_seq
+ 1) % 64;
3847 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3849 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3850 struct srej_list
*l
, *tmp
;
3853 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3854 if (l
->tx_seq
== tx_seq
) {
3859 control
= L2CAP_SUPER_SELECT_REJECT
;
3860 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3861 l2cap_send_sframe(pi
, control
);
3863 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3867 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3869 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3870 struct srej_list
*new;
3873 while (tx_seq
!= pi
->expected_tx_seq
) {
3874 control
= L2CAP_SUPER_SELECT_REJECT
;
3875 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3876 l2cap_send_sframe(pi
, control
);
3878 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3879 new->tx_seq
= pi
->expected_tx_seq
;
3880 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3881 list_add_tail(&new->list
, SREJ_LIST(sk
));
3883 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3886 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3888 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3889 u8 tx_seq
= __get_txseq(rx_control
);
3890 u8 req_seq
= __get_reqseq(rx_control
);
3891 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3892 int tx_seq_offset
, expected_tx_seq_offset
;
3893 int num_to_ack
= (pi
->tx_win
/6) + 1;
3896 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3899 if (L2CAP_CTRL_FINAL
& rx_control
&&
3900 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3901 del_timer(&pi
->monitor_timer
);
3902 if (pi
->unacked_frames
> 0)
3903 __mod_retrans_timer();
3904 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3907 pi
->expected_ack_seq
= req_seq
;
3908 l2cap_drop_acked_frames(sk
);
3910 if (tx_seq
== pi
->expected_tx_seq
)
3913 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3914 if (tx_seq_offset
< 0)
3915 tx_seq_offset
+= 64;
3917 /* invalid tx_seq */
3918 if (tx_seq_offset
>= pi
->tx_win
) {
3919 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3923 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3926 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3927 struct srej_list
*first
;
3929 first
= list_first_entry(SREJ_LIST(sk
),
3930 struct srej_list
, list
);
3931 if (tx_seq
== first
->tx_seq
) {
3932 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3933 l2cap_check_srej_gap(sk
, tx_seq
);
3935 list_del(&first
->list
);
3938 if (list_empty(SREJ_LIST(sk
))) {
3939 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3940 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3942 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
3945 struct srej_list
*l
;
3947 /* duplicated tx_seq */
3948 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3951 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3952 if (l
->tx_seq
== tx_seq
) {
3953 l2cap_resend_srejframe(sk
, tx_seq
);
3957 l2cap_send_srejframe(sk
, tx_seq
);
3960 expected_tx_seq_offset
=
3961 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3962 if (expected_tx_seq_offset
< 0)
3963 expected_tx_seq_offset
+= 64;
3965 /* duplicated tx_seq */
3966 if (tx_seq_offset
< expected_tx_seq_offset
)
3969 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3971 BT_DBG("sk %p, Enter SREJ", sk
);
3973 INIT_LIST_HEAD(SREJ_LIST(sk
));
3974 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3976 __skb_queue_head_init(SREJ_QUEUE(sk
));
3977 __skb_queue_head_init(BUSY_QUEUE(sk
));
3978 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3980 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3982 l2cap_send_srejframe(sk
, tx_seq
);
3984 del_timer(&pi
->ack_timer
);
3989 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3991 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3992 bt_cb(skb
)->tx_seq
= tx_seq
;
3993 bt_cb(skb
)->sar
= sar
;
3994 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3998 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
4002 if (rx_control
& L2CAP_CTRL_FINAL
) {
4003 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4004 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4006 l2cap_retransmit_frames(sk
);
4011 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
4012 if (pi
->num_acked
== num_to_ack
- 1)
4022 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
4024 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4026 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
4029 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
4030 l2cap_drop_acked_frames(sk
);
4032 if (rx_control
& L2CAP_CTRL_POLL
) {
4033 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4034 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4035 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4036 (pi
->unacked_frames
> 0))
4037 __mod_retrans_timer();
4039 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4040 l2cap_send_srejtail(sk
);
4042 l2cap_send_i_or_rr_or_rnr(sk
);
4045 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4046 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4048 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4049 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4051 l2cap_retransmit_frames(sk
);
4054 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4055 (pi
->unacked_frames
> 0))
4056 __mod_retrans_timer();
4058 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4059 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4062 l2cap_ertm_send(sk
);
4067 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
4069 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4070 u8 tx_seq
= __get_reqseq(rx_control
);
4072 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4074 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4076 pi
->expected_ack_seq
= tx_seq
;
4077 l2cap_drop_acked_frames(sk
);
4079 if (rx_control
& L2CAP_CTRL_FINAL
) {
4080 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4081 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4083 l2cap_retransmit_frames(sk
);
4085 l2cap_retransmit_frames(sk
);
4087 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
4088 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
4091 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
4093 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4094 u8 tx_seq
= __get_reqseq(rx_control
);
4096 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4098 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4100 if (rx_control
& L2CAP_CTRL_POLL
) {
4101 pi
->expected_ack_seq
= tx_seq
;
4102 l2cap_drop_acked_frames(sk
);
4104 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4105 l2cap_retransmit_one_frame(sk
, tx_seq
);
4107 l2cap_ertm_send(sk
);
4109 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4110 pi
->srej_save_reqseq
= tx_seq
;
4111 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4113 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4114 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4115 pi
->srej_save_reqseq
== tx_seq
)
4116 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4118 l2cap_retransmit_one_frame(sk
, tx_seq
);
4120 l2cap_retransmit_one_frame(sk
, tx_seq
);
4121 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4122 pi
->srej_save_reqseq
= tx_seq
;
4123 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4128 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4130 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4131 u8 tx_seq
= __get_reqseq(rx_control
);
4133 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4135 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4136 pi
->expected_ack_seq
= tx_seq
;
4137 l2cap_drop_acked_frames(sk
);
4139 if (rx_control
& L2CAP_CTRL_POLL
)
4140 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4142 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4143 del_timer(&pi
->retrans_timer
);
4144 if (rx_control
& L2CAP_CTRL_POLL
)
4145 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4149 if (rx_control
& L2CAP_CTRL_POLL
)
4150 l2cap_send_srejtail(sk
);
4152 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4155 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4157 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4159 if (L2CAP_CTRL_FINAL
& rx_control
&&
4160 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4161 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4162 if (l2cap_pi(sk
)->unacked_frames
> 0)
4163 __mod_retrans_timer();
4164 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4167 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4168 case L2CAP_SUPER_RCV_READY
:
4169 l2cap_data_channel_rrframe(sk
, rx_control
);
4172 case L2CAP_SUPER_REJECT
:
4173 l2cap_data_channel_rejframe(sk
, rx_control
);
4176 case L2CAP_SUPER_SELECT_REJECT
:
4177 l2cap_data_channel_srejframe(sk
, rx_control
);
4180 case L2CAP_SUPER_RCV_NOT_READY
:
4181 l2cap_data_channel_rnrframe(sk
, rx_control
);
4189 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
4191 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4194 int len
, next_tx_seq_offset
, req_seq_offset
;
4196 control
= get_unaligned_le16(skb
->data
);
4201 * We can just drop the corrupted I-frame here.
4202 * Receiver will miss it and start proper recovery
4203 * procedures and ask retransmission.
4205 if (l2cap_check_fcs(pi
, skb
))
4208 if (__is_sar_start(control
) && __is_iframe(control
))
4211 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4214 if (len
> pi
->mps
) {
4215 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4219 req_seq
= __get_reqseq(control
);
4220 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4221 if (req_seq_offset
< 0)
4222 req_seq_offset
+= 64;
4224 next_tx_seq_offset
=
4225 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4226 if (next_tx_seq_offset
< 0)
4227 next_tx_seq_offset
+= 64;
4229 /* check for invalid req-seq */
4230 if (req_seq_offset
> next_tx_seq_offset
) {
4231 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4235 if (__is_iframe(control
)) {
4237 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4241 l2cap_data_channel_iframe(sk
, control
, skb
);
4245 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4249 l2cap_data_channel_sframe(sk
, control
, skb
);
4259 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4262 struct l2cap_pinfo
*pi
;
4267 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4269 BT_DBG("unknown cid 0x%4.4x", cid
);
4275 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4277 if (sk
->sk_state
!= BT_CONNECTED
)
4281 case L2CAP_MODE_BASIC
:
4282 /* If socket recv buffers overflows we drop data here
4283 * which is *bad* because L2CAP has to be reliable.
4284 * But we don't have any other choice. L2CAP doesn't
4285 * provide flow control mechanism. */
4287 if (pi
->imtu
< skb
->len
)
4290 if (!sock_queue_rcv_skb(sk
, skb
))
4294 case L2CAP_MODE_ERTM
:
4295 if (!sock_owned_by_user(sk
)) {
4296 l2cap_ertm_data_rcv(sk
, skb
);
4298 if (sk_add_backlog(sk
, skb
))
4304 case L2CAP_MODE_STREAMING
:
4305 control
= get_unaligned_le16(skb
->data
);
4309 if (l2cap_check_fcs(pi
, skb
))
4312 if (__is_sar_start(control
))
4315 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4318 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4321 tx_seq
= __get_txseq(control
);
4323 if (pi
->expected_tx_seq
== tx_seq
)
4324 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4326 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4328 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4333 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4347 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4351 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4355 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4357 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4360 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4363 if (!sock_queue_rcv_skb(sk
, skb
))
4375 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4377 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4381 skb_pull(skb
, L2CAP_HDR_SIZE
);
4382 cid
= __le16_to_cpu(lh
->cid
);
4383 len
= __le16_to_cpu(lh
->len
);
4385 if (len
!= skb
->len
) {
4390 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4393 case L2CAP_CID_SIGNALING
:
4394 l2cap_sig_channel(conn
, skb
);
4397 case L2CAP_CID_CONN_LESS
:
4398 psm
= get_unaligned_le16(skb
->data
);
4400 l2cap_conless_channel(conn
, psm
, skb
);
4404 l2cap_data_channel(conn
, cid
, skb
);
4409 /* ---- L2CAP interface with lower layer (HCI) ---- */
4411 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4413 int exact
= 0, lm1
= 0, lm2
= 0;
4414 register struct sock
*sk
;
4415 struct hlist_node
*node
;
4417 if (type
!= ACL_LINK
)
4420 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4422 /* Find listening sockets and check their link_mode */
4423 read_lock(&l2cap_sk_list
.lock
);
4424 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4425 if (sk
->sk_state
!= BT_LISTEN
)
4428 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4429 lm1
|= HCI_LM_ACCEPT
;
4430 if (l2cap_pi(sk
)->role_switch
)
4431 lm1
|= HCI_LM_MASTER
;
4433 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4434 lm2
|= HCI_LM_ACCEPT
;
4435 if (l2cap_pi(sk
)->role_switch
)
4436 lm2
|= HCI_LM_MASTER
;
4439 read_unlock(&l2cap_sk_list
.lock
);
4441 return exact
? lm1
: lm2
;
4444 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4446 struct l2cap_conn
*conn
;
4448 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4450 if (hcon
->type
!= ACL_LINK
)
4454 conn
= l2cap_conn_add(hcon
, status
);
4456 l2cap_conn_ready(conn
);
4458 l2cap_conn_del(hcon
, bt_err(status
));
4463 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4465 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4467 BT_DBG("hcon %p", hcon
);
4469 if (hcon
->type
!= ACL_LINK
|| !conn
)
4472 return conn
->disc_reason
;
4475 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4477 BT_DBG("hcon %p reason %d", hcon
, reason
);
4479 if (hcon
->type
!= ACL_LINK
)
4482 l2cap_conn_del(hcon
, bt_err(reason
));
4487 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4489 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4492 if (encrypt
== 0x00) {
4493 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4494 l2cap_sock_clear_timer(sk
);
4495 l2cap_sock_set_timer(sk
, HZ
* 5);
4496 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4497 __l2cap_sock_close(sk
, ECONNREFUSED
);
4499 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4500 l2cap_sock_clear_timer(sk
);
4504 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4506 struct l2cap_chan_list
*l
;
4507 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4513 l
= &conn
->chan_list
;
4515 BT_DBG("conn %p", conn
);
4517 read_lock(&l
->lock
);
4519 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4522 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4527 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4528 sk
->sk_state
== BT_CONFIG
)) {
4529 l2cap_check_encryption(sk
, encrypt
);
4534 if (sk
->sk_state
== BT_CONNECT
) {
4536 struct l2cap_conn_req req
;
4537 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4538 req
.psm
= l2cap_pi(sk
)->psm
;
4540 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4541 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4543 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4544 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4546 l2cap_sock_clear_timer(sk
);
4547 l2cap_sock_set_timer(sk
, HZ
/ 10);
4549 } else if (sk
->sk_state
== BT_CONNECT2
) {
4550 struct l2cap_conn_rsp rsp
;
4554 sk
->sk_state
= BT_CONFIG
;
4555 result
= L2CAP_CR_SUCCESS
;
4557 sk
->sk_state
= BT_DISCONN
;
4558 l2cap_sock_set_timer(sk
, HZ
/ 10);
4559 result
= L2CAP_CR_SEC_BLOCK
;
4562 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4563 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4564 rsp
.result
= cpu_to_le16(result
);
4565 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4566 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4567 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4573 read_unlock(&l
->lock
);
4578 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4580 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4582 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4585 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4587 if (flags
& ACL_START
) {
4588 struct l2cap_hdr
*hdr
;
4592 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4593 kfree_skb(conn
->rx_skb
);
4594 conn
->rx_skb
= NULL
;
4596 l2cap_conn_unreliable(conn
, ECOMM
);
4600 BT_ERR("Frame is too short (len %d)", skb
->len
);
4601 l2cap_conn_unreliable(conn
, ECOMM
);
4605 hdr
= (struct l2cap_hdr
*) skb
->data
;
4606 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4608 if (len
== skb
->len
) {
4609 /* Complete frame received */
4610 l2cap_recv_frame(conn
, skb
);
4614 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4616 if (skb
->len
> len
) {
4617 BT_ERR("Frame is too long (len %d, expected len %d)",
4619 l2cap_conn_unreliable(conn
, ECOMM
);
4623 /* Allocate skb for the complete frame (with header) */
4624 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4628 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4630 conn
->rx_len
= len
- skb
->len
;
4632 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4634 if (!conn
->rx_len
) {
4635 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4636 l2cap_conn_unreliable(conn
, ECOMM
);
4640 if (skb
->len
> conn
->rx_len
) {
4641 BT_ERR("Fragment is too long (len %d, expected %d)",
4642 skb
->len
, conn
->rx_len
);
4643 kfree_skb(conn
->rx_skb
);
4644 conn
->rx_skb
= NULL
;
4646 l2cap_conn_unreliable(conn
, ECOMM
);
4650 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4652 conn
->rx_len
-= skb
->len
;
4654 if (!conn
->rx_len
) {
4655 /* Complete frame received */
4656 l2cap_recv_frame(conn
, conn
->rx_skb
);
4657 conn
->rx_skb
= NULL
;
4666 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4669 struct hlist_node
*node
;
4671 read_lock_bh(&l2cap_sk_list
.lock
);
4673 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4674 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4676 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4677 batostr(&bt_sk(sk
)->src
),
4678 batostr(&bt_sk(sk
)->dst
),
4679 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4681 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4684 read_unlock_bh(&l2cap_sk_list
.lock
);
4689 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4691 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4694 static const struct file_operations l2cap_debugfs_fops
= {
4695 .open
= l2cap_debugfs_open
,
4697 .llseek
= seq_lseek
,
4698 .release
= single_release
,
/* Dentry of the debugfs file created in l2cap_init(), removed in
 * l2cap_exit(). */
static struct dentry *l2cap_debugfs;
4703 static const struct proto_ops l2cap_sock_ops
= {
4704 .family
= PF_BLUETOOTH
,
4705 .owner
= THIS_MODULE
,
4706 .release
= l2cap_sock_release
,
4707 .bind
= l2cap_sock_bind
,
4708 .connect
= l2cap_sock_connect
,
4709 .listen
= l2cap_sock_listen
,
4710 .accept
= l2cap_sock_accept
,
4711 .getname
= l2cap_sock_getname
,
4712 .sendmsg
= l2cap_sock_sendmsg
,
4713 .recvmsg
= l2cap_sock_recvmsg
,
4714 .poll
= bt_sock_poll
,
4715 .ioctl
= bt_sock_ioctl
,
4716 .mmap
= sock_no_mmap
,
4717 .socketpair
= sock_no_socketpair
,
4718 .shutdown
= l2cap_sock_shutdown
,
4719 .setsockopt
= l2cap_sock_setsockopt
,
4720 .getsockopt
= l2cap_sock_getsockopt
4723 static const struct net_proto_family l2cap_sock_family_ops
= {
4724 .family
= PF_BLUETOOTH
,
4725 .owner
= THIS_MODULE
,
4726 .create
= l2cap_sock_create
,
4729 static struct hci_proto l2cap_hci_proto
= {
4731 .id
= HCI_PROTO_L2CAP
,
4732 .connect_ind
= l2cap_connect_ind
,
4733 .connect_cfm
= l2cap_connect_cfm
,
4734 .disconn_ind
= l2cap_disconn_ind
,
4735 .disconn_cfm
= l2cap_disconn_cfm
,
4736 .security_cfm
= l2cap_security_cfm
,
4737 .recv_acldata
= l2cap_recv_acldata
4740 static int __init
l2cap_init(void)
4744 err
= proto_register(&l2cap_proto
, 0);
4748 _busy_wq
= create_singlethread_workqueue("l2cap");
4752 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4754 BT_ERR("L2CAP socket registration failed");
4758 err
= hci_register_proto(&l2cap_hci_proto
);
4760 BT_ERR("L2CAP protocol registration failed");
4761 bt_sock_unregister(BTPROTO_L2CAP
);
4766 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4767 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4769 BT_ERR("Failed to create L2CAP debug file");
4772 BT_INFO("L2CAP ver %s", VERSION
);
4773 BT_INFO("L2CAP socket layer initialized");
4778 proto_unregister(&l2cap_proto
);
4782 static void __exit
l2cap_exit(void)
4784 debugfs_remove(l2cap_debugfs
);
4786 flush_workqueue(_busy_wq
);
4787 destroy_workqueue(_busy_wq
);
4789 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4790 BT_ERR("L2CAP socket unregistration failed");
4792 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4793 BT_ERR("L2CAP protocol unregistration failed");
4795 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4806 module_init(l2cap_init
);
4807 module_exit(l2cap_exit
);
4809 module_param(enable_ertm
, bool, 0644);
4810 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4812 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4813 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4814 MODULE_VERSION(VERSION
);
4815 MODULE_LICENSE("GPL");
4816 MODULE_ALIAS("bt-proto-0");