Bluetooth: EWS: recalculate L2CAP header size
[linux-2.6/libata-dev.git] / net / bluetooth / l2cap_core.c
blob439e7150f1506163639744b470df3bcc114a4ab4
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
59 int disable_ertm;
60 int enable_hs;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
88 kfree(c);
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
93 struct l2cap_chan *c;
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
99 return NULL;
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
111 return NULL;
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
122 if (c)
123 bh_lock_sock(c->sk);
124 read_unlock(&conn->chan_lock);
125 return c;
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
134 return c;
136 return NULL;
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
145 if (c)
146 bh_lock_sock(c->sk);
147 read_unlock(&conn->chan_lock);
148 return c;
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 goto found;
160 c = NULL;
161 found:
162 return c;
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
167 int err;
169 write_lock_bh(&chan_list_lock);
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
172 err = -EADDRINUSE;
173 goto done;
176 if (psm) {
177 chan->psm = psm;
178 chan->sport = psm;
179 err = 0;
180 } else {
181 u16 p;
183 err = -EINVAL;
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
188 err = 0;
189 break;
193 done:
194 write_unlock_bh(&chan_list_lock);
195 return err;
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
202 chan->scid = scid;
204 write_unlock_bh(&chan_list_lock);
206 return 0;
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
215 return cid;
218 return 0;
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
226 chan_hold(chan);
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))
234 chan_put(chan);
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 chan->state = state;
240 chan->ops->state_change(chan->data, state);
/* Channel guard timer: tears the channel down when the pending
 * operation (connect/config/disconnect) did not finish in time. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	/* Pick the error reported to the socket for this state. */
	switch (chan->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		reason = ECONNREFUSED;
		break;
	case BT_CONNECT:
		reason = chan->sec_level != BT_SECURITY_SDP ?
						ECONNREFUSED : ETIMEDOUT;
		break;
	default:
		reason = ETIMEDOUT;
		break;
	}

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	chan_put(chan);
}
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
282 if (!chan)
283 return NULL;
285 chan->sk = sk;
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);
297 return chan;
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
306 chan_put(chan);
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
314 conn->disc_reason = 0x13;
316 chan->conn = conn;
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
320 /* LE connection */
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
324 } else {
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
334 } else {
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
341 chan_hold(chan);
343 list_add(&chan->list, &conn->chan_l);
346 /* Delete channel.
347 * Must be called on the locked socket. */
348 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
350 struct sock *sk = chan->sk;
351 struct l2cap_conn *conn = chan->conn;
352 struct sock *parent = bt_sk(sk)->parent;
354 __clear_chan_timer(chan);
356 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
358 if (conn) {
359 /* Delete from channel list */
360 write_lock_bh(&conn->chan_lock);
361 list_del(&chan->list);
362 write_unlock_bh(&conn->chan_lock);
363 chan_put(chan);
365 chan->conn = NULL;
366 hci_conn_put(conn->hcon);
369 l2cap_state_change(chan, BT_CLOSED);
370 sock_set_flag(sk, SOCK_ZAPPED);
372 if (err)
373 sk->sk_err = err;
375 if (parent) {
376 bt_accept_unlink(sk);
377 parent->sk_data_ready(parent, 0);
378 } else
379 sk->sk_state_change(sk);
381 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
382 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
383 return;
385 skb_queue_purge(&chan->tx_q);
387 if (chan->mode == L2CAP_MODE_ERTM) {
388 struct srej_list *l, *tmp;
390 __clear_retrans_timer(chan);
391 __clear_monitor_timer(chan);
392 __clear_ack_timer(chan);
394 skb_queue_purge(&chan->srej_q);
396 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
397 list_del(&l->list);
398 kfree(l);
403 static void l2cap_chan_cleanup_listen(struct sock *parent)
405 struct sock *sk;
407 BT_DBG("parent %p", parent);
409 /* Close not yet accepted channels */
410 while ((sk = bt_accept_dequeue(parent, NULL))) {
411 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
412 __clear_chan_timer(chan);
413 lock_sock(sk);
414 l2cap_chan_close(chan, ECONNRESET);
415 release_sock(sk);
416 chan->ops->close(chan->data);
420 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
422 struct l2cap_conn *conn = chan->conn;
423 struct sock *sk = chan->sk;
425 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
427 switch (chan->state) {
428 case BT_LISTEN:
429 l2cap_chan_cleanup_listen(sk);
431 l2cap_state_change(chan, BT_CLOSED);
432 sock_set_flag(sk, SOCK_ZAPPED);
433 break;
435 case BT_CONNECTED:
436 case BT_CONFIG:
437 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
438 conn->hcon->type == ACL_LINK) {
439 __clear_chan_timer(chan);
440 __set_chan_timer(chan, sk->sk_sndtimeo);
441 l2cap_send_disconn_req(conn, chan, reason);
442 } else
443 l2cap_chan_del(chan, reason);
444 break;
446 case BT_CONNECT2:
447 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
448 conn->hcon->type == ACL_LINK) {
449 struct l2cap_conn_rsp rsp;
450 __u16 result;
452 if (bt_sk(sk)->defer_setup)
453 result = L2CAP_CR_SEC_BLOCK;
454 else
455 result = L2CAP_CR_BAD_PSM;
456 l2cap_state_change(chan, BT_DISCONN);
458 rsp.scid = cpu_to_le16(chan->dcid);
459 rsp.dcid = cpu_to_le16(chan->scid);
460 rsp.result = cpu_to_le16(result);
461 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
462 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
463 sizeof(rsp), &rsp);
466 l2cap_chan_del(chan, reason);
467 break;
469 case BT_CONNECT:
470 case BT_DISCONN:
471 l2cap_chan_del(chan, reason);
472 break;
474 default:
475 sock_set_flag(sk, SOCK_ZAPPED);
476 break;
480 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
482 if (chan->chan_type == L2CAP_CHAN_RAW) {
483 switch (chan->sec_level) {
484 case BT_SECURITY_HIGH:
485 return HCI_AT_DEDICATED_BONDING_MITM;
486 case BT_SECURITY_MEDIUM:
487 return HCI_AT_DEDICATED_BONDING;
488 default:
489 return HCI_AT_NO_BONDING;
491 } else if (chan->psm == cpu_to_le16(0x0001)) {
492 if (chan->sec_level == BT_SECURITY_LOW)
493 chan->sec_level = BT_SECURITY_SDP;
495 if (chan->sec_level == BT_SECURITY_HIGH)
496 return HCI_AT_NO_BONDING_MITM;
497 else
498 return HCI_AT_NO_BONDING;
499 } else {
500 switch (chan->sec_level) {
501 case BT_SECURITY_HIGH:
502 return HCI_AT_GENERAL_BONDING_MITM;
503 case BT_SECURITY_MEDIUM:
504 return HCI_AT_GENERAL_BONDING;
505 default:
506 return HCI_AT_NO_BONDING;
511 /* Service level security */
512 static inline int l2cap_check_security(struct l2cap_chan *chan)
514 struct l2cap_conn *conn = chan->conn;
515 __u8 auth_type;
517 auth_type = l2cap_get_auth_type(chan);
519 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
522 static u8 l2cap_get_ident(struct l2cap_conn *conn)
524 u8 id;
526 /* Get next available identificator.
527 * 1 - 128 are used by kernel.
528 * 129 - 199 are reserved.
529 * 200 - 254 are used by utilities like l2ping, etc.
532 spin_lock_bh(&conn->lock);
534 if (++conn->tx_ident > 128)
535 conn->tx_ident = 1;
537 id = conn->tx_ident;
539 spin_unlock_bh(&conn->lock);
541 return id;
544 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
546 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
547 u8 flags;
549 BT_DBG("code 0x%2.2x", code);
551 if (!skb)
552 return;
554 if (lmp_no_flush_capable(conn->hcon->hdev))
555 flags = ACL_START_NO_FLUSH;
556 else
557 flags = ACL_START;
559 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
561 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM/streaming S-frame carrying @control.
 * Header size depends on whether extended control fields are in use;
 * an optional CRC16 FCS is appended when the channel negotiated it. */
564 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
566 struct sk_buff *skb;
567 struct l2cap_hdr *lh;
568 struct l2cap_conn *conn = chan->conn;
569 int count, hlen;
570 u8 flags;
572 if (chan->state != BT_CONNECTED)
573 return;
/* Header size: basic L2CAP header plus 4-byte extended or 2-byte
 * enhanced control field. */
575 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
576 hlen = L2CAP_EXT_HDR_SIZE;
577 else
578 hlen = L2CAP_ENH_HDR_SIZE;
/* Reserve 2 extra bytes for the FCS when CRC16 is in use. */
580 if (chan->fcs == L2CAP_FCS_CRC16)
581 hlen += 2;
583 BT_DBG("chan %p, control 0x%2.2x", chan, control);
/* An S-frame has no payload, so the PDU never exceeds hlen; still cap
 * it at the ACL MTU. */
585 count = min_t(unsigned int, conn->mtu, hlen);
587 control |= __set_sframe(chan);
/* Fold any pending Final/Poll bits into this frame. */
589 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
590 control |= __set_ctrl_final(chan);
592 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
593 control |= __set_ctrl_poll(chan);
595 skb = bt_skb_alloc(count, GFP_ATOMIC);
596 if (!skb)
597 return;
599 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
/* L2CAP length field excludes the basic header itself. */
600 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
601 lh->cid = cpu_to_le16(chan->dcid);
602 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers the whole PDU except the 2-byte FCS field itself. */
604 if (chan->fcs == L2CAP_FCS_CRC16) {
605 u16 fcs = crc16(0, (u8 *)lh, count - 2);
606 put_unaligned_le16(fcs, skb_put(skb, 2));
609 if (lmp_no_flush_capable(conn->hcon->hdev))
610 flags = ACL_START_NO_FLUSH;
611 else
612 flags = ACL_START;
614 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
616 hci_send_acl(chan->conn->hcon, skb, flags);
619 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
621 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
622 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
623 set_bit(CONN_RNR_SENT, &chan->conn_state);
624 } else
625 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
627 control |= __set_reqseq(chan, chan->buffer_seq);
629 l2cap_send_sframe(chan, control);
632 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
634 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
637 static void l2cap_do_start(struct l2cap_chan *chan)
639 struct l2cap_conn *conn = chan->conn;
641 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
642 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
643 return;
645 if (l2cap_check_security(chan) &&
646 __l2cap_no_conn_pending(chan)) {
647 struct l2cap_conn_req req;
648 req.scid = cpu_to_le16(chan->scid);
649 req.psm = chan->psm;
651 chan->ident = l2cap_get_ident(conn);
652 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
654 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
655 sizeof(req), &req);
657 } else {
658 struct l2cap_info_req req;
659 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
661 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
662 conn->info_ident = l2cap_get_ident(conn);
664 mod_timer(&conn->info_timer, jiffies +
665 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
667 l2cap_send_cmd(conn, conn->info_ident,
668 L2CAP_INFO_REQ, sizeof(req), &req);
672 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
674 u32 local_feat_mask = l2cap_feat_mask;
675 if (!disable_ertm)
676 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
678 switch (mode) {
679 case L2CAP_MODE_ERTM:
680 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
681 case L2CAP_MODE_STREAMING:
682 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
683 default:
684 return 0x00;
688 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
690 struct sock *sk;
691 struct l2cap_disconn_req req;
693 if (!conn)
694 return;
696 sk = chan->sk;
698 if (chan->mode == L2CAP_MODE_ERTM) {
699 __clear_retrans_timer(chan);
700 __clear_monitor_timer(chan);
701 __clear_ack_timer(chan);
704 req.dcid = cpu_to_le16(chan->dcid);
705 req.scid = cpu_to_le16(chan->scid);
706 l2cap_send_cmd(conn, l2cap_get_ident(conn),
707 L2CAP_DISCONN_REQ, sizeof(req), &req);
709 l2cap_state_change(chan, BT_DISCONN);
710 sk->sk_err = err;
713 /* ---- L2CAP connections ---- */
714 static void l2cap_conn_start(struct l2cap_conn *conn)
716 struct l2cap_chan *chan, *tmp;
718 BT_DBG("conn %p", conn);
720 read_lock(&conn->chan_lock);
722 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
723 struct sock *sk = chan->sk;
725 bh_lock_sock(sk);
727 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
728 bh_unlock_sock(sk);
729 continue;
732 if (chan->state == BT_CONNECT) {
733 struct l2cap_conn_req req;
735 if (!l2cap_check_security(chan) ||
736 !__l2cap_no_conn_pending(chan)) {
737 bh_unlock_sock(sk);
738 continue;
741 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
742 && test_bit(CONF_STATE2_DEVICE,
743 &chan->conf_state)) {
744 /* l2cap_chan_close() calls list_del(chan)
745 * so release the lock */
746 read_unlock(&conn->chan_lock);
747 l2cap_chan_close(chan, ECONNRESET);
748 read_lock(&conn->chan_lock);
749 bh_unlock_sock(sk);
750 continue;
753 req.scid = cpu_to_le16(chan->scid);
754 req.psm = chan->psm;
756 chan->ident = l2cap_get_ident(conn);
757 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
759 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
760 sizeof(req), &req);
762 } else if (chan->state == BT_CONNECT2) {
763 struct l2cap_conn_rsp rsp;
764 char buf[128];
765 rsp.scid = cpu_to_le16(chan->dcid);
766 rsp.dcid = cpu_to_le16(chan->scid);
768 if (l2cap_check_security(chan)) {
769 if (bt_sk(sk)->defer_setup) {
770 struct sock *parent = bt_sk(sk)->parent;
771 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
772 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
773 if (parent)
774 parent->sk_data_ready(parent, 0);
776 } else {
777 l2cap_state_change(chan, BT_CONFIG);
778 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
779 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
781 } else {
782 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
783 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
786 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
787 sizeof(rsp), &rsp);
789 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
790 rsp.result != L2CAP_CR_SUCCESS) {
791 bh_unlock_sock(sk);
792 continue;
795 set_bit(CONF_REQ_SENT, &chan->conf_state);
796 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
797 l2cap_build_conf_req(chan, buf), buf);
798 chan->num_conf_req++;
801 bh_unlock_sock(sk);
804 read_unlock(&conn->chan_lock);
807 /* Find socket with cid and source bdaddr.
808 * Returns closest match, locked.
810 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
812 struct l2cap_chan *c, *c1 = NULL;
814 read_lock(&chan_list_lock);
816 list_for_each_entry(c, &chan_list, global_l) {
817 struct sock *sk = c->sk;
819 if (state && c->state != state)
820 continue;
822 if (c->scid == cid) {
823 /* Exact match. */
824 if (!bacmp(&bt_sk(sk)->src, src)) {
825 read_unlock(&chan_list_lock);
826 return c;
829 /* Closest match */
830 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
831 c1 = c;
835 read_unlock(&chan_list_lock);
837 return c1;
840 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
842 struct sock *parent, *sk;
843 struct l2cap_chan *chan, *pchan;
845 BT_DBG("");
847 /* Check if we have socket listening on cid */
848 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
849 conn->src);
850 if (!pchan)
851 return;
853 parent = pchan->sk;
855 bh_lock_sock(parent);
857 /* Check for backlog size */
858 if (sk_acceptq_is_full(parent)) {
859 BT_DBG("backlog full %d", parent->sk_ack_backlog);
860 goto clean;
863 chan = pchan->ops->new_connection(pchan->data);
864 if (!chan)
865 goto clean;
867 sk = chan->sk;
869 write_lock_bh(&conn->chan_lock);
871 hci_conn_hold(conn->hcon);
873 bacpy(&bt_sk(sk)->src, conn->src);
874 bacpy(&bt_sk(sk)->dst, conn->dst);
876 bt_accept_enqueue(parent, sk);
878 __l2cap_chan_add(conn, chan);
880 __set_chan_timer(chan, sk->sk_sndtimeo);
882 l2cap_state_change(chan, BT_CONNECTED);
883 parent->sk_data_ready(parent, 0);
885 write_unlock_bh(&conn->chan_lock);
887 clean:
888 bh_unlock_sock(parent);
891 static void l2cap_chan_ready(struct sock *sk)
893 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
894 struct sock *parent = bt_sk(sk)->parent;
896 BT_DBG("sk %p, parent %p", sk, parent);
898 chan->conf_state = 0;
899 __clear_chan_timer(chan);
901 l2cap_state_change(chan, BT_CONNECTED);
902 sk->sk_state_change(sk);
904 if (parent)
905 parent->sk_data_ready(parent, 0);
908 static void l2cap_conn_ready(struct l2cap_conn *conn)
910 struct l2cap_chan *chan;
912 BT_DBG("conn %p", conn);
914 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
915 l2cap_le_conn_ready(conn);
917 if (conn->hcon->out && conn->hcon->type == LE_LINK)
918 smp_conn_security(conn, conn->hcon->pending_sec_level);
920 read_lock(&conn->chan_lock);
922 list_for_each_entry(chan, &conn->chan_l, list) {
923 struct sock *sk = chan->sk;
925 bh_lock_sock(sk);
927 if (conn->hcon->type == LE_LINK) {
928 if (smp_conn_security(conn, chan->sec_level))
929 l2cap_chan_ready(sk);
931 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
932 __clear_chan_timer(chan);
933 l2cap_state_change(chan, BT_CONNECTED);
934 sk->sk_state_change(sk);
936 } else if (chan->state == BT_CONNECT)
937 l2cap_do_start(chan);
939 bh_unlock_sock(sk);
942 read_unlock(&conn->chan_lock);
945 /* Notify sockets that we cannot guaranty reliability anymore */
946 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
948 struct l2cap_chan *chan;
950 BT_DBG("conn %p", conn);
952 read_lock(&conn->chan_lock);
954 list_for_each_entry(chan, &conn->chan_l, list) {
955 struct sock *sk = chan->sk;
957 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
958 sk->sk_err = err;
961 read_unlock(&conn->chan_lock);
964 static void l2cap_info_timeout(unsigned long arg)
966 struct l2cap_conn *conn = (void *) arg;
968 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
969 conn->info_ident = 0;
971 l2cap_conn_start(conn);
974 static void l2cap_conn_del(struct hci_conn *hcon, int err)
976 struct l2cap_conn *conn = hcon->l2cap_data;
977 struct l2cap_chan *chan, *l;
978 struct sock *sk;
980 if (!conn)
981 return;
983 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
985 kfree_skb(conn->rx_skb);
987 /* Kill channels */
988 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
989 sk = chan->sk;
990 bh_lock_sock(sk);
991 l2cap_chan_del(chan, err);
992 bh_unlock_sock(sk);
993 chan->ops->close(chan->data);
996 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
997 del_timer_sync(&conn->info_timer);
999 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1000 del_timer(&conn->security_timer);
1001 smp_chan_destroy(conn);
1004 hcon->l2cap_data = NULL;
1005 kfree(conn);
1008 static void security_timeout(unsigned long arg)
1010 struct l2cap_conn *conn = (void *) arg;
1012 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1015 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1017 struct l2cap_conn *conn = hcon->l2cap_data;
1019 if (conn || status)
1020 return conn;
1022 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1023 if (!conn)
1024 return NULL;
1026 hcon->l2cap_data = conn;
1027 conn->hcon = hcon;
1029 BT_DBG("hcon %p conn %p", hcon, conn);
1031 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1032 conn->mtu = hcon->hdev->le_mtu;
1033 else
1034 conn->mtu = hcon->hdev->acl_mtu;
1036 conn->src = &hcon->hdev->bdaddr;
1037 conn->dst = &hcon->dst;
1039 conn->feat_mask = 0;
1041 spin_lock_init(&conn->lock);
1042 rwlock_init(&conn->chan_lock);
1044 INIT_LIST_HEAD(&conn->chan_l);
1046 if (hcon->type == LE_LINK)
1047 setup_timer(&conn->security_timer, security_timeout,
1048 (unsigned long) conn);
1049 else
1050 setup_timer(&conn->info_timer, l2cap_info_timeout,
1051 (unsigned long) conn);
1053 conn->disc_reason = 0x13;
1055 return conn;
1058 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1060 write_lock_bh(&conn->chan_lock);
1061 __l2cap_chan_add(conn, chan);
1062 write_unlock_bh(&conn->chan_lock);
1065 /* ---- Socket interface ---- */
1067 /* Find socket with psm and source bdaddr.
1068 * Returns closest match.
1070 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1072 struct l2cap_chan *c, *c1 = NULL;
1074 read_lock(&chan_list_lock);
1076 list_for_each_entry(c, &chan_list, global_l) {
1077 struct sock *sk = c->sk;
1079 if (state && c->state != state)
1080 continue;
1082 if (c->psm == psm) {
1083 /* Exact match. */
1084 if (!bacmp(&bt_sk(sk)->src, src)) {
1085 read_unlock(&chan_list_lock);
1086 return c;
1089 /* Closest match */
1090 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1091 c1 = c;
1095 read_unlock(&chan_list_lock);
1097 return c1;
1100 int l2cap_chan_connect(struct l2cap_chan *chan)
1102 struct sock *sk = chan->sk;
1103 bdaddr_t *src = &bt_sk(sk)->src;
1104 bdaddr_t *dst = &bt_sk(sk)->dst;
1105 struct l2cap_conn *conn;
1106 struct hci_conn *hcon;
1107 struct hci_dev *hdev;
1108 __u8 auth_type;
1109 int err;
1111 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1112 chan->psm);
1114 hdev = hci_get_route(dst, src);
1115 if (!hdev)
1116 return -EHOSTUNREACH;
1118 hci_dev_lock_bh(hdev);
1120 auth_type = l2cap_get_auth_type(chan);
1122 if (chan->dcid == L2CAP_CID_LE_DATA)
1123 hcon = hci_connect(hdev, LE_LINK, dst,
1124 chan->sec_level, auth_type);
1125 else
1126 hcon = hci_connect(hdev, ACL_LINK, dst,
1127 chan->sec_level, auth_type);
1129 if (IS_ERR(hcon)) {
1130 err = PTR_ERR(hcon);
1131 goto done;
1134 conn = l2cap_conn_add(hcon, 0);
1135 if (!conn) {
1136 hci_conn_put(hcon);
1137 err = -ENOMEM;
1138 goto done;
1141 /* Update source addr of the socket */
1142 bacpy(src, conn->src);
1144 l2cap_chan_add(conn, chan);
1146 l2cap_state_change(chan, BT_CONNECT);
1147 __set_chan_timer(chan, sk->sk_sndtimeo);
1149 if (hcon->state == BT_CONNECTED) {
1150 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1151 __clear_chan_timer(chan);
1152 if (l2cap_check_security(chan))
1153 l2cap_state_change(chan, BT_CONNECTED);
1154 } else
1155 l2cap_do_start(chan);
1158 err = 0;
1160 done:
1161 hci_dev_unlock_bh(hdev);
1162 hci_dev_put(hdev);
1163 return err;
1166 int __l2cap_wait_ack(struct sock *sk)
1168 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1169 DECLARE_WAITQUEUE(wait, current);
1170 int err = 0;
1171 int timeo = HZ/5;
1173 add_wait_queue(sk_sleep(sk), &wait);
1174 set_current_state(TASK_INTERRUPTIBLE);
1175 while (chan->unacked_frames > 0 && chan->conn) {
1176 if (!timeo)
1177 timeo = HZ/5;
1179 if (signal_pending(current)) {
1180 err = sock_intr_errno(timeo);
1181 break;
1184 release_sock(sk);
1185 timeo = schedule_timeout(timeo);
1186 lock_sock(sk);
1187 set_current_state(TASK_INTERRUPTIBLE);
1189 err = sock_error(sk);
1190 if (err)
1191 break;
1193 set_current_state(TASK_RUNNING);
1194 remove_wait_queue(sk_sleep(sk), &wait);
1195 return err;
1198 static void l2cap_monitor_timeout(unsigned long arg)
1200 struct l2cap_chan *chan = (void *) arg;
1201 struct sock *sk = chan->sk;
1203 BT_DBG("chan %p", chan);
1205 bh_lock_sock(sk);
1206 if (chan->retry_count >= chan->remote_max_tx) {
1207 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1208 bh_unlock_sock(sk);
1209 return;
1212 chan->retry_count++;
1213 __set_monitor_timer(chan);
1215 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1216 bh_unlock_sock(sk);
1219 static void l2cap_retrans_timeout(unsigned long arg)
1221 struct l2cap_chan *chan = (void *) arg;
1222 struct sock *sk = chan->sk;
1224 BT_DBG("chan %p", chan);
1226 bh_lock_sock(sk);
1227 chan->retry_count = 1;
1228 __set_monitor_timer(chan);
1230 set_bit(CONN_WAIT_F, &chan->conn_state);
1232 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1233 bh_unlock_sock(sk);
1236 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1238 struct sk_buff *skb;
1240 while ((skb = skb_peek(&chan->tx_q)) &&
1241 chan->unacked_frames) {
1242 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1243 break;
1245 skb = skb_dequeue(&chan->tx_q);
1246 kfree_skb(skb);
1248 chan->unacked_frames--;
1251 if (!chan->unacked_frames)
1252 __clear_retrans_timer(chan);
1255 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1257 struct hci_conn *hcon = chan->conn->hcon;
1258 u16 flags;
1260 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1262 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1263 lmp_no_flush_capable(hcon->hdev))
1264 flags = ACL_START_NO_FLUSH;
1265 else
1266 flags = ACL_START;
1268 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1269 hci_send_acl(hcon, skb, flags);
1272 static void l2cap_streaming_send(struct l2cap_chan *chan)
1274 struct sk_buff *skb;
1275 u16 control, fcs;
1277 while ((skb = skb_dequeue(&chan->tx_q))) {
1278 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1279 control |= __set_txseq(chan, chan->next_tx_seq);
1280 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1282 if (chan->fcs == L2CAP_FCS_CRC16) {
1283 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1284 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1287 l2cap_do_send(chan, skb);
1289 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1293 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1295 struct sk_buff *skb, *tx_skb;
1296 u16 control, fcs;
1298 skb = skb_peek(&chan->tx_q);
1299 if (!skb)
1300 return;
1302 do {
1303 if (bt_cb(skb)->tx_seq == tx_seq)
1304 break;
1306 if (skb_queue_is_last(&chan->tx_q, skb))
1307 return;
1309 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1311 if (chan->remote_max_tx &&
1312 bt_cb(skb)->retries == chan->remote_max_tx) {
1313 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1314 return;
1317 tx_skb = skb_clone(skb, GFP_ATOMIC);
1318 bt_cb(skb)->retries++;
1319 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1320 control &= __get_sar_mask(chan);
1322 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1323 control |= __set_ctrl_final(chan);
1325 control |= __set_reqseq(chan, chan->buffer_seq);
1326 control |= __set_txseq(chan, tx_seq);
1328 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1330 if (chan->fcs == L2CAP_FCS_CRC16) {
1331 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1332 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1335 l2cap_do_send(chan, tx_skb);
1338 static int l2cap_ertm_send(struct l2cap_chan *chan)
1340 struct sk_buff *skb, *tx_skb;
1341 u16 control, fcs;
1342 int nsent = 0;
1344 if (chan->state != BT_CONNECTED)
1345 return -ENOTCONN;
1347 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1349 if (chan->remote_max_tx &&
1350 bt_cb(skb)->retries == chan->remote_max_tx) {
1351 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1352 break;
1355 tx_skb = skb_clone(skb, GFP_ATOMIC);
1357 bt_cb(skb)->retries++;
1359 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1360 control &= __get_sar_mask(chan);
1362 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1363 control |= __set_ctrl_final(chan);
1365 control |= __set_reqseq(chan, chan->buffer_seq);
1366 control |= __set_txseq(chan, chan->next_tx_seq);
1367 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1370 if (chan->fcs == L2CAP_FCS_CRC16) {
1371 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1372 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1375 l2cap_do_send(chan, tx_skb);
1377 __set_retrans_timer(chan);
1379 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1380 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1382 if (bt_cb(skb)->retries == 1)
1383 chan->unacked_frames++;
1385 chan->frames_sent++;
1387 if (skb_queue_is_last(&chan->tx_q, skb))
1388 chan->tx_send_head = NULL;
1389 else
1390 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1392 nsent++;
1395 return nsent;
1398 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1400 int ret;
1402 if (!skb_queue_empty(&chan->tx_q))
1403 chan->tx_send_head = chan->tx_q.next;
1405 chan->next_tx_seq = chan->expected_ack_seq;
1406 ret = l2cap_ertm_send(chan);
1407 return ret;
/* Acknowledge frames received so far.
 *
 * When locally busy, send an RNR S-frame immediately and remember we
 * did.  Otherwise first try to piggy-back the ReqSeq on outgoing
 * I-frames via l2cap_ertm_send(); only if nothing was sent do we emit
 * an explicit RR S-frame.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	u16 control = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}

	/* I-frames carry the ack implicitly; >0 means one went out. */
	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
}
1430 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1432 struct srej_list *tail;
1433 u16 control;
1435 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1436 control |= __set_ctrl_final(chan);
1438 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1439 control |= __set_reqseq(chan, tail->tx_seq);
1441 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes fit in @skb itself (the caller sized it);
 * the remainder is split across freshly allocated continuation
 * buffers on the skb's frag_list, each at most conn->mtu bytes and
 * carrying no L2CAP header.
 *
 * Returns the number of bytes copied or a negative errno.  On error
 * the partially-built skb is left for the caller to free.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (G-frame) PDU: basic L2CAP header, 2-byte
 * PSM, then the user payload from @msg.  Payload beyond the first
 * ACL-MTU-sized buffer spills into the skb's frag_list.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Header length excludes the basic header but includes the PSM. */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode (B-frame) PDU: basic L2CAP header followed by
 * the user payload from @msg, spilling into frag_list as needed.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build one ERTM/streaming I-frame PDU.
 *
 * Header budget: the enhanced header (basic header + 16-bit control),
 * or the extended header when FLAG_EXT_CTRL is set for extended
 * window size (EWS); plus 2 bytes of SDU length on the START segment
 * of a segmented SDU, plus a 2-byte FCS placeholder when CRC16 is in
 * use (filled in at transmit time).
 *
 * @control: pre-computed SAR bits for the control field.
 * @sdulen:  total SDU length, or 0 when not a START segment.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += 2;	/* SDU length field */

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	/* NOTE(review): only 16 control bits are written even when
	 * FLAG_EXT_CTRL reserved a larger slot -- confirm the extended
	 * control path completes the field elsewhere. */
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve the FCS slot; actual CRC is computed at send time. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than the remote MPS into START / CONTINUE /
 * END I-frames.
 *
 * Frames are built on a private queue first so a mid-stream
 * allocation failure can purge them without disturbing tx_q; on
 * success the whole batch is spliced onto tx_q at once.
 * Returns the total number of payload bytes queued, or a negative
 * errno.
 */
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = __set_ctrl_sar(chan, L2CAP_SAR_START);
	/* Only the START segment carries the total SDU length. */
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
			buflen = chan->remote_mps;
		} else {
			control = __set_ctrl_sar(chan, L2CAP_SAR_END);
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	/* NOTE(review): relies on sar_queue.next still pointing at the
	 * first spliced skb after the non-reinitializing splice -- the
	 * _tail variant leaves the source list untouched. */
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
/* Top-level send entry point for an L2CAP channel.
 *
 * Connectionless channels get a single G-frame.  Basic mode sends one
 * B-frame after an MTU check.  ERTM/streaming builds one unsegmented
 * I-frame or segments the SDU, then kicks the appropriate TX engine.
 * Returns @len on success or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* While waiting for an F-bit from a busy peer, the data
		 * stays queued; report success anyway. */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
1712 /* Copy frame to all raw sockets on that connection */
1713 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1715 struct sk_buff *nskb;
1716 struct l2cap_chan *chan;
1718 BT_DBG("conn %p", conn);
1720 read_lock(&conn->chan_lock);
1721 list_for_each_entry(chan, &conn->chan_l, list) {
1722 struct sock *sk = chan->sk;
1723 if (chan->chan_type != L2CAP_CHAN_RAW)
1724 continue;
1726 /* Don't send frame to the socket it came from */
1727 if (skb->sk == sk)
1728 continue;
1729 nskb = skb_clone(skb, GFP_ATOMIC);
1730 if (!nskb)
1731 continue;
1733 if (chan->ops->recv(chan->data, nskb))
1734 kfree_skb(nskb);
1736 read_unlock(&conn->chan_lock);
1739 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header on the signalling
 * CID (LE or BR/EDR), command header, then @dlen bytes of @data.
 * Payload exceeding the ACL MTU is spread over frag_list fragments.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First buffer: whatever fits after the two headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * 1/2/4-byte options are returned by value in *val; anything else is
 * returned as a pointer into the option buffer.  Returns the total
 * size consumed (option header + payload).
 *
 * NOTE(review): opt->len comes straight off the wire and is not
 * checked against the remaining buffer; callers bound the loop with
 * the overall option-list length, but a forged length could still
 * advance *ptr past the end -- verify against the callers' checks.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-size option: hand back a pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option (type, len, value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are encoded little-endian;
 * any other length treats @val as a pointer to the raw payload.
 * The caller guarantees the destination buffer has room.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1866 static void l2cap_ack_timeout(unsigned long arg)
1868 struct l2cap_chan *chan = (void *) arg;
1870 bh_lock_sock(chan->sk);
1871 l2cap_send_ack(chan);
1872 bh_unlock_sock(chan->sk);
/* Reset per-channel ERTM state and arm the three ERTM timers
 * (retransmit, monitor, ack).  Called when the channel reaches
 * BT_CONNECTED in ERTM mode.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);

	/* ERTM reassembly must also run from the socket backlog path. */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1899 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1901 switch (mode) {
1902 case L2CAP_MODE_STREAMING:
1903 case L2CAP_MODE_ERTM:
1904 if (l2cap_mode_supported(mode, remote_feat_mask))
1905 return mode;
1906 /* fall through */
1907 default:
1908 return L2CAP_MODE_BASIC;
1912 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1914 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1917 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1919 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1920 __l2cap_ews_supported(chan))
1921 /* use extended control field */
1922 set_bit(FLAG_EXT_CTRL, &chan->flags);
1923 else
1924 chan->tx_win = min_t(u16, chan->tx_win,
1925 L2CAP_DEFAULT_TX_WINDOW);
/* Build our outgoing Configuration Request into @data.
 *
 * On the first request, possibly downgrade the desired mode based on
 * the remote feature mask.  Then emit MTU (if non-default), an RFC
 * option describing the chosen mode, FCS (when we can disable it),
 * and EWS when extended control is in use.  Returns the number of
 * bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A state-2 device insists on its configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise RFC if the remote understands it. */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* NOTE(review): the "- 10" presumably accounts for basic
		 * header + control + SDU length + FCS overhead -- confirm
		 * against the extended-header case. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
						L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Extended window needs an explicit EWS option. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
								chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
/* Parse the peer's accumulated Configuration Request (chan->conf_req)
 * and build our response into @data.
 *
 * First pass: walk the options, recording MTU, flush timeout, RFC,
 * FCS and EWS; unknown non-hint options are echoed back with result
 * UNKNOWN.  Then resolve any mode conflict and, on success, emit the
 * negotiated MTU and RFC back to the peer.  Returns the response
 * length or -ECONNREFUSED on an unresolvable conflict.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS is accepted silently. */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);

			break;

		case L2CAP_CONF_EWS:
			/* EWS requires high-speed support on our side. */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		/* State-2 device: the peer must match our mode. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement in a row: give up. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* NOTE(review): "- 10" is the per-PDU overhead
			 * (header + control + sdulen + fcs) -- confirm. */
			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			rfc.retrans_timeout =
				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2199 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2201 struct l2cap_conf_req *req = data;
2202 void *ptr = req->data;
2203 int type, olen;
2204 unsigned long val;
2205 struct l2cap_conf_rfc rfc;
2207 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2209 while (len >= L2CAP_CONF_OPT_SIZE) {
2210 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2212 switch (type) {
2213 case L2CAP_CONF_MTU:
2214 if (val < L2CAP_DEFAULT_MIN_MTU) {
2215 *result = L2CAP_CONF_UNACCEPT;
2216 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2217 } else
2218 chan->imtu = val;
2219 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2220 break;
2222 case L2CAP_CONF_FLUSH_TO:
2223 chan->flush_to = val;
2224 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2225 2, chan->flush_to);
2226 break;
2228 case L2CAP_CONF_RFC:
2229 if (olen == sizeof(rfc))
2230 memcpy(&rfc, (void *)val, olen);
2232 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2233 rfc.mode != chan->mode)
2234 return -ECONNREFUSED;
2236 chan->fcs = 0;
2238 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2239 sizeof(rfc), (unsigned long) &rfc);
2240 break;
2242 case L2CAP_CONF_EWS:
2243 chan->tx_win = min_t(u16, val,
2244 L2CAP_DEFAULT_EXT_WINDOW);
2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS,
2246 2, chan->tx_win);
2247 break;
2251 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2252 return -ECONNREFUSED;
2254 chan->mode = rfc.mode;
2256 if (*result == L2CAP_CONF_SUCCESS) {
2257 switch (rfc.mode) {
2258 case L2CAP_MODE_ERTM:
2259 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2260 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2261 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2262 break;
2263 case L2CAP_MODE_STREAMING:
2264 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2268 req->dcid = cpu_to_le16(chan->dcid);
2269 req->flags = cpu_to_le16(0x0000);
2271 return ptr - data;
2274 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2276 struct l2cap_conf_rsp *rsp = data;
2277 void *ptr = rsp->data;
2279 BT_DBG("chan %p", chan);
2281 rsp->scid = cpu_to_le16(chan->dcid);
2282 rsp->result = cpu_to_le16(result);
2283 rsp->flags = cpu_to_le16(flags);
2285 return ptr - data;
/* Complete a deferred incoming connection: send the success
 * Connect Response recorded under chan->ident, then kick off
 * configuration with our first Configuration Request (unless one was
 * already sent).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2309 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2311 int type, olen;
2312 unsigned long val;
2313 struct l2cap_conf_rfc rfc;
2315 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2317 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2318 return;
2320 while (len >= L2CAP_CONF_OPT_SIZE) {
2321 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2323 switch (type) {
2324 case L2CAP_CONF_RFC:
2325 if (olen == sizeof(rfc))
2326 memcpy(&rfc, (void *)val, olen);
2327 goto done;
2331 done:
2332 switch (rfc.mode) {
2333 case L2CAP_MODE_ERTM:
2334 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2335 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2336 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2337 break;
2338 case L2CAP_MODE_STREAMING:
2339 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * feature-mask Information Request, treat the exchange as done and
 * proceed with connection setup anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Handle an incoming Connection Request.
 *
 * Find a listening channel for the PSM, enforce link security for
 * non-SDP PSMs, create the child channel, and answer with success,
 * pending, or an error.  On a pending result with no feature info
 * yet, an Information Request is also fired; on immediate success the
 * first Configuration Request goes out.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace will accept/reject later. */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
/* Handle an incoming Connection Response: look up the channel by scid
 * (or by ident when scid is 0, i.e. a reject), then move it forward
 * to configuration, mark it pending, or tear it down.
 * The channel-lookup helpers return with the socket bh-locked, hence
 * the bh_unlock_sock() at the end.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			__set_chan_timer(chan, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2560 static inline void set_default_fcs(struct l2cap_chan *chan)
2562 /* FCS is enabled only in ERTM or streaming mode, if one or both
2563 * sides request it.
2565 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2566 chan->fcs = L2CAP_FCS_NONE;
2567 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2568 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request.
 *
 * Requests may arrive in pieces (continuation flag): each piece is
 * accumulated in chan->conf_req and answered with an empty success
 * response until the final piece, which is parsed as a whole.  When
 * both directions finish configuring, the channel goes BT_CONNECTED
 * and ERTM state is initialized if needed.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Configuration only makes sense in CONFIG/CONNECT2. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered first; now send our own request if not done yet. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2667 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2669 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2670 u16 scid, flags, result;
2671 struct l2cap_chan *chan;
2672 struct sock *sk;
2673 int len = cmd->len - sizeof(*rsp);
2675 scid = __le16_to_cpu(rsp->scid);
2676 flags = __le16_to_cpu(rsp->flags);
2677 result = __le16_to_cpu(rsp->result);
2679 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2680 scid, flags, result);
2682 chan = l2cap_get_chan_by_scid(conn, scid);
2683 if (!chan)
2684 return 0;
2686 sk = chan->sk;
2688 switch (result) {
2689 case L2CAP_CONF_SUCCESS:
2690 l2cap_conf_rfc_get(chan, rsp->data, len);
2691 break;
2693 case L2CAP_CONF_UNACCEPT:
2694 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2695 char req[64];
2697 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2698 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2699 goto done;
2702 /* throw out any old stored conf requests */
2703 result = L2CAP_CONF_SUCCESS;
2704 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2705 req, &result);
2706 if (len < 0) {
2707 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2708 goto done;
2711 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2712 L2CAP_CONF_REQ, len, req);
2713 chan->num_conf_req++;
2714 if (result != L2CAP_CONF_SUCCESS)
2715 goto done;
2716 break;
2719 default:
2720 sk->sk_err = ECONNRESET;
2721 __set_chan_timer(chan, HZ * 5);
2722 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2723 goto done;
2726 if (flags & 0x01)
2727 goto done;
2729 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2731 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2732 set_default_fcs(chan);
2734 l2cap_state_change(chan, BT_CONNECTED);
2735 chan->next_tx_seq = 0;
2736 chan->expected_tx_seq = 0;
2737 skb_queue_head_init(&chan->tx_q);
2738 if (chan->mode == L2CAP_MODE_ERTM)
2739 l2cap_ertm_init(chan);
2741 l2cap_chan_ready(sk);
2744 done:
2745 bh_unlock_sock(sk);
2746 return 0;
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response and tear the channel down.  If the socket is
 * currently owned by a user context, deletion is deferred to the channel
 * timer (HZ/5) instead of being done here.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* NOTE(review): returns with the socket locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* Response carries our CIDs swapped relative to the request */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed
 * our disconnect, so delete the channel (deferred to the channel timer
 * if the socket is owned by a user context).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* NOTE(review): returns with the socket locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: clean, peer-confirmed disconnect */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
/* Handle an incoming L2CAP Information Request.
 *
 * Answers L2CAP_IT_FEAT_MASK with our feature mask (extended with
 * ERTM/streaming/FCS unless disable_ertm, and extended flow/window when
 * enable_hs), L2CAP_IT_FIXED_CHAN with the fixed-channel bitmap, and
 * anything else with L2CAP_IR_NOTSUPP.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4 bytes header + 4 bytes feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4 bytes header + 8 bytes fixed-channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming L2CAP Information Response.
 *
 * Caches the peer's feature mask, chains a fixed-channel query when the
 * peer advertises fixed-channel support, and kicks pending connections
 * via l2cap_conn_start() once the info exchange is finished or fails.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: treat the exchange as done */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2921 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2922 u16 to_multiplier)
2924 u16 max_latency;
2926 if (min > max || min < 6 || max > 3200)
2927 return -EINVAL;
2929 if (to_multiplier < 10 || to_multiplier > 3200)
2930 return -EINVAL;
2932 if (max >= to_multiplier * 8)
2933 return -EINVAL;
2935 max_latency = (to_multiplier * 8 / max) - 1;
2936 if (latency > 499 || latency > max_latency)
2937 return -EINVAL;
2939 return 0;
/* Handle an LE Connection Parameter Update Request (slave -> master).
 *
 * Only valid when we are master; validates the proposed parameters,
 * always answers with accept/reject, and applies accepted parameters
 * through hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may act on this request */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* The response goes out regardless of acceptance */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline by reflecting the payload; unknown
 * opcodes return -EINVAL so the caller sends a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the request payload unchanged */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
3042 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3043 struct l2cap_cmd_hdr *cmd, u8 *data)
3045 switch (cmd->code) {
3046 case L2CAP_COMMAND_REJ:
3047 return 0;
3049 case L2CAP_CONN_PARAM_UPDATE_REQ:
3050 return l2cap_conn_param_update_req(conn, cmd, data);
3052 case L2CAP_CONN_PARAM_UPDATE_RSP:
3053 return 0;
3055 default:
3056 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3057 return -EINVAL;
/* Process the signaling channel payload: iterate over the concatenated
 * commands in the skb, dispatch each to the LE or BR/EDR handler, and
 * send a Command Reject for any command the handler refuses.  Consumes
 * the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated command or reserved ident 0: stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
/* Verify the CRC16 FCS trailer of an ERTM/streaming frame.
 *
 * The CRC covers the L2CAP header (extended or enhanced size, depending
 * on FLAG_EXT_CTRL) plus the payload.  The two FCS bytes are trimmed off
 * the skb; with skb_trim done first, skb->data + skb->len points at the
 * received FCS.  Returns 0 on match (or when FCS is disabled), -EBADMSG
 * on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
/* Answer a poll (P-bit) from the peer: send RNR when locally busy,
 * retransmit if the remote was busy, push pending I-frames, and fall
 * back to an RR frame if nothing else was sent.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing went out and we are not busy: acknowledge with RR */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, control);
	}
}
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (modulo-64 sequence
 * space).  Returns -EINVAL for a duplicate tx_seq, 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Offsets are distances from buffer_seq in the mod-64 space */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
/* Append new_frag to skb's frag_list and account its length into the
 * head skb; *last_frag tracks the list tail to keep appends O(1).
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
/* Reassemble an SDU from SAR-tagged frames.
 *
 * UNSEGMENTED frames are delivered directly; START stores the expected
 * SDU length and begins collecting, CONTINUE/END append fragments, and
 * END delivers the complete SDU.  On any error the partial SDU and the
 * offending skb are freed.  Returns 0 on success/progress, -EINVAL for
 * sequencing violations, -EMSGSIZE when the SDU exceeds the MTU, or the
 * receive callback's error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int err = -EINVAL;

	switch (__get_ctrl_sar(chan, control)) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A pending partial SDU makes an unsegmented frame illegal */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb ownership moved to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the current frame (if still owned) and any
		 * partially reassembled SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
/* Enter local-busy: advertise RNR with the current buffer_seq, remember
 * that RNR went out, and stop the ack timer while we cannot receive.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

	control = __set_reqseq(chan, chan->buffer_seq);
	control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
	l2cap_send_sframe(chan, control);

	set_bit(CONN_RNR_SENT, &chan->conn_state);

	__clear_ack_timer(chan);
}
/* Leave local-busy: if we previously sent RNR, send RR with the poll
 * bit set and arm the monitor timer to wait for the peer's F-bit reply;
 * in all cases the busy/RNR-sent state bits are cleared.
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = __set_reqseq(chan, chan->buffer_seq);
	control |= __set_ctrl_poll(chan);
	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	/* Wait for the peer's F-bit before resuming normal operation */
	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
3339 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3341 if (chan->mode == L2CAP_MODE_ERTM) {
3342 if (busy)
3343 l2cap_ertm_enter_local_busy(chan);
3344 else
3345 l2cap_ertm_exit_local_busy(chan);
/* After the missing frame tx_seq arrived, drain the SREJ queue of all
 * consecutive in-order frames starting at tx_seq, passing each to SDU
 * reassembly.  Stops at the first gap, on local-busy, or on a
 * reassembly error (which triggers a disconnect).
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		/* Advance both counters through the mod-64 sequence space */
		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
/* Walk the pending-SREJ list: re-send an SREJ for every entry ahead of
 * tx_seq (rotating each to the list tail), and drop the entry matching
 * tx_seq itself since that frame has now been received.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, l->tx_seq);
		l2cap_send_sframe(chan, control);
		/* Move to the tail so the list stays in request order */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3395 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3397 struct srej_list *new;
3398 u16 control;
3400 while (tx_seq != chan->expected_tx_seq) {
3401 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3402 control |= __set_reqseq(chan, chan->expected_tx_seq);
3403 l2cap_send_sframe(chan, control);
3405 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3406 new->tx_seq = chan->expected_tx_seq;
3407 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3408 list_add_tail(&new->list, &chan->srej_l);
3410 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive path.
 *
 * Validates tx_seq against the receive window, handles the F-bit
 * (leaving WAIT_F), acknowledges via req_seq, and then either:
 *  - delivers an in-sequence frame (reassembly + delayed ack), or
 *  - while in SREJ recovery, files the frame into the SREJ queue and
 *    resolves/re-issues selective rejects, or
 *  - on a new sequence gap, enters SREJ recovery.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u16 tx_seq = __get_txseq(chan, rx_control);
	u16 req_seq = __get_reqseq(chan, rx_control);
	u8 sar = __get_ctrl_sar(chan, rx_control);
	int tx_seq_offset, expected_tx_seq_offset;
	/* Ack after roughly 1/6th of the tx window has been received */
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: drain the gap */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		set_bit(CONN_SEND_PBIT, &chan->conn_state);

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* Still recovering: queue, do not deliver yet */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Handle a Receiver Ready (RR) S-frame: acknowledge frames up to
 * req_seq, then react to the P/F bits — answer a poll, resume after a
 * final bit, or simply clear remote-busy and keep sending.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan,
				__get_reqseq(chan, rx_control), rx_control);

	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control)) {
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (__is_ctrl_final(chan, rx_control)) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		/* F-bit without a pending REJ means our poll was answered */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
/* Handle a Reject (REJ) S-frame: acknowledge up to req_seq and
 * retransmit from there; while waiting for an F-bit, remember the REJ
 * so the eventual F-bit does not trigger a second retransmission.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit the single
 * requested frame; P-bit also acknowledges and demands an F-bit reply,
 * while an F-bit may simply close out our own earlier SREJ-with-poll.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (__is_ctrl_poll(chan, rx_control)) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (__is_ctrl_final(chan, rx_control)) {
		/* F-bit for the SREJ we already acted on: nothing to resend */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
/* Handle a Receiver Not Ready (RNR) S-frame: mark the remote busy,
 * acknowledge up to req_seq, stop retransmissions, and answer a poll —
 * with the SREJ tail while in SREJ recovery, plain RR/RNR otherwise.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control))
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		__clear_retrans_timer(chan);
		if (__is_ctrl_poll(chan, rx_control))
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (__is_ctrl_poll(chan, rx_control)) {
		l2cap_send_srejtail(chan);
	} else {
		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, rx_control);
	}
}
/* Dispatch a supervisory (S) frame to the RR/REJ/SREJ/RNR handler.
 * A final bit first clears the WAIT_F poll state.  The skb carries no
 * payload to deliver and is always consumed here.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (__get_ctrl_super(chan, rx_control)) {
	case L2CAP_SUPER_RR:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJ:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SREJ:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RNR:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
/* Validate and dispatch one received ERTM frame.
 *
 * Strips the 2-byte control field, checks FCS, the payload length
 * against MPS, and req_seq against the unacknowledged window, then
 * routes the frame to the I-frame or S-frame handler.  Invalid req_seq
 * or oversized payloads disconnect the channel.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u16 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SAR length field and FCS when sizing payload */
	if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(chan, control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!__is_sframe(chan, control)) {
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Deliver a data frame to the dynamic channel identified by cid,
 * according to the channel mode: direct delivery for basic mode,
 * the full ERTM receive path (or socket backlog when user-owned),
 * or best-effort sequenced delivery for streaming mode.  Consumes skb.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket locked; the done path unlocks only when sk was set.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u16 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			/* Socket held by a user context: defer via backlog */
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(chan, control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode */
		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
			goto drop;

		tx_seq = __get_txseq(chan, control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = (tx_seq + 1) % 64;

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless (CID 0x0002) frame to the channel bound to
 * the given PSM on our source address; dropped when no listener exists,
 * the state is wrong, or the frame exceeds the MTU.  Consumes skb.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means the skb was consumed by the channel */
	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Deliver an LE fixed-channel (ATT) frame to the channel bound to the
 * given CID on our source address; mirrors l2cap_conless_channel but
 * looks up by fixed CID instead of PSM.  Consumes skb.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means the skb was consumed by the channel */
	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Demultiplex one complete, reassembled L2CAP frame to the handler for
 * its destination CID.  Consumes the skb on every path (each branch's
 * handler takes ownership, or the skb is freed on a length mismatch).
 */
3939 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
/* lh points at the basic header; skb_pull() below only advances
 * skb->data, so reading lh->cid/lh->len afterwards is still valid.
 */
3941 	struct l2cap_hdr *lh = (void *) skb->data;
3942 	u16 cid, len;
3943 	__le16 psm;
3945 	skb_pull(skb, L2CAP_HDR_SIZE);
3946 	cid = __le16_to_cpu(lh->cid);
3947 	len = __le16_to_cpu(lh->len);
/* Header length must match the payload remaining after the header. */
3949 	if (len != skb->len) {
3950 		kfree_skb(skb);
3951 		return;
3954 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
3956 	switch (cid) {
3957 	case L2CAP_CID_LE_SIGNALING:
3958 	case L2CAP_CID_SIGNALING:
3959 		l2cap_sig_channel(conn, skb);
3960 		break;
/* Connectionless data carries a 2-byte PSM before the payload. */
3962 	case L2CAP_CID_CONN_LESS:
3963 		psm = get_unaligned_le16(skb->data);
3964 		skb_pull(skb, 2);
3965 		l2cap_conless_channel(conn, psm, skb);
3966 		break;
3968 	case L2CAP_CID_LE_DATA:
3969 		l2cap_att_channel(conn, cid, skb);
3970 		break;
/* A bad SMP PDU tears down the whole link. */
3972 	case L2CAP_CID_SMP:
3973 		if (smp_sig_channel(conn, skb))
3974 			l2cap_conn_del(conn->hcon, EACCES);
3975 		break;
/* Everything else is a dynamically-allocated data channel. */
3977 	default:
3978 		l2cap_data_channel(conn, cid, skb);
3979 		break;
3983 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection request from @bdaddr.
 * Scans listening channels and returns the link-mode flags
 * (HCI_LM_ACCEPT, optionally HCI_LM_MASTER) to apply, preferring
 * channels bound exactly to this adapter's address over wildcard
 * (BDADDR_ANY) listeners.  Returns -EINVAL for non-ACL link types.
 */
3985 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
/* lm1 accumulates flags from exact-address listeners, lm2 from
 * wildcard listeners; 'exact' counts how many exact matches we saw.
 */
3987 	int exact = 0, lm1 = 0, lm2 = 0;
3988 	struct l2cap_chan *c;
3990 	if (type != ACL_LINK)
3991 		return -EINVAL;
3993 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3995 	/* Find listening sockets and check their link_mode */
3996 	read_lock(&chan_list_lock);
3997 	list_for_each_entry(c, &chan_list, global_l) {
3998 		struct sock *sk = c->sk;
4000 		if (c->state != BT_LISTEN)
4001 			continue;
4003 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4004 			lm1 |= HCI_LM_ACCEPT;
4005 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4006 				lm1 |= HCI_LM_MASTER;
4007 			exact++;
4008 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4009 			lm2 |= HCI_LM_ACCEPT;
4010 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4011 				lm2 |= HCI_LM_MASTER;
4014 	read_unlock(&chan_list_lock);
/* Exact-address listeners take precedence over wildcard ones. */
4016 	return exact ? lm1 : lm2;
/* HCI callback: connection establishment completed for @hcon.
 * On success (@status == 0) attach/create the L2CAP connection state
 * and kick off channel setup; on failure tear the connection down,
 * converting the HCI status to an errno.  Returns -EINVAL for link
 * types other than ACL or LE.
 */
4019 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4021 	struct l2cap_conn *conn;
4023 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4025 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4026 		return -EINVAL;
4028 	if (!status) {
4029 		conn = l2cap_conn_add(hcon, status);
4030 		if (conn)
4031 			l2cap_conn_ready(conn);
4032 	} else
4033 		l2cap_conn_del(hcon, bt_to_errno(status));
4035 	return 0;
/* HCI callback: report the HCI reason code to use when disconnecting.
 * Returns the connection's stored disc_reason, or 0x13 (remote user
 * terminated connection) when there is no L2CAP state or the link type
 * is neither ACL nor LE.
 */
4038 static int l2cap_disconn_ind(struct hci_conn *hcon)
4040 	struct l2cap_conn *conn = hcon->l2cap_data;
4042 	BT_DBG("hcon %p", hcon);
4044 	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4045 		return 0x13;
4047 	return conn->disc_reason;
/* HCI callback: the link has been disconnected; tear down all L2CAP
 * state for it, converting the HCI reason to an errno.  Returns
 * -EINVAL for link types other than ACL or LE.
 */
4050 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4052 	BT_DBG("hcon %p reason %d", hcon, reason);
4054 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4055 		return -EINVAL;
4057 	l2cap_conn_del(hcon, bt_to_errno(reason));
4059 	return 0;
/* React to an encryption change on a connection-oriented channel.
 * If encryption was dropped: MEDIUM security channels get a 5 second
 * grace timer (encryption may come back, e.g. role switch); HIGH
 * security channels are closed immediately.  If encryption is on,
 * any pending grace timer on a MEDIUM channel is cancelled.
 * Channels that are not connection-oriented are ignored.
 */
4062 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4064 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4065 		return;
4067 	if (encrypt == 0x00) {
4068 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
4069 			__clear_chan_timer(chan);
4070 			__set_chan_timer(chan, HZ * 5);
4071 		} else if (chan->sec_level == BT_SECURITY_HIGH)
4072 			l2cap_chan_close(chan, ECONNREFUSED);
4073 	} else {
4074 		if (chan->sec_level == BT_SECURITY_MEDIUM)
4075 			__clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure finished on @hcon
 * with @status (0 = success) and new @encrypt state.  Walks every
 * channel on the connection and advances its state machine:
 *  - LE data channels become ready on successful encryption;
 *  - channels in CONNECT send the deferred L2CAP Connect Request;
 *  - channels in CONNECT2 answer the peer's pending Connect Request
 *    (success, authorization-pending, or security-block);
 *  - already-connected channels re-check their encryption requirement.
 * Always returns 0.
 */
4079 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4081 	struct l2cap_conn *conn = hcon->l2cap_data;
4082 	struct l2cap_chan *chan;
4084 	if (!conn)
4085 		return 0;
4087 	BT_DBG("conn %p", conn);
/* On LE links the security procedure is SMP's; distribute keys and
 * stop the SMP security timer.
 */
4089 	if (hcon->type == LE_LINK) {
4090 		smp_distribute_keys(conn, 0);
4091 		del_timer(&conn->security_timer);
4094 	read_lock(&conn->chan_lock);
4096 	list_for_each_entry(chan, &conn->chan_l, list) {
4097 		struct sock *sk = chan->sk;
4099 		bh_lock_sock(sk);
4101 		BT_DBG("chan->scid %d", chan->scid);
/* LE ATT channel: encryption success makes the channel ready;
 * nothing else to do for it either way.
 */
4103 		if (chan->scid == L2CAP_CID_LE_DATA) {
4104 			if (!status && encrypt) {
4105 				chan->sec_level = hcon->sec_level;
4106 				l2cap_chan_ready(sk);
4109 			bh_unlock_sock(sk);
4110 			continue;
/* A connect request is already in flight for this channel;
 * leave it to the response handler.
 */
4113 		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4114 			bh_unlock_sock(sk);
4115 			continue;
/* Established channels only need their encryption re-validated. */
4118 		if (!status && (chan->state == BT_CONNECTED ||
4119 					chan->state == BT_CONFIG)) {
4120 			l2cap_check_encryption(chan, encrypt);
4121 			bh_unlock_sock(sk);
4122 			continue;
/* Outgoing connect was waiting on security: send the Connect
 * Request now, or on failure arm a short timer to tear down.
 */
4125 		if (chan->state == BT_CONNECT) {
4126 			if (!status) {
4127 				struct l2cap_conn_req req;
4128 				req.scid = cpu_to_le16(chan->scid);
4129 				req.psm  = chan->psm;
4131 				chan->ident = l2cap_get_ident(conn);
4132 				set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4134 				l2cap_send_cmd(conn, chan->ident,
4135 					L2CAP_CONN_REQ, sizeof(req), &req);
4136 			} else {
4137 				__clear_chan_timer(chan);
4138 				__set_chan_timer(chan, HZ / 10);
/* Incoming connect was waiting on security: answer the peer. */
4140 		} else if (chan->state == BT_CONNECT2) {
4141 			struct l2cap_conn_rsp rsp;
4142 			__u16 res, stat;
4144 			if (!status) {
/* With deferred setup, report "authorization pending" and
 * wake the listening parent so userspace can accept().
 */
4145 				if (bt_sk(sk)->defer_setup) {
4146 					struct sock *parent = bt_sk(sk)->parent;
4147 					res = L2CAP_CR_PEND;
4148 					stat = L2CAP_CS_AUTHOR_PEND;
4149 					if (parent)
4150 						parent->sk_data_ready(parent, 0);
4151 				} else {
4152 					l2cap_state_change(chan, BT_CONFIG);
4153 					res = L2CAP_CR_SUCCESS;
4154 					stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection and schedule teardown. */
4156 			} else {
4157 				l2cap_state_change(chan, BT_DISCONN);
4158 				__set_chan_timer(chan, HZ / 10);
4159 				res = L2CAP_CR_SEC_BLOCK;
4160 				stat = L2CAP_CS_NO_INFO;
4163 			rsp.scid   = cpu_to_le16(chan->dcid);
4164 			rsp.dcid   = cpu_to_le16(chan->scid);
4165 			rsp.result = cpu_to_le16(res);
4166 			rsp.status = cpu_to_le16(stat);
4167 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4168 							sizeof(rsp), &rsp);
4171 		bh_unlock_sock(sk);
4174 	read_unlock(&conn->chan_lock);
4176 	return 0;
/* HCI callback: one ACL data packet arrived for @hcon.  Reassembles
 * L2CAP frames from ACL fragments: a start fragment (!ACL_CONT)
 * carries the basic L2CAP header giving the total frame length;
 * continuation fragments are appended to conn->rx_skb until rx_len
 * reaches zero, at which point the complete frame is dispatched via
 * l2cap_recv_frame().  Malformed sequences drop the fragment and mark
 * the connection unreliable.  The skb is consumed on every path;
 * always returns 0.
 */
4179 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4181 	struct l2cap_conn *conn = hcon->l2cap_data;
4183 	if (!conn)
4184 		conn = l2cap_conn_add(hcon, 0);
4186 	if (!conn)
4187 		goto drop;
4189 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4191 	if (!(flags & ACL_CONT)) {
4192 		struct l2cap_hdr *hdr;
4193 		struct l2cap_chan *chan;
4194 		u16 cid;
4195 		int len;
/* A start fragment while reassembly is in progress means we lost
 * fragments of the previous frame; discard the partial frame.
 */
4197 		if (conn->rx_len) {
4198 			BT_ERR("Unexpected start frame (len %d)", skb->len);
4199 			kfree_skb(conn->rx_skb);
4200 			conn->rx_skb = NULL;
4201 			conn->rx_len = 0;
4202 			l2cap_conn_unreliable(conn, ECOMM);
4205 		/* Start fragment always begin with Basic L2CAP header */
4206 		if (skb->len < L2CAP_HDR_SIZE) {
4207 			BT_ERR("Frame is too short (len %d)", skb->len);
4208 			l2cap_conn_unreliable(conn, ECOMM);
4209 			goto drop;
/* Total frame size = payload length from the header + the header. */
4212 		hdr = (struct l2cap_hdr *) skb->data;
4213 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4214 		cid = __le16_to_cpu(hdr->cid);
4216 		if (len == skb->len) {
4217 			/* Complete frame received */
4218 			l2cap_recv_frame(conn, skb);
4219 			return 0;
4222 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4224 		if (skb->len > len) {
4225 			BT_ERR("Frame is too long (len %d, expected len %d)",
4226 				skb->len, len);
4227 			l2cap_conn_unreliable(conn, ECOMM);
4228 			goto drop;
/* NOTE(review): bh_unlock_sock() without a visible matching lock here
 * suggests l2cap_get_chan_by_scid() returns with the channel's socket
 * locked — confirm against its definition.
 */
4231 		chan = l2cap_get_chan_by_scid(conn, cid);
4233 		if (chan && chan->sk) {
4234 			struct sock *sk = chan->sk;
/* Reject frames that would exceed the channel's receive MTU
 * before bothering to allocate the reassembly buffer.
 */
4236 			if (chan->imtu < len - L2CAP_HDR_SIZE) {
4237 				BT_ERR("Frame exceeding recv MTU (len %d, "
4238 							"MTU %d)", len,
4239 							chan->imtu);
4240 				bh_unlock_sock(sk);
4241 				l2cap_conn_unreliable(conn, ECOMM);
4242 				goto drop;
4244 			bh_unlock_sock(sk);
4247 		/* Allocate skb for the complete frame (with header) */
4248 		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4249 		if (!conn->rx_skb)
4250 			goto drop;
4252 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4253 								skb->len);
/* Bytes still outstanding for this frame. */
4254 		conn->rx_len = len - skb->len;
4255 	} else {
4256 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress: protocol error. */
4258 		if (!conn->rx_len) {
4259 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4260 			l2cap_conn_unreliable(conn, ECOMM);
4261 			goto drop;
/* Fragment overruns the announced frame length: discard all. */
4264 		if (skb->len > conn->rx_len) {
4265 			BT_ERR("Fragment is too long (len %d, expected %d)",
4266 					skb->len, conn->rx_len);
4267 			kfree_skb(conn->rx_skb);
4268 			conn->rx_skb = NULL;
4269 			conn->rx_len = 0;
4270 			l2cap_conn_unreliable(conn, ECOMM);
4271 			goto drop;
4274 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4275 								skb->len);
4276 		conn->rx_len -= skb->len;
4278 		if (!conn->rx_len) {
4279 			/* Complete frame received */
4280 			l2cap_recv_frame(conn, conn->rx_skb);
4281 			conn->rx_skb = NULL;
4285 drop:
4286 	kfree_skb(skb);
4287 	return 0;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dumps
 * one line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level, mode) under the global channel-list read lock.
 */
4290 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4292 	struct l2cap_chan *c;
4294 	read_lock_bh(&chan_list_lock);
4296 	list_for_each_entry(c, &chan_list, global_l) {
4297 		struct sock *sk = c->sk;
4299 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4300 					batostr(&bt_sk(sk)->src),
4301 					batostr(&bt_sk(sk)->dst),
4302 					c->state, __le16_to_cpu(c->psm),
4303 					c->scid, c->dcid, c->imtu, c->omtu,
4304 					c->sec_level, c->mode);
4307 	read_unlock_bh(&chan_list_lock);
4309 	return 0;
/* debugfs open callback: hook the show function into seq_file. */
4312 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4314 	return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (read-only seq_file). */
4317 static const struct file_operations l2cap_debugfs_fops = {
4318 	.open		= l2cap_debugfs_open,
4319 	.read		= seq_read,
4320 	.llseek		= seq_lseek,
4321 	.release	= single_release,
/* Dentry of the debugfs file, created in l2cap_init(), removed in
 * l2cap_exit().
 */
4324 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * accept policy, connect/disconnect confirmations, security results,
 * and inbound ACL data all route through the callbacks above.
 */
4326 static struct hci_proto l2cap_hci_proto = {
4327 	.name		= "L2CAP",
4328 	.id		= HCI_PROTO_L2CAP,
4329 	.connect_ind	= l2cap_connect_ind,
4330 	.connect_cfm	= l2cap_connect_cfm,
4331 	.disconn_ind	= l2cap_disconn_ind,
4332 	.disconn_cfm	= l2cap_disconn_cfm,
4333 	.security_cfm	= l2cap_security_cfm,
4334 	.recv_acldata	= l2cap_recv_acldata
/* Module init: register the L2CAP socket family, hook into the HCI
 * core, and (best-effort) create the debugfs channel dump.  On HCI
 * registration failure the socket registration is rolled back and the
 * error is returned; a missing debugfs file is only logged.
 */
4337 int __init l2cap_init(void)
4339 	int err;
4341 	err = l2cap_init_sockets();
4342 	if (err < 0)
4343 		return err;
4345 	err = hci_register_proto(&l2cap_hci_proto);
4346 	if (err < 0) {
4347 		BT_ERR("L2CAP protocol registration failed");
4348 		bt_sock_unregister(BTPROTO_L2CAP);
4349 		goto error;
4352 	if (bt_debugfs) {
4353 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4354 					bt_debugfs, NULL, &l2cap_debugfs_fops);
4355 		if (!l2cap_debugfs)
4356 			BT_ERR("Failed to create L2CAP debug file");
4359 	return 0;
4361 error:
4362 	l2cap_cleanup_sockets();
4363 	return err;
/* Module exit: undo l2cap_init() in reverse order — remove the
 * debugfs file, unregister from the HCI core, tear down the sockets.
 */
4366 void l2cap_exit(void)
4368 	debugfs_remove(l2cap_debugfs);
4370 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4371 		BT_ERR("L2CAP protocol unregistration failed");
4373 	l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (0644: root-writable via sysfs). */
4376 module_param(disable_ertm, bool, 0644);
4377 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4379 module_param(enable_hs, bool, 0644);
4380 MODULE_PARM_DESC(enable_hs, "Enable High Speed");