/*
 * net/bluetooth/l2cap_core.c
 * blob f7f8e2cd3f701b2588b1f1bfd98ca250745c4796
 * (NOTE: the scraped page header carried an unrelated commit title,
 *  "ath5k: add missing checks for rfgain probe", which belongs to a
 *  different driver and not to this file.)
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
59 int disable_ertm;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
87 kfree(c);
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
92 struct l2cap_chan *c;
94 list_for_each_entry(c, &conn->chan_l, list) {
95 if (c->dcid == cid)
96 return c;
98 return NULL;
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
107 if (c->scid == cid)
108 return c;
110 return NULL;
113 /* Find channel with given SCID.
114 * Returns locked socket */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 struct l2cap_chan *c;
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 bh_lock_sock(c->sk);
123 read_unlock(&conn->chan_lock);
124 return c;
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 struct l2cap_chan *c;
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
133 return c;
135 return NULL;
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 struct l2cap_chan *c;
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
144 if (c)
145 bh_lock_sock(c->sk);
146 read_unlock(&conn->chan_lock);
147 return c;
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 struct l2cap_chan *c;
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
156 goto found;
159 c = NULL;
160 found:
161 return c;
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 int err;
168 write_lock_bh(&chan_list_lock);
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 err = -EADDRINUSE;
172 goto done;
175 if (psm) {
176 chan->psm = psm;
177 chan->sport = psm;
178 err = 0;
179 } else {
180 u16 p;
182 err = -EINVAL;
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
187 err = 0;
188 break;
192 done:
193 write_unlock_bh(&chan_list_lock);
194 return err;
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock_bh(&chan_list_lock);
201 chan->scid = scid;
203 write_unlock_bh(&chan_list_lock);
205 return 0;
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
217 return 0;
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 chan_hold(chan);
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 BT_DBG("chan %p state %d", chan, chan->state);
232 if (timer_pending(timer) && del_timer(timer))
233 chan_put(chan);
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
238 chan->state = state;
239 chan->ops->state_change(chan->data, state);
/* Channel timer expiry: close the channel with an error that depends on
 * its state. If userspace owns the socket, retry shortly instead. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
				chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	chan_put(chan);
}
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 struct l2cap_chan *chan;
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
281 if (!chan)
282 return NULL;
284 chan->sk = sk;
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 chan->state = BT_OPEN;
294 atomic_set(&chan->refcnt, 1);
296 return chan;
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
305 chan_put(chan);
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
313 conn->disc_reason = 0x13;
315 chan->conn = conn;
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 if (conn->hcon->type == LE_LINK) {
319 /* LE connection */
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
323 } else {
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
333 } else {
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
340 chan_hold(chan);
342 list_add(&chan->list, &conn->chan_l);
345 /* Delete channel.
346 * Must be called on the locked socket. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
353 __clear_chan_timer(chan);
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
357 if (conn) {
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
362 chan_put(chan);
364 chan->conn = NULL;
365 hci_conn_put(conn->hcon);
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
371 if (err)
372 sk->sk_err = err;
374 if (parent) {
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
377 } else
378 sk->sk_state_change(sk);
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
382 return;
384 skb_queue_purge(&chan->tx_q);
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
393 skb_queue_purge(&chan->srej_q);
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
396 list_del(&l->list);
397 kfree(l);
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
404 struct sock *sk;
406 BT_DBG("parent %p", parent);
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
412 lock_sock(sk);
413 l2cap_chan_close(chan, ECONNRESET);
414 release_sock(sk);
415 chan->ops->close(chan->data);
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 switch (chan->state) {
427 case BT_LISTEN:
428 l2cap_chan_cleanup_listen(sk);
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
432 break;
434 case BT_CONNECTED:
435 case BT_CONFIG:
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
441 } else
442 l2cap_chan_del(chan, reason);
443 break;
445 case BT_CONNECT2:
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
449 __u16 result;
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
453 else
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
462 sizeof(rsp), &rsp);
465 l2cap_chan_del(chan, reason);
466 break;
468 case BT_CONNECT:
469 case BT_DISCONN:
470 l2cap_chan_del(chan, reason);
471 break;
473 default:
474 sock_set_flag(sk, SOCK_ZAPPED);
475 break;
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
487 default:
488 return HCI_AT_NO_BONDING;
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
496 else
497 return HCI_AT_NO_BONDING;
498 } else {
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
504 default:
505 return HCI_AT_NO_BONDING;
510 /* Service level security */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 struct l2cap_conn *conn = chan->conn;
514 __u8 auth_type;
516 auth_type = l2cap_get_auth_type(chan);
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
523 u8 id;
525 /* Get next available identificator.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
531 spin_lock_bh(&conn->lock);
533 if (++conn->tx_ident > 128)
534 conn->tx_ident = 1;
536 id = conn->tx_ident;
538 spin_unlock_bh(&conn->lock);
540 return id;
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
546 u8 flags;
548 BT_DBG("code 0x%2.2x", code);
550 if (!skb)
551 return;
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
555 else
556 flags = ACL_START;
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 hci_send_acl(conn->hcon, skb, flags);
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
565 struct sk_buff *skb;
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2;
569 u8 flags;
571 if (chan->state != BT_CONNECTED)
572 return;
574 if (chan->fcs == L2CAP_FCS_CRC16)
575 hlen += 2;
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
589 if (!skb)
590 return;
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
604 else
605 flags = ACL_START;
607 bt_cb(skb)->force_active = chan->force_active;
609 hci_send_acl(chan->conn->hcon, skb, flags);
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
617 } else
618 control |= L2CAP_SUPER_RCV_READY;
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 l2cap_send_sframe(chan, control);
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 struct l2cap_conn *conn = chan->conn;
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
636 return;
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
642 req.psm = chan->psm;
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
648 sizeof(req), &req);
650 } else {
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 u32 local_feat_mask = l2cap_feat_mask;
668 if (!disable_ertm)
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
671 switch (mode) {
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
676 default:
677 return 0x00;
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
683 struct sock *sk;
684 struct l2cap_disconn_req req;
686 if (!conn)
687 return;
689 sk = chan->sk;
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
702 l2cap_state_change(chan, BT_DISCONN);
703 sk->sk_err = err;
706 /* ---- L2CAP connections ---- */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 struct l2cap_chan *chan, *tmp;
711 BT_DBG("conn %p", conn);
713 read_lock(&conn->chan_lock);
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
718 bh_lock_sock(sk);
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
721 bh_unlock_sock(sk);
722 continue;
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
730 bh_unlock_sock(sk);
731 continue;
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
742 bh_unlock_sock(sk);
743 continue;
746 req.scid = cpu_to_le16(chan->scid);
747 req.psm = chan->psm;
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
753 sizeof(req), &req);
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
757 char buf[128];
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
761 if (l2cap_check_security(chan)) {
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
766 parent->sk_data_ready(parent, 0);
768 } else {
769 l2cap_state_change(chan, BT_CONFIG);
770 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
771 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
773 } else {
774 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
775 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
778 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
779 sizeof(rsp), &rsp);
781 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
782 rsp.result != L2CAP_CR_SUCCESS) {
783 bh_unlock_sock(sk);
784 continue;
787 set_bit(CONF_REQ_SENT, &chan->conf_state);
788 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
789 l2cap_build_conf_req(chan, buf), buf);
790 chan->num_conf_req++;
793 bh_unlock_sock(sk);
796 read_unlock(&conn->chan_lock);
799 /* Find socket with cid and source bdaddr.
800 * Returns closest match, locked.
802 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
804 struct l2cap_chan *c, *c1 = NULL;
806 read_lock(&chan_list_lock);
808 list_for_each_entry(c, &chan_list, global_l) {
809 struct sock *sk = c->sk;
811 if (state && c->state != state)
812 continue;
814 if (c->scid == cid) {
815 /* Exact match. */
816 if (!bacmp(&bt_sk(sk)->src, src)) {
817 read_unlock(&chan_list_lock);
818 return c;
821 /* Closest match */
822 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
823 c1 = c;
827 read_unlock(&chan_list_lock);
829 return c1;
832 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
834 struct sock *parent, *sk;
835 struct l2cap_chan *chan, *pchan;
837 BT_DBG("");
839 /* Check if we have socket listening on cid */
840 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
841 conn->src);
842 if (!pchan)
843 return;
845 parent = pchan->sk;
847 bh_lock_sock(parent);
849 /* Check for backlog size */
850 if (sk_acceptq_is_full(parent)) {
851 BT_DBG("backlog full %d", parent->sk_ack_backlog);
852 goto clean;
855 chan = pchan->ops->new_connection(pchan->data);
856 if (!chan)
857 goto clean;
859 sk = chan->sk;
861 write_lock_bh(&conn->chan_lock);
863 hci_conn_hold(conn->hcon);
865 bacpy(&bt_sk(sk)->src, conn->src);
866 bacpy(&bt_sk(sk)->dst, conn->dst);
868 bt_accept_enqueue(parent, sk);
870 __l2cap_chan_add(conn, chan);
872 __set_chan_timer(chan, sk->sk_sndtimeo);
874 l2cap_state_change(chan, BT_CONNECTED);
875 parent->sk_data_ready(parent, 0);
877 write_unlock_bh(&conn->chan_lock);
879 clean:
880 bh_unlock_sock(parent);
883 static void l2cap_chan_ready(struct sock *sk)
885 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
886 struct sock *parent = bt_sk(sk)->parent;
888 BT_DBG("sk %p, parent %p", sk, parent);
890 chan->conf_state = 0;
891 __clear_chan_timer(chan);
893 l2cap_state_change(chan, BT_CONNECTED);
894 sk->sk_state_change(sk);
896 if (parent)
897 parent->sk_data_ready(parent, 0);
900 static void l2cap_conn_ready(struct l2cap_conn *conn)
902 struct l2cap_chan *chan;
904 BT_DBG("conn %p", conn);
906 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
907 l2cap_le_conn_ready(conn);
909 read_lock(&conn->chan_lock);
911 list_for_each_entry(chan, &conn->chan_l, list) {
912 struct sock *sk = chan->sk;
914 bh_lock_sock(sk);
916 if (conn->hcon->type == LE_LINK) {
917 if (smp_conn_security(conn, chan->sec_level))
918 l2cap_chan_ready(sk);
920 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
921 __clear_chan_timer(chan);
922 l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk);
925 } else if (chan->state == BT_CONNECT)
926 l2cap_do_start(chan);
928 bh_unlock_sock(sk);
931 read_unlock(&conn->chan_lock);
934 /* Notify sockets that we cannot guaranty reliability anymore */
935 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
937 struct l2cap_chan *chan;
939 BT_DBG("conn %p", conn);
941 read_lock(&conn->chan_lock);
943 list_for_each_entry(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
946 if (chan->force_reliable)
947 sk->sk_err = err;
950 read_unlock(&conn->chan_lock);
953 static void l2cap_info_timeout(unsigned long arg)
955 struct l2cap_conn *conn = (void *) arg;
957 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
958 conn->info_ident = 0;
960 l2cap_conn_start(conn);
963 static void l2cap_conn_del(struct hci_conn *hcon, int err)
965 struct l2cap_conn *conn = hcon->l2cap_data;
966 struct l2cap_chan *chan, *l;
967 struct sock *sk;
969 if (!conn)
970 return;
972 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
974 kfree_skb(conn->rx_skb);
976 /* Kill channels */
977 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
978 sk = chan->sk;
979 bh_lock_sock(sk);
980 l2cap_chan_del(chan, err);
981 bh_unlock_sock(sk);
982 chan->ops->close(chan->data);
985 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
986 del_timer_sync(&conn->info_timer);
988 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
989 del_timer(&conn->security_timer);
991 hcon->l2cap_data = NULL;
992 kfree(conn);
995 static void security_timeout(unsigned long arg)
997 struct l2cap_conn *conn = (void *) arg;
999 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1002 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1004 struct l2cap_conn *conn = hcon->l2cap_data;
1006 if (conn || status)
1007 return conn;
1009 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1010 if (!conn)
1011 return NULL;
1013 hcon->l2cap_data = conn;
1014 conn->hcon = hcon;
1016 BT_DBG("hcon %p conn %p", hcon, conn);
1018 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1019 conn->mtu = hcon->hdev->le_mtu;
1020 else
1021 conn->mtu = hcon->hdev->acl_mtu;
1023 conn->src = &hcon->hdev->bdaddr;
1024 conn->dst = &hcon->dst;
1026 conn->feat_mask = 0;
1028 spin_lock_init(&conn->lock);
1029 rwlock_init(&conn->chan_lock);
1031 INIT_LIST_HEAD(&conn->chan_l);
1033 if (hcon->type == LE_LINK)
1034 setup_timer(&conn->security_timer, security_timeout,
1035 (unsigned long) conn);
1036 else
1037 setup_timer(&conn->info_timer, l2cap_info_timeout,
1038 (unsigned long) conn);
1040 conn->disc_reason = 0x13;
1042 return conn;
1045 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1047 write_lock_bh(&conn->chan_lock);
1048 __l2cap_chan_add(conn, chan);
1049 write_unlock_bh(&conn->chan_lock);
1052 /* ---- Socket interface ---- */
1054 /* Find socket with psm and source bdaddr.
1055 * Returns closest match.
1057 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1059 struct l2cap_chan *c, *c1 = NULL;
1061 read_lock(&chan_list_lock);
1063 list_for_each_entry(c, &chan_list, global_l) {
1064 struct sock *sk = c->sk;
1066 if (state && c->state != state)
1067 continue;
1069 if (c->psm == psm) {
1070 /* Exact match. */
1071 if (!bacmp(&bt_sk(sk)->src, src)) {
1072 read_unlock(&chan_list_lock);
1073 return c;
1076 /* Closest match */
1077 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1078 c1 = c;
1082 read_unlock(&chan_list_lock);
1084 return c1;
1087 int l2cap_chan_connect(struct l2cap_chan *chan)
1089 struct sock *sk = chan->sk;
1090 bdaddr_t *src = &bt_sk(sk)->src;
1091 bdaddr_t *dst = &bt_sk(sk)->dst;
1092 struct l2cap_conn *conn;
1093 struct hci_conn *hcon;
1094 struct hci_dev *hdev;
1095 __u8 auth_type;
1096 int err;
1098 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1099 chan->psm);
1101 hdev = hci_get_route(dst, src);
1102 if (!hdev)
1103 return -EHOSTUNREACH;
1105 hci_dev_lock_bh(hdev);
1107 auth_type = l2cap_get_auth_type(chan);
1109 if (chan->dcid == L2CAP_CID_LE_DATA)
1110 hcon = hci_connect(hdev, LE_LINK, dst,
1111 chan->sec_level, auth_type);
1112 else
1113 hcon = hci_connect(hdev, ACL_LINK, dst,
1114 chan->sec_level, auth_type);
1116 if (IS_ERR(hcon)) {
1117 err = PTR_ERR(hcon);
1118 goto done;
1121 conn = l2cap_conn_add(hcon, 0);
1122 if (!conn) {
1123 hci_conn_put(hcon);
1124 err = -ENOMEM;
1125 goto done;
1128 /* Update source addr of the socket */
1129 bacpy(src, conn->src);
1131 l2cap_chan_add(conn, chan);
1133 l2cap_state_change(chan, BT_CONNECT);
1134 __set_chan_timer(chan, sk->sk_sndtimeo);
1136 if (hcon->state == BT_CONNECTED) {
1137 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1138 __clear_chan_timer(chan);
1139 if (l2cap_check_security(chan))
1140 l2cap_state_change(chan, BT_CONNECTED);
1141 } else
1142 l2cap_do_start(chan);
1145 err = 0;
1147 done:
1148 hci_dev_unlock_bh(hdev);
1149 hci_dev_put(hdev);
1150 return err;
1153 int __l2cap_wait_ack(struct sock *sk)
1155 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1156 DECLARE_WAITQUEUE(wait, current);
1157 int err = 0;
1158 int timeo = HZ/5;
1160 add_wait_queue(sk_sleep(sk), &wait);
1161 while ((chan->unacked_frames > 0 && chan->conn)) {
1162 set_current_state(TASK_INTERRUPTIBLE);
1164 if (!timeo)
1165 timeo = HZ/5;
1167 if (signal_pending(current)) {
1168 err = sock_intr_errno(timeo);
1169 break;
1172 release_sock(sk);
1173 timeo = schedule_timeout(timeo);
1174 lock_sock(sk);
1176 err = sock_error(sk);
1177 if (err)
1178 break;
1180 set_current_state(TASK_RUNNING);
1181 remove_wait_queue(sk_sleep(sk), &wait);
1182 return err;
1185 static void l2cap_monitor_timeout(unsigned long arg)
1187 struct l2cap_chan *chan = (void *) arg;
1188 struct sock *sk = chan->sk;
1190 BT_DBG("chan %p", chan);
1192 bh_lock_sock(sk);
1193 if (chan->retry_count >= chan->remote_max_tx) {
1194 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1195 bh_unlock_sock(sk);
1196 return;
1199 chan->retry_count++;
1200 __set_monitor_timer(chan);
1202 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1203 bh_unlock_sock(sk);
1206 static void l2cap_retrans_timeout(unsigned long arg)
1208 struct l2cap_chan *chan = (void *) arg;
1209 struct sock *sk = chan->sk;
1211 BT_DBG("chan %p", chan);
1213 bh_lock_sock(sk);
1214 chan->retry_count = 1;
1215 __set_monitor_timer(chan);
1217 set_bit(CONN_WAIT_F, &chan->conn_state);
1219 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1220 bh_unlock_sock(sk);
1223 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1225 struct sk_buff *skb;
1227 while ((skb = skb_peek(&chan->tx_q)) &&
1228 chan->unacked_frames) {
1229 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1230 break;
1232 skb = skb_dequeue(&chan->tx_q);
1233 kfree_skb(skb);
1235 chan->unacked_frames--;
1238 if (!chan->unacked_frames)
1239 __clear_retrans_timer(chan);
1242 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1244 struct hci_conn *hcon = chan->conn->hcon;
1245 u16 flags;
1247 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1249 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1250 flags = ACL_START_NO_FLUSH;
1251 else
1252 flags = ACL_START;
1254 bt_cb(skb)->force_active = chan->force_active;
1255 hci_send_acl(hcon, skb, flags);
1258 void l2cap_streaming_send(struct l2cap_chan *chan)
1260 struct sk_buff *skb;
1261 u16 control, fcs;
1263 while ((skb = skb_dequeue(&chan->tx_q))) {
1264 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1265 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1266 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1268 if (chan->fcs == L2CAP_FCS_CRC16) {
1269 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1270 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1273 l2cap_do_send(chan, skb);
1275 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1279 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1281 struct sk_buff *skb, *tx_skb;
1282 u16 control, fcs;
1284 skb = skb_peek(&chan->tx_q);
1285 if (!skb)
1286 return;
1288 do {
1289 if (bt_cb(skb)->tx_seq == tx_seq)
1290 break;
1292 if (skb_queue_is_last(&chan->tx_q, skb))
1293 return;
1295 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1297 if (chan->remote_max_tx &&
1298 bt_cb(skb)->retries == chan->remote_max_tx) {
1299 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1300 return;
1303 tx_skb = skb_clone(skb, GFP_ATOMIC);
1304 bt_cb(skb)->retries++;
1305 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1306 control &= L2CAP_CTRL_SAR;
1308 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1309 control |= L2CAP_CTRL_FINAL;
1311 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1312 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1314 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1316 if (chan->fcs == L2CAP_FCS_CRC16) {
1317 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1318 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1321 l2cap_do_send(chan, tx_skb);
1324 int l2cap_ertm_send(struct l2cap_chan *chan)
1326 struct sk_buff *skb, *tx_skb;
1327 u16 control, fcs;
1328 int nsent = 0;
1330 if (chan->state != BT_CONNECTED)
1331 return -ENOTCONN;
1333 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1335 if (chan->remote_max_tx &&
1336 bt_cb(skb)->retries == chan->remote_max_tx) {
1337 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1338 break;
1341 tx_skb = skb_clone(skb, GFP_ATOMIC);
1343 bt_cb(skb)->retries++;
1345 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1346 control &= L2CAP_CTRL_SAR;
1348 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1349 control |= L2CAP_CTRL_FINAL;
1351 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1352 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1353 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1356 if (chan->fcs == L2CAP_FCS_CRC16) {
1357 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1358 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1361 l2cap_do_send(chan, tx_skb);
1363 __set_retrans_timer(chan);
1365 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1366 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1368 if (bt_cb(skb)->retries == 1)
1369 chan->unacked_frames++;
1371 chan->frames_sent++;
1373 if (skb_queue_is_last(&chan->tx_q, skb))
1374 chan->tx_send_head = NULL;
1375 else
1376 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1378 nsent++;
1381 return nsent;
1384 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1386 int ret;
1388 if (!skb_queue_empty(&chan->tx_q))
1389 chan->tx_send_head = chan->tx_q.next;
1391 chan->next_tx_seq = chan->expected_ack_seq;
1392 ret = l2cap_ertm_send(chan);
1393 return ret;
1396 static void l2cap_send_ack(struct l2cap_chan *chan)
1398 u16 control = 0;
1400 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1402 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1403 control |= L2CAP_SUPER_RCV_NOT_READY;
1404 set_bit(CONN_RNR_SENT, &chan->conn_state);
1405 l2cap_send_sframe(chan, control);
1406 return;
1409 if (l2cap_ertm_send(chan) > 0)
1410 return;
1412 control |= L2CAP_SUPER_RCV_READY;
1413 l2cap_send_sframe(chan, control);
1416 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1418 struct srej_list *tail;
1419 u16 control;
1421 control = L2CAP_SUPER_SELECT_REJECT;
1422 control |= L2CAP_CTRL_FINAL;
1424 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1425 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1427 l2cap_send_sframe(chan, control);
/* Copy @count bytes of user data from @msg into the headroom-prepared
 * @skb, then pull the remaining @len - @count bytes into a chain of
 * continuation fragments (each at most conn->mtu bytes, carrying no
 * L2CAP header) hung off skb_shinfo(skb)->frag_list.
 *
 * Returns the total number of bytes consumed, or a negative errno.
 * On failure the caller is expected to free @skb, which also releases
 * any fragments already linked into frag_list.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless-channel PDU: basic L2CAP header, the 2-byte
 * PSM, then the user payload from @msg (excess over the link MTU is
 * chained as continuation fragments).  Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Header "len" counts PSM + payload, but not the basic header. */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode PDU: L2CAP header followed directly by the user
 * payload from @msg (excess over the link MTU is chained as
 * continuation fragments).  Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field (@control), an optional 2-byte SDU length (@sdulen, present
 * only on SAR "start" frames), the payload from @msg, and room for a
 * trailing 2-byte FCS when CRC16 is in use (value filled in at send
 * time).  Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SAR start frames carry the SDU length */

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* reserve room for the trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Placeholder FCS; the real CRC is computed when the frame is
	 * actually transmitted (sequence numbers are stamped then). */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than the peer's MPS into a chain of I-frames:
 * one SAR "start" frame carrying the total SDU length, then "continue"
 * frames and a final "end" frame.  Frames are built on a private queue
 * first so nothing reaches tx_q on partial failure.  Returns the number
 * of payload bytes queued or a negative errno.
 *
 * NOTE(review): the first frame unconditionally consumes remote_mps
 * bytes, so this assumes len > chan->remote_mps on entry — the caller
 * (l2cap_chan_send) guarantees it; confirm for any new callers.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* Start frame: sdulen argument carries the full SDU length. */
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	/* Commit the whole segmented SDU to the transmit queue at once. */
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1612 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1614 struct sk_buff *skb;
1615 u16 control;
1616 int err;
1618 /* Connectionless channel */
1619 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1620 skb = l2cap_create_connless_pdu(chan, msg, len);
1621 if (IS_ERR(skb))
1622 return PTR_ERR(skb);
1624 l2cap_do_send(chan, skb);
1625 return len;
1628 switch (chan->mode) {
1629 case L2CAP_MODE_BASIC:
1630 /* Check outgoing MTU */
1631 if (len > chan->omtu)
1632 return -EMSGSIZE;
1634 /* Create a basic PDU */
1635 skb = l2cap_create_basic_pdu(chan, msg, len);
1636 if (IS_ERR(skb))
1637 return PTR_ERR(skb);
1639 l2cap_do_send(chan, skb);
1640 err = len;
1641 break;
1643 case L2CAP_MODE_ERTM:
1644 case L2CAP_MODE_STREAMING:
1645 /* Entire SDU fits into one PDU */
1646 if (len <= chan->remote_mps) {
1647 control = L2CAP_SDU_UNSEGMENTED;
1648 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1650 if (IS_ERR(skb))
1651 return PTR_ERR(skb);
1653 __skb_queue_tail(&chan->tx_q, skb);
1655 if (chan->tx_send_head == NULL)
1656 chan->tx_send_head = skb;
1658 } else {
1659 /* Segment SDU into multiples PDUs */
1660 err = l2cap_sar_segment_sdu(chan, msg, len);
1661 if (err < 0)
1662 return err;
1665 if (chan->mode == L2CAP_MODE_STREAMING) {
1666 l2cap_streaming_send(chan);
1667 err = len;
1668 break;
1671 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1672 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1673 err = len;
1674 break;
1677 err = l2cap_ertm_send(chan);
1678 if (err >= 0)
1679 err = len;
1681 break;
1683 default:
1684 BT_DBG("bad state %1.1x", chan->mode);
1685 err = -EBADFD;
1688 return err;
/* Copy frame to all raw sockets on that connection: clone the incoming
 * skb once per raw channel and hand it to that channel's recv callback;
 * clones the callback rejects are freed here.  The socket the frame
 * originated from is skipped.  Runs under the connection's channel-list
 * read lock, so the callbacks must not sleep. */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;	/* best effort: skip on alloc failure */

		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header on the signalling
 * CID (LE or BR/EDR variant depending on the link type), the command
 * header (@code/@ident/@dlen), and @dlen bytes of @data.  Payload
 * beyond the link MTU is chained as continuation fragments on
 * frag_list.  Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever payload fits in the first fragment. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including fragments already linked. */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *ptr and advance *ptr past it.
 * The option type and length are returned through @type/@olen; @val
 * receives the value for 1/2/4-byte options, or a pointer to the raw
 * value bytes for any other length.  Returns the encoded size consumed
 * (option header + value).
 *
 * NOTE(review): opt->len comes straight off the wire and is not checked
 * against the remaining buffer here; callers bound the loop with
 * "len >= L2CAP_CONF_OPT_SIZE" but a crafted opt->len can still advance
 * *ptr past the option data — verify each caller's buffer sizing.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-size option: hand back a pointer instead. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1815 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1817 struct l2cap_conf_opt *opt = *ptr;
1819 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1821 opt->type = type;
1822 opt->len = len;
1824 switch (len) {
1825 case 1:
1826 *((u8 *) opt->val) = val;
1827 break;
1829 case 2:
1830 put_unaligned_le16(val, opt->val);
1831 break;
1833 case 4:
1834 put_unaligned_le32(val, opt->val);
1835 break;
1837 default:
1838 memcpy(opt->val, (void *) val, len);
1839 break;
1842 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: fires when received I-frames have gone
 * unacknowledged for too long.  Runs in timer (softirq) context, hence
 * the bh socket lock around l2cap_send_ack(). */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;

	bh_lock_sock(chan->sk);
	l2cap_send_ack(chan);
	bh_unlock_sock(chan->sk);
}
/* Reset all ERTM sequence/ack bookkeeping for a channel entering the
 * connected state, arm the retransmission, monitor and ack timers, and
 * initialize the SREJ reassembly structures.  Also reroutes socket
 * backlog processing to the ERTM data handler. */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);

	/* Frames arriving while the socket is owned by user context are
	 * processed by the ERTM handler when the backlog is drained. */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1878 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1880 switch (mode) {
1881 case L2CAP_MODE_STREAMING:
1882 case L2CAP_MODE_ERTM:
1883 if (l2cap_mode_supported(mode, remote_feat_mask))
1884 return mode;
1885 /* fall through */
1886 default:
1887 return L2CAP_MODE_BASIC;
/* Build our outgoing Configure Request into @data: MTU option if it
 * differs from the default, then the RFC (mode) option appropriate for
 * the channel mode, and an FCS option when we want to disable the
 * CRC16.  On the first request the mode may first be downgraded based
 * on the remote's feature mask.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens once, before any config exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC option when the
		 * remote supports other modes and might expect one. */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Clamp MPS so an I-frame plus overhead fits the HCI MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" when we, or the remote, opted out. */
		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1988 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1990 struct l2cap_conf_rsp *rsp = data;
1991 void *ptr = rsp->data;
1992 void *req = chan->conf_req;
1993 int len = chan->conf_len;
1994 int type, hint, olen;
1995 unsigned long val;
1996 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1997 u16 mtu = L2CAP_DEFAULT_MTU;
1998 u16 result = L2CAP_CONF_SUCCESS;
2000 BT_DBG("chan %p", chan);
2002 while (len >= L2CAP_CONF_OPT_SIZE) {
2003 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2005 hint = type & L2CAP_CONF_HINT;
2006 type &= L2CAP_CONF_MASK;
2008 switch (type) {
2009 case L2CAP_CONF_MTU:
2010 mtu = val;
2011 break;
2013 case L2CAP_CONF_FLUSH_TO:
2014 chan->flush_to = val;
2015 break;
2017 case L2CAP_CONF_QOS:
2018 break;
2020 case L2CAP_CONF_RFC:
2021 if (olen == sizeof(rfc))
2022 memcpy(&rfc, (void *) val, olen);
2023 break;
2025 case L2CAP_CONF_FCS:
2026 if (val == L2CAP_FCS_NONE)
2027 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2029 break;
2031 default:
2032 if (hint)
2033 break;
2035 result = L2CAP_CONF_UNKNOWN;
2036 *((u8 *) ptr++) = type;
2037 break;
2041 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2042 goto done;
2044 switch (chan->mode) {
2045 case L2CAP_MODE_STREAMING:
2046 case L2CAP_MODE_ERTM:
2047 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2048 chan->mode = l2cap_select_mode(rfc.mode,
2049 chan->conn->feat_mask);
2050 break;
2053 if (chan->mode != rfc.mode)
2054 return -ECONNREFUSED;
2056 break;
2059 done:
2060 if (chan->mode != rfc.mode) {
2061 result = L2CAP_CONF_UNACCEPT;
2062 rfc.mode = chan->mode;
2064 if (chan->num_conf_rsp == 1)
2065 return -ECONNREFUSED;
2067 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2068 sizeof(rfc), (unsigned long) &rfc);
2072 if (result == L2CAP_CONF_SUCCESS) {
2073 /* Configure output options and let the other side know
2074 * which ones we don't like. */
2076 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2077 result = L2CAP_CONF_UNACCEPT;
2078 else {
2079 chan->omtu = mtu;
2080 set_bit(CONF_MTU_DONE, &chan->conf_state);
2082 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2084 switch (rfc.mode) {
2085 case L2CAP_MODE_BASIC:
2086 chan->fcs = L2CAP_FCS_NONE;
2087 set_bit(CONF_MODE_DONE, &chan->conf_state);
2088 break;
2090 case L2CAP_MODE_ERTM:
2091 chan->remote_tx_win = rfc.txwin_size;
2092 chan->remote_max_tx = rfc.max_transmit;
2094 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2095 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2097 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2099 rfc.retrans_timeout =
2100 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2101 rfc.monitor_timeout =
2102 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2104 set_bit(CONF_MODE_DONE, &chan->conf_state);
2106 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2107 sizeof(rfc), (unsigned long) &rfc);
2109 break;
2111 case L2CAP_MODE_STREAMING:
2112 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2113 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2115 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2117 set_bit(CONF_MODE_DONE, &chan->conf_state);
2119 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2120 sizeof(rfc), (unsigned long) &rfc);
2122 break;
2124 default:
2125 result = L2CAP_CONF_UNACCEPT;
2127 memset(&rfc, 0, sizeof(rfc));
2128 rfc.mode = chan->mode;
2131 if (result == L2CAP_CONF_SUCCESS)
2132 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2134 rsp->scid = cpu_to_le16(chan->dcid);
2135 rsp->result = cpu_to_le16(result);
2136 rsp->flags = cpu_to_le16(0x0000);
2138 return ptr - data;
2141 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2143 struct l2cap_conf_req *req = data;
2144 void *ptr = req->data;
2145 int type, olen;
2146 unsigned long val;
2147 struct l2cap_conf_rfc rfc;
2149 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2151 while (len >= L2CAP_CONF_OPT_SIZE) {
2152 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2154 switch (type) {
2155 case L2CAP_CONF_MTU:
2156 if (val < L2CAP_DEFAULT_MIN_MTU) {
2157 *result = L2CAP_CONF_UNACCEPT;
2158 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2159 } else
2160 chan->imtu = val;
2161 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2162 break;
2164 case L2CAP_CONF_FLUSH_TO:
2165 chan->flush_to = val;
2166 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2167 2, chan->flush_to);
2168 break;
2170 case L2CAP_CONF_RFC:
2171 if (olen == sizeof(rfc))
2172 memcpy(&rfc, (void *)val, olen);
2174 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2175 rfc.mode != chan->mode)
2176 return -ECONNREFUSED;
2178 chan->fcs = 0;
2180 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2181 sizeof(rfc), (unsigned long) &rfc);
2182 break;
2186 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2187 return -ECONNREFUSED;
2189 chan->mode = rfc.mode;
2191 if (*result == L2CAP_CONF_SUCCESS) {
2192 switch (rfc.mode) {
2193 case L2CAP_MODE_ERTM:
2194 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2195 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2196 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2197 break;
2198 case L2CAP_MODE_STREAMING:
2199 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2203 req->dcid = cpu_to_le16(chan->dcid);
2204 req->flags = cpu_to_le16(0x0000);
2206 return ptr - data;
2209 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2211 struct l2cap_conf_rsp *rsp = data;
2212 void *ptr = rsp->data;
2214 BT_DBG("chan %p", chan);
2216 rsp->scid = cpu_to_le16(chan->dcid);
2217 rsp->result = cpu_to_le16(result);
2218 rsp->flags = cpu_to_le16(flags);
2220 return ptr - data;
/* Send the deferred Connect Response (success) for a channel whose
 * acceptance was held back by the socket's defer_setup option, then
 * kick off configuration with our first Configure Request unless one
 * was already sent. */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* chan->ident still holds the ident of the original request. */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2244 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2246 int type, olen;
2247 unsigned long val;
2248 struct l2cap_conf_rfc rfc;
2250 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2252 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2253 return;
2255 while (len >= L2CAP_CONF_OPT_SIZE) {
2256 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2258 switch (type) {
2259 case L2CAP_CONF_RFC:
2260 if (olen == sizeof(rfc))
2261 memcpy(&rfc, (void *)val, olen);
2262 goto done;
2266 done:
2267 switch (rfc.mode) {
2268 case L2CAP_MODE_ERTM:
2269 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2270 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2271 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2272 break;
2273 case L2CAP_MODE_STREAMING:
2274 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2278 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2280 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2282 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2283 return 0;
2285 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2286 cmd->ident == conn->info_ident) {
2287 del_timer(&conn->info_timer);
2289 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2290 conn->info_ident = 0;
2292 l2cap_conn_start(conn);
2295 return 0;
/* Handle an incoming Connect Request: locate a listening channel for
 * the PSM, run security and accept-backlog checks, create the child
 * channel, and answer with success, pending or an error.  May also
 * trigger an Information Request (feature mask) and, on success, the
 * first Configure Request.  Always returns 0 (a response is sent even
 * on failure).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	/* Decide the response: success only when the feature-mask
	 * exchange is done, security passes and setup is not deferred;
	 * otherwise report pending with the appropriate status. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Pending with no info: the feature-mask exchange has not been
	 * started yet, so start it now. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
/* Handle an incoming Connect Response: look up the channel by our scid
 * (or by the command ident if the peer reported scid 0) and advance it
 * to BT_CONFIG on success, mark it pending, or tear it down on any
 * other result.
 *
 * NOTE(review): the lookup helpers presumably return with the
 * channel's socket bh-locked — every path below ends in
 * bh_unlock_sock(sk); confirm against their definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			/* Short timer so the channel is reaped once the
			 * user releases the socket. */
			__set_chan_timer(chan, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2495 static inline void set_default_fcs(struct l2cap_chan *chan)
2497 /* FCS is enabled only in ERTM or streaming mode, if one or both
2498 * sides request it.
2500 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2501 chan->fcs = L2CAP_FCS_NONE;
2502 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2503 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate (possibly
 * fragmented) option data in chan->conf_req, and once the final
 * fragment arrives, parse it and send our Configure Response.  When
 * both directions are configured, the channel moves to BT_CONNECTED.
 *
 * NOTE(review): l2cap_get_chan_by_scid() presumably returns with the
 * socket bh-locked — every path ends at the unlock label; confirm
 * against its definition.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Configuration is only legal in the expected state; otherwise
	 * reject with "invalid CID". */
	if ((bt_sk(sk)->defer_setup && chan->state != BT_CONNECT2) ||
			(!bt_sk(sk)->defer_setup && chan->state != BT_CONFIG)) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: bring the channel up. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Peer configured first: send our own Configure Request now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming Configuration Response for one of our channels.
 *
 * On L2CAP_CONF_UNACCEPT the peer sent back options it wants changed;
 * re-parse them and issue a fresh Configuration Request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP rounds).  Any other failure result tears the
 * channel down.  Returns 0 in all cases (errors are handled in-band by
 * sending a Disconnection Request).
 *
 * NOTE(review): l2cap_get_chan_by_scid() is presumed to return with the
 * channel's socket bh-locked -- every exit path below unlocks it; confirm
 * against its definition earlier in this file.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	struct sock *sk;
	int len = cmd->len - sizeof(*rsp);	/* length of the option payload */

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* peer's options must fit in our request buffer */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* too many UNACCEPT rounds: fall through and give up */

	default:
		sk->sk_err = ECONNRESET;
		__set_chan_timer(chan, HZ * 5);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* continuation flag set: more config responses will follow */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* both directions configured: bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
/* Handle a remote Disconnection Request: acknowledge it with a
 * Disconnection Response and tear down the matching channel.
 *
 * If the socket is currently locked by user context, only mark the
 * channel BT_DISCONN and arm a short timer so teardown is retried
 * later instead of racing the user.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* the peer's dcid is our scid; lookup returns with sk bh-locked */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);	/* retry teardown shortly */
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
/* Handle the peer's Disconnection Response to a request we sent:
 * the disconnect handshake is complete, so delete the channel.
 * Deferred (via a short timer) when the socket is user-locked,
 * mirroring l2cap_disconnect_req().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan,BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a clean, locally-initiated disconnect */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
2760 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2762 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2763 u16 type;
2765 type = __le16_to_cpu(req->type);
2767 BT_DBG("type 0x%4.4x", type);
2769 if (type == L2CAP_IT_FEAT_MASK) {
2770 u8 buf[8];
2771 u32 feat_mask = l2cap_feat_mask;
2772 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2773 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2774 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2775 if (!disable_ertm)
2776 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2777 | L2CAP_FEAT_FCS;
2778 put_unaligned_le32(feat_mask, rsp->data);
2779 l2cap_send_cmd(conn, cmd->ident,
2780 L2CAP_INFO_RSP, sizeof(buf), buf);
2781 } else if (type == L2CAP_IT_FIXED_CHAN) {
2782 u8 buf[12];
2783 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2784 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2785 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2786 memcpy(buf + 4, l2cap_fixed_chan, 8);
2787 l2cap_send_cmd(conn, cmd->ident,
2788 L2CAP_INFO_RSP, sizeof(buf), buf);
2789 } else {
2790 struct l2cap_info_rsp rsp;
2791 rsp.type = cpu_to_le16(type);
2792 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2793 l2cap_send_cmd(conn, cmd->ident,
2794 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2797 return 0;
/* Handle an Information Response to a request this connection issued
 * during setup.  Caches the peer's feature mask and, if it advertises
 * fixed channels, chains a second Information Request for the fixed
 * channel map.  Once the exchange is finished (success or failure) the
 * pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* peer refused; mark the exchange done and proceed anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* second round: ask for the fixed channel map */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Validate LE connection update parameters (Bluetooth Core Spec,
 * Vol 3 Part A, Connection Parameter Update Request).
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	/* connection interval: 6..3200 units, min must not exceed max */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* supervision timeout multiplier: 10..3200 */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* the supervision timeout must strictly exceed the max interval */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;

	/* slave latency capped at 499 and must fit inside the timeout */
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
/* Handle an LE Connection Parameter Update Request from the slave.
 *
 * Only the master may act on it (-EINVAL otherwise, which makes the
 * caller send a Command Reject).  Malformed length yields -EPROTO.
 * Valid parameters are accepted on the wire and pushed down to the
 * controller via hci_le_conn_update(); invalid ones are rejected but
 * still answered, so 0 is returned either way.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* validate length before trusting the payload cast below */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* only program the controller with parameters we accepted */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
/* Dispatch one BR/EDR signaling command to its handler.
 * A non-zero return makes the caller (l2cap_sig_channel) answer with
 * a Command Reject, so only return errors for genuinely unhandled or
 * malformed commands.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2974 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2975 struct l2cap_cmd_hdr *cmd, u8 *data)
2977 switch (cmd->code) {
2978 case L2CAP_COMMAND_REJ:
2979 return 0;
2981 case L2CAP_CONN_PARAM_UPDATE_REQ:
2982 return l2cap_conn_param_update_req(conn, cmd, data);
2984 case L2CAP_CONN_PARAM_UPDATE_RSP:
2985 return 0;
2987 default:
2988 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2989 return -EINVAL;
/* Process all signaling commands packed into one skb on the signaling
 * channel.  Each command is a header plus cmd_len payload bytes; the
 * loop stops at the first truncated or ident-less command.  Errors from
 * the per-command handlers are answered with a Command Reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* give raw sockets a copy of the signaling traffic */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* cmd_len bounded by remaining bytes; ident 0 is invalid */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3040 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3042 u16 our_fcs, rcv_fcs;
3043 int hdr_size = L2CAP_HDR_SIZE + 2;
3045 if (chan->fcs == L2CAP_FCS_CRC16) {
3046 skb_trim(skb, skb->len - 2);
3047 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3048 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3050 if (our_fcs != rcv_fcs)
3051 return -EBADMSG;
3053 return 0;
/* Answer a poll (P-bit) from the peer: send RNR if we are locally busy,
 * otherwise try to push pending I-frames, falling back to an RR if
 * nothing was actually transmitted (the peer still needs an F-bit
 * response).
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* no I-frame carried the response: send an explicit RR */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
/* Insert an out-of-order I-frame into the SREJ reassembly queue,
 * keeping the queue sorted by tx_seq distance from buffer_seq
 * (modulo-64 sequence space).  Returns -EINVAL if a frame with the
 * same tx_seq is already queued (duplicate), 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* distance of the new frame from the receive window base */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;	/* duplicate */

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* first queued frame that is further out: insert before it */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
/* Reassemble one ERTM I-frame into an SDU according to its SAR bits and
 * deliver complete SDUs upstream via chan->ops->recv().
 *
 * Returns 0 on success or after an in-band recovery (drop/disconnect),
 * -ENOMEM if allocation failed, or the error from ->recv().
 *
 * Note the label layout: "drop" frees the partial SDU and then falls
 * through into "disconnect" -- SAR violations in ERTM are fatal to the
 * channel, unlike in streaming mode.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* an unsegmented SDU must not arrive mid-reassembly */
		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto drop;

		return chan->ops->recv(chan->data, skb);

	case L2CAP_SDU_START:
		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto drop;

		/* first two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		set_bit(CONN_SAR_SDU, &chan->conn_state);
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* bound the running total before copying into chan->sdu */
		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;

		if (chan->partial_sdu_len > chan->imtu)
			goto drop;

		/* total must match the length announced in SDU_START */
		if (chan->partial_sdu_len != chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			return -ENOMEM;
		}

		err = chan->ops->recv(chan->data, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			return err;
		}

		clear_bit(CONN_SAR_SDU, &chan->conn_state);

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* fallthrough: SAR violations tear the ERTM channel down */
disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
/* Enter the ERTM local-busy state: tell the peer to stop sending
 * (RNR S-frame) and suspend our acknowledgement timer until the
 * upper layer can accept data again.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, control);

	set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* no point acking while we refuse new frames */
	__clear_ack_timer(chan);
}
/* Leave the ERTM local-busy state.  If we previously advertised RNR,
 * send an RR with the P-bit set so the peer resynchronizes, arm the
 * monitor timer and wait for the F-bit response (CONN_WAIT_F).
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
3263 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3265 if (chan->mode == L2CAP_MODE_ERTM) {
3266 if (busy)
3267 l2cap_ertm_enter_local_busy(chan);
3268 else
3269 l2cap_ertm_exit_local_busy(chan);
3273 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3275 struct sk_buff *_skb;
3276 int err = -EINVAL;
3279 * TODO: We have to notify the userland if some data is lost with the
3280 * Streaming Mode.
3283 switch (control & L2CAP_CTRL_SAR) {
3284 case L2CAP_SDU_UNSEGMENTED:
3285 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3286 kfree_skb(chan->sdu);
3287 break;
3290 err = chan->ops->recv(chan->data, skb);
3291 if (!err)
3292 return 0;
3294 break;
3296 case L2CAP_SDU_START:
3297 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3298 kfree_skb(chan->sdu);
3299 break;
3302 chan->sdu_len = get_unaligned_le16(skb->data);
3303 skb_pull(skb, 2);
3305 if (chan->sdu_len > chan->imtu) {
3306 err = -EMSGSIZE;
3307 break;
3310 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3311 if (!chan->sdu) {
3312 err = -ENOMEM;
3313 break;
3316 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3318 set_bit(CONN_SAR_SDU, &chan->conn_state);
3319 chan->partial_sdu_len = skb->len;
3320 err = 0;
3321 break;
3323 case L2CAP_SDU_CONTINUE:
3324 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3325 break;
3327 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3329 chan->partial_sdu_len += skb->len;
3330 if (chan->partial_sdu_len > chan->sdu_len)
3331 kfree_skb(chan->sdu);
3332 else
3333 err = 0;
3335 break;
3337 case L2CAP_SDU_END:
3338 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3339 break;
3341 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3343 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3344 chan->partial_sdu_len += skb->len;
3346 if (chan->partial_sdu_len > chan->imtu)
3347 goto drop;
3349 if (chan->partial_sdu_len == chan->sdu_len) {
3350 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3351 err = chan->ops->recv(chan->data, _skb);
3352 if (err < 0)
3353 kfree_skb(_skb);
3355 err = 0;
3357 drop:
3358 kfree_skb(chan->sdu);
3359 break;
3362 kfree_skb(skb);
3363 return err;
/* After the frame we SREJ'd arrives, drain the SREJ queue of every
 * consecutively-sequenced frame starting at tx_seq, delivering each to
 * the reassembler.  Stops at the first gap, on local-busy, or if
 * reassembly fails (which tears down the channel).
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		/* queue is sorted; a mismatch means a gap remains */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
/* The frame we were missing (tx_seq) arrived again: walk the pending
 * SREJ list, re-sending an SREJ for every entry still outstanding and
 * rotating it to the tail, until the entry for tx_seq itself is found
 * and removed.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			/* this gap is now filled; forget it */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		/* keep list order consistent with resend order */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3412 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3414 struct srej_list *new;
3415 u16 control;
3417 while (tx_seq != chan->expected_tx_seq) {
3418 control = L2CAP_SUPER_SELECT_REJECT;
3419 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3420 l2cap_send_sframe(chan, control);
3422 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3423 new->tx_seq = chan->expected_tx_seq;
3424 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3425 list_add_tail(&new->list, &chan->srej_l);
3427 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive state machine.
 *
 * Acknowledges the peer's req_seq, validates tx_seq against the receive
 * window, and either (a) delivers an in-order frame, (b) buffers an
 * out-of-order frame and runs the selective-reject (SREJ) recovery, or
 * (c) drops duplicates/invalid frames.  Takes ownership of skb on every
 * path.  Returns 0 or a negative reassembly error.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;	/* ack roughly every 1/6 window */
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	/* F-bit answers an earlier poll: stop the monitor timer */
	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	/* piggy-backed acknowledgement */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* the oldest requested frame arrived: try to close
			 * the gap and deliver everything now in order */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* first out-of-sequence frame: enter SREJ recovery */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		set_bit(CONN_SEND_PBIT, &chan->conn_state);

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* in-order but mid-recovery: park it in the SREJ queue */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Handle a Receiver-Ready (RR) S-frame: the peer acknowledges up to
 * req_seq and is no longer busy.  P-bit requires an F-bit response;
 * F-bit completes one of our polls and may trigger retransmission.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			/* mid-SREJ: answer the poll with the SREJ tail */
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission of
 * everything from req_seq onward.  CONN_REJ_ACT records that we already
 * retransmitted in answer to this REJ, so a subsequent F-bit does not
 * trigger a second (duplicate) retransmission.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Handle a Selective-Reject (SREJ) S-frame: retransmit exactly the
 * frame with sequence number req_seq.  CONN_SREJ_ACT mirrors the REJ
 * logic: it suppresses a duplicate retransmission when the F-bit answer
 * to our poll carries the same sequence number we already resent.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
/* Handle a Receiver-Not-Ready (RNR) S-frame: the peer is busy, so stop
 * retransmitting.  A P-bit still demands an F-bit response (RR, or the
 * SREJ tail if selective-reject recovery is in progress).
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		__clear_retrans_timer(chan);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
/* Dispatch an ERTM S-frame (supervisory frame) to the handler for its
 * supervise bits.  An F-bit first completes any outstanding poll, as in
 * the I-frame path.  The skb carries no payload for us and is always
 * freed here.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
/* Validate and dispatch one received ERTM frame.
 *
 * Checks the FCS, the payload length against MPS, and the req_seq
 * against the window of frames we actually have in flight, then routes
 * the frame to the I-frame or S-frame handler.  Corrupted frames are
 * silently dropped (ERTM recovery will request retransmission); length
 * and sequence violations disconnect the channel.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* SDU_START I-frames carry a 2-byte SDU length prefix */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Deliver one connection-oriented data frame to the channel identified
 * by cid, dispatching on the channel's operating mode.
 *
 * NOTE(review): l2cap_get_chan_by_scid() is presumed to return with the
 * socket bh-locked; the "done" exit unlocks it only when a channel was
 * found.  Frames for unknown cids or wrong states are dropped.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			/* user holds the lock: defer via the backlog */
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* streaming carries I-frames only; S-frames are invalid */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* lossy mode: just resynchronize on sequence gaps */
		if (chan->expected_tx_seq == tx_seq)
			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		else
			chan->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(chan, skb, control);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless (G-frame) payload to a socket bound to the
 * given PSM on our source address.  Frames with no listener, oversized
 * payloads, or sockets in the wrong state are dropped.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* ->recv() returning 0 means it consumed the skb */
	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Deliver an LE fixed-channel (ATT) payload to the socket bound to that
 * channel id on our source address.  Mirrors l2cap_conless_channel():
 * no listener, wrong state, or oversized payload means drop.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* ->recv() returning 0 means it consumed the skb */
	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3946 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3948 struct l2cap_hdr *lh = (void *) skb->data;
3949 u16 cid, len;
3950 __le16 psm;
3952 skb_pull(skb, L2CAP_HDR_SIZE);
3953 cid = __le16_to_cpu(lh->cid);
3954 len = __le16_to_cpu(lh->len);
3956 if (len != skb->len) {
3957 kfree_skb(skb);
3958 return;
3961 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3963 switch (cid) {
3964 case L2CAP_CID_LE_SIGNALING:
3965 case L2CAP_CID_SIGNALING:
3966 l2cap_sig_channel(conn, skb);
3967 break;
3969 case L2CAP_CID_CONN_LESS:
3970 psm = get_unaligned_le16(skb->data);
3971 skb_pull(skb, 2);
3972 l2cap_conless_channel(conn, psm, skb);
3973 break;
3975 case L2CAP_CID_LE_DATA:
3976 l2cap_att_channel(conn, cid, skb);
3977 break;
3979 case L2CAP_CID_SMP:
3980 if (smp_sig_channel(conn, skb))
3981 l2cap_conn_del(conn->hcon, EACCES);
3982 break;
3984 default:
3985 l2cap_data_channel(conn, cid, skb);
3986 break;
3990 /* ---- L2CAP interface with lower layer (HCI) ---- */
3992 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3994 int exact = 0, lm1 = 0, lm2 = 0;
3995 struct l2cap_chan *c;
3997 if (type != ACL_LINK)
3998 return -EINVAL;
4000 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4002 /* Find listening sockets and check their link_mode */
4003 read_lock(&chan_list_lock);
4004 list_for_each_entry(c, &chan_list, global_l) {
4005 struct sock *sk = c->sk;
4007 if (c->state != BT_LISTEN)
4008 continue;
4010 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4011 lm1 |= HCI_LM_ACCEPT;
4012 if (c->role_switch)
4013 lm1 |= HCI_LM_MASTER;
4014 exact++;
4015 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4016 lm2 |= HCI_LM_ACCEPT;
4017 if (c->role_switch)
4018 lm2 |= HCI_LM_MASTER;
4021 read_unlock(&chan_list_lock);
4023 return exact ? lm1 : lm2;
4026 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4028 struct l2cap_conn *conn;
4030 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4032 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4033 return -EINVAL;
4035 if (!status) {
4036 conn = l2cap_conn_add(hcon, status);
4037 if (conn)
4038 l2cap_conn_ready(conn);
4039 } else
4040 l2cap_conn_del(hcon, bt_to_errno(status));
4042 return 0;
/* HCI callback: the core asks which reason code to report when this link
 * goes down.  Returns the reason L2CAP recorded for the connection.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	/* No L2CAP state (or not an ACL/LE link): fall back to 0x13,
	 * the HCI "Remote User Terminated Connection" code. */
	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
		return 0x13;

	return conn->disc_reason;
}
4057 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4059 BT_DBG("hcon %p reason %d", hcon, reason);
4061 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4062 return -EINVAL;
4064 l2cap_conn_del(hcon, bt_to_errno(reason));
4066 return 0;
4069 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4071 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4072 return;
4074 if (encrypt == 0x00) {
4075 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4076 __clear_chan_timer(chan);
4077 __set_chan_timer(chan, HZ * 5);
4078 } else if (chan->sec_level == BT_SECURITY_HIGH)
4079 l2cap_chan_close(chan, ECONNREFUSED);
4080 } else {
4081 if (chan->sec_level == BT_SECURITY_MEDIUM)
4082 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure on the link finished
 * with @status (0 = success) and resulting @encrypt state.  Walks every
 * channel on the connection and advances its security-dependent state
 * machine under the socket lock.  Returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE link: successful encryption completes SMP
			 * pairing — adopt the link security level, stop the
			 * SMP timer, mark the channel ready and distribute
			 * keys. */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				del_timer(&conn->security_timer);
				l2cap_chan_ready(sk);
				smp_distribute_keys(conn, 0);
			}

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect request is already pending; nothing to do. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption check. */
		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				/* Security is now in place: send the
				 * deferred Connection Request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				set_bit(CONF_CONNECT_PEND, &chan->conf_state);

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer triggers
				 * channel teardown. */
				__clear_chan_timer(chan);
				__set_chan_timer(chan, HZ / 10);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection waiting on security: answer
			 * the remote's Connection Request now. */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					/* NOTE(review): parent is not checked
					 * for NULL here — assumed non-NULL for
					 * defer_setup sockets; confirm. */
					parent->sk_data_ready(parent, 0);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
/* HCI callback: one ACL fragment arrived.  Reassembles fragments into a
 * complete L2CAP frame in conn->rx_skb / conn->rx_len, then hands the
 * finished frame to l2cap_recv_frame().  Always consumes @skb and
 * returns 0; protocol violations mark the connection unreliable.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Locks the socket on success — unlocked below. */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject frames that would exceed the channel's
			 * receive MTU before allocating a reassembly buffer. */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4293 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4295 struct l2cap_chan *c;
4297 read_lock_bh(&chan_list_lock);
4299 list_for_each_entry(c, &chan_list, global_l) {
4300 struct sock *sk = c->sk;
4302 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4303 batostr(&bt_sk(sk)->src),
4304 batostr(&bt_sk(sk)->dst),
4305 c->state, __le16_to_cpu(c->psm),
4306 c->scid, c->dcid, c->imtu, c->omtu,
4307 c->sec_level, c->mode);
4310 read_unlock_bh(&chan_list_lock);
4312 return 0;
/* debugfs open hook: wire up the single-record seq_file helper. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the debugfs "l2cap" file; content is produced by
 * l2cap_debugfs_show() via the seq_file single_open helpers. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file; NULL until created in l2cap_init(). */
static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core; the HCI layer calls
 * these back for connection events, security results and incoming ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4340 int __init l2cap_init(void)
4342 int err;
4344 err = l2cap_init_sockets();
4345 if (err < 0)
4346 return err;
4348 err = hci_register_proto(&l2cap_hci_proto);
4349 if (err < 0) {
4350 BT_ERR("L2CAP protocol registration failed");
4351 bt_sock_unregister(BTPROTO_L2CAP);
4352 goto error;
4355 if (bt_debugfs) {
4356 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4357 bt_debugfs, NULL, &l2cap_debugfs_fops);
4358 if (!l2cap_debugfs)
4359 BT_ERR("Failed to create L2CAP debug file");
4362 return 0;
4364 error:
4365 l2cap_cleanup_sockets();
4366 return err;
/* Module exit: tear down in reverse order of l2cap_init(). */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
/* Module parameter: allows disabling Enhanced Retransmission Mode
 * (writable at runtime via sysfs, mode 0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");