drm/radeon/kms: add new NI pci ids
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap_core.c
blobebff14c690786ae8e4e0766dfd51cc7e16d3e2ce
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Module parameter: non-zero disables ERTM/streaming mode advertisement. */
58 int disable_ertm;
/* Features advertised in L2CAP_INFO_RSP; fixed-channel map (bit 1 = signalling). */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue used to drain frames deferred while the channel was locally busy. */
63 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP channels, guarded by chan_list_lock. */
65 LIST_HEAD(chan_list);
66 DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in this file. */
68 static void l2cap_busy_work(struct work_struct *work);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 /* ---- L2CAP channels ---- */
/* Find the channel on @conn whose destination CID matches @cid, or NULL.
 * Double-underscore variant: caller is expected to hold conn->chan_lock
 * (see the locked scid/ident wrappers below) — TODO confirm for dcid callers. */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 struct l2cap_chan *c;
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
85 return NULL;
/* Find the channel on @conn whose source CID matches @cid, or NULL.
 * Caller must hold conn->chan_lock (see l2cap_get_chan_by_scid). */
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
91 struct l2cap_chan *c;
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
97 return NULL;
100 /* Find channel with given SCID.
101 * Returns locked socket */
/* Locked wrapper: takes chan_lock for the lookup and returns with the
 * channel's socket bh-locked (caller must bh_unlock_sock() when done). */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 read_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 if (c)
109 bh_lock_sock(c->sk);
110 read_unlock(&conn->chan_lock);
111 return c;
/* Find the channel on @conn with a pending signalling command using @ident.
 * Caller must hold conn->chan_lock. */
114 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
116 struct l2cap_chan *c;
118 list_for_each_entry(c, &conn->chan_l, list) {
119 if (c->ident == ident)
120 return c;
122 return NULL;
/* Locked wrapper around __l2cap_get_chan_by_ident(); on success the
 * channel's socket is returned bh-locked. */
125 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
127 struct l2cap_chan *c;
129 read_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_ident(conn, ident);
131 if (c)
132 bh_lock_sock(c->sk);
133 read_unlock(&conn->chan_lock);
134 return c;
/* Find a channel in the global list bound to source PSM @psm and local
 * address @src.  Caller must hold chan_list_lock. */
137 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
139 struct l2cap_chan *c;
141 list_for_each_entry(c, &chan_list, global_l) {
142 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
143 goto found;
146 c = NULL;
147 found:
148 return c;
/* Bind @chan to @psm on local address @src.
 * A zero @psm requests automatic allocation from the dynamic range
 * 0x1001-0x10ff (odd values only, per the L2CAP spec's PSM encoding).
 * Returns 0 on success, -EADDRINUSE if the PSM is taken, -EINVAL if the
 * dynamic range is exhausted. */
151 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
153 int err;
155 write_lock_bh(&chan_list_lock);
157 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
158 err = -EADDRINUSE;
159 goto done;
162 if (psm) {
163 chan->psm = psm;
164 chan->sport = psm;
165 err = 0;
166 } else {
167 u16 p;
169 err = -EINVAL;
170 for (p = 0x1001; p < 0x1100; p += 2)
171 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
172 chan->psm = cpu_to_le16(p);
173 chan->sport = cpu_to_le16(p);
174 err = 0;
175 break;
179 done:
180 write_unlock_bh(&chan_list_lock);
181 return err;
/* Directly assign a fixed source CID to @chan (used for fixed channels).
 * Always succeeds. */
184 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
186 write_lock_bh(&chan_list_lock);
188 chan->scid = scid;
190 write_unlock_bh(&chan_list_lock);
192 return 0;
/* Allocate the first free dynamic CID on @conn; returns 0 when the
 * dynamic range is exhausted.  Caller must hold conn->chan_lock. */
195 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
197 u16 cid = L2CAP_CID_DYN_START;
199 for (; cid < L2CAP_CID_DYN_END; cid++) {
200 if (!__l2cap_get_chan_by_scid(conn, cid))
201 return cid;
204 return 0;
/* Allocate a new channel bound to socket @sk and link it into the global
 * channel list.  Returns NULL on allocation failure.  The channel is
 * freed by l2cap_chan_destroy(). */
207 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
209 struct l2cap_chan *chan;
211 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
212 if (!chan)
213 return NULL;
215 chan->sk = sk;
217 write_lock_bh(&chan_list_lock);
218 list_add(&chan->global_l, &chan_list);
219 write_unlock_bh(&chan_list_lock);
221 return chan;
/* Unlink @chan from the global list and free it.  Counterpart of
 * l2cap_chan_create(). */
224 void l2cap_chan_destroy(struct l2cap_chan *chan)
226 write_lock_bh(&chan_list_lock);
227 list_del(&chan->global_l);
228 write_unlock_bh(&chan_list_lock);
230 kfree(chan);
/* Attach @chan to @conn: pick CIDs/MTU according to the socket type
 * (connection-oriented, connectionless or raw/signalling) and link the
 * channel into the connection's list.  Caller must hold conn->chan_lock
 * (see l2cap_chan_add).  Takes a socket reference that l2cap_chan_del()
 * releases. */
233 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
235 struct sock *sk = chan->sk;
237 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
238 chan->psm, chan->dcid);
/* 0x13 = remote user terminated connection; default disconnect reason. */
240 conn->disc_reason = 0x13;
242 chan->conn = conn;
244 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
245 if (conn->hcon->type == LE_LINK) {
246 /* LE connection */
247 chan->omtu = L2CAP_LE_DEFAULT_MTU;
248 chan->scid = L2CAP_CID_LE_DATA;
249 chan->dcid = L2CAP_CID_LE_DATA;
250 } else {
251 /* Alloc CID for connection-oriented socket */
252 chan->scid = l2cap_alloc_cid(conn);
253 chan->omtu = L2CAP_DEFAULT_MTU;
255 } else if (sk->sk_type == SOCK_DGRAM) {
256 /* Connectionless socket */
257 chan->scid = L2CAP_CID_CONN_LESS;
258 chan->dcid = L2CAP_CID_CONN_LESS;
259 chan->omtu = L2CAP_DEFAULT_MTU;
260 } else {
261 /* Raw socket can send/recv signalling messages only */
262 chan->scid = L2CAP_CID_SIGNALING;
263 chan->dcid = L2CAP_CID_SIGNALING;
264 chan->omtu = L2CAP_DEFAULT_MTU;
267 sock_hold(sk);
269 list_add(&chan->list, &conn->chan_l);
272 /* Delete channel.
273 * Must be called on the locked socket. */
/* Detach @chan from its connection, mark the socket closed/zapped and,
 * once configuration had completed, tear down all ERTM state (timers,
 * srej/busy queues).  @err is propagated to sk->sk_err. */
274 void l2cap_chan_del(struct l2cap_chan *chan, int err)
276 struct sock *sk = chan->sk;
277 struct l2cap_conn *conn = chan->conn;
278 struct sock *parent = bt_sk(sk)->parent;
280 l2cap_sock_clear_timer(sk);
282 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
284 if (conn) {
285 /* Delete from channel list */
286 write_lock_bh(&conn->chan_lock);
287 list_del(&chan->list);
288 write_unlock_bh(&conn->chan_lock);
/* Drop the reference taken by __l2cap_chan_add(). */
289 __sock_put(sk);
291 chan->conn = NULL;
292 hci_conn_put(conn->hcon);
295 sk->sk_state = BT_CLOSED;
296 sock_set_flag(sk, SOCK_ZAPPED);
298 if (err)
299 sk->sk_err = err;
301 if (parent) {
302 bt_accept_unlink(sk);
303 parent->sk_data_ready(parent, 0);
304 } else
305 sk->sk_state_change(sk);
/* Only channels that finished both config directions have ERTM state. */
307 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
308 chan->conf_state & L2CAP_CONF_INPUT_DONE))
309 return;
311 skb_queue_purge(&chan->tx_q);
313 if (chan->mode == L2CAP_MODE_ERTM) {
314 struct srej_list *l, *tmp;
316 del_timer(&chan->retrans_timer);
317 del_timer(&chan->monitor_timer);
318 del_timer(&chan->ack_timer);
320 skb_queue_purge(&chan->srej_q);
321 skb_queue_purge(&chan->busy_q);
323 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
324 list_del(&l->list);
325 kfree(l);
/* Map the channel's security level and socket type to an HCI
 * authentication requirement.  Raw sockets use dedicated bonding, SDP
 * (PSM 0x0001) avoids bonding entirely, everything else uses general
 * bonding.  Side effect: downgrades BT_SECURITY_LOW to BT_SECURITY_SDP
 * for SDP channels. */
330 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
332 struct sock *sk = chan->sk;
334 if (sk->sk_type == SOCK_RAW) {
335 switch (chan->sec_level) {
336 case BT_SECURITY_HIGH:
337 return HCI_AT_DEDICATED_BONDING_MITM;
338 case BT_SECURITY_MEDIUM:
339 return HCI_AT_DEDICATED_BONDING;
340 default:
341 return HCI_AT_NO_BONDING;
343 } else if (chan->psm == cpu_to_le16(0x0001)) {
344 if (chan->sec_level == BT_SECURITY_LOW)
345 chan->sec_level = BT_SECURITY_SDP;
347 if (chan->sec_level == BT_SECURITY_HIGH)
348 return HCI_AT_NO_BONDING_MITM;
349 else
350 return HCI_AT_NO_BONDING;
351 } else {
352 switch (chan->sec_level) {
353 case BT_SECURITY_HIGH:
354 return HCI_AT_GENERAL_BONDING_MITM;
355 case BT_SECURITY_MEDIUM:
356 return HCI_AT_GENERAL_BONDING;
357 default:
358 return HCI_AT_NO_BONDING;
363 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level; returns the
 * hci_conn_security() result (non-zero when the link already satisfies
 * the requirement). */
364 static inline int l2cap_check_security(struct l2cap_chan *chan)
366 struct l2cap_conn *conn = chan->conn;
367 __u8 auth_type;
369 auth_type = l2cap_get_auth_type(chan);
371 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved range 1-128. */
374 u8 l2cap_get_ident(struct l2cap_conn *conn)
376 u8 id;
378 /* Get next available identificator.
379 * 1 - 128 are used by kernel.
380 * 129 - 199 are reserved.
381 * 200 - 254 are used by utilities like l2ping, etc.
384 spin_lock_bh(&conn->lock);
386 if (++conn->tx_ident > 128)
387 conn->tx_ident = 1;
389 id = conn->tx_ident;
391 spin_unlock_bh(&conn->lock);
393 return id;
/* Build and transmit an L2CAP signalling command on @conn.  Silently
 * drops the command if the skb cannot be built (allocation failure). */
396 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
398 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
399 u8 flags;
401 BT_DBG("code 0x%2.2x", code);
403 if (!skb)
404 return;
/* Use non-flushable packets when the controller supports them. */
406 if (lmp_no_flush_capable(conn->hcon->hdev))
407 flags = ACL_START_NO_FLUSH;
408 else
409 flags = ACL_START;
411 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM S-frame carrying @control.  Consumes any
 * pending F-bit/P-bit request flags from chan->conn_state and appends a
 * CRC16 FCS when the channel negotiated it.  No-op unless the socket is
 * connected. */
414 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
416 struct sk_buff *skb;
417 struct l2cap_hdr *lh;
418 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
419 struct l2cap_conn *conn = chan->conn;
420 struct sock *sk = (struct sock *)pi;
421 int count, hlen = L2CAP_HDR_SIZE + 2;
422 u8 flags;
424 if (sk->sk_state != BT_CONNECTED)
425 return;
427 if (chan->fcs == L2CAP_FCS_CRC16)
428 hlen += 2;
430 BT_DBG("chan %p, control 0x%2.2x", chan, control);
432 count = min_t(unsigned int, conn->mtu, hlen);
433 control |= L2CAP_CTRL_FRAME_TYPE;
435 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
436 control |= L2CAP_CTRL_FINAL;
437 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
440 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
441 control |= L2CAP_CTRL_POLL;
442 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
445 skb = bt_skb_alloc(count, GFP_ATOMIC);
446 if (!skb)
447 return;
449 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
450 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
451 lh->cid = cpu_to_le16(chan->dcid);
452 put_unaligned_le16(control, skb_put(skb, 2));
454 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers the whole frame up to (but excluding) the FCS field. */
455 u16 fcs = crc16(0, (u8 *)lh, count - 2);
456 put_unaligned_le16(fcs, skb_put(skb, 2));
459 if (lmp_no_flush_capable(conn->hcon->hdev))
460 flags = ACL_START_NO_FLUSH;
461 else
462 flags = ACL_START;
464 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send a Receiver-Ready or Receiver-Not-Ready supervisory frame,
 * depending on whether we are locally busy; always includes our current
 * buffer_seq as the acknowledgement number. */
467 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
469 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
470 control |= L2CAP_SUPER_RCV_NOT_READY;
471 chan->conn_state |= L2CAP_CONN_RNR_SENT;
472 } else
473 control |= L2CAP_SUPER_RCV_READY;
475 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
477 l2cap_send_sframe(chan, control);
/* True when no connect request is already outstanding for @chan. */
480 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
482 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: once the remote feature mask is known
 * (and security passes) send a Connect Request; otherwise first issue an
 * Information Request for the feature mask and arm the info timer. */
485 static void l2cap_do_start(struct l2cap_chan *chan)
487 struct l2cap_conn *conn = chan->conn;
489 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
490 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
491 return;
493 if (l2cap_check_security(chan) &&
494 __l2cap_no_conn_pending(chan)) {
495 struct l2cap_conn_req req;
496 req.scid = cpu_to_le16(chan->scid);
497 req.psm = chan->psm;
499 chan->ident = l2cap_get_ident(conn);
500 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
502 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
503 sizeof(req), &req);
505 } else {
506 struct l2cap_info_req req;
507 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
509 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
510 conn->info_ident = l2cap_get_ident(conn);
512 mod_timer(&conn->info_timer, jiffies +
513 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
515 l2cap_send_cmd(conn, conn->info_ident,
516 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check that @mode (ERTM/streaming) is supported both locally and in the
 * remote @feat_mask.  Returns non-zero when supported, 0 otherwise
 * (basic mode falls through to the default). */
520 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
522 u32 local_feat_mask = l2cap_feat_mask;
523 if (!disable_ertm)
524 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
526 switch (mode) {
527 case L2CAP_MODE_ERTM:
528 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
529 case L2CAP_MODE_STREAMING:
530 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
531 default:
532 return 0x00;
/* Send a Disconnect Request for @chan, stop its ERTM timers, move the
 * socket to BT_DISCONN and record @err in sk->sk_err.  No-op when @conn
 * is NULL. */
536 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
538 struct sock *sk;
539 struct l2cap_disconn_req req;
541 if (!conn)
542 return;
544 sk = chan->sk;
546 if (chan->mode == L2CAP_MODE_ERTM) {
547 del_timer(&chan->retrans_timer);
548 del_timer(&chan->monitor_timer);
549 del_timer(&chan->ack_timer);
552 req.dcid = cpu_to_le16(chan->dcid);
553 req.scid = cpu_to_le16(chan->scid);
554 l2cap_send_cmd(conn, l2cap_get_ident(conn),
555 L2CAP_DISCONN_REQ, sizeof(req), &req);
557 sk->sk_state = BT_DISCONN;
558 sk->sk_err = err;
561 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward one step:
 * channels in BT_CONNECT get a Connect Request (or are closed if their
 * mode is unsupported), channels in BT_CONNECT2 get a Connect Response
 * followed, on success, by the first Configure Request. */
562 static void l2cap_conn_start(struct l2cap_conn *conn)
564 struct l2cap_chan *chan, *tmp;
566 BT_DBG("conn %p", conn);
568 read_lock(&conn->chan_lock);
570 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
571 struct sock *sk = chan->sk;
573 bh_lock_sock(sk);
575 if (sk->sk_type != SOCK_SEQPACKET &&
576 sk->sk_type != SOCK_STREAM) {
577 bh_unlock_sock(sk);
578 continue;
581 if (sk->sk_state == BT_CONNECT) {
582 struct l2cap_conn_req req;
584 if (!l2cap_check_security(chan) ||
585 !__l2cap_no_conn_pending(chan)) {
586 bh_unlock_sock(sk);
587 continue;
590 if (!l2cap_mode_supported(chan->mode,
591 conn->feat_mask)
592 && chan->conf_state &
593 L2CAP_CONF_STATE2_DEVICE) {
594 /* __l2cap_sock_close() calls list_del(chan)
595 * so release the lock */
/* NOTE(review): the lock was taken with read_lock() above but is
 * dropped/retaken here with the _bh variants — verify the bh state is
 * consistent across this window. */
596 read_unlock_bh(&conn->chan_lock);
597 __l2cap_sock_close(sk, ECONNRESET);
598 read_lock_bh(&conn->chan_lock);
599 bh_unlock_sock(sk);
600 continue;
603 req.scid = cpu_to_le16(chan->scid);
604 req.psm = chan->psm;
606 chan->ident = l2cap_get_ident(conn);
607 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
609 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
610 sizeof(req), &req);
612 } else if (sk->sk_state == BT_CONNECT2) {
613 struct l2cap_conn_rsp rsp;
614 char buf[128];
615 rsp.scid = cpu_to_le16(chan->dcid);
616 rsp.dcid = cpu_to_le16(chan->scid);
618 if (l2cap_check_security(chan)) {
619 if (bt_sk(sk)->defer_setup) {
620 struct sock *parent = bt_sk(sk)->parent;
621 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
622 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
623 parent->sk_data_ready(parent, 0);
625 } else {
626 sk->sk_state = BT_CONFIG;
627 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
628 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
630 } else {
631 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
632 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
635 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
636 sizeof(rsp), &rsp);
638 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
639 rsp.result != L2CAP_CR_SUCCESS) {
640 bh_unlock_sock(sk);
641 continue;
644 chan->conf_state |= L2CAP_CONF_REQ_SENT;
645 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
646 l2cap_build_conf_req(chan, buf), buf);
647 chan->num_conf_req++;
650 bh_unlock_sock(sk);
653 read_unlock(&conn->chan_lock);
656 /* Find socket with cid and source bdaddr.
657 * Returns closest match, locked.
/* Search the global channel list for state @state and source CID @cid:
 * an exact local-address match wins; otherwise the last BDADDR_ANY
 * binding seen is returned as the closest match. */
659 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
661 struct l2cap_chan *c, *c1 = NULL;
663 read_lock(&chan_list_lock);
665 list_for_each_entry(c, &chan_list, global_l) {
666 struct sock *sk = c->sk;
668 if (state && sk->sk_state != state)
669 continue;
671 if (c->scid == cid) {
672 /* Exact match. */
673 if (!bacmp(&bt_sk(sk)->src, src)) {
674 read_unlock(&chan_list_lock);
675 return c;
678 /* Closest match */
679 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
680 c1 = c;
684 read_unlock(&chan_list_lock);
686 return c1;
/* Handle an incoming LE connection: find a listening socket on the LE
 * data CID, clone a child socket/channel for it, attach the channel to
 * @conn and wake the listener.  All failure paths fall through to
 * "clean", which only releases the parent socket lock. */
689 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
691 struct sock *parent, *sk;
692 struct l2cap_chan *chan, *pchan;
694 BT_DBG("");
696 /* Check if we have socket listening on cid */
697 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
698 conn->src);
699 if (!pchan)
700 return;
702 parent = pchan->sk;
704 bh_lock_sock(parent);
706 /* Check for backlog size */
707 if (sk_acceptq_is_full(parent)) {
708 BT_DBG("backlog full %d", parent->sk_ack_backlog);
709 goto clean;
712 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
713 if (!sk)
714 goto clean;
716 chan = l2cap_chan_create(sk);
717 if (!chan) {
718 l2cap_sock_kill(sk);
719 goto clean;
722 l2cap_pi(sk)->chan = chan;
724 write_lock_bh(&conn->chan_lock);
726 hci_conn_hold(conn->hcon);
728 l2cap_sock_init(sk, parent);
730 bacpy(&bt_sk(sk)->src, conn->src);
731 bacpy(&bt_sk(sk)->dst, conn->dst);
733 bt_accept_enqueue(parent, sk);
735 __l2cap_chan_add(conn, chan);
737 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
739 sk->sk_state = BT_CONNECTED;
740 parent->sk_data_ready(parent, 0);
742 write_unlock_bh(&conn->chan_lock);
744 clean:
745 bh_unlock_sock(parent);
/* Called when the underlying HCI link comes up: accept any pending LE
 * connection, then advance every channel — non-stream sockets (and all
 * LE channels) go straight to BT_CONNECTED, connecting channels proceed
 * via l2cap_do_start(). */
748 static void l2cap_conn_ready(struct l2cap_conn *conn)
750 struct l2cap_chan *chan;
752 BT_DBG("conn %p", conn);
754 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
755 l2cap_le_conn_ready(conn);
757 read_lock(&conn->chan_lock);
759 list_for_each_entry(chan, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
762 bh_lock_sock(sk);
764 if (conn->hcon->type == LE_LINK) {
765 l2cap_sock_clear_timer(sk);
766 sk->sk_state = BT_CONNECTED;
767 sk->sk_state_change(sk);
770 if (sk->sk_type != SOCK_SEQPACKET &&
771 sk->sk_type != SOCK_STREAM) {
772 l2cap_sock_clear_timer(sk);
773 sk->sk_state = BT_CONNECTED;
774 sk->sk_state_change(sk);
775 } else if (sk->sk_state == BT_CONNECT)
776 l2cap_do_start(chan);
778 bh_unlock_sock(sk);
781 read_unlock(&conn->chan_lock);
784 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel on @conn that requested reliability
 * (force_reliable); other channels are left untouched. */
785 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
787 struct l2cap_chan *chan;
789 BT_DBG("conn %p", conn);
791 read_lock(&conn->chan_lock);
793 list_for_each_entry(chan, &conn->chan_l, list) {
794 struct sock *sk = chan->sk;
796 if (chan->force_reliable)
797 sk->sk_err = err;
800 read_unlock(&conn->chan_lock);
/* Info-request timer callback: give up waiting for the remote feature
 * mask, mark the exchange done and let pending channels proceed. */
803 static void l2cap_info_timeout(unsigned long arg)
805 struct l2cap_conn *conn = (void *) arg;
807 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
808 conn->info_ident = 0;
810 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for @hcon.
 * Returns NULL on allocation failure, and the existing conn (or NULL)
 * when one is already attached or @status is non-zero. */
813 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
815 struct l2cap_conn *conn = hcon->l2cap_data;
817 if (conn || status)
818 return conn;
820 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
821 if (!conn)
822 return NULL;
824 hcon->l2cap_data = conn;
825 conn->hcon = hcon;
827 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links use the controller's LE MTU when one is advertised. */
829 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
830 conn->mtu = hcon->hdev->le_mtu;
831 else
832 conn->mtu = hcon->hdev->acl_mtu;
834 conn->src = &hcon->hdev->bdaddr;
835 conn->dst = &hcon->dst;
837 conn->feat_mask = 0;
839 spin_lock_init(&conn->lock);
840 rwlock_init(&conn->chan_lock);
842 INIT_LIST_HEAD(&conn->chan_l);
/* LE links have no info-request exchange, so no info timer either. */
844 if (hcon->type != LE_LINK)
845 setup_timer(&conn->info_timer, l2cap_info_timeout,
846 (unsigned long) conn);
/* 0x13 = remote user terminated connection; default disconnect reason. */
848 conn->disc_reason = 0x13;
850 return conn;
/* Tear down the L2CAP state attached to @hcon: kill every channel with
 * @err, drop any partial reassembly skb, stop the info timer and free
 * the connection object. */
853 static void l2cap_conn_del(struct hci_conn *hcon, int err)
855 struct l2cap_conn *conn = hcon->l2cap_data;
856 struct l2cap_chan *chan, *l;
857 struct sock *sk;
859 if (!conn)
860 return;
862 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
864 kfree_skb(conn->rx_skb);
866 /* Kill channels */
867 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
868 sk = chan->sk;
869 bh_lock_sock(sk);
870 l2cap_chan_del(chan, err);
871 bh_unlock_sock(sk);
872 l2cap_sock_kill(sk);
875 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
876 del_timer_sync(&conn->info_timer);
878 hcon->l2cap_data = NULL;
879 kfree(conn);
/* Locked wrapper around __l2cap_chan_add(). */
882 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
884 write_lock_bh(&conn->chan_lock);
885 __l2cap_chan_add(conn, chan);
886 write_unlock_bh(&conn->chan_lock);
889 /* ---- Socket interface ---- */
891 /* Find socket with psm and source bdaddr.
892 * Returns closest match.
/* Like l2cap_global_chan_by_scid() but keyed on PSM: exact local-address
 * match wins, otherwise the last BDADDR_ANY binding is returned. */
894 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
896 struct l2cap_chan *c, *c1 = NULL;
898 read_lock(&chan_list_lock);
900 list_for_each_entry(c, &chan_list, global_l) {
901 struct sock *sk = c->sk;
903 if (state && sk->sk_state != state)
904 continue;
906 if (c->psm == psm) {
907 /* Exact match. */
908 if (!bacmp(&bt_sk(sk)->src, src)) {
909 read_unlock(&chan_list_lock);
910 return c;
913 /* Closest match */
914 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
915 c1 = c;
919 read_unlock(&chan_list_lock);
921 return c1;
/* Initiate an outgoing connection for @chan: route to an HCI device,
 * create (LE or ACL) the baseband link, attach the channel to the
 * resulting l2cap_conn and start the L2CAP handshake.  Returns 0 or a
 * negative errno (-EHOSTUNREACH when no route, -ENOMEM, or the
 * hci_connect() error). */
924 int l2cap_chan_connect(struct l2cap_chan *chan)
926 struct sock *sk = chan->sk;
927 bdaddr_t *src = &bt_sk(sk)->src;
928 bdaddr_t *dst = &bt_sk(sk)->dst;
929 struct l2cap_conn *conn;
930 struct hci_conn *hcon;
931 struct hci_dev *hdev;
932 __u8 auth_type;
933 int err;
935 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
936 chan->psm);
938 hdev = hci_get_route(dst, src);
939 if (!hdev)
940 return -EHOSTUNREACH;
942 hci_dev_lock_bh(hdev);
944 auth_type = l2cap_get_auth_type(chan);
/* A destination CID of LE_DATA means this is an LE channel. */
946 if (chan->dcid == L2CAP_CID_LE_DATA)
947 hcon = hci_connect(hdev, LE_LINK, dst,
948 chan->sec_level, auth_type);
949 else
950 hcon = hci_connect(hdev, ACL_LINK, dst,
951 chan->sec_level, auth_type);
953 if (IS_ERR(hcon)) {
954 err = PTR_ERR(hcon);
955 goto done;
958 conn = l2cap_conn_add(hcon, 0);
959 if (!conn) {
960 hci_conn_put(hcon);
961 err = -ENOMEM;
962 goto done;
965 /* Update source addr of the socket */
966 bacpy(src, conn->src);
968 l2cap_chan_add(conn, chan);
970 sk->sk_state = BT_CONNECT;
971 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* If the baseband link is already up, finish immediately. */
973 if (hcon->state == BT_CONNECTED) {
974 if (sk->sk_type != SOCK_SEQPACKET &&
975 sk->sk_type != SOCK_STREAM) {
976 l2cap_sock_clear_timer(sk);
977 if (l2cap_check_security(chan))
978 sk->sk_state = BT_CONNECTED;
979 } else
980 l2cap_do_start(chan);
983 err = 0;
985 done:
986 hci_dev_unlock_bh(hdev);
987 hci_dev_put(hdev);
988 return err;
/* Sleep (interruptibly, releasing the socket lock while waiting) until
 * every transmitted ERTM frame has been acknowledged or the connection
 * goes away.  Returns 0, a socket error, or the signal errno. */
991 int __l2cap_wait_ack(struct sock *sk)
993 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
994 DECLARE_WAITQUEUE(wait, current);
995 int err = 0;
996 int timeo = HZ/5;
998 add_wait_queue(sk_sleep(sk), &wait);
999 while ((chan->unacked_frames > 0 && chan->conn)) {
1000 set_current_state(TASK_INTERRUPTIBLE);
1002 if (!timeo)
1003 timeo = HZ/5;
1005 if (signal_pending(current)) {
1006 err = sock_intr_errno(timeo);
1007 break;
1010 release_sock(sk);
1011 timeo = schedule_timeout(timeo);
1012 lock_sock(sk);
1014 err = sock_error(sk);
1015 if (err)
1016 break;
1018 set_current_state(TASK_RUNNING);
1019 remove_wait_queue(sk_sleep(sk), &wait);
1020 return err;
/* ERTM monitor timer: if the remote's max-transmit budget is exhausted
 * drop the channel, otherwise poll the remote again (RR/RNR with P-bit)
 * and re-arm the monitor timer. */
1023 static void l2cap_monitor_timeout(unsigned long arg)
1025 struct l2cap_chan *chan = (void *) arg;
1026 struct sock *sk = chan->sk;
1028 BT_DBG("chan %p", chan);
1030 bh_lock_sock(sk);
1031 if (chan->retry_count >= chan->remote_max_tx) {
1032 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1033 bh_unlock_sock(sk);
1034 return;
1037 chan->retry_count++;
1038 __mod_monitor_timer();
1040 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1041 bh_unlock_sock(sk);
/* ERTM retransmission timer: enter the WAIT_F state, start the monitor
 * timer and poll the remote for its receive state. */
1044 static void l2cap_retrans_timeout(unsigned long arg)
1046 struct l2cap_chan *chan = (void *) arg;
1047 struct sock *sk = chan->sk;
1049 BT_DBG("chan %p", chan);
1051 bh_lock_sock(sk);
1052 chan->retry_count = 1;
1053 __mod_monitor_timer();
1055 chan->conn_state |= L2CAP_CONN_WAIT_F;
1057 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1058 bh_unlock_sock(sk);
/* Free queued tx frames from the head of tx_q up to (but excluding) the
 * frame carrying expected_ack_seq; stop the retransmission timer once
 * nothing unacknowledged remains. */
1061 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1063 struct sk_buff *skb;
1065 while ((skb = skb_peek(&chan->tx_q)) &&
1066 chan->unacked_frames) {
1067 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1068 break;
1070 skb = skb_dequeue(&chan->tx_q);
1071 kfree_skb(skb);
1073 chan->unacked_frames--;
1076 if (!chan->unacked_frames)
1077 del_timer(&chan->retrans_timer);
/* Hand a fully built frame to the HCI layer, marking it non-flushable
 * when the channel is not flushable and the controller supports it. */
1080 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1082 struct hci_conn *hcon = chan->conn->hcon;
1083 u16 flags;
1085 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1087 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1088 flags = ACL_START_NO_FLUSH;
1089 else
1090 flags = ACL_START;
1092 hci_send_acl(hcon, skb, flags);
/* Streaming mode transmit: drain the whole tx queue, stamping each
 * frame's TXSEQ (modulo-64) and recomputing the CRC16 FCS when enabled.
 * Frames are consumed — streaming mode never retransmits. */
1095 void l2cap_streaming_send(struct l2cap_chan *chan)
1097 struct sk_buff *skb;
1098 u16 control, fcs;
1100 while ((skb = skb_dequeue(&chan->tx_q))) {
1101 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1102 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1103 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1105 if (chan->fcs == L2CAP_FCS_CRC16) {
1106 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1107 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1110 l2cap_do_send(chan, skb);
1112 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1116 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1118 struct sk_buff *skb, *tx_skb;
1119 u16 control, fcs;
1121 skb = skb_peek(&chan->tx_q);
1122 if (!skb)
1123 return;
1125 do {
1126 if (bt_cb(skb)->tx_seq == tx_seq)
1127 break;
1129 if (skb_queue_is_last(&chan->tx_q, skb))
1130 return;
1132 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1134 if (chan->remote_max_tx &&
1135 bt_cb(skb)->retries == chan->remote_max_tx) {
1136 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1137 return;
1140 tx_skb = skb_clone(skb, GFP_ATOMIC);
1141 bt_cb(skb)->retries++;
1142 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1143 control &= L2CAP_CTRL_SAR;
1145 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1146 control |= L2CAP_CTRL_FINAL;
1147 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1150 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1151 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1153 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1155 if (chan->fcs == L2CAP_FCS_CRC16) {
1156 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1157 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1160 l2cap_do_send(chan, tx_skb);
1163 int l2cap_ertm_send(struct l2cap_chan *chan)
1165 struct sk_buff *skb, *tx_skb;
1166 struct sock *sk = chan->sk;
1167 u16 control, fcs;
1168 int nsent = 0;
1170 if (sk->sk_state != BT_CONNECTED)
1171 return -ENOTCONN;
1173 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1175 if (chan->remote_max_tx &&
1176 bt_cb(skb)->retries == chan->remote_max_tx) {
1177 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1178 break;
1181 tx_skb = skb_clone(skb, GFP_ATOMIC);
1183 bt_cb(skb)->retries++;
1185 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1186 control &= L2CAP_CTRL_SAR;
1188 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1189 control |= L2CAP_CTRL_FINAL;
1190 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1192 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1193 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1194 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1197 if (chan->fcs == L2CAP_FCS_CRC16) {
1198 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1199 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1202 l2cap_do_send(chan, tx_skb);
1204 __mod_retrans_timer();
1206 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1207 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1209 if (bt_cb(skb)->retries == 1)
1210 chan->unacked_frames++;
1212 chan->frames_sent++;
1214 if (skb_queue_is_last(&chan->tx_q, skb))
1215 chan->tx_send_head = NULL;
1216 else
1217 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1219 nsent++;
1222 return nsent;
/* Rewind the send pointer to the head of tx_q and the sequence counter
 * to expected_ack_seq, then retransmit via l2cap_ertm_send().  Returns
 * its result. */
1225 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1227 int ret;
1229 if (!skb_queue_empty(&chan->tx_q))
1230 chan->tx_send_head = chan->tx_q.next;
1232 chan->next_tx_seq = chan->expected_ack_seq;
1233 ret = l2cap_ertm_send(chan);
1234 return ret;
/* Acknowledge received frames: send RNR when locally busy, otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send), falling
 * back to an explicit RR S-frame when nothing was sent. */
1237 static void l2cap_send_ack(struct l2cap_chan *chan)
1239 u16 control = 0;
1241 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1243 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1244 control |= L2CAP_SUPER_RCV_NOT_READY;
1245 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1246 l2cap_send_sframe(chan, control);
1247 return;
1250 if (l2cap_ertm_send(chan) > 0)
1251 return;
1253 control |= L2CAP_SUPER_RCV_READY;
1254 l2cap_send_sframe(chan, control);
/* Send a Select-Reject S-frame (with F-bit set) for the last entry on
 * the SREJ list. */
1257 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1259 struct srej_list *tail;
1260 u16 control;
1262 control = L2CAP_SUPER_SELECT_REJECT;
1263 control |= L2CAP_CTRL_FINAL;
1265 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1266 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1268 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into the skb's linear area, the remainder into newly allocated
 * fragments chained on frag_list (sized to the connection MTU).
 * Returns the number of bytes copied or a negative errno. */
1271 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1273 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1274 struct sk_buff **frag;
1275 int err, sent = 0;
1277 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1278 return -EFAULT;
1280 sent += count;
1281 len -= count;
1283 /* Continuation fragments (no L2CAP header) */
1284 frag = &skb_shinfo(skb)->frag_list;
1285 while (len) {
1286 count = min_t(unsigned int, conn->mtu, len);
1288 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1289 if (!*frag)
1290 return err;
1291 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1292 return -EFAULT;
1294 sent += count;
1295 len -= count;
1297 frag = &(*frag)->next;
1300 return sent;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus a 2-byte
 * PSM field, then the user payload from @msg.  Returns the skb or an
 * ERR_PTR on allocation/copy failure. */
1303 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1305 struct sock *sk = chan->sk;
1306 struct l2cap_conn *conn = chan->conn;
1307 struct sk_buff *skb;
1308 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1309 struct l2cap_hdr *lh;
1311 BT_DBG("sk %p len %d", sk, (int)len);
1313 count = min_t(unsigned int, (conn->mtu - hlen), len);
1314 skb = bt_skb_send_alloc(sk, count + hlen,
1315 msg->msg_flags & MSG_DONTWAIT, &err);
1316 if (!skb)
1317 return ERR_PTR(err);
1319 /* Create L2CAP header */
1320 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1321 lh->cid = cpu_to_le16(chan->dcid);
1322 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1323 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1325 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1326 if (unlikely(err < 0)) {
1327 kfree_skb(skb);
1328 return ERR_PTR(err);
1330 return skb;
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from @msg.  Returns the skb or an ERR_PTR on failure. */
1333 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1335 struct sock *sk = chan->sk;
1336 struct l2cap_conn *conn = chan->conn;
1337 struct sk_buff *skb;
1338 int err, count, hlen = L2CAP_HDR_SIZE;
1339 struct l2cap_hdr *lh;
1341 BT_DBG("sk %p len %d", sk, (int)len);
1343 count = min_t(unsigned int, (conn->mtu - hlen), len);
1344 skb = bt_skb_send_alloc(sk, count + hlen,
1345 msg->msg_flags & MSG_DONTWAIT, &err);
1346 if (!skb)
1347 return ERR_PTR(err);
1349 /* Create L2CAP header */
1350 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1351 lh->cid = cpu_to_le16(chan->dcid);
1352 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1354 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1355 if (unlikely(err < 0)) {
1356 kfree_skb(skb);
1357 return ERR_PTR(err);
1359 return skb;
/* Build one ERTM/streaming I-frame PDU.  Header layout: L2CAP header,
 * 16-bit control field, optional 16-bit SDU length (first segment of a
 * segmented SDU only), payload, optional 16-bit FCS placeholder.
 * @control: pre-computed control field value.
 * @sdulen:  total SDU length for a start segment, 0 otherwise.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
1362 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1364 struct sock *sk = chan->sk;
1365 struct l2cap_conn *conn = chan->conn;
1366 struct sk_buff *skb;
/* hlen starts at header + control field; grown below for SAR/FCS. */
1367 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1368 struct l2cap_hdr *lh;
1370 BT_DBG("sk %p len %d", sk, (int)len);
1372 if (!conn)
1373 return ERR_PTR(-ENOTCONN);
/* Start-of-SDU segments carry an extra 2-byte SDU length field. */
1375 if (sdulen)
1376 hlen += 2;
/* Reserve room for the trailing FCS when CRC16 is negotiated. */
1378 if (chan->fcs == L2CAP_FCS_CRC16)
1379 hlen += 2;
/* Head skb holds at most one HCI MTU; the rest goes to frag_list. */
1381 count = min_t(unsigned int, (conn->mtu - hlen), len);
1382 skb = bt_skb_send_alloc(sk, count + hlen,
1383 msg->msg_flags & MSG_DONTWAIT, &err);
1384 if (!skb)
1385 return ERR_PTR(err);
1387 /* Create L2CAP header */
1388 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1389 lh->cid = cpu_to_le16(chan->dcid);
1390 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1391 put_unaligned_le16(control, skb_put(skb, 2));
1392 if (sdulen)
1393 put_unaligned_le16(sdulen, skb_put(skb, 2));
1395 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1396 if (unlikely(err < 0)) {
1397 kfree_skb(skb);
1398 return ERR_PTR(err);
/* FCS placeholder: the real CRC is filled in at transmit time. */
1401 if (chan->fcs == L2CAP_FCS_CRC16)
1402 put_unaligned_le16(0, skb_put(skb, 2));
/* Fresh frame: no retransmissions yet. */
1404 bt_cb(skb)->retries = 0;
1405 return skb;
/* Segment an SDU larger than the remote MPS into a START / CONTINUE* /
 * END sequence of I-frames, queue them on the channel's tx queue and
 * return the number of payload bytes queued (or a negative errno).
 */
1408 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1410 struct sk_buff *skb;
/* Segments are built on a private queue first so a mid-SDU allocation
 * failure never leaves a partial SDU on chan->tx_q. */
1411 struct sk_buff_head sar_queue;
1412 u16 control;
1413 size_t size = 0;
1415 skb_queue_head_init(&sar_queue);
/* First segment: SDU_START, carrying the total SDU length (len). */
1416 control = L2CAP_SDU_START;
1417 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1418 if (IS_ERR(skb))
1419 return PTR_ERR(skb);
1421 __skb_queue_tail(&sar_queue, skb);
1422 len -= chan->remote_mps;
1423 size += chan->remote_mps;
1425 while (len > 0) {
1426 size_t buflen;
/* Full-MPS middle segments, then one final (possibly short) END. */
1428 if (len > chan->remote_mps) {
1429 control = L2CAP_SDU_CONTINUE;
1430 buflen = chan->remote_mps;
1431 } else {
1432 control = L2CAP_SDU_END;
1433 buflen = len;
/* sdulen is 0 for every segment after the first. */
1436 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1437 if (IS_ERR(skb)) {
1438 skb_queue_purge(&sar_queue);
1439 return PTR_ERR(skb);
1442 __skb_queue_tail(&sar_queue, skb);
1443 len -= buflen;
1444 size += buflen;
/* Publish the whole SDU atomically onto the transmit queue. */
1446 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1447 if (chan->tx_send_head == NULL)
1448 chan->tx_send_head = sar_queue.next;
1450 return size;
1453 static void l2cap_chan_ready(struct sock *sk)
1455 struct sock *parent = bt_sk(sk)->parent;
1456 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1458 BT_DBG("sk %p, parent %p", sk, parent);
1460 chan->conf_state = 0;
1461 l2cap_sock_clear_timer(sk);
1463 if (!parent) {
1464 /* Outgoing channel.
1465 * Wake up socket sleeping on connect.
1467 sk->sk_state = BT_CONNECTED;
1468 sk->sk_state_change(sk);
1469 } else {
1470 /* Incoming channel.
1471 * Wake up socket sleeping on accept.
1473 parent->sk_data_ready(parent, 0);
1477 /* Copy frame to all raw sockets on that connection */
1478 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1480 struct sk_buff *nskb;
1481 struct l2cap_chan *chan;
1483 BT_DBG("conn %p", conn);
1485 read_lock(&conn->chan_lock);
1486 list_for_each_entry(chan, &conn->chan_l, list) {
1487 struct sock *sk = chan->sk;
1488 if (sk->sk_type != SOCK_RAW)
1489 continue;
1491 /* Don't send frame to the socket it came from */
1492 if (skb->sk == sk)
1493 continue;
1494 nskb = skb_clone(skb, GFP_ATOMIC);
1495 if (!nskb)
1496 continue;
1498 if (sock_queue_rcv_skb(sk, nskb))
1499 kfree_skb(nskb);
1501 read_unlock(&conn->chan_lock);
1504 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header + command header
 * + @dlen bytes of @data.  Data beyond the first HCI MTU is carried in
 * frag_list continuation skbs (which have no L2CAP header).
 * Returns the skb, or NULL on allocation failure.
 */
1505 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1506 u8 code, u8 ident, u16 dlen, void *data)
1508 struct sk_buff *skb, **frag;
1509 struct l2cap_cmd_hdr *cmd;
1510 struct l2cap_hdr *lh;
1511 int len, count;
1513 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1514 conn, code, ident, dlen);
1516 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
/* Head skb is capped at the HCI MTU. */
1517 count = min_t(unsigned int, conn->mtu, len);
1519 skb = bt_skb_alloc(count, GFP_ATOMIC);
1520 if (!skb)
1521 return NULL;
1523 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1524 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use a different fixed signalling channel than BR/EDR. */
1526 if (conn->hcon->type == LE_LINK)
1527 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1528 else
1529 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1531 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1532 cmd->code = code;
1533 cmd->ident = ident;
1534 cmd->len = cpu_to_le16(dlen);
/* Copy as much command data as fits in the head skb. */
1536 if (dlen) {
1537 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1538 memcpy(skb_put(skb, count), data, count);
1539 data += count;
/* Remaining bytes (if any) go into continuation fragments. */
1542 len -= skb->len;
1544 /* Continuation fragments (no L2CAP header) */
1545 frag = &skb_shinfo(skb)->frag_list;
1546 while (len) {
1547 count = min_t(unsigned int, conn->mtu, len);
1549 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1550 if (!*frag)
1551 goto fail;
1553 memcpy(skb_put(*frag, count), data, count);
1555 len -= count;
1556 data += count;
1558 frag = &(*frag)->next;
1561 return skb;
1563 fail:
/* kfree_skb also releases any frag_list skbs already chained. */
1564 kfree_skb(skb);
1565 return NULL;
1568 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1570 struct l2cap_conf_opt *opt = *ptr;
1571 int len;
1573 len = L2CAP_CONF_OPT_SIZE + opt->len;
1574 *ptr += len;
1576 *type = opt->type;
1577 *olen = opt->len;
1579 switch (opt->len) {
1580 case 1:
1581 *val = *((u8 *) opt->val);
1582 break;
1584 case 2:
1585 *val = get_unaligned_le16(opt->val);
1586 break;
1588 case 4:
1589 *val = get_unaligned_le32(opt->val);
1590 break;
1592 default:
1593 *val = (unsigned long) opt->val;
1594 break;
1597 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1598 return len;
1601 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1603 struct l2cap_conf_opt *opt = *ptr;
1605 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1607 opt->type = type;
1608 opt->len = len;
1610 switch (len) {
1611 case 1:
1612 *((u8 *) opt->val) = val;
1613 break;
1615 case 2:
1616 put_unaligned_le16(val, opt->val);
1617 break;
1619 case 4:
1620 put_unaligned_le32(val, opt->val);
1621 break;
1623 default:
1624 memcpy(opt->val, (void *) val, len);
1625 break;
1628 *ptr += L2CAP_CONF_OPT_SIZE + len;
1631 static void l2cap_ack_timeout(unsigned long arg)
1633 struct l2cap_chan *chan = (void *) arg;
1635 bh_lock_sock(chan->sk);
1636 l2cap_send_ack(chan);
1637 bh_unlock_sock(chan->sk);
/* Initialize the per-channel ERTM state: sequence counters, the three
 * ERTM timers (retransmission, monitor, ack), the SREJ/busy queues and
 * the busy work item, and route backlogged frames to the ERTM receive
 * path.  Called when a channel completes configuration in ERTM mode.
 */
1640 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1642 struct sock *sk = chan->sk;
/* Reset all ERTM sequence-number bookkeeping. */
1644 chan->expected_ack_seq = 0;
1645 chan->unacked_frames = 0;
1646 chan->buffer_seq = 0;
1647 chan->num_acked = 0;
1648 chan->frames_sent = 0;
/* Set up (but do not arm) the ERTM timers. */
1650 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1651 (unsigned long) chan);
1652 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1653 (unsigned long) chan);
1654 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1656 skb_queue_head_init(&chan->srej_q);
1657 skb_queue_head_init(&chan->busy_q);
1659 INIT_LIST_HEAD(&chan->srej_l);
1661 INIT_WORK(&chan->busy_work, l2cap_busy_work);
/* Frames queued while the socket is owned by user context are
 * processed by the ERTM handler. */
1663 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1666 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1668 switch (mode) {
1669 case L2CAP_MODE_STREAMING:
1670 case L2CAP_MODE_ERTM:
1671 if (l2cap_mode_supported(mode, remote_feat_mask))
1672 return mode;
1673 /* fall through */
1674 default:
1675 return L2CAP_MODE_BASIC;
/* Build our Configure Request for @chan into @data: mode selection on
 * the first request, then MTU and (for ERTM/streaming) RFC and FCS
 * options.  Returns the number of bytes written to @data.
 */
1679 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1681 struct l2cap_conf_req *req = data;
1682 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1683 void *ptr = req->data;
1685 BT_DBG("chan %p", chan);
/* Mode is only (re)negotiated on the very first config exchange. */
1687 if (chan->num_conf_req || chan->num_conf_rsp)
1688 goto done;
1690 switch (chan->mode) {
1691 case L2CAP_MODE_STREAMING:
1692 case L2CAP_MODE_ERTM:
/* STATE2 devices insist on their configured mode. */
1693 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
1694 break;
1696 /* fall through */
1697 default:
1698 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1699 break;
1702 done:
/* Only send an MTU option when it differs from the default. */
1703 if (chan->imtu != L2CAP_DEFAULT_MTU)
1704 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1706 switch (chan->mode) {
1707 case L2CAP_MODE_BASIC:
/* An explicit basic-mode RFC option is only needed when the
 * peer could have expected ERTM/streaming. */
1708 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1709 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1710 break;
1712 rfc.mode = L2CAP_MODE_BASIC;
1713 rfc.txwin_size = 0;
1714 rfc.max_transmit = 0;
1715 rfc.retrans_timeout = 0;
1716 rfc.monitor_timeout = 0;
1717 rfc.max_pdu_size = 0;
1719 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1720 (unsigned long) &rfc);
1721 break;
1723 case L2CAP_MODE_ERTM:
1724 rfc.mode = L2CAP_MODE_ERTM;
1725 rfc.txwin_size = chan->tx_win;
1726 rfc.max_transmit = chan->max_tx;
1727 rfc.retrans_timeout = 0;
1728 rfc.monitor_timeout = 0;
1729 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so one PDU (plus overhead) fits the HCI MTU. */
1730 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1731 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1734 (unsigned long) &rfc);
1736 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1737 break;
/* Propose dropping the FCS if either side opted out. */
1739 if (chan->fcs == L2CAP_FCS_NONE ||
1740 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1741 chan->fcs = L2CAP_FCS_NONE;
1742 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1744 break;
1746 case L2CAP_MODE_STREAMING:
1747 rfc.mode = L2CAP_MODE_STREAMING;
1748 rfc.txwin_size = 0;
1749 rfc.max_transmit = 0;
1750 rfc.retrans_timeout = 0;
1751 rfc.monitor_timeout = 0;
1752 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so one PDU (plus overhead) fits the HCI MTU. */
1753 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1754 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1756 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1757 (unsigned long) &rfc);
1759 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1760 break;
/* Propose dropping the FCS if either side opted out. */
1762 if (chan->fcs == L2CAP_FCS_NONE ||
1763 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1764 chan->fcs = L2CAP_FCS_NONE;
1765 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1767 break;
1770 req->dcid = cpu_to_le16(chan->dcid);
1771 req->flags = cpu_to_le16(0);
1773 return ptr - data;
1776 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1778 struct l2cap_conf_rsp *rsp = data;
1779 void *ptr = rsp->data;
1780 void *req = chan->conf_req;
1781 int len = chan->conf_len;
1782 int type, hint, olen;
1783 unsigned long val;
1784 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1785 u16 mtu = L2CAP_DEFAULT_MTU;
1786 u16 result = L2CAP_CONF_SUCCESS;
1788 BT_DBG("chan %p", chan);
1790 while (len >= L2CAP_CONF_OPT_SIZE) {
1791 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1793 hint = type & L2CAP_CONF_HINT;
1794 type &= L2CAP_CONF_MASK;
1796 switch (type) {
1797 case L2CAP_CONF_MTU:
1798 mtu = val;
1799 break;
1801 case L2CAP_CONF_FLUSH_TO:
1802 chan->flush_to = val;
1803 break;
1805 case L2CAP_CONF_QOS:
1806 break;
1808 case L2CAP_CONF_RFC:
1809 if (olen == sizeof(rfc))
1810 memcpy(&rfc, (void *) val, olen);
1811 break;
1813 case L2CAP_CONF_FCS:
1814 if (val == L2CAP_FCS_NONE)
1815 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1817 break;
1819 default:
1820 if (hint)
1821 break;
1823 result = L2CAP_CONF_UNKNOWN;
1824 *((u8 *) ptr++) = type;
1825 break;
1829 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1830 goto done;
1832 switch (chan->mode) {
1833 case L2CAP_MODE_STREAMING:
1834 case L2CAP_MODE_ERTM:
1835 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1836 chan->mode = l2cap_select_mode(rfc.mode,
1837 chan->conn->feat_mask);
1838 break;
1841 if (chan->mode != rfc.mode)
1842 return -ECONNREFUSED;
1844 break;
1847 done:
1848 if (chan->mode != rfc.mode) {
1849 result = L2CAP_CONF_UNACCEPT;
1850 rfc.mode = chan->mode;
1852 if (chan->num_conf_rsp == 1)
1853 return -ECONNREFUSED;
1855 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1856 sizeof(rfc), (unsigned long) &rfc);
1860 if (result == L2CAP_CONF_SUCCESS) {
1861 /* Configure output options and let the other side know
1862 * which ones we don't like. */
1864 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1865 result = L2CAP_CONF_UNACCEPT;
1866 else {
1867 chan->omtu = mtu;
1868 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1870 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1872 switch (rfc.mode) {
1873 case L2CAP_MODE_BASIC:
1874 chan->fcs = L2CAP_FCS_NONE;
1875 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1876 break;
1878 case L2CAP_MODE_ERTM:
1879 chan->remote_tx_win = rfc.txwin_size;
1880 chan->remote_max_tx = rfc.max_transmit;
1882 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1883 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1885 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1887 rfc.retrans_timeout =
1888 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1889 rfc.monitor_timeout =
1890 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1892 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1895 sizeof(rfc), (unsigned long) &rfc);
1897 break;
1899 case L2CAP_MODE_STREAMING:
1900 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1901 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1903 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1905 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1907 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1908 sizeof(rfc), (unsigned long) &rfc);
1910 break;
1912 default:
1913 result = L2CAP_CONF_UNACCEPT;
1915 memset(&rfc, 0, sizeof(rfc));
1916 rfc.mode = chan->mode;
1919 if (result == L2CAP_CONF_SUCCESS)
1920 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1922 rsp->scid = cpu_to_le16(chan->dcid);
1923 rsp->result = cpu_to_le16(result);
1924 rsp->flags = cpu_to_le16(0x0000);
1926 return ptr - data;
1929 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1931 struct l2cap_conf_req *req = data;
1932 void *ptr = req->data;
1933 int type, olen;
1934 unsigned long val;
1935 struct l2cap_conf_rfc rfc;
1937 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
1939 while (len >= L2CAP_CONF_OPT_SIZE) {
1940 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1942 switch (type) {
1943 case L2CAP_CONF_MTU:
1944 if (val < L2CAP_DEFAULT_MIN_MTU) {
1945 *result = L2CAP_CONF_UNACCEPT;
1946 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
1947 } else
1948 chan->imtu = val;
1949 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1950 break;
1952 case L2CAP_CONF_FLUSH_TO:
1953 chan->flush_to = val;
1954 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1955 2, chan->flush_to);
1956 break;
1958 case L2CAP_CONF_RFC:
1959 if (olen == sizeof(rfc))
1960 memcpy(&rfc, (void *)val, olen);
1962 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1963 rfc.mode != chan->mode)
1964 return -ECONNREFUSED;
1966 chan->fcs = 0;
1968 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1969 sizeof(rfc), (unsigned long) &rfc);
1970 break;
1974 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
1975 return -ECONNREFUSED;
1977 chan->mode = rfc.mode;
1979 if (*result == L2CAP_CONF_SUCCESS) {
1980 switch (rfc.mode) {
1981 case L2CAP_MODE_ERTM:
1982 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1983 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1984 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1985 break;
1986 case L2CAP_MODE_STREAMING:
1987 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1991 req->dcid = cpu_to_le16(chan->dcid);
1992 req->flags = cpu_to_le16(0x0000);
1994 return ptr - data;
1997 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
1999 struct l2cap_conf_rsp *rsp = data;
2000 void *ptr = rsp->data;
2002 BT_DBG("chan %p", chan);
2004 rsp->scid = cpu_to_le16(chan->dcid);
2005 rsp->result = cpu_to_le16(result);
2006 rsp->flags = cpu_to_le16(flags);
2008 return ptr - data;
2011 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2013 struct l2cap_conn_rsp rsp;
2014 struct l2cap_conn *conn = chan->conn;
2015 u8 buf[128];
2017 rsp.scid = cpu_to_le16(chan->dcid);
2018 rsp.dcid = cpu_to_le16(chan->scid);
2019 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2020 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2021 l2cap_send_cmd(conn, chan->ident,
2022 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2024 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2025 return;
2027 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2028 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2029 l2cap_build_conf_req(chan, buf), buf);
2030 chan->num_conf_req++;
2033 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2035 int type, olen;
2036 unsigned long val;
2037 struct l2cap_conf_rfc rfc;
2039 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2041 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2042 return;
2044 while (len >= L2CAP_CONF_OPT_SIZE) {
2045 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2047 switch (type) {
2048 case L2CAP_CONF_RFC:
2049 if (olen == sizeof(rfc))
2050 memcpy(&rfc, (void *)val, olen);
2051 goto done;
2055 done:
2056 switch (rfc.mode) {
2057 case L2CAP_MODE_ERTM:
2058 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2059 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2060 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2061 break;
2062 case L2CAP_MODE_STREAMING:
2063 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2067 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2069 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2071 if (rej->reason != 0x0000)
2072 return 0;
2074 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2075 cmd->ident == conn->info_ident) {
2076 del_timer(&conn->info_timer);
2078 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2079 conn->info_ident = 0;
2081 l2cap_conn_start(conn);
2084 return 0;
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, run security checks, allocate a child socket/channel, and send a
 * Connect Response (success, pending, or a failure code).
 */
2087 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2089 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2090 struct l2cap_conn_rsp rsp;
2091 struct l2cap_chan *chan = NULL, *pchan;
2092 struct sock *parent, *sk = NULL;
2093 int result, status = L2CAP_CS_NO_INFO;
2095 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2096 __le16 psm = req->psm;
2098 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2100 /* Check if we have socket listening on psm */
2101 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2102 if (!pchan) {
2103 result = L2CAP_CR_BAD_PSM;
2104 goto sendresp;
2107 parent = pchan->sk;
2109 bh_lock_sock(parent);
2111 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM, which is exempt from link-mode checks. */
2112 if (psm != cpu_to_le16(0x0001) &&
2113 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: HCI "authentication failure" disconnect reason. */
2114 conn->disc_reason = 0x05;
2115 result = L2CAP_CR_SEC_BLOCK;
2116 goto response;
/* Default failure from here on: resource shortage. */
2119 result = L2CAP_CR_NO_MEM;
2121 /* Check for backlog size */
2122 if (sk_acceptq_is_full(parent)) {
2123 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2124 goto response;
2127 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2128 if (!sk)
2129 goto response;
2131 chan = l2cap_chan_create(sk);
2132 if (!chan) {
2133 l2cap_sock_kill(sk);
2134 goto response;
2137 l2cap_pi(sk)->chan = chan;
2139 write_lock_bh(&conn->chan_lock);
2141 /* Check if we already have channel with that dcid */
/* Duplicate source CID from the peer: drop the new child. */
2142 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2143 write_unlock_bh(&conn->chan_lock);
2144 sock_set_flag(sk, SOCK_ZAPPED);
2145 l2cap_sock_kill(sk);
2146 goto response;
2149 hci_conn_hold(conn->hcon);
/* Child inherits settings from the listening parent socket. */
2151 l2cap_sock_init(sk, parent);
2152 bacpy(&bt_sk(sk)->src, conn->src);
2153 bacpy(&bt_sk(sk)->dst, conn->dst);
2154 chan->psm = psm;
2155 chan->dcid = scid;
2157 bt_accept_enqueue(parent, sk);
2159 __l2cap_chan_add(conn, chan);
/* Our source CID, allocated by __l2cap_chan_add, goes in the rsp. */
2161 dcid = chan->scid;
2163 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2165 chan->ident = cmd->ident;
2167 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2168 if (l2cap_check_security(chan)) {
/* Userspace asked to defer accept decisions. */
2169 if (bt_sk(sk)->defer_setup) {
2170 sk->sk_state = BT_CONNECT2;
2171 result = L2CAP_CR_PEND;
2172 status = L2CAP_CS_AUTHOR_PEND;
2173 parent->sk_data_ready(parent, 0);
2174 } else {
2175 sk->sk_state = BT_CONFIG;
2176 result = L2CAP_CR_SUCCESS;
2177 status = L2CAP_CS_NO_INFO;
2179 } else {
/* Security procedures still in progress. */
2180 sk->sk_state = BT_CONNECT2;
2181 result = L2CAP_CR_PEND;
2182 status = L2CAP_CS_AUTHEN_PEND;
2184 } else {
/* Feature exchange not finished yet: answer pending. */
2185 sk->sk_state = BT_CONNECT2;
2186 result = L2CAP_CR_PEND;
2187 status = L2CAP_CS_NO_INFO;
2190 write_unlock_bh(&conn->chan_lock);
2192 response:
2193 bh_unlock_sock(parent);
2195 sendresp:
2196 rsp.scid = cpu_to_le16(scid);
2197 rsp.dcid = cpu_to_le16(dcid);
2198 rsp.result = cpu_to_le16(result);
2199 rsp.status = cpu_to_le16(status);
2200 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First pending answer also kicks off the feature exchange. */
2202 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2203 struct l2cap_info_req info;
2204 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2206 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2207 conn->info_ident = l2cap_get_ident(conn);
2209 mod_timer(&conn->info_timer, jiffies +
2210 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2212 l2cap_send_cmd(conn, conn->info_ident,
2213 L2CAP_INFO_REQ, sizeof(info), &info);
/* On immediate success, start configuration right away. */
2216 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
2217 result == L2CAP_CR_SUCCESS) {
2218 u8 buf[128];
2219 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2220 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2221 l2cap_build_conf_req(chan, buf), buf);
2222 chan->num_conf_req++;
2225 return 0;
/* Handle a Connect Response to our earlier Connect Request: on success
 * move to BT_CONFIG and send our Configure Request; on pending just
 * flag it; on failure tear the channel down.
 */
2228 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2230 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2231 u16 scid, dcid, result, status;
2232 struct l2cap_chan *chan;
2233 struct sock *sk;
2234 u8 req[128];
2236 scid = __le16_to_cpu(rsp->scid);
2237 dcid = __le16_to_cpu(rsp->dcid);
2238 result = __le16_to_cpu(rsp->result);
2239 status = __le16_to_cpu(rsp->status);
2241 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Find our channel: by our CID if echoed, else by command ident.
 * NOTE(review): the lookup helpers appear to return with the socket
 * bh-locked — the bh_unlock_sock at the end pairs with that; confirm
 * against the helper definitions. */
2243 if (scid) {
2244 chan = l2cap_get_chan_by_scid(conn, scid);
2245 if (!chan)
2246 return -EFAULT;
2247 } else {
2248 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2249 if (!chan)
2250 return -EFAULT;
2253 sk = chan->sk;
2255 switch (result) {
2256 case L2CAP_CR_SUCCESS:
2257 sk->sk_state = BT_CONFIG;
2258 chan->ident = 0;
/* Record the CID the peer allocated for this channel. */
2259 chan->dcid = dcid;
2260 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2262 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2263 break;
2265 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2267 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2268 l2cap_build_conf_req(chan, req), req);
2269 chan->num_conf_req++;
2270 break;
2272 case L2CAP_CR_PEND:
2273 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
2274 break;
2276 default:
2277 /* don't delete l2cap channel if sk is owned by user */
/* Defer the teardown via a short timer instead. */
2278 if (sock_owned_by_user(sk)) {
2279 sk->sk_state = BT_DISCONN;
2280 l2cap_sock_clear_timer(sk);
2281 l2cap_sock_set_timer(sk, HZ / 5);
2282 break;
2285 l2cap_chan_del(chan, ECONNREFUSED);
2286 break;
2289 bh_unlock_sock(sk);
2290 return 0;
2293 static inline void set_default_fcs(struct l2cap_chan *chan)
2295 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2297 /* FCS is enabled only in ERTM or streaming mode, if one or both
2298 * sides request it.
2300 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2301 chan->fcs = L2CAP_FCS_NONE;
2302 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2303 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate options across
 * continuation packets, then parse the complete set, respond, and if
 * both directions are configured bring the channel up.
 */
2306 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2308 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2309 u16 dcid, flags;
2310 u8 rsp[64];
2311 struct l2cap_chan *chan;
2312 struct sock *sk;
2313 int len;
2315 dcid = __le16_to_cpu(req->dcid);
2316 flags = __le16_to_cpu(req->flags);
2318 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* NOTE(review): lookup appears to return with the socket bh-locked;
 * the unlock at the end pairs with it — confirm in the helper. */
2320 chan = l2cap_get_chan_by_scid(conn, dcid);
2321 if (!chan)
2322 return -ENOENT;
2324 sk = chan->sk;
/* Config is only legal in the state matching the setup style. */
2326 if ((bt_sk(sk)->defer_setup && sk->sk_state != BT_CONNECT2) ||
2327 (!bt_sk(sk)->defer_setup && sk->sk_state != BT_CONFIG)) {
2328 struct l2cap_cmd_rej rej;
/* 0x0002: invalid CID in request. */
2330 rej.reason = cpu_to_le16(0x0002);
2331 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2332 sizeof(rej), &rej);
2333 goto unlock;
2336 /* Reject if config buffer is too small. */
2337 len = cmd_len - sizeof(*req);
2338 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2339 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2340 l2cap_build_conf_rsp(chan, rsp,
2341 L2CAP_CONF_REJECT, flags), rsp);
2342 goto unlock;
2345 /* Store config. */
2346 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2347 chan->conf_len += len;
/* Continuation flag set: more option packets are coming. */
2349 if (flags & 0x0001) {
2350 /* Incomplete config. Send empty response. */
2351 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2352 l2cap_build_conf_rsp(chan, rsp,
2353 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2354 goto unlock;
2357 /* Complete config. */
2358 len = l2cap_parse_conf_req(chan, rsp);
2359 if (len < 0) {
2360 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2361 goto unlock;
2364 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2365 chan->num_conf_rsp++;
2367 /* Reset config buffer. */
2368 chan->conf_len = 0;
2370 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
2371 goto unlock;
/* Both directions configured: the channel is now operational. */
2373 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
2374 set_default_fcs(chan);
2376 sk->sk_state = BT_CONNECTED;
2378 chan->next_tx_seq = 0;
2379 chan->expected_tx_seq = 0;
2380 skb_queue_head_init(&chan->tx_q);
2381 if (chan->mode == L2CAP_MODE_ERTM)
2382 l2cap_ertm_init(chan);
2384 l2cap_chan_ready(sk);
2385 goto unlock;
/* Peer configured us first: send our own request if not yet sent. */
2388 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
2389 u8 buf[64];
2390 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2391 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2392 l2cap_build_conf_req(chan, buf), buf);
2393 chan->num_conf_req++;
2396 unlock:
2397 bh_unlock_sock(sk);
2398 return 0;
2401 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2403 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2404 u16 scid, flags, result;
2405 struct l2cap_chan *chan;
2406 struct sock *sk;
2407 int len = cmd->len - sizeof(*rsp);
2409 scid = __le16_to_cpu(rsp->scid);
2410 flags = __le16_to_cpu(rsp->flags);
2411 result = __le16_to_cpu(rsp->result);
2413 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2414 scid, flags, result);
2416 chan = l2cap_get_chan_by_scid(conn, scid);
2417 if (!chan)
2418 return 0;
2420 sk = chan->sk;
2422 switch (result) {
2423 case L2CAP_CONF_SUCCESS:
2424 l2cap_conf_rfc_get(chan, rsp->data, len);
2425 break;
2427 case L2CAP_CONF_UNACCEPT:
2428 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2429 char req[64];
2431 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2432 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2433 goto done;
2436 /* throw out any old stored conf requests */
2437 result = L2CAP_CONF_SUCCESS;
2438 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2439 req, &result);
2440 if (len < 0) {
2441 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2442 goto done;
2445 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2446 L2CAP_CONF_REQ, len, req);
2447 chan->num_conf_req++;
2448 if (result != L2CAP_CONF_SUCCESS)
2449 goto done;
2450 break;
2453 default:
2454 sk->sk_err = ECONNRESET;
2455 l2cap_sock_set_timer(sk, HZ * 5);
2456 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2457 goto done;
2460 if (flags & 0x01)
2461 goto done;
2463 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2465 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2466 set_default_fcs(chan);
2468 sk->sk_state = BT_CONNECTED;
2469 chan->next_tx_seq = 0;
2470 chan->expected_tx_seq = 0;
2471 skb_queue_head_init(&chan->tx_q);
2472 if (chan->mode == L2CAP_MODE_ERTM)
2473 l2cap_ertm_init(chan);
2475 l2cap_chan_ready(sk);
2478 done:
2479 bh_unlock_sock(sk);
2480 return 0;
/* Handle an incoming Disconnect Request: acknowledge it and tear down
 * the channel (deferred via a timer if the socket is user-locked).
 */
2483 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2485 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2486 struct l2cap_disconn_rsp rsp;
2487 u16 dcid, scid;
2488 struct l2cap_chan *chan;
2489 struct sock *sk;
2491 scid = __le16_to_cpu(req->scid);
2492 dcid = __le16_to_cpu(req->dcid);
2494 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid. NOTE(review): lookup appears to
 * return with the socket bh-locked; confirm in the helper. */
2496 chan = l2cap_get_chan_by_scid(conn, dcid);
2497 if (!chan)
2498 return 0;
2500 sk = chan->sk;
/* Echo the CID pair back in the response. */
2502 rsp.dcid = cpu_to_le16(chan->scid);
2503 rsp.scid = cpu_to_le16(chan->dcid);
2504 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2506 sk->sk_shutdown = SHUTDOWN_MASK;
2508 /* don't delete l2cap channel if sk is owned by user */
/* Defer the teardown via a short timer instead. */
2509 if (sock_owned_by_user(sk)) {
2510 sk->sk_state = BT_DISCONN;
2511 l2cap_sock_clear_timer(sk);
2512 l2cap_sock_set_timer(sk, HZ / 5);
2513 bh_unlock_sock(sk);
2514 return 0;
2517 l2cap_chan_del(chan, ECONNRESET);
2518 bh_unlock_sock(sk);
2520 l2cap_sock_kill(sk);
2521 return 0;
/* Handle a Disconnect Response to our Disconnect Request: tear down
 * the channel (deferred via a timer if the socket is user-locked).
 */
2524 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2526 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2527 u16 dcid, scid;
2528 struct l2cap_chan *chan;
2529 struct sock *sk;
2531 scid = __le16_to_cpu(rsp->scid);
2532 dcid = __le16_to_cpu(rsp->dcid);
2534 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
/* NOTE(review): lookup appears to return with the socket bh-locked;
 * confirm in the helper. */
2536 chan = l2cap_get_chan_by_scid(conn, scid);
2537 if (!chan)
2538 return 0;
2540 sk = chan->sk;
2542 /* don't delete l2cap channel if sk is owned by user */
/* Defer the teardown via a short timer instead. */
2543 if (sock_owned_by_user(sk)) {
2544 sk->sk_state = BT_DISCONN;
2545 l2cap_sock_clear_timer(sk);
2546 l2cap_sock_set_timer(sk, HZ / 5);
2547 bh_unlock_sock(sk);
2548 return 0;
/* err == 0: this is a clean, locally initiated disconnect. */
2551 l2cap_chan_del(chan, 0);
2552 bh_unlock_sock(sk);
2554 l2cap_sock_kill(sk);
2555 return 0;
2558 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2560 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2561 u16 type;
2563 type = __le16_to_cpu(req->type);
2565 BT_DBG("type 0x%4.4x", type);
2567 if (type == L2CAP_IT_FEAT_MASK) {
2568 u8 buf[8];
2569 u32 feat_mask = l2cap_feat_mask;
2570 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2571 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2572 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2573 if (!disable_ertm)
2574 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2575 | L2CAP_FEAT_FCS;
2576 put_unaligned_le32(feat_mask, rsp->data);
2577 l2cap_send_cmd(conn, cmd->ident,
2578 L2CAP_INFO_RSP, sizeof(buf), buf);
2579 } else if (type == L2CAP_IT_FIXED_CHAN) {
2580 u8 buf[12];
2581 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2582 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2583 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2584 memcpy(buf + 4, l2cap_fixed_chan, 8);
2585 l2cap_send_cmd(conn, cmd->ident,
2586 L2CAP_INFO_RSP, sizeof(buf), buf);
2587 } else {
2588 struct l2cap_info_rsp rsp;
2589 rsp.type = cpu_to_le16(type);
2590 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2591 l2cap_send_cmd(conn, cmd->ident,
2592 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2595 return 0;
/* Handle an Information Response: record the peer's feature mask,
 * chain a fixed-channel query if supported, and once the exchange is
 * complete start any channels waiting on it.
 */
2598 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2600 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2601 u16 type, result;
2603 type = __le16_to_cpu(rsp->type);
2604 result = __le16_to_cpu(rsp->result);
2606 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2608 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2609 if (cmd->ident != conn->info_ident ||
2610 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2611 return 0;
2613 del_timer(&conn->info_timer);
/* A failed query still finishes the exchange. */
2615 if (result != L2CAP_IR_SUCCESS) {
2616 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2617 conn->info_ident = 0;
2619 l2cap_conn_start(conn);
2621 return 0;
2624 if (type == L2CAP_IT_FEAT_MASK) {
2625 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: query the bitmap next. */
2627 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2628 struct l2cap_info_req req;
2629 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2631 conn->info_ident = l2cap_get_ident(conn);
2633 l2cap_send_cmd(conn, conn->info_ident,
2634 L2CAP_INFO_REQ, sizeof(req), &req);
2635 } else {
2636 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2637 conn->info_ident = 0;
2639 l2cap_conn_start(conn);
2641 } else if (type == L2CAP_IT_FIXED_CHAN) {
2642 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2643 conn->info_ident = 0;
2645 l2cap_conn_start(conn);
2648 return 0;
2651 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2652 u16 to_multiplier)
2654 u16 max_latency;
2656 if (min > max || min < 6 || max > 3200)
2657 return -EINVAL;
2659 if (to_multiplier < 10 || to_multiplier > 3200)
2660 return -EINVAL;
2662 if (max >= to_multiplier * 8)
2663 return -EINVAL;
2665 max_latency = (to_multiplier * 8 / max) - 1;
2666 if (latency > 499 || latency > max_latency)
2667 return -EINVAL;
2669 return 0;
/* Handle an LE Connection Parameter Update Request from the slave.
 * Only valid when we are the master of the link; the command length
 * is checked before touching the payload.  The request is answered
 * with accepted/rejected, and on acceptance the controller is asked
 * to apply the new parameters.  Returns 0, -EINVAL when we are not
 * master, or -EPROTO on a malformed length (caller sends a reject).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to update parameters. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the parameters only after the response has been queued. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests
 * are answered inline; echo responses are ignored.  Returns the
 * handler's status, or -EINVAL for unknown opcodes (the caller turns
 * a non-zero result into a Command Reject).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back with the same ident. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2772 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2773 struct l2cap_cmd_hdr *cmd, u8 *data)
2775 switch (cmd->code) {
2776 case L2CAP_COMMAND_REJ:
2777 return 0;
2779 case L2CAP_CONN_PARAM_UPDATE_REQ:
2780 return l2cap_conn_param_update_req(conn, cmd, data);
2782 case L2CAP_CONN_PARAM_UPDATE_RSP:
2783 return 0;
2785 default:
2786 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2787 return -EINVAL;
/* Parse and dispatch all signaling commands packed into one skb on the
 * signaling channel (BR/EDR CID 0x0001 or LE CID 0x0005).  Each command
 * is a fixed header (code/ident/len) followed by len payload bytes;
 * parsing stops at the first truncated or zero-ident command.  A
 * handler failure is answered with a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before interpreting anything. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Payload must fit in what remains, and ident 0 is illegal. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
/* Verify the 2-byte CRC16 FCS trailer of an ERTM/streaming frame and
 * strip it from the skb.  Note the ordering: skb_trim() only shrinks
 * skb->len, the trailer bytes are still present in the buffer, so
 * reading at skb->data + skb->len afterwards picks up the received FCS.
 * The CRC covers the L2CAP header (still in front of skb->data) plus
 * the control field, hence the negative offset.  Returns 0 on a good
 * frame (or when FCS is disabled), -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
/* Respond to a poll (P=1) from the peer: advertise local busy with an
 * RNR if we are busy, otherwise retransmit/send pending I-frames, and
 * fall back to a plain RR when nothing at all went out so the peer
 * still gets an acknowledgment carrying our buffer_seq.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing was sent and we are not busy: answer with a bare RR. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
/* Insert an out-of-sequence I-frame into the SREJ reorder queue,
 * keeping the queue sorted by tx_seq distance from buffer_seq (all
 * arithmetic is modulo 64, the ERTM sequence space).  Returns 0 on
 * insert, -EINVAL if a frame with the same tx_seq is already queued
 * (duplicate; caller drops it).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distance of the new frame from the reassembly point, mod 64. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that sorts after us: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
/* Reassemble ERTM I-frames into SDUs according to the SAR bits in the
 * control field and deliver complete SDUs to the socket.  Returns 0
 * when the frame was consumed, or a negative errno (-ENOMEM / socket
 * error) when delivery must be retried later — the caller uses that to
 * enter the local-busy state, and L2CAP_CONN_SAR_RETRY makes sure the
 * END segment is not appended twice on retry.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU in the middle of a SAR sequence is
		 * a protocol violation. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry after -ENOMEM/-EBUSY the final segment was
		 * already appended; only the delivery is re-attempted. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > chan->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	/* NB: drop intentionally falls through to disconnect — a SAR
	 * violation both discards the partial SDU and tears the channel
	 * down with ECONNRESET. */
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
/* Try to drain the busy queue accumulated while the channel was in the
 * local-busy state.  Returns -EBUSY (frame re-queued at the head) if
 * delivery still fails; on success clears the local-busy/RNR state and,
 * if an RNR had been sent, polls the peer (RR with P=1) and starts the
 * monitor timer to wait for the F-bit response.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for a later retry. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it to resume transmission. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
/* Workqueue handler for the local-busy condition: sleep-retry pushing
 * queued frames to the socket until the queue drains, the socket
 * errors, a signal arrives, or L2CAP_LOCAL_BUSY_TRIES attempts are
 * exhausted (which disconnects the channel with EBUSY).  Runs in
 * process context so it may release the socket lock and sleep.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so the reader can drain. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
/* Deliver an in-sequence I-frame to the reassembly layer.  While in
 * local-busy state new frames are appended to busy_q and a drain is
 * attempted.  If delivery fails for a non-busy channel, the channel
 * enters local busy: the frame is queued, an RNR is sent, the ack
 * timer is stopped and the busy worker is scheduled to retry.
 * Returns >= 0 on success (buffer_seq advanced) or the delivery error.
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);
	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3153 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3155 struct sk_buff *_skb;
3156 int err = -EINVAL;
3159 * TODO: We have to notify the userland if some data is lost with the
3160 * Streaming Mode.
3163 switch (control & L2CAP_CTRL_SAR) {
3164 case L2CAP_SDU_UNSEGMENTED:
3165 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3166 kfree_skb(chan->sdu);
3167 break;
3170 err = sock_queue_rcv_skb(chan->sk, skb);
3171 if (!err)
3172 return 0;
3174 break;
3176 case L2CAP_SDU_START:
3177 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3178 kfree_skb(chan->sdu);
3179 break;
3182 chan->sdu_len = get_unaligned_le16(skb->data);
3183 skb_pull(skb, 2);
3185 if (chan->sdu_len > chan->imtu) {
3186 err = -EMSGSIZE;
3187 break;
3190 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3191 if (!chan->sdu) {
3192 err = -ENOMEM;
3193 break;
3196 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3198 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3199 chan->partial_sdu_len = skb->len;
3200 err = 0;
3201 break;
3203 case L2CAP_SDU_CONTINUE:
3204 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3205 break;
3207 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3209 chan->partial_sdu_len += skb->len;
3210 if (chan->partial_sdu_len > chan->sdu_len)
3211 kfree_skb(chan->sdu);
3212 else
3213 err = 0;
3215 break;
3217 case L2CAP_SDU_END:
3218 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3219 break;
3221 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3223 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3224 chan->partial_sdu_len += skb->len;
3226 if (chan->partial_sdu_len > chan->imtu)
3227 goto drop;
3229 if (chan->partial_sdu_len == chan->sdu_len) {
3230 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3231 err = sock_queue_rcv_skb(chan->sk, _skb);
3232 if (err < 0)
3233 kfree_skb(_skb);
3235 err = 0;
3237 drop:
3238 kfree_skb(chan->sdu);
3239 break;
3242 kfree_skb(skb);
3243 return err;
/* After the frame that filled a SREJ gap arrives, flush the run of
 * now-contiguous frames from the reorder queue into reassembly,
 * advancing buffer_seq_srej (mod 64) for each delivered frame.  Stops
 * at the first remaining hole.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q))) {
		/* Queue head is sorted; a mismatch means the next gap. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(chan, skb, control);
		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
/* Re-send SREJ S-frames for every sequence number still outstanding
 * before tx_seq, rotating each entry to the tail of srej_l to preserve
 * order, and remove tx_seq itself from the list (its frame arrived).
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			/* The duplicate we just received: stop requesting it. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		/* Rotate to the tail so list order keeps matching the
		 * order in which the SREJs were (re)issued. */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3283 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3285 struct srej_list *new;
3286 u16 control;
3288 while (tx_seq != chan->expected_tx_seq) {
3289 control = L2CAP_SUPER_SELECT_REJECT;
3290 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3291 l2cap_send_sframe(chan, control);
3293 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3294 new->tx_seq = chan->expected_tx_seq;
3295 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3296 list_add_tail(&new->list, &chan->srej_l);
3298 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3301 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3303 u8 tx_seq = __get_txseq(rx_control);
3304 u8 req_seq = __get_reqseq(rx_control);
3305 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3306 int tx_seq_offset, expected_tx_seq_offset;
3307 int num_to_ack = (chan->tx_win/6) + 1;
3308 int err = 0;
3310 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3311 tx_seq, rx_control);
3313 if (L2CAP_CTRL_FINAL & rx_control &&
3314 chan->conn_state & L2CAP_CONN_WAIT_F) {
3315 del_timer(&chan->monitor_timer);
3316 if (chan->unacked_frames > 0)
3317 __mod_retrans_timer();
3318 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3321 chan->expected_ack_seq = req_seq;
3322 l2cap_drop_acked_frames(chan);
3324 if (tx_seq == chan->expected_tx_seq)
3325 goto expected;
3327 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3328 if (tx_seq_offset < 0)
3329 tx_seq_offset += 64;
3331 /* invalid tx_seq */
3332 if (tx_seq_offset >= chan->tx_win) {
3333 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3334 goto drop;
3337 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3338 goto drop;
3340 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3341 struct srej_list *first;
3343 first = list_first_entry(&chan->srej_l,
3344 struct srej_list, list);
3345 if (tx_seq == first->tx_seq) {
3346 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3347 l2cap_check_srej_gap(chan, tx_seq);
3349 list_del(&first->list);
3350 kfree(first);
3352 if (list_empty(&chan->srej_l)) {
3353 chan->buffer_seq = chan->buffer_seq_srej;
3354 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3355 l2cap_send_ack(chan);
3356 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3358 } else {
3359 struct srej_list *l;
3361 /* duplicated tx_seq */
3362 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3363 goto drop;
3365 list_for_each_entry(l, &chan->srej_l, list) {
3366 if (l->tx_seq == tx_seq) {
3367 l2cap_resend_srejframe(chan, tx_seq);
3368 return 0;
3371 l2cap_send_srejframe(chan, tx_seq);
3373 } else {
3374 expected_tx_seq_offset =
3375 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3376 if (expected_tx_seq_offset < 0)
3377 expected_tx_seq_offset += 64;
3379 /* duplicated tx_seq */
3380 if (tx_seq_offset < expected_tx_seq_offset)
3381 goto drop;
3383 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3385 BT_DBG("chan %p, Enter SREJ", chan);
3387 INIT_LIST_HEAD(&chan->srej_l);
3388 chan->buffer_seq_srej = chan->buffer_seq;
3390 __skb_queue_head_init(&chan->srej_q);
3391 __skb_queue_head_init(&chan->busy_q);
3392 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3394 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3396 l2cap_send_srejframe(chan, tx_seq);
3398 del_timer(&chan->ack_timer);
3400 return 0;
3402 expected:
3403 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3405 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3406 bt_cb(skb)->tx_seq = tx_seq;
3407 bt_cb(skb)->sar = sar;
3408 __skb_queue_tail(&chan->srej_q, skb);
3409 return 0;
3412 err = l2cap_push_rx_skb(chan, skb, rx_control);
3413 if (err < 0)
3414 return 0;
3416 if (rx_control & L2CAP_CTRL_FINAL) {
3417 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3418 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3419 else
3420 l2cap_retransmit_frames(chan);
3423 __mod_ack_timer();
3425 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3426 if (chan->num_acked == num_to_ack - 1)
3427 l2cap_send_ack(chan);
3429 return 0;
3431 drop:
3432 kfree_skb(skb);
3433 return 0;
/* Handle a Receiver Ready S-frame: acknowledge frames up to req_seq,
 * then react to the P/F bits — a poll demands an immediate response
 * (SREJ tail during recovery, otherwise RR/RNR/I-frames); a final bit
 * resolves an outstanding REJ retransmission; a plain RR clears remote
 * busy and resumes transmission.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* F-bit ack for a REJ we sent: either it was already
		 * acted on, or retransmit now. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
/* Handle a Reject S-frame: acknowledge up to req_seq and retransmit
 * from there.  With the F bit set, only retransmit if the outstanding
 * REJ was not already acted upon; otherwise retransmit immediately and
 * remember (REJ_ACT) if we are still waiting for a poll response.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject S-frame asking for retransmission of a
 * single frame.  P-bit: ack up to tx_seq, retransmit the requested
 * frame with the F bit and resume sending.  F-bit: retransmit only if
 * this SREJ was not already answered (SREJ_ACT matching the saved
 * req_seq).  Plain SREJ: retransmit and remember it while waiting for
 * a poll response.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a Receiver Not Ready S-frame: mark the remote busy, ack up to
 * tx_seq and stop retransmitting.  A poll must still be answered even
 * while the peer is busy — with the SREJ tail when recovery is active,
 * an RR+F otherwise.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer can't receive: no point retransmitting. */
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
/* Dispatch a supervisory frame (RR/REJ/SREJ/RNR) to its handler after
 * resolving an F-bit answer to an outstanding poll.  The skb carries
 * no payload beyond the control field and is always freed here.
 * Always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	/* F-bit answers our earlier poll: stop the monitor timer. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&chan->monitor_timer);
		if (chan->unacked_frames > 0)
			__mod_retrans_timer();
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
/* Entry point for ERTM data frames (also run from the socket backlog).
 * Strips and validates the control field, FCS and length, validates
 * the piggybacked req_seq against the send window, then hands the
 * frame to the I-frame or S-frame path.  Bad FCS is silently dropped
 * (peer recovery will ask for retransmission); invalid lengths or
 * req_seq disconnect the channel.  Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload accounting: SDU-length field and FCS trailer are not
	 * application data. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* req_seq must lie between expected_ack_seq and next_tx_seq
	 * (distances computed mod 64). */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Route a data frame to the channel identified by its destination CID
 * and process it according to the channel mode (basic, ERTM or
 * streaming).  NOTE(review): l2cap_get_chan_by_scid() appears to
 * return with the channel's socket bh-locked — the done label's
 * bh_unlock_sock() is the matching release; confirm against its
 * definition.  The skb is consumed on every path.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;
	pi = l2cap_pi(sk);

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process inline unless user space holds the socket, in
		 * which case defer via the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are meaningless in streaming mode. */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming is lossy: just resynchronize on gaps. */
		if (chan->expected_tx_seq == tx_seq)
			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		else
			chan->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(chan, skb, control);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless-channel (CID 0x0002) frame to the socket
 * listening on the given PSM, if any, subject to state and MTU checks.
 * Note that drop falls through into done, so the lock taken after a
 * successful lookup is released on every path.  Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->chan->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Deliver an LE fixed-channel (ATT) frame to the socket bound to that
 * CID, if any, subject to state and MTU checks.  Mirrors
 * l2cap_conless_channel(): drop falls through into done so the socket
 * lock is always released.  Always returns 0.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->chan->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Top-level demultiplexer for a complete, reassembled L2CAP frame:
 * validate the basic header length, then route by destination CID to
 * the signaling, connectionless, ATT or connection-oriented data path.
 * Consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload exactly. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 2-byte PSM prefix. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3862 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan the
 * global channel list for listening sockets on this adapter and build
 * the link-mode mask (accept + optional role switch).  A socket bound
 * to the adapter's own address takes precedence over wildcard-bound
 * ones.  Returns the mask, or -EINVAL for non-ACL links.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Bound to this adapter: exact match wins. */
			lm1 |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
/* HCI callback: baseband connection attempt completed.  On success,
 * attach (or create) the L2CAP connection state and kick off channel
 * setup; on failure, tear down any state with the mapped errno.
 * Returns 0, or -EINVAL for link types L2CAP does not run over.
 */
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
		return -EINVAL;

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else
		l2cap_conn_del(hcon, bt_err(status));

	return 0;
}
/* HCI callback: supply the HCI reason code to use when disconnecting
 * this link.  0x13 is "Remote User Terminated Connection", used as the
 * default when there is no L2CAP state to consult.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
3929 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3931 BT_DBG("hcon %p reason %d", hcon, reason);
3933 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3934 return -EINVAL;
3936 l2cap_conn_del(hcon, bt_err(reason));
3938 return 0;
/* React to an encryption change on a connected channel (SEQPACKET and
 * STREAM sockets only).  Losing encryption on a high-security channel
 * closes it immediately; on medium security it arms a 5 s grace timer
 * (e.g. to survive a role switch), which is cancelled again when
 * encryption comes back.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	struct sock *sk = chan->sk;

	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (chan->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
	}
}
/* HCI callback: an authentication/encryption procedure finished with
 * the given status.  Walk every channel on the connection and advance
 * its state machine: connected/configuring channels get an encryption
 * check; channels waiting to connect either send their Connection
 * Request (success) or are scheduled for teardown (failure); channels
 * in passive open (BT_CONNECT2) are answered with the appropriate
 * Connection Response or handed to the parent for deferred setup.
 * Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* This channel's security round-trip is still pending
		 * elsewhere; leave it alone. */
		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security is in place: proceed with the
				 * L2CAP connection request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm  = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Short timer: tear the socket down soon. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					/* Let the listener decide: report
					 * authorization pending. */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					parent->sk_data_ready(parent, 0);
				} else {
					sk->sk_state = BT_CONFIG;
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
/* HCI callback: one ACL fragment arrived for this link.  Reassemble
 * fragments into complete L2CAP frames in conn->rx_skb, using the
 * basic L2CAP header's length field to know how much to expect, and
 * hand every complete frame to l2cap_recv_frame().
 *
 * Ownership: this function always consumes @skb (either forwarded as
 * a complete frame or freed at "drop").  Any protocol violation marks
 * the connection unreliable with ECOMM and drops the fragment.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress:
		 * discard the stale partial frame and carry on. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		/* Total frame size = payload length + header */
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		/* NOTE(review): no bh_lock_sock() appears before the
		 * unlocks below — l2cap_get_chan_by_scid() presumably
		 * returns with the channel's socket bh-locked; verify
		 * against its definition before touching this path. */
		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject frames larger than the channel's recv MTU
			 * before allocating the reassembly buffer. */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Continuation would overflow the announced frame length:
		 * abandon the partial frame entirely. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4153 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4155 struct l2cap_chan *c;
4157 read_lock_bh(&chan_list_lock);
4159 list_for_each_entry(c, &chan_list, global_l) {
4160 struct sock *sk = c->sk;
4162 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4163 batostr(&bt_sk(sk)->src),
4164 batostr(&bt_sk(sk)->dst),
4165 sk->sk_state, __le16_to_cpu(c->psm),
4166 c->scid, c->dcid, c->imtu, c->omtu,
4167 c->sec_level, c->mode);
4170 read_unlock_bh(&chan_list_lock);
4172 return 0;
/* debugfs open: bind the single-shot seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* Read-only file operations for the l2cap debugfs entry, backed by
 * the single_open()/seq_file helpers above.
 */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
/* Hooks L2CAP into the HCI core: connection-accept policy, link
 * setup/teardown, security results and inbound ACL data are all
 * dispatched through these callbacks.
 */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4200 int __init l2cap_init(void)
4202 int err;
4204 err = l2cap_init_sockets();
4205 if (err < 0)
4206 return err;
4208 _busy_wq = create_singlethread_workqueue("l2cap");
4209 if (!_busy_wq) {
4210 err = -ENOMEM;
4211 goto error;
4214 err = hci_register_proto(&l2cap_hci_proto);
4215 if (err < 0) {
4216 BT_ERR("L2CAP protocol registration failed");
4217 bt_sock_unregister(BTPROTO_L2CAP);
4218 goto error;
4221 if (bt_debugfs) {
4222 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4223 bt_debugfs, NULL, &l2cap_debugfs_fops);
4224 if (!l2cap_debugfs)
4225 BT_ERR("Failed to create L2CAP debug file");
4228 return 0;
4230 error:
4231 destroy_workqueue(_busy_wq);
4232 l2cap_cleanup_sockets();
4233 return err;
/* Module exit: undo l2cap_init() — remove the debugfs entry, drain
 * and destroy the workqueue, unhook from HCI and tear down sockets.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain any queued busy-state work before destroying the queue */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
/* Module parameter: allows disabling enhanced retransmission mode
 * (writable at runtime via sysfs, mode 0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");