blkcg: drop stuff unused after per-queue policy activation update
[linux-2.6.git] / net / bluetooth / l2cap_core.c
blobb8e17e4dac8b4179d9ac82ad916baf28ed333662
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
59 bool disable_ertm;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 /* ---- L2CAP channels ---- */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 struct l2cap_chan *c;
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
85 return NULL;
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 struct l2cap_chan *c;
92 list_for_each_entry(c, &conn->chan_l, list) {
93 if (c->scid == cid)
94 return c;
96 return NULL;
99 /* Find channel with given SCID.
100 * Returns locked socket */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
109 return c;
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 struct l2cap_chan *c;
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
118 return c;
120 return NULL;
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 struct l2cap_chan *c;
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
131 return c;
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 return c;
142 return NULL;
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
147 int err;
149 write_lock(&chan_list_lock);
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
152 err = -EADDRINUSE;
153 goto done;
156 if (psm) {
157 chan->psm = psm;
158 chan->sport = psm;
159 err = 0;
160 } else {
161 u16 p;
163 err = -EINVAL;
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
168 err = 0;
169 break;
173 done:
174 write_unlock(&chan_list_lock);
175 return err;
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 write_lock(&chan_list_lock);
182 chan->scid = scid;
184 write_unlock(&chan_list_lock);
186 return 0;
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 u16 cid = L2CAP_CID_DYN_START;
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
195 return cid;
198 return 0;
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
206 chan->state = state;
207 chan->ops->state_change(chan->data, state);
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 struct sock *sk = chan->sk;
214 lock_sock(sk);
215 __l2cap_state_change(chan, state);
216 release_sock(sk);
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 struct sock *sk = chan->sk;
223 sk->sk_err = err;
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 struct sock *sk = chan->sk;
230 lock_sock(sk);
231 __l2cap_chan_set_err(chan, err);
232 release_sock(sk);
235 static void l2cap_chan_timeout(struct work_struct *work)
237 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
238 chan_timer.work);
239 struct l2cap_conn *conn = chan->conn;
240 int reason;
242 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
244 mutex_lock(&conn->chan_lock);
245 l2cap_chan_lock(chan);
247 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
248 reason = ECONNREFUSED;
249 else if (chan->state == BT_CONNECT &&
250 chan->sec_level != BT_SECURITY_SDP)
251 reason = ECONNREFUSED;
252 else
253 reason = ETIMEDOUT;
255 l2cap_chan_close(chan, reason);
257 l2cap_chan_unlock(chan);
259 chan->ops->close(chan->data);
260 mutex_unlock(&conn->chan_lock);
262 l2cap_chan_put(chan);
265 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
267 struct l2cap_chan *chan;
269 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
270 if (!chan)
271 return NULL;
273 mutex_init(&chan->lock);
275 chan->sk = sk;
277 write_lock(&chan_list_lock);
278 list_add(&chan->global_l, &chan_list);
279 write_unlock(&chan_list_lock);
281 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
283 chan->state = BT_OPEN;
285 atomic_set(&chan->refcnt, 1);
287 BT_DBG("sk %p chan %p", sk, chan);
289 return chan;
292 void l2cap_chan_destroy(struct l2cap_chan *chan)
294 write_lock(&chan_list_lock);
295 list_del(&chan->global_l);
296 write_unlock(&chan_list_lock);
298 l2cap_chan_put(chan);
301 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
303 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
304 chan->psm, chan->dcid);
306 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
308 chan->conn = conn;
310 switch (chan->chan_type) {
311 case L2CAP_CHAN_CONN_ORIENTED:
312 if (conn->hcon->type == LE_LINK) {
313 /* LE connection */
314 chan->omtu = L2CAP_LE_DEFAULT_MTU;
315 chan->scid = L2CAP_CID_LE_DATA;
316 chan->dcid = L2CAP_CID_LE_DATA;
317 } else {
318 /* Alloc CID for connection-oriented socket */
319 chan->scid = l2cap_alloc_cid(conn);
320 chan->omtu = L2CAP_DEFAULT_MTU;
322 break;
324 case L2CAP_CHAN_CONN_LESS:
325 /* Connectionless socket */
326 chan->scid = L2CAP_CID_CONN_LESS;
327 chan->dcid = L2CAP_CID_CONN_LESS;
328 chan->omtu = L2CAP_DEFAULT_MTU;
329 break;
331 default:
332 /* Raw socket can send/recv signalling messages only */
333 chan->scid = L2CAP_CID_SIGNALING;
334 chan->dcid = L2CAP_CID_SIGNALING;
335 chan->omtu = L2CAP_DEFAULT_MTU;
338 chan->local_id = L2CAP_BESTEFFORT_ID;
339 chan->local_stype = L2CAP_SERV_BESTEFFORT;
340 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
341 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
342 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
343 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
345 l2cap_chan_hold(chan);
347 list_add(&chan->list, &conn->chan_l);
350 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
352 mutex_lock(&conn->chan_lock);
353 __l2cap_chan_add(conn, chan);
354 mutex_unlock(&conn->chan_lock);
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
363 __clear_chan_timer(chan);
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
367 if (conn) {
368 /* Delete from channel list */
369 list_del(&chan->list);
371 l2cap_chan_put(chan);
373 chan->conn = NULL;
374 hci_conn_put(conn->hcon);
377 lock_sock(sk);
379 __l2cap_state_change(chan, BT_CLOSED);
380 sock_set_flag(sk, SOCK_ZAPPED);
382 if (err)
383 __l2cap_chan_set_err(chan, err);
385 if (parent) {
386 bt_accept_unlink(sk);
387 parent->sk_data_ready(parent, 0);
388 } else
389 sk->sk_state_change(sk);
391 release_sock(sk);
393 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
394 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
395 return;
397 skb_queue_purge(&chan->tx_q);
399 if (chan->mode == L2CAP_MODE_ERTM) {
400 struct srej_list *l, *tmp;
402 __clear_retrans_timer(chan);
403 __clear_monitor_timer(chan);
404 __clear_ack_timer(chan);
406 skb_queue_purge(&chan->srej_q);
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
409 list_del(&l->list);
410 kfree(l);
415 static void l2cap_chan_cleanup_listen(struct sock *parent)
417 struct sock *sk;
419 BT_DBG("parent %p", parent);
421 /* Close not yet accepted channels */
422 while ((sk = bt_accept_dequeue(parent, NULL))) {
423 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
425 l2cap_chan_lock(chan);
426 __clear_chan_timer(chan);
427 l2cap_chan_close(chan, ECONNRESET);
428 l2cap_chan_unlock(chan);
430 chan->ops->close(chan->data);
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
439 BT_DBG("chan %p state %s sk %p", chan,
440 state_to_string(chan->state), sk);
442 switch (chan->state) {
443 case BT_LISTEN:
444 lock_sock(sk);
445 l2cap_chan_cleanup_listen(sk);
447 __l2cap_state_change(chan, BT_CLOSED);
448 sock_set_flag(sk, SOCK_ZAPPED);
449 release_sock(sk);
450 break;
452 case BT_CONNECTED:
453 case BT_CONFIG:
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
459 } else
460 l2cap_chan_del(chan, reason);
461 break;
463 case BT_CONNECT2:
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
467 __u16 result;
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
471 else
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
480 sizeof(rsp), &rsp);
483 l2cap_chan_del(chan, reason);
484 break;
486 case BT_CONNECT:
487 case BT_DISCONN:
488 l2cap_chan_del(chan, reason);
489 break;
491 default:
492 lock_sock(sk);
493 sock_set_flag(sk, SOCK_ZAPPED);
494 release_sock(sk);
495 break;
499 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
501 if (chan->chan_type == L2CAP_CHAN_RAW) {
502 switch (chan->sec_level) {
503 case BT_SECURITY_HIGH:
504 return HCI_AT_DEDICATED_BONDING_MITM;
505 case BT_SECURITY_MEDIUM:
506 return HCI_AT_DEDICATED_BONDING;
507 default:
508 return HCI_AT_NO_BONDING;
510 } else if (chan->psm == cpu_to_le16(0x0001)) {
511 if (chan->sec_level == BT_SECURITY_LOW)
512 chan->sec_level = BT_SECURITY_SDP;
514 if (chan->sec_level == BT_SECURITY_HIGH)
515 return HCI_AT_NO_BONDING_MITM;
516 else
517 return HCI_AT_NO_BONDING;
518 } else {
519 switch (chan->sec_level) {
520 case BT_SECURITY_HIGH:
521 return HCI_AT_GENERAL_BONDING_MITM;
522 case BT_SECURITY_MEDIUM:
523 return HCI_AT_GENERAL_BONDING;
524 default:
525 return HCI_AT_NO_BONDING;
530 /* Service level security */
531 int l2cap_chan_check_security(struct l2cap_chan *chan)
533 struct l2cap_conn *conn = chan->conn;
534 __u8 auth_type;
536 auth_type = l2cap_get_auth_type(chan);
538 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
541 static u8 l2cap_get_ident(struct l2cap_conn *conn)
543 u8 id;
545 /* Get next available identificator.
546 * 1 - 128 are used by kernel.
547 * 129 - 199 are reserved.
548 * 200 - 254 are used by utilities like l2ping, etc.
551 spin_lock(&conn->lock);
553 if (++conn->tx_ident > 128)
554 conn->tx_ident = 1;
556 id = conn->tx_ident;
558 spin_unlock(&conn->lock);
560 return id;
563 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
565 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
566 u8 flags;
568 BT_DBG("code 0x%2.2x", code);
570 if (!skb)
571 return;
573 if (lmp_no_flush_capable(conn->hcon->hdev))
574 flags = ACL_START_NO_FLUSH;
575 else
576 flags = ACL_START;
578 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
579 skb->priority = HCI_PRIO_MAX;
581 hci_send_acl(conn->hchan, skb, flags);
584 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
586 struct hci_conn *hcon = chan->conn->hcon;
587 u16 flags;
589 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
590 skb->priority);
592 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
593 lmp_no_flush_capable(hcon->hdev))
594 flags = ACL_START_NO_FLUSH;
595 else
596 flags = ACL_START;
598 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
599 hci_send_acl(chan->conn->hchan, skb, flags);
602 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
604 struct sk_buff *skb;
605 struct l2cap_hdr *lh;
606 struct l2cap_conn *conn = chan->conn;
607 int count, hlen;
609 if (chan->state != BT_CONNECTED)
610 return;
612 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
613 hlen = L2CAP_EXT_HDR_SIZE;
614 else
615 hlen = L2CAP_ENH_HDR_SIZE;
617 if (chan->fcs == L2CAP_FCS_CRC16)
618 hlen += L2CAP_FCS_SIZE;
620 BT_DBG("chan %p, control 0x%8.8x", chan, control);
622 count = min_t(unsigned int, conn->mtu, hlen);
624 control |= __set_sframe(chan);
626 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
627 control |= __set_ctrl_final(chan);
629 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
630 control |= __set_ctrl_poll(chan);
632 skb = bt_skb_alloc(count, GFP_ATOMIC);
633 if (!skb)
634 return;
636 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
637 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
638 lh->cid = cpu_to_le16(chan->dcid);
640 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
642 if (chan->fcs == L2CAP_FCS_CRC16) {
643 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
644 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
647 skb->priority = HCI_PRIO_MAX;
648 l2cap_do_send(chan, skb);
651 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
653 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
654 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
655 set_bit(CONN_RNR_SENT, &chan->conn_state);
656 } else
657 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
659 control |= __set_reqseq(chan, chan->buffer_seq);
661 l2cap_send_sframe(chan, control);
664 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
666 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
669 static void l2cap_send_conn_req(struct l2cap_chan *chan)
671 struct l2cap_conn *conn = chan->conn;
672 struct l2cap_conn_req req;
674 req.scid = cpu_to_le16(chan->scid);
675 req.psm = chan->psm;
677 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
684 static void l2cap_do_start(struct l2cap_chan *chan)
686 struct l2cap_conn *conn = chan->conn;
688 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
689 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
690 return;
692 if (l2cap_chan_check_security(chan) &&
693 __l2cap_no_conn_pending(chan))
694 l2cap_send_conn_req(chan);
695 } else {
696 struct l2cap_info_req req;
697 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
699 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
700 conn->info_ident = l2cap_get_ident(conn);
702 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
704 l2cap_send_cmd(conn, conn->info_ident,
705 L2CAP_INFO_REQ, sizeof(req), &req);
709 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
711 u32 local_feat_mask = l2cap_feat_mask;
712 if (!disable_ertm)
713 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
715 switch (mode) {
716 case L2CAP_MODE_ERTM:
717 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
718 case L2CAP_MODE_STREAMING:
719 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
720 default:
721 return 0x00;
725 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
727 struct sock *sk = chan->sk;
728 struct l2cap_disconn_req req;
730 if (!conn)
731 return;
733 if (chan->mode == L2CAP_MODE_ERTM) {
734 __clear_retrans_timer(chan);
735 __clear_monitor_timer(chan);
736 __clear_ack_timer(chan);
739 req.dcid = cpu_to_le16(chan->dcid);
740 req.scid = cpu_to_le16(chan->scid);
741 l2cap_send_cmd(conn, l2cap_get_ident(conn),
742 L2CAP_DISCONN_REQ, sizeof(req), &req);
744 lock_sock(sk);
745 __l2cap_state_change(chan, BT_DISCONN);
746 __l2cap_chan_set_err(chan, err);
747 release_sock(sk);
750 /* ---- L2CAP connections ---- */
751 static void l2cap_conn_start(struct l2cap_conn *conn)
753 struct l2cap_chan *chan, *tmp;
755 BT_DBG("conn %p", conn);
757 mutex_lock(&conn->chan_lock);
759 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
762 l2cap_chan_lock(chan);
764 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
765 l2cap_chan_unlock(chan);
766 continue;
769 if (chan->state == BT_CONNECT) {
770 if (!l2cap_chan_check_security(chan) ||
771 !__l2cap_no_conn_pending(chan)) {
772 l2cap_chan_unlock(chan);
773 continue;
776 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
777 && test_bit(CONF_STATE2_DEVICE,
778 &chan->conf_state)) {
779 l2cap_chan_close(chan, ECONNRESET);
780 l2cap_chan_unlock(chan);
781 continue;
784 l2cap_send_conn_req(chan);
786 } else if (chan->state == BT_CONNECT2) {
787 struct l2cap_conn_rsp rsp;
788 char buf[128];
789 rsp.scid = cpu_to_le16(chan->dcid);
790 rsp.dcid = cpu_to_le16(chan->scid);
792 if (l2cap_chan_check_security(chan)) {
793 lock_sock(sk);
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
798 if (parent)
799 parent->sk_data_ready(parent, 0);
801 } else {
802 __l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
806 release_sock(sk);
807 } else {
808 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
809 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
813 sizeof(rsp), &rsp);
815 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
816 rsp.result != L2CAP_CR_SUCCESS) {
817 l2cap_chan_unlock(chan);
818 continue;
821 set_bit(CONF_REQ_SENT, &chan->conf_state);
822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
823 l2cap_build_conf_req(chan, buf), buf);
824 chan->num_conf_req++;
827 l2cap_chan_unlock(chan);
830 mutex_unlock(&conn->chan_lock);
833 /* Find socket with cid and source bdaddr.
834 * Returns closest match, locked.
836 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
838 struct l2cap_chan *c, *c1 = NULL;
840 read_lock(&chan_list_lock);
842 list_for_each_entry(c, &chan_list, global_l) {
843 struct sock *sk = c->sk;
845 if (state && c->state != state)
846 continue;
848 if (c->scid == cid) {
849 /* Exact match. */
850 if (!bacmp(&bt_sk(sk)->src, src)) {
851 read_unlock(&chan_list_lock);
852 return c;
855 /* Closest match */
856 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
857 c1 = c;
861 read_unlock(&chan_list_lock);
863 return c1;
866 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
868 struct sock *parent, *sk;
869 struct l2cap_chan *chan, *pchan;
871 BT_DBG("");
873 /* Check if we have socket listening on cid */
874 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
875 conn->src);
876 if (!pchan)
877 return;
879 parent = pchan->sk;
881 lock_sock(parent);
883 /* Check for backlog size */
884 if (sk_acceptq_is_full(parent)) {
885 BT_DBG("backlog full %d", parent->sk_ack_backlog);
886 goto clean;
889 chan = pchan->ops->new_connection(pchan->data);
890 if (!chan)
891 goto clean;
893 sk = chan->sk;
895 hci_conn_hold(conn->hcon);
897 bacpy(&bt_sk(sk)->src, conn->src);
898 bacpy(&bt_sk(sk)->dst, conn->dst);
900 bt_accept_enqueue(parent, sk);
902 l2cap_chan_add(conn, chan);
904 __set_chan_timer(chan, sk->sk_sndtimeo);
906 __l2cap_state_change(chan, BT_CONNECTED);
907 parent->sk_data_ready(parent, 0);
909 clean:
910 release_sock(parent);
913 static void l2cap_chan_ready(struct l2cap_chan *chan)
915 struct sock *sk = chan->sk;
916 struct sock *parent;
918 lock_sock(sk);
920 parent = bt_sk(sk)->parent;
922 BT_DBG("sk %p, parent %p", sk, parent);
924 chan->conf_state = 0;
925 __clear_chan_timer(chan);
927 __l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
930 if (parent)
931 parent->sk_data_ready(parent, 0);
933 release_sock(sk);
936 static void l2cap_conn_ready(struct l2cap_conn *conn)
938 struct l2cap_chan *chan;
940 BT_DBG("conn %p", conn);
942 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
943 l2cap_le_conn_ready(conn);
945 if (conn->hcon->out && conn->hcon->type == LE_LINK)
946 smp_conn_security(conn, conn->hcon->pending_sec_level);
948 mutex_lock(&conn->chan_lock);
950 list_for_each_entry(chan, &conn->chan_l, list) {
952 l2cap_chan_lock(chan);
954 if (conn->hcon->type == LE_LINK) {
955 if (smp_conn_security(conn, chan->sec_level))
956 l2cap_chan_ready(chan);
958 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
959 struct sock *sk = chan->sk;
960 __clear_chan_timer(chan);
961 lock_sock(sk);
962 __l2cap_state_change(chan, BT_CONNECTED);
963 sk->sk_state_change(sk);
964 release_sock(sk);
966 } else if (chan->state == BT_CONNECT)
967 l2cap_do_start(chan);
969 l2cap_chan_unlock(chan);
972 mutex_unlock(&conn->chan_lock);
975 /* Notify sockets that we cannot guaranty reliability anymore */
976 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
978 struct l2cap_chan *chan;
980 BT_DBG("conn %p", conn);
982 mutex_lock(&conn->chan_lock);
984 list_for_each_entry(chan, &conn->chan_l, list) {
985 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
986 __l2cap_chan_set_err(chan, err);
989 mutex_unlock(&conn->chan_lock);
992 static void l2cap_info_timeout(struct work_struct *work)
994 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
995 info_timer.work);
997 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
998 conn->info_ident = 0;
1000 l2cap_conn_start(conn);
1003 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1005 struct l2cap_conn *conn = hcon->l2cap_data;
1006 struct l2cap_chan *chan, *l;
1008 if (!conn)
1009 return;
1011 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1013 kfree_skb(conn->rx_skb);
1015 mutex_lock(&conn->chan_lock);
1017 /* Kill channels */
1018 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1019 l2cap_chan_lock(chan);
1021 l2cap_chan_del(chan, err);
1023 l2cap_chan_unlock(chan);
1025 chan->ops->close(chan->data);
1028 mutex_unlock(&conn->chan_lock);
1030 hci_chan_del(conn->hchan);
1032 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1033 cancel_delayed_work_sync(&conn->info_timer);
1035 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1036 cancel_delayed_work_sync(&conn->security_timer);
1037 smp_chan_destroy(conn);
1040 hcon->l2cap_data = NULL;
1041 kfree(conn);
1044 static void security_timeout(struct work_struct *work)
1046 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1047 security_timer.work);
1049 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1052 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1054 struct l2cap_conn *conn = hcon->l2cap_data;
1055 struct hci_chan *hchan;
1057 if (conn || status)
1058 return conn;
1060 hchan = hci_chan_create(hcon);
1061 if (!hchan)
1062 return NULL;
1064 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1065 if (!conn) {
1066 hci_chan_del(hchan);
1067 return NULL;
1070 hcon->l2cap_data = conn;
1071 conn->hcon = hcon;
1072 conn->hchan = hchan;
1074 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1076 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1077 conn->mtu = hcon->hdev->le_mtu;
1078 else
1079 conn->mtu = hcon->hdev->acl_mtu;
1081 conn->src = &hcon->hdev->bdaddr;
1082 conn->dst = &hcon->dst;
1084 conn->feat_mask = 0;
1086 spin_lock_init(&conn->lock);
1087 mutex_init(&conn->chan_lock);
1089 INIT_LIST_HEAD(&conn->chan_l);
1091 if (hcon->type == LE_LINK)
1092 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1093 else
1094 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1096 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1098 return conn;
1101 /* ---- Socket interface ---- */
1103 /* Find socket with psm and source bdaddr.
1104 * Returns closest match.
1106 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1108 struct l2cap_chan *c, *c1 = NULL;
1110 read_lock(&chan_list_lock);
1112 list_for_each_entry(c, &chan_list, global_l) {
1113 struct sock *sk = c->sk;
1115 if (state && c->state != state)
1116 continue;
1118 if (c->psm == psm) {
1119 /* Exact match. */
1120 if (!bacmp(&bt_sk(sk)->src, src)) {
1121 read_unlock(&chan_list_lock);
1122 return c;
1125 /* Closest match */
1126 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1127 c1 = c;
1131 read_unlock(&chan_list_lock);
1133 return c1;
1136 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1138 struct sock *sk = chan->sk;
1139 bdaddr_t *src = &bt_sk(sk)->src;
1140 struct l2cap_conn *conn;
1141 struct hci_conn *hcon;
1142 struct hci_dev *hdev;
1143 __u8 auth_type;
1144 int err;
1146 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1147 chan->psm);
1149 hdev = hci_get_route(dst, src);
1150 if (!hdev)
1151 return -EHOSTUNREACH;
1153 hci_dev_lock(hdev);
1155 l2cap_chan_lock(chan);
1157 /* PSM must be odd and lsb of upper byte must be 0 */
1158 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1159 chan->chan_type != L2CAP_CHAN_RAW) {
1160 err = -EINVAL;
1161 goto done;
1164 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1165 err = -EINVAL;
1166 goto done;
1169 switch (chan->mode) {
1170 case L2CAP_MODE_BASIC:
1171 break;
1172 case L2CAP_MODE_ERTM:
1173 case L2CAP_MODE_STREAMING:
1174 if (!disable_ertm)
1175 break;
1176 /* fall through */
1177 default:
1178 err = -ENOTSUPP;
1179 goto done;
1182 lock_sock(sk);
1184 switch (sk->sk_state) {
1185 case BT_CONNECT:
1186 case BT_CONNECT2:
1187 case BT_CONFIG:
1188 /* Already connecting */
1189 err = 0;
1190 release_sock(sk);
1191 goto done;
1193 case BT_CONNECTED:
1194 /* Already connected */
1195 err = -EISCONN;
1196 release_sock(sk);
1197 goto done;
1199 case BT_OPEN:
1200 case BT_BOUND:
1201 /* Can connect */
1202 break;
1204 default:
1205 err = -EBADFD;
1206 release_sock(sk);
1207 goto done;
1210 /* Set destination address and psm */
1211 bacpy(&bt_sk(sk)->dst, dst);
1213 release_sock(sk);
1215 chan->psm = psm;
1216 chan->dcid = cid;
1218 auth_type = l2cap_get_auth_type(chan);
1220 if (chan->dcid == L2CAP_CID_LE_DATA)
1221 hcon = hci_connect(hdev, LE_LINK, dst,
1222 chan->sec_level, auth_type);
1223 else
1224 hcon = hci_connect(hdev, ACL_LINK, dst,
1225 chan->sec_level, auth_type);
1227 if (IS_ERR(hcon)) {
1228 err = PTR_ERR(hcon);
1229 goto done;
1232 conn = l2cap_conn_add(hcon, 0);
1233 if (!conn) {
1234 hci_conn_put(hcon);
1235 err = -ENOMEM;
1236 goto done;
1239 /* Update source addr of the socket */
1240 bacpy(src, conn->src);
1242 l2cap_chan_unlock(chan);
1243 l2cap_chan_add(conn, chan);
1244 l2cap_chan_lock(chan);
1246 l2cap_state_change(chan, BT_CONNECT);
1247 __set_chan_timer(chan, sk->sk_sndtimeo);
1249 if (hcon->state == BT_CONNECTED) {
1250 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1251 __clear_chan_timer(chan);
1252 if (l2cap_chan_check_security(chan))
1253 l2cap_state_change(chan, BT_CONNECTED);
1254 } else
1255 l2cap_do_start(chan);
1258 err = 0;
1260 done:
1261 l2cap_chan_unlock(chan);
1262 hci_dev_unlock(hdev);
1263 hci_dev_put(hdev);
1264 return err;
1267 int __l2cap_wait_ack(struct sock *sk)
1269 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1270 DECLARE_WAITQUEUE(wait, current);
1271 int err = 0;
1272 int timeo = HZ/5;
1274 add_wait_queue(sk_sleep(sk), &wait);
1275 set_current_state(TASK_INTERRUPTIBLE);
1276 while (chan->unacked_frames > 0 && chan->conn) {
1277 if (!timeo)
1278 timeo = HZ/5;
1280 if (signal_pending(current)) {
1281 err = sock_intr_errno(timeo);
1282 break;
1285 release_sock(sk);
1286 timeo = schedule_timeout(timeo);
1287 lock_sock(sk);
1288 set_current_state(TASK_INTERRUPTIBLE);
1290 err = sock_error(sk);
1291 if (err)
1292 break;
1294 set_current_state(TASK_RUNNING);
1295 remove_wait_queue(sk_sleep(sk), &wait);
1296 return err;
1299 static void l2cap_monitor_timeout(struct work_struct *work)
1301 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1302 monitor_timer.work);
1304 BT_DBG("chan %p", chan);
1306 l2cap_chan_lock(chan);
1308 if (chan->retry_count >= chan->remote_max_tx) {
1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 l2cap_chan_unlock(chan);
1311 return;
1314 chan->retry_count++;
1315 __set_monitor_timer(chan);
1317 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1318 l2cap_chan_unlock(chan);
1321 static void l2cap_retrans_timeout(struct work_struct *work)
1323 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1324 retrans_timer.work);
1326 BT_DBG("chan %p", chan);
1328 l2cap_chan_lock(chan);
1330 chan->retry_count = 1;
1331 __set_monitor_timer(chan);
1333 set_bit(CONN_WAIT_F, &chan->conn_state);
1335 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1337 l2cap_chan_unlock(chan);
1340 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1342 struct sk_buff *skb;
1344 while ((skb = skb_peek(&chan->tx_q)) &&
1345 chan->unacked_frames) {
1346 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1347 break;
1349 skb = skb_dequeue(&chan->tx_q);
1350 kfree_skb(skb);
1352 chan->unacked_frames--;
1355 if (!chan->unacked_frames)
1356 __clear_retrans_timer(chan);
1359 static void l2cap_streaming_send(struct l2cap_chan *chan)
1361 struct sk_buff *skb;
1362 u32 control;
1363 u16 fcs;
1365 while ((skb = skb_dequeue(&chan->tx_q))) {
1366 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1367 control |= __set_txseq(chan, chan->next_tx_seq);
1368 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1370 if (chan->fcs == L2CAP_FCS_CRC16) {
1371 fcs = crc16(0, (u8 *)skb->data,
1372 skb->len - L2CAP_FCS_SIZE);
1373 put_unaligned_le16(fcs,
1374 skb->data + skb->len - L2CAP_FCS_SIZE);
1377 l2cap_do_send(chan, skb);
1379 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1383 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1385 struct sk_buff *skb, *tx_skb;
1386 u16 fcs;
1387 u32 control;
1389 skb = skb_peek(&chan->tx_q);
1390 if (!skb)
1391 return;
1393 while (bt_cb(skb)->tx_seq != tx_seq) {
1394 if (skb_queue_is_last(&chan->tx_q, skb))
1395 return;
1397 skb = skb_queue_next(&chan->tx_q, skb);
1400 if (chan->remote_max_tx &&
1401 bt_cb(skb)->retries == chan->remote_max_tx) {
1402 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1403 return;
1406 tx_skb = skb_clone(skb, GFP_ATOMIC);
1407 bt_cb(skb)->retries++;
1409 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1410 control &= __get_sar_mask(chan);
1412 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1413 control |= __set_ctrl_final(chan);
1415 control |= __set_reqseq(chan, chan->buffer_seq);
1416 control |= __set_txseq(chan, tx_seq);
1418 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1420 if (chan->fcs == L2CAP_FCS_CRC16) {
1421 fcs = crc16(0, (u8 *)tx_skb->data,
1422 tx_skb->len - L2CAP_FCS_SIZE);
1423 put_unaligned_le16(fcs,
1424 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1427 l2cap_do_send(chan, tx_skb);
1430 static int l2cap_ertm_send(struct l2cap_chan *chan)
1432 struct sk_buff *skb, *tx_skb;
1433 u16 fcs;
1434 u32 control;
1435 int nsent = 0;
1437 if (chan->state != BT_CONNECTED)
1438 return -ENOTCONN;
1440 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1442 if (chan->remote_max_tx &&
1443 bt_cb(skb)->retries == chan->remote_max_tx) {
1444 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1445 break;
1448 tx_skb = skb_clone(skb, GFP_ATOMIC);
1450 bt_cb(skb)->retries++;
1452 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1453 control &= __get_sar_mask(chan);
1455 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1456 control |= __set_ctrl_final(chan);
1458 control |= __set_reqseq(chan, chan->buffer_seq);
1459 control |= __set_txseq(chan, chan->next_tx_seq);
1461 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1463 if (chan->fcs == L2CAP_FCS_CRC16) {
1464 fcs = crc16(0, (u8 *)skb->data,
1465 tx_skb->len - L2CAP_FCS_SIZE);
1466 put_unaligned_le16(fcs, skb->data +
1467 tx_skb->len - L2CAP_FCS_SIZE);
1470 l2cap_do_send(chan, tx_skb);
1472 __set_retrans_timer(chan);
1474 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1476 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1478 if (bt_cb(skb)->retries == 1) {
1479 chan->unacked_frames++;
1481 if (!nsent++)
1482 __clear_ack_timer(chan);
1485 chan->frames_sent++;
1487 if (skb_queue_is_last(&chan->tx_q, skb))
1488 chan->tx_send_head = NULL;
1489 else
1490 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1493 return nsent;
1496 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1498 int ret;
1500 if (!skb_queue_empty(&chan->tx_q))
1501 chan->tx_send_head = chan->tx_q.next;
1503 chan->next_tx_seq = chan->expected_ack_seq;
1504 ret = l2cap_ertm_send(chan);
1505 return ret;
1508 static void __l2cap_send_ack(struct l2cap_chan *chan)
1510 u32 control = 0;
1512 control |= __set_reqseq(chan, chan->buffer_seq);
1514 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1515 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1516 set_bit(CONN_RNR_SENT, &chan->conn_state);
1517 l2cap_send_sframe(chan, control);
1518 return;
1521 if (l2cap_ertm_send(chan) > 0)
1522 return;
1524 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1525 l2cap_send_sframe(chan, control);
1528 static void l2cap_send_ack(struct l2cap_chan *chan)
1530 __clear_ack_timer(chan);
1531 __l2cap_send_ack(chan);
1534 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1536 struct srej_list *tail;
1537 u32 control;
1539 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1540 control |= __set_ctrl_final(chan);
1542 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1543 control |= __set_reqseq(chan, tail->tx_seq);
1545 l2cap_send_sframe(chan, control);
1548 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1549 struct msghdr *msg, int len,
1550 int count, struct sk_buff *skb)
1552 struct l2cap_conn *conn = chan->conn;
1553 struct sk_buff **frag;
1554 int err, sent = 0;
1556 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1557 return -EFAULT;
1559 sent += count;
1560 len -= count;
1562 /* Continuation fragments (no L2CAP header) */
1563 frag = &skb_shinfo(skb)->frag_list;
1564 while (len) {
1565 count = min_t(unsigned int, conn->mtu, len);
1567 *frag = chan->ops->alloc_skb(chan, count,
1568 msg->msg_flags & MSG_DONTWAIT,
1569 &err);
1571 if (!*frag)
1572 return err;
1573 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1574 return -EFAULT;
1576 (*frag)->priority = skb->priority;
1578 sent += count;
1579 len -= count;
1581 frag = &(*frag)->next;
1584 return sent;
1587 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1588 struct msghdr *msg, size_t len,
1589 u32 priority)
1591 struct l2cap_conn *conn = chan->conn;
1592 struct sk_buff *skb;
1593 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1594 struct l2cap_hdr *lh;
1596 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1598 count = min_t(unsigned int, (conn->mtu - hlen), len);
1600 skb = chan->ops->alloc_skb(chan, count + hlen,
1601 msg->msg_flags & MSG_DONTWAIT, &err);
1603 if (!skb)
1604 return ERR_PTR(err);
1606 skb->priority = priority;
1608 /* Create L2CAP header */
1609 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1610 lh->cid = cpu_to_le16(chan->dcid);
1611 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1612 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1614 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1615 if (unlikely(err < 0)) {
1616 kfree_skb(skb);
1617 return ERR_PTR(err);
1619 return skb;
1622 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1623 struct msghdr *msg, size_t len,
1624 u32 priority)
1626 struct l2cap_conn *conn = chan->conn;
1627 struct sk_buff *skb;
1628 int err, count, hlen = L2CAP_HDR_SIZE;
1629 struct l2cap_hdr *lh;
1631 BT_DBG("chan %p len %d", chan, (int)len);
1633 count = min_t(unsigned int, (conn->mtu - hlen), len);
1635 skb = chan->ops->alloc_skb(chan, count + hlen,
1636 msg->msg_flags & MSG_DONTWAIT, &err);
1638 if (!skb)
1639 return ERR_PTR(err);
1641 skb->priority = priority;
1643 /* Create L2CAP header */
1644 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1645 lh->cid = cpu_to_le16(chan->dcid);
1646 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1648 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1649 if (unlikely(err < 0)) {
1650 kfree_skb(skb);
1651 return ERR_PTR(err);
1653 return skb;
1656 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1657 struct msghdr *msg, size_t len,
1658 u32 control, u16 sdulen)
1660 struct l2cap_conn *conn = chan->conn;
1661 struct sk_buff *skb;
1662 int err, count, hlen;
1663 struct l2cap_hdr *lh;
1665 BT_DBG("chan %p len %d", chan, (int)len);
1667 if (!conn)
1668 return ERR_PTR(-ENOTCONN);
1670 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1671 hlen = L2CAP_EXT_HDR_SIZE;
1672 else
1673 hlen = L2CAP_ENH_HDR_SIZE;
1675 if (sdulen)
1676 hlen += L2CAP_SDULEN_SIZE;
1678 if (chan->fcs == L2CAP_FCS_CRC16)
1679 hlen += L2CAP_FCS_SIZE;
1681 count = min_t(unsigned int, (conn->mtu - hlen), len);
1683 skb = chan->ops->alloc_skb(chan, count + hlen,
1684 msg->msg_flags & MSG_DONTWAIT, &err);
1686 if (!skb)
1687 return ERR_PTR(err);
1689 /* Create L2CAP header */
1690 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1691 lh->cid = cpu_to_le16(chan->dcid);
1692 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1694 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1696 if (sdulen)
1697 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1699 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1700 if (unlikely(err < 0)) {
1701 kfree_skb(skb);
1702 return ERR_PTR(err);
1705 if (chan->fcs == L2CAP_FCS_CRC16)
1706 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1708 bt_cb(skb)->retries = 0;
1709 return skb;
1712 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1714 struct sk_buff *skb;
1715 struct sk_buff_head sar_queue;
1716 u32 control;
1717 size_t size = 0;
1719 skb_queue_head_init(&sar_queue);
1720 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1721 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1722 if (IS_ERR(skb))
1723 return PTR_ERR(skb);
1725 __skb_queue_tail(&sar_queue, skb);
1726 len -= chan->remote_mps;
1727 size += chan->remote_mps;
1729 while (len > 0) {
1730 size_t buflen;
1732 if (len > chan->remote_mps) {
1733 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1734 buflen = chan->remote_mps;
1735 } else {
1736 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1737 buflen = len;
1740 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1741 if (IS_ERR(skb)) {
1742 skb_queue_purge(&sar_queue);
1743 return PTR_ERR(skb);
1746 __skb_queue_tail(&sar_queue, skb);
1747 len -= buflen;
1748 size += buflen;
1750 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1751 if (chan->tx_send_head == NULL)
1752 chan->tx_send_head = sar_queue.next;
1754 return size;
1757 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1758 u32 priority)
1760 struct sk_buff *skb;
1761 u32 control;
1762 int err;
1764 /* Connectionless channel */
1765 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1766 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1767 if (IS_ERR(skb))
1768 return PTR_ERR(skb);
1770 l2cap_do_send(chan, skb);
1771 return len;
1774 switch (chan->mode) {
1775 case L2CAP_MODE_BASIC:
1776 /* Check outgoing MTU */
1777 if (len > chan->omtu)
1778 return -EMSGSIZE;
1780 /* Create a basic PDU */
1781 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1782 if (IS_ERR(skb))
1783 return PTR_ERR(skb);
1785 l2cap_do_send(chan, skb);
1786 err = len;
1787 break;
1789 case L2CAP_MODE_ERTM:
1790 case L2CAP_MODE_STREAMING:
1791 /* Entire SDU fits into one PDU */
1792 if (len <= chan->remote_mps) {
1793 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1794 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1796 if (IS_ERR(skb))
1797 return PTR_ERR(skb);
1799 __skb_queue_tail(&chan->tx_q, skb);
1801 if (chan->tx_send_head == NULL)
1802 chan->tx_send_head = skb;
1804 } else {
1805 /* Segment SDU into multiples PDUs */
1806 err = l2cap_sar_segment_sdu(chan, msg, len);
1807 if (err < 0)
1808 return err;
1811 if (chan->mode == L2CAP_MODE_STREAMING) {
1812 l2cap_streaming_send(chan);
1813 err = len;
1814 break;
1817 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1818 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1819 err = len;
1820 break;
1823 err = l2cap_ertm_send(chan);
1824 if (err >= 0)
1825 err = len;
1827 break;
1829 default:
1830 BT_DBG("bad state %1.1x", chan->mode);
1831 err = -EBADFD;
1834 return err;
1837 /* Copy frame to all raw sockets on that connection */
1838 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1840 struct sk_buff *nskb;
1841 struct l2cap_chan *chan;
1843 BT_DBG("conn %p", conn);
1845 mutex_lock(&conn->chan_lock);
1847 list_for_each_entry(chan, &conn->chan_l, list) {
1848 struct sock *sk = chan->sk;
1849 if (chan->chan_type != L2CAP_CHAN_RAW)
1850 continue;
1852 /* Don't send frame to the socket it came from */
1853 if (skb->sk == sk)
1854 continue;
1855 nskb = skb_clone(skb, GFP_ATOMIC);
1856 if (!nskb)
1857 continue;
1859 if (chan->ops->recv(chan->data, nskb))
1860 kfree_skb(nskb);
1863 mutex_unlock(&conn->chan_lock);
1866 /* ---- L2CAP signalling commands ---- */
1867 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1868 u8 code, u8 ident, u16 dlen, void *data)
1870 struct sk_buff *skb, **frag;
1871 struct l2cap_cmd_hdr *cmd;
1872 struct l2cap_hdr *lh;
1873 int len, count;
1875 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1876 conn, code, ident, dlen);
1878 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1879 count = min_t(unsigned int, conn->mtu, len);
1881 skb = bt_skb_alloc(count, GFP_ATOMIC);
1882 if (!skb)
1883 return NULL;
1885 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1886 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1888 if (conn->hcon->type == LE_LINK)
1889 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1890 else
1891 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1893 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1894 cmd->code = code;
1895 cmd->ident = ident;
1896 cmd->len = cpu_to_le16(dlen);
1898 if (dlen) {
1899 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1900 memcpy(skb_put(skb, count), data, count);
1901 data += count;
1904 len -= skb->len;
1906 /* Continuation fragments (no L2CAP header) */
1907 frag = &skb_shinfo(skb)->frag_list;
1908 while (len) {
1909 count = min_t(unsigned int, conn->mtu, len);
1911 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1912 if (!*frag)
1913 goto fail;
1915 memcpy(skb_put(*frag, count), data, count);
1917 len -= count;
1918 data += count;
1920 frag = &(*frag)->next;
1923 return skb;
1925 fail:
1926 kfree_skb(skb);
1927 return NULL;
1930 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1932 struct l2cap_conf_opt *opt = *ptr;
1933 int len;
1935 len = L2CAP_CONF_OPT_SIZE + opt->len;
1936 *ptr += len;
1938 *type = opt->type;
1939 *olen = opt->len;
1941 switch (opt->len) {
1942 case 1:
1943 *val = *((u8 *) opt->val);
1944 break;
1946 case 2:
1947 *val = get_unaligned_le16(opt->val);
1948 break;
1950 case 4:
1951 *val = get_unaligned_le32(opt->val);
1952 break;
1954 default:
1955 *val = (unsigned long) opt->val;
1956 break;
1959 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1960 return len;
1963 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1965 struct l2cap_conf_opt *opt = *ptr;
1967 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1969 opt->type = type;
1970 opt->len = len;
1972 switch (len) {
1973 case 1:
1974 *((u8 *) opt->val) = val;
1975 break;
1977 case 2:
1978 put_unaligned_le16(val, opt->val);
1979 break;
1981 case 4:
1982 put_unaligned_le32(val, opt->val);
1983 break;
1985 default:
1986 memcpy(opt->val, (void *) val, len);
1987 break;
1990 *ptr += L2CAP_CONF_OPT_SIZE + len;
1993 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1995 struct l2cap_conf_efs efs;
1997 switch (chan->mode) {
1998 case L2CAP_MODE_ERTM:
1999 efs.id = chan->local_id;
2000 efs.stype = chan->local_stype;
2001 efs.msdu = cpu_to_le16(chan->local_msdu);
2002 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2003 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2004 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2005 break;
2007 case L2CAP_MODE_STREAMING:
2008 efs.id = 1;
2009 efs.stype = L2CAP_SERV_BESTEFFORT;
2010 efs.msdu = cpu_to_le16(chan->local_msdu);
2011 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2012 efs.acc_lat = 0;
2013 efs.flush_to = 0;
2014 break;
2016 default:
2017 return;
2020 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2021 (unsigned long) &efs);
2024 static void l2cap_ack_timeout(struct work_struct *work)
2026 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2027 ack_timer.work);
2029 BT_DBG("chan %p", chan);
2031 l2cap_chan_lock(chan);
2033 __l2cap_send_ack(chan);
2035 l2cap_chan_unlock(chan);
2037 l2cap_chan_put(chan);
2040 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2042 chan->expected_ack_seq = 0;
2043 chan->unacked_frames = 0;
2044 chan->buffer_seq = 0;
2045 chan->num_acked = 0;
2046 chan->frames_sent = 0;
2048 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2049 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2050 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2052 skb_queue_head_init(&chan->srej_q);
2054 INIT_LIST_HEAD(&chan->srej_l);
2057 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2059 switch (mode) {
2060 case L2CAP_MODE_STREAMING:
2061 case L2CAP_MODE_ERTM:
2062 if (l2cap_mode_supported(mode, remote_feat_mask))
2063 return mode;
2064 /* fall through */
2065 default:
2066 return L2CAP_MODE_BASIC;
2070 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2072 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2075 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2077 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2080 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2082 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2083 __l2cap_ews_supported(chan)) {
2084 /* use extended control field */
2085 set_bit(FLAG_EXT_CTRL, &chan->flags);
2086 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2087 } else {
2088 chan->tx_win = min_t(u16, chan->tx_win,
2089 L2CAP_DEFAULT_TX_WINDOW);
2090 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2094 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2096 struct l2cap_conf_req *req = data;
2097 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2098 void *ptr = req->data;
2099 u16 size;
2101 BT_DBG("chan %p", chan);
2103 if (chan->num_conf_req || chan->num_conf_rsp)
2104 goto done;
2106 switch (chan->mode) {
2107 case L2CAP_MODE_STREAMING:
2108 case L2CAP_MODE_ERTM:
2109 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2110 break;
2112 if (__l2cap_efs_supported(chan))
2113 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2115 /* fall through */
2116 default:
2117 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2118 break;
2121 done:
2122 if (chan->imtu != L2CAP_DEFAULT_MTU)
2123 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2125 switch (chan->mode) {
2126 case L2CAP_MODE_BASIC:
2127 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2128 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2129 break;
2131 rfc.mode = L2CAP_MODE_BASIC;
2132 rfc.txwin_size = 0;
2133 rfc.max_transmit = 0;
2134 rfc.retrans_timeout = 0;
2135 rfc.monitor_timeout = 0;
2136 rfc.max_pdu_size = 0;
2138 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2139 (unsigned long) &rfc);
2140 break;
2142 case L2CAP_MODE_ERTM:
2143 rfc.mode = L2CAP_MODE_ERTM;
2144 rfc.max_transmit = chan->max_tx;
2145 rfc.retrans_timeout = 0;
2146 rfc.monitor_timeout = 0;
2148 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2149 L2CAP_EXT_HDR_SIZE -
2150 L2CAP_SDULEN_SIZE -
2151 L2CAP_FCS_SIZE);
2152 rfc.max_pdu_size = cpu_to_le16(size);
2154 l2cap_txwin_setup(chan);
2156 rfc.txwin_size = min_t(u16, chan->tx_win,
2157 L2CAP_DEFAULT_TX_WINDOW);
2159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2160 (unsigned long) &rfc);
2162 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2163 l2cap_add_opt_efs(&ptr, chan);
2165 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2166 break;
2168 if (chan->fcs == L2CAP_FCS_NONE ||
2169 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2170 chan->fcs = L2CAP_FCS_NONE;
2171 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2174 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2176 chan->tx_win);
2177 break;
2179 case L2CAP_MODE_STREAMING:
2180 rfc.mode = L2CAP_MODE_STREAMING;
2181 rfc.txwin_size = 0;
2182 rfc.max_transmit = 0;
2183 rfc.retrans_timeout = 0;
2184 rfc.monitor_timeout = 0;
2186 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2187 L2CAP_EXT_HDR_SIZE -
2188 L2CAP_SDULEN_SIZE -
2189 L2CAP_FCS_SIZE);
2190 rfc.max_pdu_size = cpu_to_le16(size);
2192 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2193 (unsigned long) &rfc);
2195 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2196 l2cap_add_opt_efs(&ptr, chan);
2198 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2199 break;
2201 if (chan->fcs == L2CAP_FCS_NONE ||
2202 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2203 chan->fcs = L2CAP_FCS_NONE;
2204 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2206 break;
2209 req->dcid = cpu_to_le16(chan->dcid);
2210 req->flags = cpu_to_le16(0);
2212 return ptr - data;
2215 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2217 struct l2cap_conf_rsp *rsp = data;
2218 void *ptr = rsp->data;
2219 void *req = chan->conf_req;
2220 int len = chan->conf_len;
2221 int type, hint, olen;
2222 unsigned long val;
2223 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2224 struct l2cap_conf_efs efs;
2225 u8 remote_efs = 0;
2226 u16 mtu = L2CAP_DEFAULT_MTU;
2227 u16 result = L2CAP_CONF_SUCCESS;
2228 u16 size;
2230 BT_DBG("chan %p", chan);
2232 while (len >= L2CAP_CONF_OPT_SIZE) {
2233 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2235 hint = type & L2CAP_CONF_HINT;
2236 type &= L2CAP_CONF_MASK;
2238 switch (type) {
2239 case L2CAP_CONF_MTU:
2240 mtu = val;
2241 break;
2243 case L2CAP_CONF_FLUSH_TO:
2244 chan->flush_to = val;
2245 break;
2247 case L2CAP_CONF_QOS:
2248 break;
2250 case L2CAP_CONF_RFC:
2251 if (olen == sizeof(rfc))
2252 memcpy(&rfc, (void *) val, olen);
2253 break;
2255 case L2CAP_CONF_FCS:
2256 if (val == L2CAP_FCS_NONE)
2257 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2258 break;
2260 case L2CAP_CONF_EFS:
2261 remote_efs = 1;
2262 if (olen == sizeof(efs))
2263 memcpy(&efs, (void *) val, olen);
2264 break;
2266 case L2CAP_CONF_EWS:
2267 if (!enable_hs)
2268 return -ECONNREFUSED;
2270 set_bit(FLAG_EXT_CTRL, &chan->flags);
2271 set_bit(CONF_EWS_RECV, &chan->conf_state);
2272 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2273 chan->remote_tx_win = val;
2274 break;
2276 default:
2277 if (hint)
2278 break;
2280 result = L2CAP_CONF_UNKNOWN;
2281 *((u8 *) ptr++) = type;
2282 break;
2286 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2287 goto done;
2289 switch (chan->mode) {
2290 case L2CAP_MODE_STREAMING:
2291 case L2CAP_MODE_ERTM:
2292 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2293 chan->mode = l2cap_select_mode(rfc.mode,
2294 chan->conn->feat_mask);
2295 break;
2298 if (remote_efs) {
2299 if (__l2cap_efs_supported(chan))
2300 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2301 else
2302 return -ECONNREFUSED;
2305 if (chan->mode != rfc.mode)
2306 return -ECONNREFUSED;
2308 break;
2311 done:
2312 if (chan->mode != rfc.mode) {
2313 result = L2CAP_CONF_UNACCEPT;
2314 rfc.mode = chan->mode;
2316 if (chan->num_conf_rsp == 1)
2317 return -ECONNREFUSED;
2319 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2320 sizeof(rfc), (unsigned long) &rfc);
2323 if (result == L2CAP_CONF_SUCCESS) {
2324 /* Configure output options and let the other side know
2325 * which ones we don't like. */
2327 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2328 result = L2CAP_CONF_UNACCEPT;
2329 else {
2330 chan->omtu = mtu;
2331 set_bit(CONF_MTU_DONE, &chan->conf_state);
2333 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2335 if (remote_efs) {
2336 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2337 efs.stype != L2CAP_SERV_NOTRAFIC &&
2338 efs.stype != chan->local_stype) {
2340 result = L2CAP_CONF_UNACCEPT;
2342 if (chan->num_conf_req >= 1)
2343 return -ECONNREFUSED;
2345 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2346 sizeof(efs),
2347 (unsigned long) &efs);
2348 } else {
2349 /* Send PENDING Conf Rsp */
2350 result = L2CAP_CONF_PENDING;
2351 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2355 switch (rfc.mode) {
2356 case L2CAP_MODE_BASIC:
2357 chan->fcs = L2CAP_FCS_NONE;
2358 set_bit(CONF_MODE_DONE, &chan->conf_state);
2359 break;
2361 case L2CAP_MODE_ERTM:
2362 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2363 chan->remote_tx_win = rfc.txwin_size;
2364 else
2365 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2367 chan->remote_max_tx = rfc.max_transmit;
2369 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2370 chan->conn->mtu -
2371 L2CAP_EXT_HDR_SIZE -
2372 L2CAP_SDULEN_SIZE -
2373 L2CAP_FCS_SIZE);
2374 rfc.max_pdu_size = cpu_to_le16(size);
2375 chan->remote_mps = size;
2377 rfc.retrans_timeout =
2378 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2379 rfc.monitor_timeout =
2380 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2382 set_bit(CONF_MODE_DONE, &chan->conf_state);
2384 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2385 sizeof(rfc), (unsigned long) &rfc);
2387 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2388 chan->remote_id = efs.id;
2389 chan->remote_stype = efs.stype;
2390 chan->remote_msdu = le16_to_cpu(efs.msdu);
2391 chan->remote_flush_to =
2392 le32_to_cpu(efs.flush_to);
2393 chan->remote_acc_lat =
2394 le32_to_cpu(efs.acc_lat);
2395 chan->remote_sdu_itime =
2396 le32_to_cpu(efs.sdu_itime);
2397 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2398 sizeof(efs), (unsigned long) &efs);
2400 break;
2402 case L2CAP_MODE_STREAMING:
2403 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2404 chan->conn->mtu -
2405 L2CAP_EXT_HDR_SIZE -
2406 L2CAP_SDULEN_SIZE -
2407 L2CAP_FCS_SIZE);
2408 rfc.max_pdu_size = cpu_to_le16(size);
2409 chan->remote_mps = size;
2411 set_bit(CONF_MODE_DONE, &chan->conf_state);
2413 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2414 sizeof(rfc), (unsigned long) &rfc);
2416 break;
2418 default:
2419 result = L2CAP_CONF_UNACCEPT;
2421 memset(&rfc, 0, sizeof(rfc));
2422 rfc.mode = chan->mode;
2425 if (result == L2CAP_CONF_SUCCESS)
2426 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2428 rsp->scid = cpu_to_le16(chan->dcid);
2429 rsp->result = cpu_to_le16(result);
2430 rsp->flags = cpu_to_le16(0x0000);
2432 return ptr - data;
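/* Walk the peer's Configure Response and build the follow-up Configure
 * Request in 'data': adjusted options (MTU, flush timeout, RFC, extended
 * window, EFS) are echoed back with the values we are prepared to use,
 * and an unacceptable mode change is refused with -ECONNREFUSED.
 * Returns the total length of the request written to 'data'.
 */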
2435 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2437 struct l2cap_conf_req *req = data;
2438 void *ptr = req->data;
2439 int type, olen;
2440 unsigned long val;
2441 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2442 struct l2cap_conf_efs efs;
2444 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2446 while (len >= L2CAP_CONF_OPT_SIZE) {
2447 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2449 switch (type) {
2450 case L2CAP_CONF_MTU:
2451 if (val < L2CAP_DEFAULT_MIN_MTU) {
2452 *result = L2CAP_CONF_UNACCEPT;
2453 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2454 } else
2455 chan->imtu = val;
2456 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2457 break;
2459 case L2CAP_CONF_FLUSH_TO:
2460 chan->flush_to = val;
2461 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2462 2, chan->flush_to);
2463 break;
2465 case L2CAP_CONF_RFC:
2466 if (olen == sizeof(rfc))
2467 memcpy(&rfc, (void *)val, olen);
2469 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2470 rfc.mode != chan->mode)
2471 return -ECONNREFUSED;
2473 chan->fcs = 0;
2475 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2476 sizeof(rfc), (unsigned long) &rfc);
2477 break;
2479 case L2CAP_CONF_EWS:
2480 chan->tx_win = min_t(u16, val,
2481 L2CAP_DEFAULT_EXT_WINDOW);
2482 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2483 chan->tx_win);
2484 break;
2486 case L2CAP_CONF_EFS:
2487 if (olen == sizeof(efs))
2488 memcpy(&efs, (void *)val, olen);
2490 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2491 efs.stype != L2CAP_SERV_NOTRAFIC &&
2492 efs.stype != chan->local_stype)
2493 return -ECONNREFUSED;
2495 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2496 sizeof(efs), (unsigned long) &efs);
2497 break;
2501 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2502 return -ECONNREFUSED;
2504 chan->mode = rfc.mode;
2506 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2507 switch (rfc.mode) {
2508 case L2CAP_MODE_ERTM:
2509 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2510 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2511 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2513 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2514 chan->local_msdu = le16_to_cpu(efs.msdu);
2515 chan->local_sdu_itime =
2516 le32_to_cpu(efs.sdu_itime);
2517 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2518 chan->local_flush_to =
2519 le32_to_cpu(efs.flush_to);
2521 break;
2523 case L2CAP_MODE_STREAMING:
2524 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2528 req->dcid = cpu_to_le16(chan->dcid);
2529 req->flags = cpu_to_le16(0x0000);
2531 return ptr - data;
2534 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2536 struct l2cap_conf_rsp *rsp = data;
2537 void *ptr = rsp->data;
2539 BT_DBG("chan %p", chan);
2541 rsp->scid = cpu_to_le16(chan->dcid);
2542 rsp->result = cpu_to_le16(result);
2543 rsp->flags = cpu_to_le16(flags);
2545 return ptr - data;
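/* Send the deferred Connection Response for a channel that was accepted
 * after authorization, and follow it with our first Configure Request if
 * one has not been sent yet.
 */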
2548 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2550 struct l2cap_conn_rsp rsp;
2551 struct l2cap_conn *conn = chan->conn;
2552 u8 buf[128];
2554 rsp.scid = cpu_to_le16(chan->dcid);
2555 rsp.dcid = cpu_to_le16(chan->scid);
2556 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2557 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2558 l2cap_send_cmd(conn, chan->ident,
2559 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2561 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2562 return;
2564 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2565 l2cap_build_conf_req(chan, buf), buf);
2566 chan->num_conf_req++;
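/* Extract the RFC option from a successful Configure Response and cache
 * the negotiated ERTM/streaming parameters (retransmission and monitor
 * timeouts, MPS).  If the remote did not include an RFC option, fall
 * back to sane defaults.
 */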
2569 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2571 int type, olen;
2572 unsigned long val;
2573 struct l2cap_conf_rfc rfc;
2575 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2577 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2578 return;
2580 while (len >= L2CAP_CONF_OPT_SIZE) {
2581 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2583 switch (type) {
2584 case L2CAP_CONF_RFC:
2585 if (olen == sizeof(rfc))
2586 memcpy(&rfc, (void *)val, olen);
2587 goto done;
2591 /* Use sane default values in case a misbehaving remote device
2592 * did not send an RFC option.
2594 rfc.mode = chan->mode;
2595 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2596 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2597 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2599 BT_ERR("Expected RFC option was not found, using defaults");
2601 done:
2602 switch (rfc.mode) {
2603 case L2CAP_MODE_ERTM:
2604 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2605 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2606 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2607 break;
2608 case L2CAP_MODE_STREAMING:
2609 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2613 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2615 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2617 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2618 return 0;
2620 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2621 cmd->ident == conn->info_ident) {
2622 cancel_delayed_work(&conn->info_timer);
2624 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2625 conn->info_ident = 0;
2627 l2cap_conn_start(conn);
2630 return 0;
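/* Handle an incoming Connection Request: find the listening channel for
 * the requested PSM, check link security and the accept backlog, create
 * the new channel and reply with a Connection Response (success, pending
 * or refusal).  An Information Request and the initial Configure Request
 * are sent when appropriate.
 */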
2633 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2635 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2636 struct l2cap_conn_rsp rsp;
2637 struct l2cap_chan *chan = NULL, *pchan;
2638 struct sock *parent, *sk = NULL;
2639 int result, status = L2CAP_CS_NO_INFO;
2641 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2642 __le16 psm = req->psm;
2644 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2646 /* Check if we have a socket listening on this psm */
2647 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2648 if (!pchan) {
2649 result = L2CAP_CR_BAD_PSM;
2650 goto sendresp;
2653 parent = pchan->sk;
2655 mutex_lock(&conn->chan_lock);
2656 lock_sock(parent);
2658 /* Check if the ACL is secure enough (if not SDP) */
2659 if (psm != cpu_to_le16(0x0001) &&
2660 !hci_conn_check_link_mode(conn->hcon)) {
2661 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2662 result = L2CAP_CR_SEC_BLOCK;
2663 goto response;
2666 result = L2CAP_CR_NO_MEM;
2668 /* Check for backlog size */
2669 if (sk_acceptq_is_full(parent)) {
2670 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2671 goto response;
2674 chan = pchan->ops->new_connection(pchan->data);
2675 if (!chan)
2676 goto response;
2678 sk = chan->sk;
2680 /* Check if we already have a channel with that dcid */
2681 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2682 sock_set_flag(sk, SOCK_ZAPPED);
2683 chan->ops->close(chan->data);
2684 goto response;
2687 hci_conn_hold(conn->hcon);
2689 bacpy(&bt_sk(sk)->src, conn->src);
2690 bacpy(&bt_sk(sk)->dst, conn->dst);
2691 chan->psm = psm;
2692 chan->dcid = scid;
2694 bt_accept_enqueue(parent, sk);
2696 __l2cap_chan_add(conn, chan);
2698 dcid = chan->scid;
2700 __set_chan_timer(chan, sk->sk_sndtimeo);
2702 chan->ident = cmd->ident;
2704 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2705 if (l2cap_chan_check_security(chan)) {
2706 if (bt_sk(sk)->defer_setup) {
2707 __l2cap_state_change(chan, BT_CONNECT2);
2708 result = L2CAP_CR_PEND;
2709 status = L2CAP_CS_AUTHOR_PEND;
2710 parent->sk_data_ready(parent, 0);
2711 } else {
2712 __l2cap_state_change(chan, BT_CONFIG);
2713 result = L2CAP_CR_SUCCESS;
2714 status = L2CAP_CS_NO_INFO;
2716 } else {
2717 __l2cap_state_change(chan, BT_CONNECT2);
2718 result = L2CAP_CR_PEND;
2719 status = L2CAP_CS_AUTHEN_PEND;
2721 } else {
2722 __l2cap_state_change(chan, BT_CONNECT2);
2723 result = L2CAP_CR_PEND;
2724 status = L2CAP_CS_NO_INFO;
2727 response:
2728 release_sock(parent);
2729 mutex_unlock(&conn->chan_lock);
2731 sendresp:
2732 rsp.scid = cpu_to_le16(scid);
2733 rsp.dcid = cpu_to_le16(dcid);
2734 rsp.result = cpu_to_le16(result);
2735 rsp.status = cpu_to_le16(status);
2736 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2738 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2739 struct l2cap_info_req info;
2740 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2742 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2743 conn->info_ident = l2cap_get_ident(conn);
2745 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2747 l2cap_send_cmd(conn, conn->info_ident,
2748 L2CAP_INFO_REQ, sizeof(info), &info);
2751 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2752 result == L2CAP_CR_SUCCESS) {
2753 u8 buf[128];
2754 set_bit(CONF_REQ_SENT, &chan->conf_state);
2755 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2756 l2cap_build_conf_req(chan, buf), buf);
2757 chan->num_conf_req++;
2760 return 0;
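/* Handle a Connection Response for an outgoing connection: on success
 * store the assigned DCID, move to BT_CONFIG and send our Configure
 * Request; on a pending result just mark the channel; any other result
 * tears the channel down.
 */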
2763 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2765 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2766 u16 scid, dcid, result, status;
2767 struct l2cap_chan *chan;
2768 u8 req[128];
2769 int err;
2771 scid = __le16_to_cpu(rsp->scid);
2772 dcid = __le16_to_cpu(rsp->dcid);
2773 result = __le16_to_cpu(rsp->result);
2774 status = __le16_to_cpu(rsp->status);
2776 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2777 dcid, scid, result, status);
2779 mutex_lock(&conn->chan_lock);
2781 if (scid) {
2782 chan = __l2cap_get_chan_by_scid(conn, scid);
2783 if (!chan) {
2784 err = -EFAULT;
2785 goto unlock;
2787 } else {
2788 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2789 if (!chan) {
2790 err = -EFAULT;
2791 goto unlock;
2795 err = 0;
2797 l2cap_chan_lock(chan);
2799 switch (result) {
2800 case L2CAP_CR_SUCCESS:
2801 l2cap_state_change(chan, BT_CONFIG);
2802 chan->ident = 0;
2803 chan->dcid = dcid;
2804 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2806 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2807 break;
2809 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2810 l2cap_build_conf_req(chan, req), req);
2811 chan->num_conf_req++;
2812 break;
2814 case L2CAP_CR_PEND:
2815 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2816 break;
2818 default:
2819 l2cap_chan_del(chan, ECONNREFUSED);
2820 break;
2823 l2cap_chan_unlock(chan);
2825 unlock:
2826 mutex_unlock(&conn->chan_lock);
2828 return err;
2831 static inline void set_default_fcs(struct l2cap_chan *chan)
2833 /* FCS is enabled only in ERTM or streaming mode, if one or both
2834 * sides request it.
2836 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2837 chan->fcs = L2CAP_FCS_NONE;
2838 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2839 chan->fcs = L2CAP_FCS_CRC16;
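/* Handle a Configure Request: reassemble a possibly fragmented option
 * set in chan->conf_req, parse it once complete and send the Configure
 * Response.  When both directions are configured the channel moves to
 * BT_CONNECTED and ERTM state is initialised if needed.
 */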
2842 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2844 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2845 u16 dcid, flags;
2846 u8 rsp[64];
2847 struct l2cap_chan *chan;
2848 int len;
2850 dcid = __le16_to_cpu(req->dcid);
2851 flags = __le16_to_cpu(req->flags);
2853 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2855 chan = l2cap_get_chan_by_scid(conn, dcid);
2856 if (!chan)
2857 return -ENOENT;
2859 l2cap_chan_lock(chan);
2861 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2862 struct l2cap_cmd_rej_cid rej;
2864 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2865 rej.scid = cpu_to_le16(chan->scid);
2866 rej.dcid = cpu_to_le16(chan->dcid);
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2869 sizeof(rej), &rej);
2870 goto unlock;
2873 /* Reject if config buffer is too small. */
2874 len = cmd_len - sizeof(*req);
2875 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2876 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2877 l2cap_build_conf_rsp(chan, rsp,
2878 L2CAP_CONF_REJECT, flags), rsp);
2879 goto unlock;
2882 /* Store config. */
2883 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2884 chan->conf_len += len;
2886 if (flags & 0x0001) {
2887 /* Incomplete config. Send empty response. */
2888 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2889 l2cap_build_conf_rsp(chan, rsp,
2890 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2891 goto unlock;
2894 /* Complete config. */
2895 len = l2cap_parse_conf_req(chan, rsp);
2896 if (len < 0) {
2897 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2898 goto unlock;
2901 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2902 chan->num_conf_rsp++;
2904 /* Reset config buffer. */
2905 chan->conf_len = 0;
2907 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2908 goto unlock;
2910 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2911 set_default_fcs(chan);
2913 l2cap_state_change(chan, BT_CONNECTED);
2915 chan->next_tx_seq = 0;
2916 chan->expected_tx_seq = 0;
2917 skb_queue_head_init(&chan->tx_q);
2918 if (chan->mode == L2CAP_MODE_ERTM)
2919 l2cap_ertm_init(chan);
2921 l2cap_chan_ready(chan);
2922 goto unlock;
2925 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2926 u8 buf[64];
2927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2928 l2cap_build_conf_req(chan, buf), buf);
2929 chan->num_conf_req++;
2932 /* Got Conf Rsp PENDING from remote side and assume we sent
2933 Conf Rsp PENDING in the code above */
2934 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2935 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2937 /* check compatibility */
2939 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2940 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2942 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2943 l2cap_build_conf_rsp(chan, rsp,
2944 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2947 unlock:
2948 l2cap_chan_unlock(chan);
2949 return 0;
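/* Handle a Configure Response: on success record the negotiated RFC
 * parameters, on PENDING or UNACCEPT build and send a new Configure
 * Request, and on any other result (or after too many attempts)
 * disconnect the channel.  Once both sides have finished configuration
 * the channel becomes connected.
 */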
2952 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2954 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2955 u16 scid, flags, result;
2956 struct l2cap_chan *chan;
2957 int len = cmd->len - sizeof(*rsp);
2959 scid = __le16_to_cpu(rsp->scid);
2960 flags = __le16_to_cpu(rsp->flags);
2961 result = __le16_to_cpu(rsp->result);
2963 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2964 scid, flags, result);
2966 chan = l2cap_get_chan_by_scid(conn, scid);
2967 if (!chan)
2968 return 0;
2970 l2cap_chan_lock(chan);
2972 switch (result) {
2973 case L2CAP_CONF_SUCCESS:
2974 l2cap_conf_rfc_get(chan, rsp->data, len);
2975 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2976 break;
2978 case L2CAP_CONF_PENDING:
2979 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2981 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2982 char buf[64];
2984 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2985 buf, &result);
2986 if (len < 0) {
2987 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2988 goto done;
2991 /* check compatibility */
2993 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2994 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2996 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2997 l2cap_build_conf_rsp(chan, buf,
2998 L2CAP_CONF_SUCCESS, 0x0000), buf);
3000 goto done;
3002 case L2CAP_CONF_UNACCEPT:
3003 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3004 char req[64];
3006 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3007 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3008 goto done;
3011 /* throw out any old stored conf requests */
3012 result = L2CAP_CONF_SUCCESS;
3013 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3014 req, &result);
3015 if (len < 0) {
3016 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3017 goto done;
3020 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3021 L2CAP_CONF_REQ, len, req);
3022 chan->num_conf_req++;
3023 if (result != L2CAP_CONF_SUCCESS)
3024 goto done;
3025 break;
3028 default:
3029 l2cap_chan_set_err(chan, ECONNRESET);
3031 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3032 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3033 goto done;
3036 if (flags & 0x01)
3037 goto done;
3039 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3041 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3042 set_default_fcs(chan);
3044 l2cap_state_change(chan, BT_CONNECTED);
3045 chan->next_tx_seq = 0;
3046 chan->expected_tx_seq = 0;
3047 skb_queue_head_init(&chan->tx_q);
3048 if (chan->mode == L2CAP_MODE_ERTM)
3049 l2cap_ertm_init(chan);
3051 l2cap_chan_ready(chan);
3054 done:
3055 l2cap_chan_unlock(chan);
3056 return 0;
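/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response, shut down the backing socket and remove the channel.
 */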
3059 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3061 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3062 struct l2cap_disconn_rsp rsp;
3063 u16 dcid, scid;
3064 struct l2cap_chan *chan;
3065 struct sock *sk;
3067 scid = __le16_to_cpu(req->scid);
3068 dcid = __le16_to_cpu(req->dcid);
3070 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3072 mutex_lock(&conn->chan_lock);
3074 chan = __l2cap_get_chan_by_scid(conn, dcid);
3075 if (!chan) {
3076 mutex_unlock(&conn->chan_lock);
3077 return 0;
3080 l2cap_chan_lock(chan);
3082 sk = chan->sk;
3084 rsp.dcid = cpu_to_le16(chan->scid);
3085 rsp.scid = cpu_to_le16(chan->dcid);
3086 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3088 lock_sock(sk);
3089 sk->sk_shutdown = SHUTDOWN_MASK;
3090 release_sock(sk);
3092 l2cap_chan_del(chan, ECONNRESET);
3094 l2cap_chan_unlock(chan);
3096 chan->ops->close(chan->data);
3098 mutex_unlock(&conn->chan_lock);
3100 return 0;
3103 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3105 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3106 u16 dcid, scid;
3107 struct l2cap_chan *chan;
3109 scid = __le16_to_cpu(rsp->scid);
3110 dcid = __le16_to_cpu(rsp->dcid);
3112 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3114 mutex_lock(&conn->chan_lock);
3116 chan = __l2cap_get_chan_by_scid(conn, scid);
3117 if (!chan) {
3118 mutex_unlock(&conn->chan_lock);
3119 return 0;
3122 l2cap_chan_lock(chan);
3124 l2cap_chan_del(chan, 0);
3126 l2cap_chan_unlock(chan);
3128 chan->ops->close(chan->data);
3130 mutex_unlock(&conn->chan_lock);
3132 return 0;
3135 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3137 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3138 u16 type;
3140 type = __le16_to_cpu(req->type);
3142 BT_DBG("type 0x%4.4x", type);
3144 if (type == L2CAP_IT_FEAT_MASK) {
3145 u8 buf[8];
3146 u32 feat_mask = l2cap_feat_mask;
3147 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3148 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3149 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3150 if (!disable_ertm)
3151 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3152 | L2CAP_FEAT_FCS;
3153 if (enable_hs)
3154 feat_mask |= L2CAP_FEAT_EXT_FLOW
3155 | L2CAP_FEAT_EXT_WINDOW;
3157 put_unaligned_le32(feat_mask, rsp->data);
3158 l2cap_send_cmd(conn, cmd->ident,
3159 L2CAP_INFO_RSP, sizeof(buf), buf);
3160 } else if (type == L2CAP_IT_FIXED_CHAN) {
3161 u8 buf[12];
3162 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3164 if (enable_hs)
3165 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3166 else
3167 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3169 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3170 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3171 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3172 l2cap_send_cmd(conn, cmd->ident,
3173 L2CAP_INFO_RSP, sizeof(buf), buf);
3174 } else {
3175 struct l2cap_info_rsp rsp;
3176 rsp.type = cpu_to_le16(type);
3177 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3178 l2cap_send_cmd(conn, cmd->ident,
3179 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3182 return 0;
3185 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3187 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3188 u16 type, result;
3190 type = __le16_to_cpu(rsp->type);
3191 result = __le16_to_cpu(rsp->result);
3193 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3195 /* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
3196 if (cmd->ident != conn->info_ident ||
3197 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3198 return 0;
3200 cancel_delayed_work(&conn->info_timer);
3202 if (result != L2CAP_IR_SUCCESS) {
3203 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3204 conn->info_ident = 0;
3206 l2cap_conn_start(conn);
3208 return 0;
3211 switch (type) {
3212 case L2CAP_IT_FEAT_MASK:
3213 conn->feat_mask = get_unaligned_le32(rsp->data);
3215 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3216 struct l2cap_info_req req;
3217 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3219 conn->info_ident = l2cap_get_ident(conn);
3221 l2cap_send_cmd(conn, conn->info_ident,
3222 L2CAP_INFO_REQ, sizeof(req), &req);
3223 } else {
3224 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3225 conn->info_ident = 0;
3227 l2cap_conn_start(conn);
3229 break;
3231 case L2CAP_IT_FIXED_CHAN:
3232 conn->fixed_chan_mask = rsp->data[0];
3233 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3234 conn->info_ident = 0;
3236 l2cap_conn_start(conn);
3237 break;
3240 return 0;
3243 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3244 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3245 void *data)
3247 struct l2cap_create_chan_req *req = data;
3248 struct l2cap_create_chan_rsp rsp;
3249 u16 psm, scid;
3251 if (cmd_len != sizeof(*req))
3252 return -EPROTO;
3254 if (!enable_hs)
3255 return -EINVAL;
3257 psm = le16_to_cpu(req->psm);
3258 scid = le16_to_cpu(req->scid);
3260 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3262 /* Placeholder: Always reject */
3263 rsp.dcid = 0;
3264 rsp.scid = cpu_to_le16(scid);
3265 rsp.result = L2CAP_CR_NO_MEM;
3266 rsp.status = L2CAP_CS_NO_INFO;
3268 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3269 sizeof(rsp), &rsp);
3271 return 0;
3274 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3275 struct l2cap_cmd_hdr *cmd, void *data)
3277 BT_DBG("conn %p", conn);
3279 return l2cap_connect_rsp(conn, cmd, data);
3282 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3283 u16 icid, u16 result)
3285 struct l2cap_move_chan_rsp rsp;
3287 BT_DBG("icid %d, result %d", icid, result);
3289 rsp.icid = cpu_to_le16(icid);
3290 rsp.result = cpu_to_le16(result);
3292 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3295 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3296 struct l2cap_chan *chan, u16 icid, u16 result)
3298 struct l2cap_move_chan_cfm cfm;
3299 u8 ident;
3301 BT_DBG("icid %d, result %d", icid, result);
3303 ident = l2cap_get_ident(conn);
3304 if (chan)
3305 chan->ident = ident;
3307 cfm.icid = cpu_to_le16(icid);
3308 cfm.result = cpu_to_le16(result);
3310 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3313 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3314 u16 icid)
3316 struct l2cap_move_chan_cfm_rsp rsp;
3318 BT_DBG("icid %d", icid);
3320 rsp.icid = cpu_to_le16(icid);
3321 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3324 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3325 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3327 struct l2cap_move_chan_req *req = data;
3328 u16 icid = 0;
3329 u16 result = L2CAP_MR_NOT_ALLOWED;
3331 if (cmd_len != sizeof(*req))
3332 return -EPROTO;
3334 icid = le16_to_cpu(req->icid);
3336 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3338 if (!enable_hs)
3339 return -EINVAL;
3341 /* Placeholder: Always refuse */
3342 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3344 return 0;
3347 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3348 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3350 struct l2cap_move_chan_rsp *rsp = data;
3351 u16 icid, result;
3353 if (cmd_len != sizeof(*rsp))
3354 return -EPROTO;
3356 icid = le16_to_cpu(rsp->icid);
3357 result = le16_to_cpu(rsp->result);
3359 BT_DBG("icid %d, result %d", icid, result);
3361 /* Placeholder: Always unconfirmed */
3362 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3364 return 0;
3367 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3368 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3370 struct l2cap_move_chan_cfm *cfm = data;
3371 u16 icid, result;
3373 if (cmd_len != sizeof(*cfm))
3374 return -EPROTO;
3376 icid = le16_to_cpu(cfm->icid);
3377 result = le16_to_cpu(cfm->result);
3379 BT_DBG("icid %d, result %d", icid, result);
3381 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3383 return 0;
3386 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3387 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3389 struct l2cap_move_chan_cfm_rsp *rsp = data;
3390 u16 icid;
3392 if (cmd_len != sizeof(*rsp))
3393 return -EPROTO;
3395 icid = le16_to_cpu(rsp->icid);
3397 BT_DBG("icid %d", icid);
3399 return 0;
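/* Sanity-check LE connection parameters: connection interval 7.5 ms to
 * 4 s (units of 1.25 ms), supervision timeout 100 ms to 32 s (units of
 * 10 ms), slave latency at most 499 and low enough relative to the
 * interval and timeout that the link cannot time out between
 * connection events.
 */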
3402 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3403 u16 to_multiplier)
3405 u16 max_latency;
3407 if (min > max || min < 6 || max > 3200)
3408 return -EINVAL;
3410 if (to_multiplier < 10 || to_multiplier > 3200)
3411 return -EINVAL;
3413 if (max >= to_multiplier * 8)
3414 return -EINVAL;
3416 max_latency = (to_multiplier * 8 / max) - 1;
3417 if (latency > 499 || latency > max_latency)
3418 return -EINVAL;
3420 return 0;
3423 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3424 struct l2cap_cmd_hdr *cmd, u8 *data)
3426 struct hci_conn *hcon = conn->hcon;
3427 struct l2cap_conn_param_update_req *req;
3428 struct l2cap_conn_param_update_rsp rsp;
3429 u16 min, max, latency, to_multiplier, cmd_len;
3430 int err;
3432 if (!(hcon->link_mode & HCI_LM_MASTER))
3433 return -EINVAL;
3435 cmd_len = __le16_to_cpu(cmd->len);
3436 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3437 return -EPROTO;
3439 req = (struct l2cap_conn_param_update_req *) data;
3440 min = __le16_to_cpu(req->min);
3441 max = __le16_to_cpu(req->max);
3442 latency = __le16_to_cpu(req->latency);
3443 to_multiplier = __le16_to_cpu(req->to_multiplier);
3445 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3446 min, max, latency, to_multiplier);
3448 memset(&rsp, 0, sizeof(rsp));
3450 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3451 if (err)
3452 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3453 else
3454 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3456 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3457 sizeof(rsp), &rsp);
3459 if (!err)
3460 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3462 return 0;
3465 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3466 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3468 int err = 0;
3470 switch (cmd->code) {
3471 case L2CAP_COMMAND_REJ:
3472 l2cap_command_rej(conn, cmd, data);
3473 break;
3475 case L2CAP_CONN_REQ:
3476 err = l2cap_connect_req(conn, cmd, data);
3477 break;
3479 case L2CAP_CONN_RSP:
3480 err = l2cap_connect_rsp(conn, cmd, data);
3481 break;
3483 case L2CAP_CONF_REQ:
3484 err = l2cap_config_req(conn, cmd, cmd_len, data);
3485 break;
3487 case L2CAP_CONF_RSP:
3488 err = l2cap_config_rsp(conn, cmd, data);
3489 break;
3491 case L2CAP_DISCONN_REQ:
3492 err = l2cap_disconnect_req(conn, cmd, data);
3493 break;
3495 case L2CAP_DISCONN_RSP:
3496 err = l2cap_disconnect_rsp(conn, cmd, data);
3497 break;
3499 case L2CAP_ECHO_REQ:
3500 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3501 break;
3503 case L2CAP_ECHO_RSP:
3504 break;
3506 case L2CAP_INFO_REQ:
3507 err = l2cap_information_req(conn, cmd, data);
3508 break;
3510 case L2CAP_INFO_RSP:
3511 err = l2cap_information_rsp(conn, cmd, data);
3512 break;
3514 case L2CAP_CREATE_CHAN_REQ:
3515 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3516 break;
3518 case L2CAP_CREATE_CHAN_RSP:
3519 err = l2cap_create_channel_rsp(conn, cmd, data);
3520 break;
3522 case L2CAP_MOVE_CHAN_REQ:
3523 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3524 break;
3526 case L2CAP_MOVE_CHAN_RSP:
3527 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3528 break;
3530 case L2CAP_MOVE_CHAN_CFM:
3531 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3532 break;
3534 case L2CAP_MOVE_CHAN_CFM_RSP:
3535 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3536 break;
3538 default:
3539 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3540 err = -EINVAL;
3541 break;
3544 return err;
3547 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3548 struct l2cap_cmd_hdr *cmd, u8 *data)
3550 switch (cmd->code) {
3551 case L2CAP_COMMAND_REJ:
3552 return 0;
3554 case L2CAP_CONN_PARAM_UPDATE_REQ:
3555 return l2cap_conn_param_update_req(conn, cmd, data);
3557 case L2CAP_CONN_PARAM_UPDATE_RSP:
3558 return 0;
3560 default:
3561 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3562 return -EINVAL;
3566 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3567 struct sk_buff *skb)
3569 u8 *data = skb->data;
3570 int len = skb->len;
3571 struct l2cap_cmd_hdr cmd;
3572 int err;
3574 l2cap_raw_recv(conn, skb);
3576 while (len >= L2CAP_CMD_HDR_SIZE) {
3577 u16 cmd_len;
3578 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3579 data += L2CAP_CMD_HDR_SIZE;
3580 len -= L2CAP_CMD_HDR_SIZE;
3582 cmd_len = le16_to_cpu(cmd.len);
3584 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3586 if (cmd_len > len || !cmd.ident) {
3587 BT_DBG("corrupted command");
3588 break;
3591 if (conn->hcon->type == LE_LINK)
3592 err = l2cap_le_sig_cmd(conn, &cmd, data);
3593 else
3594 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3596 if (err) {
3597 struct l2cap_cmd_rej_unk rej;
3599 BT_ERR("Wrong link type (%d)", err);
3601 /* FIXME: Map err to a valid reason */
3602 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3603 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3606 data += cmd_len;
3607 len -= cmd_len;
3610 kfree_skb(skb);
3613 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3615 u16 our_fcs, rcv_fcs;
3616 int hdr_size;
3618 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3619 hdr_size = L2CAP_EXT_HDR_SIZE;
3620 else
3621 hdr_size = L2CAP_ENH_HDR_SIZE;
3623 if (chan->fcs == L2CAP_FCS_CRC16) {
3624 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3625 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3626 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3628 if (our_fcs != rcv_fcs)
3629 return -EBADMSG;
3631 return 0;
3634 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3636 u32 control = 0;
3638 chan->frames_sent = 0;
3640 control |= __set_reqseq(chan, chan->buffer_seq);
3642 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3643 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3644 l2cap_send_sframe(chan, control);
3645 set_bit(CONN_RNR_SENT, &chan->conn_state);
3648 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3649 l2cap_retransmit_frames(chan);
3651 l2cap_ertm_send(chan);
3653 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3654 chan->frames_sent == 0) {
3655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3656 l2cap_send_sframe(chan, control);
3660 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3662 struct sk_buff *next_skb;
3663 int tx_seq_offset, next_tx_seq_offset;
3665 bt_cb(skb)->tx_seq = tx_seq;
3666 bt_cb(skb)->sar = sar;
3668 next_skb = skb_peek(&chan->srej_q);
3670 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3672 while (next_skb) {
3673 if (bt_cb(next_skb)->tx_seq == tx_seq)
3674 return -EINVAL;
3676 next_tx_seq_offset = __seq_offset(chan,
3677 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3679 if (next_tx_seq_offset > tx_seq_offset) {
3680 __skb_queue_before(&chan->srej_q, next_skb, skb);
3681 return 0;
3684 if (skb_queue_is_last(&chan->srej_q, next_skb))
3685 next_skb = NULL;
3686 else
3687 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3690 __skb_queue_tail(&chan->srej_q, skb);
3692 return 0;
3695 static void append_skb_frag(struct sk_buff *skb,
3696 struct sk_buff *new_frag, struct sk_buff **last_frag)
3698 /* skb->len reflects data in skb as well as all fragments
3699 * skb->data_len reflects only data in fragments
3701 if (!skb_has_frag_list(skb))
3702 skb_shinfo(skb)->frag_list = new_frag;
3704 new_frag->next = NULL;
3706 (*last_frag)->next = new_frag;
3707 *last_frag = new_frag;
3709 skb->len += new_frag->len;
3710 skb->data_len += new_frag->len;
3711 skb->truesize += new_frag->truesize;
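/* Reassemble a segmented SDU according to the SAR bits of the control
 * field: unsegmented frames are delivered directly, start/continue/end
 * fragments are chained onto chan->sdu and the complete SDU is passed
 * to chan->ops->recv.  Oversized SDUs and invalid segment sequences are
 * discarded.
 */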
3714 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3716 int err = -EINVAL;
3718 switch (__get_ctrl_sar(chan, control)) {
3719 case L2CAP_SAR_UNSEGMENTED:
3720 if (chan->sdu)
3721 break;
3723 err = chan->ops->recv(chan->data, skb);
3724 break;
3726 case L2CAP_SAR_START:
3727 if (chan->sdu)
3728 break;
3730 chan->sdu_len = get_unaligned_le16(skb->data);
3731 skb_pull(skb, L2CAP_SDULEN_SIZE);
3733 if (chan->sdu_len > chan->imtu) {
3734 err = -EMSGSIZE;
3735 break;
3738 if (skb->len >= chan->sdu_len)
3739 break;
3741 chan->sdu = skb;
3742 chan->sdu_last_frag = skb;
3744 skb = NULL;
3745 err = 0;
3746 break;
3748 case L2CAP_SAR_CONTINUE:
3749 if (!chan->sdu)
3750 break;
3752 append_skb_frag(chan->sdu, skb,
3753 &chan->sdu_last_frag);
3754 skb = NULL;
3756 if (chan->sdu->len >= chan->sdu_len)
3757 break;
3759 err = 0;
3760 break;
3762 case L2CAP_SAR_END:
3763 if (!chan->sdu)
3764 break;
3766 append_skb_frag(chan->sdu, skb,
3767 &chan->sdu_last_frag);
3768 skb = NULL;
3770 if (chan->sdu->len != chan->sdu_len)
3771 break;
3773 err = chan->ops->recv(chan->data, chan->sdu);
3775 if (!err) {
3776 /* Reassembly complete */
3777 chan->sdu = NULL;
3778 chan->sdu_last_frag = NULL;
3779 chan->sdu_len = 0;
3781 break;
3784 if (err) {
3785 kfree_skb(skb);
3786 kfree_skb(chan->sdu);
3787 chan->sdu = NULL;
3788 chan->sdu_last_frag = NULL;
3789 chan->sdu_len = 0;
3792 return err;
3795 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3797 BT_DBG("chan %p, Enter local busy", chan);
3799 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3801 __set_ack_timer(chan);
3804 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3806 u32 control;
3808 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3809 goto done;
3811 control = __set_reqseq(chan, chan->buffer_seq);
3812 control |= __set_ctrl_poll(chan);
3813 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3814 l2cap_send_sframe(chan, control);
3815 chan->retry_count = 1;
3817 __clear_retrans_timer(chan);
3818 __set_monitor_timer(chan);
3820 set_bit(CONN_WAIT_F, &chan->conn_state);
3822 done:
3823 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3824 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3826 BT_DBG("chan %p, Exit local busy", chan);
3829 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3831 if (chan->mode == L2CAP_MODE_ERTM) {
3832 if (busy)
3833 l2cap_ertm_enter_local_busy(chan);
3834 else
3835 l2cap_ertm_exit_local_busy(chan);
3839 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3841 struct sk_buff *skb;
3842 u32 control;
3844 while ((skb = skb_peek(&chan->srej_q)) &&
3845 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3846 int err;
3848 if (bt_cb(skb)->tx_seq != tx_seq)
3849 break;
3851 skb = skb_dequeue(&chan->srej_q);
3852 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3853 err = l2cap_reassemble_sdu(chan, skb, control);
3855 if (err < 0) {
3856 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3857 break;
3860 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3861 tx_seq = __next_seq(chan, tx_seq);
3865 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3867 struct srej_list *l, *tmp;
3868 u32 control;
3870 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3871 if (l->tx_seq == tx_seq) {
3872 list_del(&l->list);
3873 kfree(l);
3874 return;
3876 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3877 control |= __set_reqseq(chan, l->tx_seq);
3878 l2cap_send_sframe(chan, control);
3879 list_del(&l->list);
3880 list_add_tail(&l->list, &chan->srej_l);
3884 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3886 struct srej_list *new;
3887 u32 control;
3889 while (tx_seq != chan->expected_tx_seq) {
3890 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3891 control |= __set_reqseq(chan, chan->expected_tx_seq);
3892 l2cap_send_sframe(chan, control);
3894 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3895 if (!new)
3896 return -ENOMEM;
3898 new->tx_seq = chan->expected_tx_seq;
3900 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3902 list_add_tail(&new->list, &chan->srej_l);
3905 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3907 return 0;
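/* Process a received ERTM I-frame: acknowledge the peer's ReqSeq,
 * deliver in-sequence frames, and buffer out-of-sequence frames in the
 * SREJ queue while selective reject frames are sent for the missing
 * TxSeq values.
 */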
3910 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3912 u16 tx_seq = __get_txseq(chan, rx_control);
3913 u16 req_seq = __get_reqseq(chan, rx_control);
3914 u8 sar = __get_ctrl_sar(chan, rx_control);
3915 int tx_seq_offset, expected_tx_seq_offset;
3916 int num_to_ack = (chan->tx_win/6) + 1;
3917 int err = 0;
3919 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3920 tx_seq, rx_control);
3922 if (__is_ctrl_final(chan, rx_control) &&
3923 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3924 __clear_monitor_timer(chan);
3925 if (chan->unacked_frames > 0)
3926 __set_retrans_timer(chan);
3927 clear_bit(CONN_WAIT_F, &chan->conn_state);
3930 chan->expected_ack_seq = req_seq;
3931 l2cap_drop_acked_frames(chan);
3933 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3935 /* invalid tx_seq */
3936 if (tx_seq_offset >= chan->tx_win) {
3937 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3938 goto drop;
3941 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3942 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3943 l2cap_send_ack(chan);
3944 goto drop;
3947 if (tx_seq == chan->expected_tx_seq)
3948 goto expected;
3950 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3951 struct srej_list *first;
3953 first = list_first_entry(&chan->srej_l,
3954 struct srej_list, list);
3955 if (tx_seq == first->tx_seq) {
3956 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3957 l2cap_check_srej_gap(chan, tx_seq);
3959 list_del(&first->list);
3960 kfree(first);
3962 if (list_empty(&chan->srej_l)) {
3963 chan->buffer_seq = chan->buffer_seq_srej;
3964 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3965 l2cap_send_ack(chan);
3966 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3968 } else {
3969 struct srej_list *l;
3971 /* duplicated tx_seq */
3972 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3973 goto drop;
3975 list_for_each_entry(l, &chan->srej_l, list) {
3976 if (l->tx_seq == tx_seq) {
3977 l2cap_resend_srejframe(chan, tx_seq);
3978 return 0;
3982 err = l2cap_send_srejframe(chan, tx_seq);
3983 if (err < 0) {
3984 l2cap_send_disconn_req(chan->conn, chan, -err);
3985 return err;
3988 } else {
3989 expected_tx_seq_offset = __seq_offset(chan,
3990 chan->expected_tx_seq, chan->buffer_seq);
3992 /* duplicated tx_seq */
3993 if (tx_seq_offset < expected_tx_seq_offset)
3994 goto drop;
3996 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3998 BT_DBG("chan %p, Enter SREJ", chan);
4000 INIT_LIST_HEAD(&chan->srej_l);
4001 chan->buffer_seq_srej = chan->buffer_seq;
4003 __skb_queue_head_init(&chan->srej_q);
4004 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4006 /* Set P-bit only if there are some I-frames to ack. */
4007 if (__clear_ack_timer(chan))
4008 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4010 err = l2cap_send_srejframe(chan, tx_seq);
4011 if (err < 0) {
4012 l2cap_send_disconn_req(chan->conn, chan, -err);
4013 return err;
4016 return 0;
4018 expected:
4019 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4021 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4022 bt_cb(skb)->tx_seq = tx_seq;
4023 bt_cb(skb)->sar = sar;
4024 __skb_queue_tail(&chan->srej_q, skb);
4025 return 0;
4028 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4029 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4031 if (err < 0) {
4032 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4033 return err;
4036 if (__is_ctrl_final(chan, rx_control)) {
4037 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4038 l2cap_retransmit_frames(chan);
4042 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4043 if (chan->num_acked == num_to_ack - 1)
4044 l2cap_send_ack(chan);
4045 else
4046 __set_ack_timer(chan);
4048 return 0;
4050 drop:
4051 kfree_skb(skb);
4052 return 0;
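/* Handle a Receiver Ready S-frame: update the acknowledged sequence
 * number, then answer a poll, retransmit on a final bit, or resume
 * normal transmission as appropriate.
 */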
4055 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4057 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4058 __get_reqseq(chan, rx_control), rx_control);
4060 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4061 l2cap_drop_acked_frames(chan);
4063 if (__is_ctrl_poll(chan, rx_control)) {
4064 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4065 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4066 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4067 (chan->unacked_frames > 0))
4068 __set_retrans_timer(chan);
4070 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4071 l2cap_send_srejtail(chan);
4072 } else {
4073 l2cap_send_i_or_rr_or_rnr(chan);
4076 } else if (__is_ctrl_final(chan, rx_control)) {
4077 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4079 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4080 l2cap_retransmit_frames(chan);
4082 } else {
4083 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4084 (chan->unacked_frames > 0))
4085 __set_retrans_timer(chan);
4087 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4088 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4089 l2cap_send_ack(chan);
4090 else
4091 l2cap_ertm_send(chan);
4095 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4097 u16 tx_seq = __get_reqseq(chan, rx_control);
4099 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4101 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4103 chan->expected_ack_seq = tx_seq;
4104 l2cap_drop_acked_frames(chan);
4106 if (__is_ctrl_final(chan, rx_control)) {
4107 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4108 l2cap_retransmit_frames(chan);
4109 } else {
4110 l2cap_retransmit_frames(chan);
4112 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4113 set_bit(CONN_REJ_ACT, &chan->conn_state);
4116 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4118 u16 tx_seq = __get_reqseq(chan, rx_control);
4120 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4122 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4124 if (__is_ctrl_poll(chan, rx_control)) {
4125 chan->expected_ack_seq = tx_seq;
4126 l2cap_drop_acked_frames(chan);
4128 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4129 l2cap_retransmit_one_frame(chan, tx_seq);
4131 l2cap_ertm_send(chan);
4133 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4134 chan->srej_save_reqseq = tx_seq;
4135 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4137 } else if (__is_ctrl_final(chan, rx_control)) {
4138 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4139 chan->srej_save_reqseq == tx_seq)
4140 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4141 else
4142 l2cap_retransmit_one_frame(chan, tx_seq);
4143 } else {
4144 l2cap_retransmit_one_frame(chan, tx_seq);
4145 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4146 chan->srej_save_reqseq = tx_seq;
4147 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4152 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4154 u16 tx_seq = __get_reqseq(chan, rx_control);
4156 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4158 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4159 chan->expected_ack_seq = tx_seq;
4160 l2cap_drop_acked_frames(chan);
4162 if (__is_ctrl_poll(chan, rx_control))
4163 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4165 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4166 __clear_retrans_timer(chan);
4167 if (__is_ctrl_poll(chan, rx_control))
4168 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4169 return;
4172 if (__is_ctrl_poll(chan, rx_control)) {
4173 l2cap_send_srejtail(chan);
4174 } else {
4175 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4176 l2cap_send_sframe(chan, rx_control);
4180 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4182 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4184 if (__is_ctrl_final(chan, rx_control) &&
4185 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4186 __clear_monitor_timer(chan);
4187 if (chan->unacked_frames > 0)
4188 __set_retrans_timer(chan);
4189 clear_bit(CONN_WAIT_F, &chan->conn_state);
4192 switch (__get_ctrl_super(chan, rx_control)) {
4193 case L2CAP_SUPER_RR:
4194 l2cap_data_channel_rrframe(chan, rx_control);
4195 break;
4197 case L2CAP_SUPER_REJ:
4198 l2cap_data_channel_rejframe(chan, rx_control);
4199 break;
4201 case L2CAP_SUPER_SREJ:
4202 l2cap_data_channel_srejframe(chan, rx_control);
4203 break;
4205 case L2CAP_SUPER_RNR:
4206 l2cap_data_channel_rnrframe(chan, rx_control);
4207 break;
4210 kfree_skb(skb);
4211 return 0;
4214 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4216 u32 control;
4217 u16 req_seq;
4218 int len, next_tx_seq_offset, req_seq_offset;
4220 control = __get_control(chan, skb->data);
4221 skb_pull(skb, __ctrl_size(chan));
4222 len = skb->len;
4225 * We can just drop the corrupted I-frame here.
4226 * The receiver will miss it, start the proper recovery
4227 * procedure and ask for retransmission.
4229 if (l2cap_check_fcs(chan, skb))
4230 goto drop;
4232 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4233 len -= L2CAP_SDULEN_SIZE;
4235 if (chan->fcs == L2CAP_FCS_CRC16)
4236 len -= L2CAP_FCS_SIZE;
4238 if (len > chan->mps) {
4239 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4240 goto drop;
4243 req_seq = __get_reqseq(chan, control);
4245 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4247 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4248 chan->expected_ack_seq);
4250 /* check for invalid req-seq */
4251 if (req_seq_offset > next_tx_seq_offset) {
4252 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4253 goto drop;
4256 if (!__is_sframe(chan, control)) {
4257 if (len < 0) {
4258 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4259 goto drop;
4262 l2cap_data_channel_iframe(chan, control, skb);
4263 } else {
4264 if (len != 0) {
4265 BT_ERR("%d", len);
4266 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4267 goto drop;
4270 l2cap_data_channel_sframe(chan, control, skb);
4273 return 0;
4275 drop:
4276 kfree_skb(skb);
4277 return 0;
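/* Deliver an incoming data frame to the channel identified by its CID
 * and process it according to the channel mode (basic, ERTM or
 * streaming).
 */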
4280 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4282 struct l2cap_chan *chan;
4283 u32 control;
4284 u16 tx_seq;
4285 int len;
4287 chan = l2cap_get_chan_by_scid(conn, cid);
4288 if (!chan) {
4289 BT_DBG("unknown cid 0x%4.4x", cid);
4290 /* Drop packet and return */
4291 kfree_skb(skb);
4292 return 0;
4295 l2cap_chan_lock(chan);
4297 BT_DBG("chan %p, len %d", chan, skb->len);
4299 if (chan->state != BT_CONNECTED)
4300 goto drop;
4302 switch (chan->mode) {
4303 case L2CAP_MODE_BASIC:
4304 /* If the socket receive buffer overflows we drop data here,
4305 * which is *bad* because L2CAP has to be reliable.
4306 * But we don't have any other choice: L2CAP doesn't
4307 * provide a flow control mechanism. */
4309 if (chan->imtu < skb->len)
4310 goto drop;
4312 if (!chan->ops->recv(chan->data, skb))
4313 goto done;
4314 break;
4316 case L2CAP_MODE_ERTM:
4317 l2cap_ertm_data_rcv(chan, skb);
4319 goto done;
4321 case L2CAP_MODE_STREAMING:
4322 control = __get_control(chan, skb->data);
4323 skb_pull(skb, __ctrl_size(chan));
4324 len = skb->len;
4326 if (l2cap_check_fcs(chan, skb))
4327 goto drop;
4329 if (__is_sar_start(chan, control))
4330 len -= L2CAP_SDULEN_SIZE;
4332 if (chan->fcs == L2CAP_FCS_CRC16)
4333 len -= L2CAP_FCS_SIZE;
4335 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4336 goto drop;
4338 tx_seq = __get_txseq(chan, control);
4340 if (chan->expected_tx_seq != tx_seq) {
4341 /* Frame(s) missing - must discard partial SDU */
4342 kfree_skb(chan->sdu);
4343 chan->sdu = NULL;
4344 chan->sdu_last_frag = NULL;
4345 chan->sdu_len = 0;
4347 /* TODO: Notify userland of missing data */
4350 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4352 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4353 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4355 goto done;
4357 default:
4358 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4359 break;
4362 drop:
4363 kfree_skb(skb);
4365 done:
4366 l2cap_chan_unlock(chan);
4368 return 0;
4371 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4373 struct l2cap_chan *chan;
4375 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4376 if (!chan)
4377 goto drop;
4379 BT_DBG("chan %p, len %d", chan, skb->len);
4381 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4382 goto drop;
4384 if (chan->imtu < skb->len)
4385 goto drop;
4387 if (!chan->ops->recv(chan->data, skb))
4388 return 0;
4390 drop:
4391 kfree_skb(skb);
4393 return 0;
4396 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4398 struct l2cap_chan *chan;
4400 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4401 if (!chan)
4402 goto drop;
4404 BT_DBG("chan %p, len %d", chan, skb->len);
4406 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4407 goto drop;
4409 if (chan->imtu < skb->len)
4410 goto drop;
4412 if (!chan->ops->recv(chan->data, skb))
4413 return 0;
4415 drop:
4416 kfree_skb(skb);
4418 return 0;
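/* Demultiplex a complete L2CAP frame by CID: signalling, connectionless,
 * LE attribute and SMP traffic go to their fixed-channel handlers, and
 * everything else is treated as connection-oriented data.
 */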
4421 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4423 struct l2cap_hdr *lh = (void *) skb->data;
4424 u16 cid, len;
4425 __le16 psm;
4427 skb_pull(skb, L2CAP_HDR_SIZE);
4428 cid = __le16_to_cpu(lh->cid);
4429 len = __le16_to_cpu(lh->len);
4431 if (len != skb->len) {
4432 kfree_skb(skb);
4433 return;
4436 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4438 switch (cid) {
4439 case L2CAP_CID_LE_SIGNALING:
4440 case L2CAP_CID_SIGNALING:
4441 l2cap_sig_channel(conn, skb);
4442 break;
4444 case L2CAP_CID_CONN_LESS:
4445 psm = get_unaligned_le16(skb->data);
4446 skb_pull(skb, 2);
4447 l2cap_conless_channel(conn, psm, skb);
4448 break;
4450 case L2CAP_CID_LE_DATA:
4451 l2cap_att_channel(conn, cid, skb);
4452 break;
4454 case L2CAP_CID_SMP:
4455 if (smp_sig_channel(conn, skb))
4456 l2cap_conn_del(conn->hcon, EACCES);
4457 break;
4459 default:
4460 l2cap_data_channel(conn, cid, skb);
4461 break;
4465 /* ---- L2CAP interface with lower layer (HCI) ---- */
4467 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4469 int exact = 0, lm1 = 0, lm2 = 0;
4470 struct l2cap_chan *c;
4472 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4474 /* Find listening sockets and check their link_mode */
4475 read_lock(&chan_list_lock);
4476 list_for_each_entry(c, &chan_list, global_l) {
4477 struct sock *sk = c->sk;
4479 if (c->state != BT_LISTEN)
4480 continue;
4482 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4483 lm1 |= HCI_LM_ACCEPT;
4484 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4485 lm1 |= HCI_LM_MASTER;
4486 exact++;
4487 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4488 lm2 |= HCI_LM_ACCEPT;
4489 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4490 lm2 |= HCI_LM_MASTER;
4493 read_unlock(&chan_list_lock);
4495 return exact ? lm1 : lm2;
4498 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4500 struct l2cap_conn *conn;
4502 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4504 if (!status) {
4505 conn = l2cap_conn_add(hcon, status);
4506 if (conn)
4507 l2cap_conn_ready(conn);
4508 } else
4509 l2cap_conn_del(hcon, bt_to_errno(status));
4511 return 0;
4514 int l2cap_disconn_ind(struct hci_conn *hcon)
4516 struct l2cap_conn *conn = hcon->l2cap_data;
4518 BT_DBG("hcon %p", hcon);
4520 if (!conn)
4521 return HCI_ERROR_REMOTE_USER_TERM;
4522 return conn->disc_reason;
4525 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4527 BT_DBG("hcon %p reason %d", hcon, reason);
4529 l2cap_conn_del(hcon, bt_to_errno(reason));
4530 return 0;
4533 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4535 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4536 return;
4538 if (encrypt == 0x00) {
4539 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4540 __clear_chan_timer(chan);
4541 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4542 } else if (chan->sec_level == BT_SECURITY_HIGH)
4543 l2cap_chan_close(chan, ECONNREFUSED);
4544 } else {
4545 if (chan->sec_level == BT_SECURITY_MEDIUM)
4546 __clear_chan_timer(chan);
4550 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4552 struct l2cap_conn *conn = hcon->l2cap_data;
4553 struct l2cap_chan *chan;
4555 if (!conn)
4556 return 0;
4558 BT_DBG("conn %p", conn);
4560 if (hcon->type == LE_LINK) {
4561 smp_distribute_keys(conn, 0);
4562 cancel_delayed_work(&conn->security_timer);
4565 mutex_lock(&conn->chan_lock);
4567 list_for_each_entry(chan, &conn->chan_l, list) {
4568 l2cap_chan_lock(chan);
4570 BT_DBG("chan->scid %d", chan->scid);
4572 if (chan->scid == L2CAP_CID_LE_DATA) {
4573 if (!status && encrypt) {
4574 chan->sec_level = hcon->sec_level;
4575 l2cap_chan_ready(chan);
4578 l2cap_chan_unlock(chan);
4579 continue;
4582 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4583 l2cap_chan_unlock(chan);
4584 continue;
4587 if (!status && (chan->state == BT_CONNECTED ||
4588 chan->state == BT_CONFIG)) {
4589 l2cap_check_encryption(chan, encrypt);
4590 l2cap_chan_unlock(chan);
4591 continue;
4594 if (chan->state == BT_CONNECT) {
4595 if (!status) {
4596 l2cap_send_conn_req(chan);
4597 } else {
4598 __clear_chan_timer(chan);
4599 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4601 } else if (chan->state == BT_CONNECT2) {
4602 struct sock *sk = chan->sk;
4603 struct l2cap_conn_rsp rsp;
4604 __u16 res, stat;
4606 lock_sock(sk);
4608 if (!status) {
4609 if (bt_sk(sk)->defer_setup) {
4610 struct sock *parent = bt_sk(sk)->parent;
4611 res = L2CAP_CR_PEND;
4612 stat = L2CAP_CS_AUTHOR_PEND;
4613 if (parent)
4614 parent->sk_data_ready(parent, 0);
4615 } else {
4616 __l2cap_state_change(chan, BT_CONFIG);
4617 res = L2CAP_CR_SUCCESS;
4618 stat = L2CAP_CS_NO_INFO;
4620 } else {
4621 __l2cap_state_change(chan, BT_DISCONN);
4622 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4623 res = L2CAP_CR_SEC_BLOCK;
4624 stat = L2CAP_CS_NO_INFO;
4627 release_sock(sk);
4629 rsp.scid = cpu_to_le16(chan->dcid);
4630 rsp.dcid = cpu_to_le16(chan->scid);
4631 rsp.result = cpu_to_le16(res);
4632 rsp.status = cpu_to_le16(stat);
4633 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4634 sizeof(rsp), &rsp);
4637 l2cap_chan_unlock(chan);
4640 mutex_unlock(&conn->chan_lock);
4642 return 0;
4645 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4647 struct l2cap_conn *conn = hcon->l2cap_data;
4649 if (!conn)
4650 conn = l2cap_conn_add(hcon, 0);
4652 if (!conn)
4653 goto drop;
4655 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4657 if (!(flags & ACL_CONT)) {
4658 struct l2cap_hdr *hdr;
4659 struct l2cap_chan *chan;
4660 u16 cid;
4661 int len;
4663 if (conn->rx_len) {
4664 BT_ERR("Unexpected start frame (len %d)", skb->len);
4665 kfree_skb(conn->rx_skb);
4666 conn->rx_skb = NULL;
4667 conn->rx_len = 0;
4668 l2cap_conn_unreliable(conn, ECOMM);
4671 /* A start fragment always begins with the Basic L2CAP header */
4672 if (skb->len < L2CAP_HDR_SIZE) {
4673 BT_ERR("Frame is too short (len %d)", skb->len);
4674 l2cap_conn_unreliable(conn, ECOMM);
4675 goto drop;
4678 hdr = (struct l2cap_hdr *) skb->data;
4679 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4680 cid = __le16_to_cpu(hdr->cid);
4682 if (len == skb->len) {
4683 /* Complete frame received */
4684 l2cap_recv_frame(conn, skb);
4685 return 0;
4688 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4690 if (skb->len > len) {
4691 BT_ERR("Frame is too long (len %d, expected len %d)",
4692 skb->len, len);
4693 l2cap_conn_unreliable(conn, ECOMM);
4694 goto drop;
4697 chan = l2cap_get_chan_by_scid(conn, cid);
4699 if (chan && chan->sk) {
4700 struct sock *sk = chan->sk;
4701 lock_sock(sk);
4703 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4704 BT_ERR("Frame exceeding recv MTU (len %d, "
4705 "MTU %d)", len,
4706 chan->imtu);
4707 release_sock(sk);
4708 l2cap_conn_unreliable(conn, ECOMM);
4709 goto drop;
4711 release_sock(sk);
4714 /* Allocate skb for the complete frame (with header) */
4715 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4716 if (!conn->rx_skb)
4717 goto drop;
4719 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4720 skb->len);
4721 conn->rx_len = len - skb->len;
4722 } else {
4723 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4725 if (!conn->rx_len) {
4726 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4727 l2cap_conn_unreliable(conn, ECOMM);
4728 goto drop;
4731 if (skb->len > conn->rx_len) {
4732 BT_ERR("Fragment is too long (len %d, expected %d)",
4733 skb->len, conn->rx_len);
4734 kfree_skb(conn->rx_skb);
4735 conn->rx_skb = NULL;
4736 conn->rx_len = 0;
4737 l2cap_conn_unreliable(conn, ECOMM);
4738 goto drop;
4741 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4742 skb->len);
4743 conn->rx_len -= skb->len;
4745 if (!conn->rx_len) {
4746 /* Complete frame received */
4747 l2cap_recv_frame(conn, conn->rx_skb);
4748 conn->rx_skb = NULL;
4752 drop:
4753 kfree_skb(skb);
4754 return 0;
4757 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4759 struct l2cap_chan *c;
4761 read_lock(&chan_list_lock);
4763 list_for_each_entry(c, &chan_list, global_l) {
4764 struct sock *sk = c->sk;
4766 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4767 batostr(&bt_sk(sk)->src),
4768 batostr(&bt_sk(sk)->dst),
4769 c->state, __le16_to_cpu(c->psm),
4770 c->scid, c->dcid, c->imtu, c->omtu,
4771 c->sec_level, c->mode);
4774 read_unlock(&chan_list_lock);
4776 return 0;
4779 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4781 return single_open(file, l2cap_debugfs_show, inode->i_private);
4784 static const struct file_operations l2cap_debugfs_fops = {
4785 .open = l2cap_debugfs_open,
4786 .read = seq_read,
4787 .llseek = seq_lseek,
4788 .release = single_release,
4791 static struct dentry *l2cap_debugfs;
4793 int __init l2cap_init(void)
4795 int err;
4797 err = l2cap_init_sockets();
4798 if (err < 0)
4799 return err;
4801 if (bt_debugfs) {
4802 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4803 bt_debugfs, NULL, &l2cap_debugfs_fops);
4804 if (!l2cap_debugfs)
4805 BT_ERR("Failed to create L2CAP debug file");
4808 return 0;
4811 void l2cap_exit(void)
4813 debugfs_remove(l2cap_debugfs);
4814 l2cap_cleanup_sockets();
4817 module_param(disable_ertm, bool, 0644);
4818 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");