/* net/bluetooth/l2cap_core.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
43 bool disable_ertm;
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
66 struct l2cap_chan *c;
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
72 return NULL;
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
78 struct l2cap_chan *c;
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
84 return NULL;
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
92 struct l2cap_chan *c;
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
100 return c;
103 /* Find channel with given DCID.
104 * Returns locked channel.
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
117 return c;
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
123 struct l2cap_chan *c;
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
129 return NULL;
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
143 return c;
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
154 return NULL;
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
159 int err;
161 write_lock(&chan_list_lock);
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 err = -EADDRINUSE;
165 goto done;
168 if (psm) {
169 chan->psm = psm;
170 chan->sport = psm;
171 err = 0;
172 } else {
173 u16 p;
175 err = -EINVAL;
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
180 err = 0;
181 break;
185 done:
186 write_unlock(&chan_list_lock);
187 return err;
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
192 write_lock(&chan_list_lock);
194 chan->scid = scid;
196 write_unlock(&chan_list_lock);
198 return 0;
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203 u16 cid = L2CAP_CID_DYN_START;
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
210 return 0;
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
218 chan->state = state;
219 chan->ops->state_change(chan, state);
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
224 struct sock *sk = chan->sk;
226 lock_sock(sk);
227 __l2cap_state_change(chan, state);
228 release_sock(sk);
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233 struct sock *sk = chan->sk;
235 sk->sk_err = err;
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
240 struct sock *sk = chan->sk;
242 lock_sock(sk);
243 __l2cap_chan_set_err(chan, err);
244 release_sock(sk);
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
268 struct sk_buff *skb;
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
275 return NULL;
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
309 return 0;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 u16 seq)
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
340 } else {
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
355 return seq;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
366 u16 i;
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with
 * an error that depends on the state it timed out in, then drops the
 * timer's channel reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
451 return chan;
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
464 kfree(chan);
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
471 kref_get(&c->kref);
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
500 chan->conn = conn;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
505 /* LE connection */
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 chan->scid = L2CAP_CID_LE_DATA;
508 chan->dcid = L2CAP_CID_LE_DATA;
509 } else {
510 /* Alloc CID for connection-oriented socket */
511 chan->scid = l2cap_alloc_cid(conn);
512 chan->omtu = L2CAP_DEFAULT_MTU;
514 break;
516 case L2CAP_CHAN_CONN_LESS:
517 /* Connectionless socket */
518 chan->scid = L2CAP_CID_CONN_LESS;
519 chan->dcid = L2CAP_CID_CONN_LESS;
520 chan->omtu = L2CAP_DEFAULT_MTU;
521 break;
523 case L2CAP_CHAN_CONN_FIX_A2MP:
524 chan->scid = L2CAP_CID_A2MP;
525 chan->dcid = L2CAP_CID_A2MP;
526 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
528 break;
530 default:
531 /* Raw socket can send/recv signalling messages only */
532 chan->scid = L2CAP_CID_SIGNALING;
533 chan->dcid = L2CAP_CID_SIGNALING;
534 chan->omtu = L2CAP_DEFAULT_MTU;
537 chan->local_id = L2CAP_BESTEFFORT_ID;
538 chan->local_stype = L2CAP_SERV_BESTEFFORT;
539 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
540 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
541 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
542 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
544 l2cap_chan_hold(chan);
546 list_add(&chan->list, &conn->chan_l);
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
551 mutex_lock(&conn->chan_lock);
552 __l2cap_chan_add(conn, chan);
553 mutex_unlock(&conn->chan_lock);
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
558 struct l2cap_conn *conn = chan->conn;
560 __clear_chan_timer(chan);
562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
564 if (conn) {
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 /* Delete from channel list */
567 list_del(&chan->list);
569 l2cap_chan_put(chan);
571 chan->conn = NULL;
573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 hci_conn_put(conn->hcon);
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
580 if (chan->hs_hchan) {
581 struct hci_chan *hs_hchan = chan->hs_hchan;
583 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 amp_disconnect_logical_link(hs_hchan);
587 chan->ops->teardown(chan, err);
589 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
590 return;
592 switch(chan->mode) {
593 case L2CAP_MODE_BASIC:
594 break;
596 case L2CAP_MODE_ERTM:
597 __clear_retrans_timer(chan);
598 __clear_monitor_timer(chan);
599 __clear_ack_timer(chan);
601 skb_queue_purge(&chan->srej_q);
603 l2cap_seq_list_free(&chan->srej_list);
604 l2cap_seq_list_free(&chan->retrans_list);
606 /* fall through */
608 case L2CAP_MODE_STREAMING:
609 skb_queue_purge(&chan->tx_q);
610 break;
613 return;
616 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
618 struct l2cap_conn *conn = chan->conn;
619 struct sock *sk = chan->sk;
621 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
622 sk);
624 switch (chan->state) {
625 case BT_LISTEN:
626 chan->ops->teardown(chan, 0);
627 break;
629 case BT_CONNECTED:
630 case BT_CONFIG:
631 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
632 conn->hcon->type == ACL_LINK) {
633 __set_chan_timer(chan, sk->sk_sndtimeo);
634 l2cap_send_disconn_req(chan, reason);
635 } else
636 l2cap_chan_del(chan, reason);
637 break;
639 case BT_CONNECT2:
640 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
641 conn->hcon->type == ACL_LINK) {
642 struct l2cap_conn_rsp rsp;
643 __u16 result;
645 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
646 result = L2CAP_CR_SEC_BLOCK;
647 else
648 result = L2CAP_CR_BAD_PSM;
649 l2cap_state_change(chan, BT_DISCONN);
651 rsp.scid = cpu_to_le16(chan->dcid);
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.result = cpu_to_le16(result);
654 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
655 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
656 sizeof(rsp), &rsp);
659 l2cap_chan_del(chan, reason);
660 break;
662 case BT_CONNECT:
663 case BT_DISCONN:
664 l2cap_chan_del(chan, reason);
665 break;
667 default:
668 chan->ops->teardown(chan, 0);
669 break;
673 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
675 if (chan->chan_type == L2CAP_CHAN_RAW) {
676 switch (chan->sec_level) {
677 case BT_SECURITY_HIGH:
678 return HCI_AT_DEDICATED_BONDING_MITM;
679 case BT_SECURITY_MEDIUM:
680 return HCI_AT_DEDICATED_BONDING;
681 default:
682 return HCI_AT_NO_BONDING;
684 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
685 if (chan->sec_level == BT_SECURITY_LOW)
686 chan->sec_level = BT_SECURITY_SDP;
688 if (chan->sec_level == BT_SECURITY_HIGH)
689 return HCI_AT_NO_BONDING_MITM;
690 else
691 return HCI_AT_NO_BONDING;
692 } else {
693 switch (chan->sec_level) {
694 case BT_SECURITY_HIGH:
695 return HCI_AT_GENERAL_BONDING_MITM;
696 case BT_SECURITY_MEDIUM:
697 return HCI_AT_GENERAL_BONDING;
698 default:
699 return HCI_AT_NO_BONDING;
704 /* Service level security */
705 int l2cap_chan_check_security(struct l2cap_chan *chan)
707 struct l2cap_conn *conn = chan->conn;
708 __u8 auth_type;
710 auth_type = l2cap_get_auth_type(chan);
712 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
715 static u8 l2cap_get_ident(struct l2cap_conn *conn)
717 u8 id;
719 /* Get next available identificator.
720 * 1 - 128 are used by kernel.
721 * 129 - 199 are reserved.
722 * 200 - 254 are used by utilities like l2ping, etc.
725 spin_lock(&conn->lock);
727 if (++conn->tx_ident > 128)
728 conn->tx_ident = 1;
730 id = conn->tx_ident;
732 spin_unlock(&conn->lock);
734 return id;
737 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
738 void *data)
740 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
741 u8 flags;
743 BT_DBG("code 0x%2.2x", code);
745 if (!skb)
746 return;
748 if (lmp_no_flush_capable(conn->hcon->hdev))
749 flags = ACL_START_NO_FLUSH;
750 else
751 flags = ACL_START;
753 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
754 skb->priority = HCI_PRIO_MAX;
756 hci_send_acl(conn->hchan, skb, flags);
759 static bool __chan_is_moving(struct l2cap_chan *chan)
761 return chan->move_state != L2CAP_MOVE_STABLE &&
762 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
765 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
767 struct hci_conn *hcon = chan->conn->hcon;
768 u16 flags;
770 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
771 skb->priority);
773 if (chan->hs_hcon && !__chan_is_moving(chan)) {
774 if (chan->hs_hchan)
775 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
776 else
777 kfree_skb(skb);
779 return;
782 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
783 lmp_no_flush_capable(hcon->hdev))
784 flags = ACL_START_NO_FLUSH;
785 else
786 flags = ACL_START;
788 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
789 hci_send_acl(chan->conn->hchan, skb, flags);
792 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
794 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
795 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
797 if (enh & L2CAP_CTRL_FRAME_TYPE) {
798 /* S-Frame */
799 control->sframe = 1;
800 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
801 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
803 control->sar = 0;
804 control->txseq = 0;
805 } else {
806 /* I-Frame */
807 control->sframe = 0;
808 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
811 control->poll = 0;
812 control->super = 0;
816 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
818 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
821 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
822 /* S-Frame */
823 control->sframe = 1;
824 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
825 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
827 control->sar = 0;
828 control->txseq = 0;
829 } else {
830 /* I-Frame */
831 control->sframe = 0;
832 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
833 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
835 control->poll = 0;
836 control->super = 0;
840 static inline void __unpack_control(struct l2cap_chan *chan,
841 struct sk_buff *skb)
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
844 __unpack_extended_control(get_unaligned_le32(skb->data),
845 &bt_cb(skb)->control);
846 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
847 } else {
848 __unpack_enhanced_control(get_unaligned_le16(skb->data),
849 &bt_cb(skb)->control);
850 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
854 static u32 __pack_extended_control(struct l2cap_ctrl *control)
856 u32 packed;
858 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
859 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
861 if (control->sframe) {
862 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
863 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
864 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
865 } else {
866 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
867 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
870 return packed;
873 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
875 u16 packed;
877 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
878 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
880 if (control->sframe) {
881 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
882 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
883 packed |= L2CAP_CTRL_FRAME_TYPE;
884 } else {
885 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
886 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
889 return packed;
892 static inline void __pack_control(struct l2cap_chan *chan,
893 struct l2cap_ctrl *control,
894 struct sk_buff *skb)
896 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
897 put_unaligned_le32(__pack_extended_control(control),
898 skb->data + L2CAP_HDR_SIZE);
899 } else {
900 put_unaligned_le16(__pack_enhanced_control(control),
901 skb->data + L2CAP_HDR_SIZE);
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
907 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 return L2CAP_EXT_HDR_SIZE;
909 else
910 return L2CAP_ENH_HDR_SIZE;
913 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
914 u32 control)
916 struct sk_buff *skb;
917 struct l2cap_hdr *lh;
918 int hlen = __ertm_hdr_size(chan);
920 if (chan->fcs == L2CAP_FCS_CRC16)
921 hlen += L2CAP_FCS_SIZE;
923 skb = bt_skb_alloc(hlen, GFP_KERNEL);
925 if (!skb)
926 return ERR_PTR(-ENOMEM);
928 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
929 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
930 lh->cid = cpu_to_le16(chan->dcid);
932 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
933 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
934 else
935 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
937 if (chan->fcs == L2CAP_FCS_CRC16) {
938 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
939 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
942 skb->priority = HCI_PRIO_MAX;
943 return skb;
946 static void l2cap_send_sframe(struct l2cap_chan *chan,
947 struct l2cap_ctrl *control)
949 struct sk_buff *skb;
950 u32 control_field;
952 BT_DBG("chan %p, control %p", chan, control);
954 if (!control->sframe)
955 return;
957 if (__chan_is_moving(chan))
958 return;
960 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
961 !control->poll)
962 control->final = 1;
964 if (control->super == L2CAP_SUPER_RR)
965 clear_bit(CONN_RNR_SENT, &chan->conn_state);
966 else if (control->super == L2CAP_SUPER_RNR)
967 set_bit(CONN_RNR_SENT, &chan->conn_state);
969 if (control->super != L2CAP_SUPER_SREJ) {
970 chan->last_acked_seq = control->reqseq;
971 __clear_ack_timer(chan);
974 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
975 control->final, control->poll, control->super);
977 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
978 control_field = __pack_extended_control(control);
979 else
980 control_field = __pack_enhanced_control(control);
982 skb = l2cap_create_sframe_pdu(chan, control_field);
983 if (!IS_ERR(skb))
984 l2cap_do_send(chan, skb);
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
989 struct l2cap_ctrl control;
991 BT_DBG("chan %p, poll %d", chan, poll);
993 memset(&control, 0, sizeof(control));
994 control.sframe = 1;
995 control.poll = poll;
997 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
998 control.super = L2CAP_SUPER_RNR;
999 else
1000 control.super = L2CAP_SUPER_RR;
1002 control.reqseq = chan->buffer_seq;
1003 l2cap_send_sframe(chan, &control);
1006 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1008 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1011 static bool __amp_capable(struct l2cap_chan *chan)
1013 struct l2cap_conn *conn = chan->conn;
1015 if (enable_hs &&
1016 hci_amp_capable() &&
1017 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 return true;
1020 else
1021 return false;
1024 static bool l2cap_check_efs(struct l2cap_chan *chan)
1026 /* Check EFS parameters */
1027 return true;
1030 void l2cap_send_conn_req(struct l2cap_chan *chan)
1032 struct l2cap_conn *conn = chan->conn;
1033 struct l2cap_conn_req req;
1035 req.scid = cpu_to_le16(chan->scid);
1036 req.psm = chan->psm;
1038 chan->ident = l2cap_get_ident(conn);
1040 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1042 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1045 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1047 struct l2cap_create_chan_req req;
1048 req.scid = cpu_to_le16(chan->scid);
1049 req.psm = chan->psm;
1050 req.amp_id = amp_id;
1052 chan->ident = l2cap_get_ident(chan->conn);
1054 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1055 sizeof(req), &req);
1058 static void l2cap_move_setup(struct l2cap_chan *chan)
1060 struct sk_buff *skb;
1062 BT_DBG("chan %p", chan);
1064 if (chan->mode != L2CAP_MODE_ERTM)
1065 return;
1067 __clear_retrans_timer(chan);
1068 __clear_monitor_timer(chan);
1069 __clear_ack_timer(chan);
1071 chan->retry_count = 0;
1072 skb_queue_walk(&chan->tx_q, skb) {
1073 if (bt_cb(skb)->control.retries)
1074 bt_cb(skb)->control.retries = 1;
1075 else
1076 break;
1079 chan->expected_tx_seq = chan->buffer_seq;
1081 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1082 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1083 l2cap_seq_list_clear(&chan->retrans_list);
1084 l2cap_seq_list_clear(&chan->srej_list);
1085 skb_queue_purge(&chan->srej_q);
1087 chan->tx_state = L2CAP_TX_STATE_XMIT;
1088 chan->rx_state = L2CAP_RX_STATE_MOVE;
1090 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1093 static void l2cap_move_done(struct l2cap_chan *chan)
1095 u8 move_role = chan->move_role;
1096 BT_DBG("chan %p", chan);
1098 chan->move_state = L2CAP_MOVE_STABLE;
1099 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1101 if (chan->mode != L2CAP_MODE_ERTM)
1102 return;
1104 switch (move_role) {
1105 case L2CAP_MOVE_ROLE_INITIATOR:
1106 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1107 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1108 break;
1109 case L2CAP_MOVE_ROLE_RESPONDER:
1110 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1111 break;
1115 static void l2cap_chan_ready(struct l2cap_chan *chan)
1117 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 chan->conf_state = 0;
1119 __clear_chan_timer(chan);
1121 chan->state = BT_CONNECTED;
1123 chan->ops->ready(chan);
1126 static void l2cap_start_connection(struct l2cap_chan *chan)
1128 if (__amp_capable(chan)) {
1129 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1130 a2mp_discover_amp(chan);
1131 } else {
1132 l2cap_send_conn_req(chan);
1136 static void l2cap_do_start(struct l2cap_chan *chan)
1138 struct l2cap_conn *conn = chan->conn;
1140 if (conn->hcon->type == LE_LINK) {
1141 l2cap_chan_ready(chan);
1142 return;
1145 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1146 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1147 return;
1149 if (l2cap_chan_check_security(chan) &&
1150 __l2cap_no_conn_pending(chan)) {
1151 l2cap_start_connection(chan);
1153 } else {
1154 struct l2cap_info_req req;
1155 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1157 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1158 conn->info_ident = l2cap_get_ident(conn);
1160 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1162 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1163 sizeof(req), &req);
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1169 u32 local_feat_mask = l2cap_feat_mask;
1170 if (!disable_ertm)
1171 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1173 switch (mode) {
1174 case L2CAP_MODE_ERTM:
1175 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 case L2CAP_MODE_STREAMING:
1177 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 default:
1179 return 0x00;
1183 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1185 struct sock *sk = chan->sk;
1186 struct l2cap_conn *conn = chan->conn;
1187 struct l2cap_disconn_req req;
1189 if (!conn)
1190 return;
1192 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1193 __clear_retrans_timer(chan);
1194 __clear_monitor_timer(chan);
1195 __clear_ack_timer(chan);
1198 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1199 l2cap_state_change(chan, BT_DISCONN);
1200 return;
1203 req.dcid = cpu_to_le16(chan->dcid);
1204 req.scid = cpu_to_le16(chan->scid);
1205 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1206 sizeof(req), &req);
1208 lock_sock(sk);
1209 __l2cap_state_change(chan, BT_DISCONN);
1210 __l2cap_chan_set_err(chan, err);
1211 release_sock(sk);
1214 /* ---- L2CAP connections ---- */
1215 static void l2cap_conn_start(struct l2cap_conn *conn)
1217 struct l2cap_chan *chan, *tmp;
1219 BT_DBG("conn %p", conn);
1221 mutex_lock(&conn->chan_lock);
1223 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1224 struct sock *sk = chan->sk;
1226 l2cap_chan_lock(chan);
1228 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1229 l2cap_chan_unlock(chan);
1230 continue;
1233 if (chan->state == BT_CONNECT) {
1234 if (!l2cap_chan_check_security(chan) ||
1235 !__l2cap_no_conn_pending(chan)) {
1236 l2cap_chan_unlock(chan);
1237 continue;
1240 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1241 && test_bit(CONF_STATE2_DEVICE,
1242 &chan->conf_state)) {
1243 l2cap_chan_close(chan, ECONNRESET);
1244 l2cap_chan_unlock(chan);
1245 continue;
1248 l2cap_start_connection(chan);
1250 } else if (chan->state == BT_CONNECT2) {
1251 struct l2cap_conn_rsp rsp;
1252 char buf[128];
1253 rsp.scid = cpu_to_le16(chan->dcid);
1254 rsp.dcid = cpu_to_le16(chan->scid);
1256 if (l2cap_chan_check_security(chan)) {
1257 lock_sock(sk);
1258 if (test_bit(BT_SK_DEFER_SETUP,
1259 &bt_sk(sk)->flags)) {
1260 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1261 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1262 chan->ops->defer(chan);
1264 } else {
1265 __l2cap_state_change(chan, BT_CONFIG);
1266 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1267 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1269 release_sock(sk);
1270 } else {
1271 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1272 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1275 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1276 sizeof(rsp), &rsp);
1278 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1279 rsp.result != L2CAP_CR_SUCCESS) {
1280 l2cap_chan_unlock(chan);
1281 continue;
1284 set_bit(CONF_REQ_SENT, &chan->conf_state);
1285 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1286 l2cap_build_conf_req(chan, buf), buf);
1287 chan->num_conf_req++;
1290 l2cap_chan_unlock(chan);
1293 mutex_unlock(&conn->chan_lock);
1296 /* Find socket with cid and source/destination bdaddr.
1297 * Returns closest match, locked.
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1300 bdaddr_t *src,
1301 bdaddr_t *dst)
1303 struct l2cap_chan *c, *c1 = NULL;
1305 read_lock(&chan_list_lock);
1307 list_for_each_entry(c, &chan_list, global_l) {
1308 struct sock *sk = c->sk;
1310 if (state && c->state != state)
1311 continue;
1313 if (c->scid == cid) {
1314 int src_match, dst_match;
1315 int src_any, dst_any;
1317 /* Exact match. */
1318 src_match = !bacmp(&bt_sk(sk)->src, src);
1319 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 if (src_match && dst_match) {
1321 read_unlock(&chan_list_lock);
1322 return c;
1325 /* Closest match */
1326 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 if ((src_match && dst_any) || (src_any && dst_match) ||
1329 (src_any && dst_any))
1330 c1 = c;
1334 read_unlock(&chan_list_lock);
1336 return c1;
1339 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1341 struct sock *parent, *sk;
1342 struct l2cap_chan *chan, *pchan;
1344 BT_DBG("");
1346 /* Check if we have socket listening on cid */
1347 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1348 conn->src, conn->dst);
1349 if (!pchan)
1350 return;
1352 parent = pchan->sk;
1354 lock_sock(parent);
1356 chan = pchan->ops->new_connection(pchan);
1357 if (!chan)
1358 goto clean;
1360 sk = chan->sk;
1362 hci_conn_hold(conn->hcon);
1363 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1365 bacpy(&bt_sk(sk)->src, conn->src);
1366 bacpy(&bt_sk(sk)->dst, conn->dst);
1368 l2cap_chan_add(conn, chan);
1370 l2cap_chan_ready(chan);
1372 clean:
1373 release_sock(parent);
1376 static void l2cap_conn_ready(struct l2cap_conn *conn)
1378 struct l2cap_chan *chan;
1379 struct hci_conn *hcon = conn->hcon;
1381 BT_DBG("conn %p", conn);
1383 if (!hcon->out && hcon->type == LE_LINK)
1384 l2cap_le_conn_ready(conn);
1386 if (hcon->out && hcon->type == LE_LINK)
1387 smp_conn_security(hcon, hcon->pending_sec_level);
1389 mutex_lock(&conn->chan_lock);
1391 list_for_each_entry(chan, &conn->chan_l, list) {
1393 l2cap_chan_lock(chan);
1395 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1396 l2cap_chan_unlock(chan);
1397 continue;
1400 if (hcon->type == LE_LINK) {
1401 if (smp_conn_security(hcon, chan->sec_level))
1402 l2cap_chan_ready(chan);
1404 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1405 struct sock *sk = chan->sk;
1406 __clear_chan_timer(chan);
1407 lock_sock(sk);
1408 __l2cap_state_change(chan, BT_CONNECTED);
1409 sk->sk_state_change(sk);
1410 release_sock(sk);
1412 } else if (chan->state == BT_CONNECT)
1413 l2cap_do_start(chan);
1415 l2cap_chan_unlock(chan);
1418 mutex_unlock(&conn->chan_lock);
1421 /* Notify sockets that we cannot guaranty reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1424 struct l2cap_chan *chan;
1426 BT_DBG("conn %p", conn);
1428 mutex_lock(&conn->chan_lock);
1430 list_for_each_entry(chan, &conn->chan_l, list) {
1431 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 l2cap_chan_set_err(chan, err);
1435 mutex_unlock(&conn->chan_lock);
1438 static void l2cap_info_timeout(struct work_struct *work)
1440 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1441 info_timer.work);
1443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 conn->info_ident = 0;
1446 l2cap_conn_start(conn);
1449 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1451 struct l2cap_conn *conn = hcon->l2cap_data;
1452 struct l2cap_chan *chan, *l;
1454 if (!conn)
1455 return;
1457 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1459 kfree_skb(conn->rx_skb);
1461 mutex_lock(&conn->chan_lock);
1463 /* Kill channels */
1464 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1465 l2cap_chan_hold(chan);
1466 l2cap_chan_lock(chan);
1468 l2cap_chan_del(chan, err);
1470 l2cap_chan_unlock(chan);
1472 chan->ops->close(chan);
1473 l2cap_chan_put(chan);
1476 mutex_unlock(&conn->chan_lock);
1478 hci_chan_del(conn->hchan);
1480 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1481 cancel_delayed_work_sync(&conn->info_timer);
1483 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1484 cancel_delayed_work_sync(&conn->security_timer);
1485 smp_chan_destroy(conn);
1488 hcon->l2cap_data = NULL;
1489 kfree(conn);
1492 static void security_timeout(struct work_struct *work)
1494 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1495 security_timer.work);
1497 BT_DBG("conn %p", conn);
1499 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1500 smp_chan_destroy(conn);
1501 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1505 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1507 struct l2cap_conn *conn = hcon->l2cap_data;
1508 struct hci_chan *hchan;
1510 if (conn || status)
1511 return conn;
1513 hchan = hci_chan_create(hcon);
1514 if (!hchan)
1515 return NULL;
1517 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1518 if (!conn) {
1519 hci_chan_del(hchan);
1520 return NULL;
1523 hcon->l2cap_data = conn;
1524 conn->hcon = hcon;
1525 conn->hchan = hchan;
1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1529 switch (hcon->type) {
1530 case AMP_LINK:
1531 conn->mtu = hcon->hdev->block_mtu;
1532 break;
1534 case LE_LINK:
1535 if (hcon->hdev->le_mtu) {
1536 conn->mtu = hcon->hdev->le_mtu;
1537 break;
1539 /* fall through */
1541 default:
1542 conn->mtu = hcon->hdev->acl_mtu;
1543 break;
1546 conn->src = &hcon->hdev->bdaddr;
1547 conn->dst = &hcon->dst;
1549 conn->feat_mask = 0;
1551 spin_lock_init(&conn->lock);
1552 mutex_init(&conn->chan_lock);
1554 INIT_LIST_HEAD(&conn->chan_l);
1556 if (hcon->type == LE_LINK)
1557 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1558 else
1559 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1561 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1563 return conn;
1566 /* ---- Socket interface ---- */
1568 /* Find socket with psm and source / destination bdaddr.
1569 * Returns closest match.
1571 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1572 bdaddr_t *src,
1573 bdaddr_t *dst)
1575 struct l2cap_chan *c, *c1 = NULL;
1577 read_lock(&chan_list_lock);
1579 list_for_each_entry(c, &chan_list, global_l) {
1580 struct sock *sk = c->sk;
1582 if (state && c->state != state)
1583 continue;
1585 if (c->psm == psm) {
1586 int src_match, dst_match;
1587 int src_any, dst_any;
1589 /* Exact match. */
1590 src_match = !bacmp(&bt_sk(sk)->src, src);
1591 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1592 if (src_match && dst_match) {
1593 read_unlock(&chan_list_lock);
1594 return c;
1597 /* Closest match */
1598 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1599 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1600 if ((src_match && dst_any) || (src_any && dst_match) ||
1601 (src_any && dst_any))
1602 c1 = c;
1606 read_unlock(&chan_list_lock);
1608 return c1;
1611 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1612 bdaddr_t *dst, u8 dst_type)
1614 struct sock *sk = chan->sk;
1615 bdaddr_t *src = &bt_sk(sk)->src;
1616 struct l2cap_conn *conn;
1617 struct hci_conn *hcon;
1618 struct hci_dev *hdev;
1619 __u8 auth_type;
1620 int err;
1622 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1623 dst_type, __le16_to_cpu(psm));
1625 hdev = hci_get_route(dst, src);
1626 if (!hdev)
1627 return -EHOSTUNREACH;
1629 hci_dev_lock(hdev);
1631 l2cap_chan_lock(chan);
1633 /* PSM must be odd and lsb of upper byte must be 0 */
1634 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1635 chan->chan_type != L2CAP_CHAN_RAW) {
1636 err = -EINVAL;
1637 goto done;
1640 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1641 err = -EINVAL;
1642 goto done;
1645 switch (chan->mode) {
1646 case L2CAP_MODE_BASIC:
1647 break;
1648 case L2CAP_MODE_ERTM:
1649 case L2CAP_MODE_STREAMING:
1650 if (!disable_ertm)
1651 break;
1652 /* fall through */
1653 default:
1654 err = -ENOTSUPP;
1655 goto done;
1658 switch (chan->state) {
1659 case BT_CONNECT:
1660 case BT_CONNECT2:
1661 case BT_CONFIG:
1662 /* Already connecting */
1663 err = 0;
1664 goto done;
1666 case BT_CONNECTED:
1667 /* Already connected */
1668 err = -EISCONN;
1669 goto done;
1671 case BT_OPEN:
1672 case BT_BOUND:
1673 /* Can connect */
1674 break;
1676 default:
1677 err = -EBADFD;
1678 goto done;
1681 /* Set destination address and psm */
1682 lock_sock(sk);
1683 bacpy(&bt_sk(sk)->dst, dst);
1684 release_sock(sk);
1686 chan->psm = psm;
1687 chan->dcid = cid;
1689 auth_type = l2cap_get_auth_type(chan);
1691 if (chan->dcid == L2CAP_CID_LE_DATA)
1692 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1693 chan->sec_level, auth_type);
1694 else
1695 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1696 chan->sec_level, auth_type);
1698 if (IS_ERR(hcon)) {
1699 err = PTR_ERR(hcon);
1700 goto done;
1703 conn = l2cap_conn_add(hcon, 0);
1704 if (!conn) {
1705 hci_conn_put(hcon);
1706 err = -ENOMEM;
1707 goto done;
1710 if (hcon->type == LE_LINK) {
1711 err = 0;
1713 if (!list_empty(&conn->chan_l)) {
1714 err = -EBUSY;
1715 hci_conn_put(hcon);
1718 if (err)
1719 goto done;
1722 /* Update source addr of the socket */
1723 bacpy(src, conn->src);
1725 l2cap_chan_unlock(chan);
1726 l2cap_chan_add(conn, chan);
1727 l2cap_chan_lock(chan);
1729 l2cap_state_change(chan, BT_CONNECT);
1730 __set_chan_timer(chan, sk->sk_sndtimeo);
1732 if (hcon->state == BT_CONNECTED) {
1733 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1734 __clear_chan_timer(chan);
1735 if (l2cap_chan_check_security(chan))
1736 l2cap_state_change(chan, BT_CONNECTED);
1737 } else
1738 l2cap_do_start(chan);
1741 err = 0;
1743 done:
1744 l2cap_chan_unlock(chan);
1745 hci_dev_unlock(hdev);
1746 hci_dev_put(hdev);
1747 return err;
1750 int __l2cap_wait_ack(struct sock *sk)
1752 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1753 DECLARE_WAITQUEUE(wait, current);
1754 int err = 0;
1755 int timeo = HZ/5;
1757 add_wait_queue(sk_sleep(sk), &wait);
1758 set_current_state(TASK_INTERRUPTIBLE);
1759 while (chan->unacked_frames > 0 && chan->conn) {
1760 if (!timeo)
1761 timeo = HZ/5;
1763 if (signal_pending(current)) {
1764 err = sock_intr_errno(timeo);
1765 break;
1768 release_sock(sk);
1769 timeo = schedule_timeout(timeo);
1770 lock_sock(sk);
1771 set_current_state(TASK_INTERRUPTIBLE);
1773 err = sock_error(sk);
1774 if (err)
1775 break;
1777 set_current_state(TASK_RUNNING);
1778 remove_wait_queue(sk_sleep(sk), &wait);
1779 return err;
1782 static void l2cap_monitor_timeout(struct work_struct *work)
1784 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1785 monitor_timer.work);
1787 BT_DBG("chan %p", chan);
1789 l2cap_chan_lock(chan);
1791 if (!chan->conn) {
1792 l2cap_chan_unlock(chan);
1793 l2cap_chan_put(chan);
1794 return;
1797 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1799 l2cap_chan_unlock(chan);
1800 l2cap_chan_put(chan);
1803 static void l2cap_retrans_timeout(struct work_struct *work)
1805 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1806 retrans_timer.work);
1808 BT_DBG("chan %p", chan);
1810 l2cap_chan_lock(chan);
1812 if (!chan->conn) {
1813 l2cap_chan_unlock(chan);
1814 l2cap_chan_put(chan);
1815 return;
1818 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1819 l2cap_chan_unlock(chan);
1820 l2cap_chan_put(chan);
1823 static void l2cap_streaming_send(struct l2cap_chan *chan,
1824 struct sk_buff_head *skbs)
1826 struct sk_buff *skb;
1827 struct l2cap_ctrl *control;
1829 BT_DBG("chan %p, skbs %p", chan, skbs);
1831 if (__chan_is_moving(chan))
1832 return;
1834 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1836 while (!skb_queue_empty(&chan->tx_q)) {
1838 skb = skb_dequeue(&chan->tx_q);
1840 bt_cb(skb)->control.retries = 1;
1841 control = &bt_cb(skb)->control;
1843 control->reqseq = 0;
1844 control->txseq = chan->next_tx_seq;
1846 __pack_control(chan, control, skb);
1848 if (chan->fcs == L2CAP_FCS_CRC16) {
1849 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1850 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1853 l2cap_do_send(chan, skb);
1855 BT_DBG("Sent txseq %u", control->txseq);
1857 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1858 chan->frames_sent++;
1862 static int l2cap_ertm_send(struct l2cap_chan *chan)
1864 struct sk_buff *skb, *tx_skb;
1865 struct l2cap_ctrl *control;
1866 int sent = 0;
1868 BT_DBG("chan %p", chan);
1870 if (chan->state != BT_CONNECTED)
1871 return -ENOTCONN;
1873 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1874 return 0;
1876 if (__chan_is_moving(chan))
1877 return 0;
1879 while (chan->tx_send_head &&
1880 chan->unacked_frames < chan->remote_tx_win &&
1881 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1883 skb = chan->tx_send_head;
1885 bt_cb(skb)->control.retries = 1;
1886 control = &bt_cb(skb)->control;
1888 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1889 control->final = 1;
1891 control->reqseq = chan->buffer_seq;
1892 chan->last_acked_seq = chan->buffer_seq;
1893 control->txseq = chan->next_tx_seq;
1895 __pack_control(chan, control, skb);
1897 if (chan->fcs == L2CAP_FCS_CRC16) {
1898 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1899 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1902 /* Clone after data has been modified. Data is assumed to be
1903 read-only (for locking purposes) on cloned sk_buffs.
1905 tx_skb = skb_clone(skb, GFP_KERNEL);
1907 if (!tx_skb)
1908 break;
1910 __set_retrans_timer(chan);
1912 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1913 chan->unacked_frames++;
1914 chan->frames_sent++;
1915 sent++;
1917 if (skb_queue_is_last(&chan->tx_q, skb))
1918 chan->tx_send_head = NULL;
1919 else
1920 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1922 l2cap_do_send(chan, tx_skb);
1923 BT_DBG("Sent txseq %u", control->txseq);
1926 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1927 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1929 return sent;
1932 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1934 struct l2cap_ctrl control;
1935 struct sk_buff *skb;
1936 struct sk_buff *tx_skb;
1937 u16 seq;
1939 BT_DBG("chan %p", chan);
1941 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1942 return;
1944 if (__chan_is_moving(chan))
1945 return;
1947 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1948 seq = l2cap_seq_list_pop(&chan->retrans_list);
1950 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1951 if (!skb) {
1952 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1953 seq);
1954 continue;
1957 bt_cb(skb)->control.retries++;
1958 control = bt_cb(skb)->control;
1960 if (chan->max_tx != 0 &&
1961 bt_cb(skb)->control.retries > chan->max_tx) {
1962 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1963 l2cap_send_disconn_req(chan, ECONNRESET);
1964 l2cap_seq_list_clear(&chan->retrans_list);
1965 break;
1968 control.reqseq = chan->buffer_seq;
1969 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1970 control.final = 1;
1971 else
1972 control.final = 0;
1974 if (skb_cloned(skb)) {
1975 /* Cloned sk_buffs are read-only, so we need a
1976 * writeable copy
1978 tx_skb = skb_copy(skb, GFP_KERNEL);
1979 } else {
1980 tx_skb = skb_clone(skb, GFP_KERNEL);
1983 if (!tx_skb) {
1984 l2cap_seq_list_clear(&chan->retrans_list);
1985 break;
1988 /* Update skb contents */
1989 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1990 put_unaligned_le32(__pack_extended_control(&control),
1991 tx_skb->data + L2CAP_HDR_SIZE);
1992 } else {
1993 put_unaligned_le16(__pack_enhanced_control(&control),
1994 tx_skb->data + L2CAP_HDR_SIZE);
1997 if (chan->fcs == L2CAP_FCS_CRC16) {
1998 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1999 put_unaligned_le16(fcs, skb_put(tx_skb,
2000 L2CAP_FCS_SIZE));
2003 l2cap_do_send(chan, tx_skb);
2005 BT_DBG("Resent txseq %d", control.txseq);
2007 chan->last_acked_seq = chan->buffer_seq;
2011 static void l2cap_retransmit(struct l2cap_chan *chan,
2012 struct l2cap_ctrl *control)
2014 BT_DBG("chan %p, control %p", chan, control);
2016 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2017 l2cap_ertm_resend(chan);
2020 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2021 struct l2cap_ctrl *control)
2023 struct sk_buff *skb;
2025 BT_DBG("chan %p, control %p", chan, control);
2027 if (control->poll)
2028 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2030 l2cap_seq_list_clear(&chan->retrans_list);
2032 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2033 return;
2035 if (chan->unacked_frames) {
2036 skb_queue_walk(&chan->tx_q, skb) {
2037 if (bt_cb(skb)->control.txseq == control->reqseq ||
2038 skb == chan->tx_send_head)
2039 break;
2042 skb_queue_walk_from(&chan->tx_q, skb) {
2043 if (skb == chan->tx_send_head)
2044 break;
2046 l2cap_seq_list_append(&chan->retrans_list,
2047 bt_cb(skb)->control.txseq);
2050 l2cap_ertm_resend(chan);
2054 static void l2cap_send_ack(struct l2cap_chan *chan)
2056 struct l2cap_ctrl control;
2057 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2058 chan->last_acked_seq);
2059 int threshold;
2061 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2062 chan, chan->last_acked_seq, chan->buffer_seq);
2064 memset(&control, 0, sizeof(control));
2065 control.sframe = 1;
2067 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2068 chan->rx_state == L2CAP_RX_STATE_RECV) {
2069 __clear_ack_timer(chan);
2070 control.super = L2CAP_SUPER_RNR;
2071 control.reqseq = chan->buffer_seq;
2072 l2cap_send_sframe(chan, &control);
2073 } else {
2074 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2075 l2cap_ertm_send(chan);
2076 /* If any i-frames were sent, they included an ack */
2077 if (chan->buffer_seq == chan->last_acked_seq)
2078 frames_to_ack = 0;
2081 /* Ack now if the window is 3/4ths full.
2082 * Calculate without mul or div
2084 threshold = chan->ack_win;
2085 threshold += threshold << 1;
2086 threshold >>= 2;
2088 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2089 threshold);
2091 if (frames_to_ack >= threshold) {
2092 __clear_ack_timer(chan);
2093 control.super = L2CAP_SUPER_RR;
2094 control.reqseq = chan->buffer_seq;
2095 l2cap_send_sframe(chan, &control);
2096 frames_to_ack = 0;
2099 if (frames_to_ack)
2100 __set_ack_timer(chan);
2104 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2105 struct msghdr *msg, int len,
2106 int count, struct sk_buff *skb)
2108 struct l2cap_conn *conn = chan->conn;
2109 struct sk_buff **frag;
2110 int sent = 0;
2112 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2113 return -EFAULT;
2115 sent += count;
2116 len -= count;
2118 /* Continuation fragments (no L2CAP header) */
2119 frag = &skb_shinfo(skb)->frag_list;
2120 while (len) {
2121 struct sk_buff *tmp;
2123 count = min_t(unsigned int, conn->mtu, len);
2125 tmp = chan->ops->alloc_skb(chan, count,
2126 msg->msg_flags & MSG_DONTWAIT);
2127 if (IS_ERR(tmp))
2128 return PTR_ERR(tmp);
2130 *frag = tmp;
2132 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2133 return -EFAULT;
2135 (*frag)->priority = skb->priority;
2137 sent += count;
2138 len -= count;
2140 skb->len += (*frag)->len;
2141 skb->data_len += (*frag)->len;
2143 frag = &(*frag)->next;
2146 return sent;
2149 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2150 struct msghdr *msg, size_t len,
2151 u32 priority)
2153 struct l2cap_conn *conn = chan->conn;
2154 struct sk_buff *skb;
2155 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2156 struct l2cap_hdr *lh;
2158 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2160 count = min_t(unsigned int, (conn->mtu - hlen), len);
2162 skb = chan->ops->alloc_skb(chan, count + hlen,
2163 msg->msg_flags & MSG_DONTWAIT);
2164 if (IS_ERR(skb))
2165 return skb;
2167 skb->priority = priority;
2169 /* Create L2CAP header */
2170 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2171 lh->cid = cpu_to_le16(chan->dcid);
2172 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2173 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2175 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2176 if (unlikely(err < 0)) {
2177 kfree_skb(skb);
2178 return ERR_PTR(err);
2180 return skb;
2183 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2184 struct msghdr *msg, size_t len,
2185 u32 priority)
2187 struct l2cap_conn *conn = chan->conn;
2188 struct sk_buff *skb;
2189 int err, count;
2190 struct l2cap_hdr *lh;
2192 BT_DBG("chan %p len %zu", chan, len);
2194 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2196 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2197 msg->msg_flags & MSG_DONTWAIT);
2198 if (IS_ERR(skb))
2199 return skb;
2201 skb->priority = priority;
2203 /* Create L2CAP header */
2204 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2205 lh->cid = cpu_to_le16(chan->dcid);
2206 lh->len = cpu_to_le16(len);
2208 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2209 if (unlikely(err < 0)) {
2210 kfree_skb(skb);
2211 return ERR_PTR(err);
2213 return skb;
2216 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2217 struct msghdr *msg, size_t len,
2218 u16 sdulen)
2220 struct l2cap_conn *conn = chan->conn;
2221 struct sk_buff *skb;
2222 int err, count, hlen;
2223 struct l2cap_hdr *lh;
2225 BT_DBG("chan %p len %zu", chan, len);
2227 if (!conn)
2228 return ERR_PTR(-ENOTCONN);
2230 hlen = __ertm_hdr_size(chan);
2232 if (sdulen)
2233 hlen += L2CAP_SDULEN_SIZE;
2235 if (chan->fcs == L2CAP_FCS_CRC16)
2236 hlen += L2CAP_FCS_SIZE;
2238 count = min_t(unsigned int, (conn->mtu - hlen), len);
2240 skb = chan->ops->alloc_skb(chan, count + hlen,
2241 msg->msg_flags & MSG_DONTWAIT);
2242 if (IS_ERR(skb))
2243 return skb;
2245 /* Create L2CAP header */
2246 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2247 lh->cid = cpu_to_le16(chan->dcid);
2248 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2250 /* Control header is populated later */
2251 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2252 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2253 else
2254 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2256 if (sdulen)
2257 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2259 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2260 if (unlikely(err < 0)) {
2261 kfree_skb(skb);
2262 return ERR_PTR(err);
2265 bt_cb(skb)->control.fcs = chan->fcs;
2266 bt_cb(skb)->control.retries = 0;
2267 return skb;
2270 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2271 struct sk_buff_head *seg_queue,
2272 struct msghdr *msg, size_t len)
2274 struct sk_buff *skb;
2275 u16 sdu_len;
2276 size_t pdu_len;
2277 u8 sar;
2279 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2281 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2282 * so fragmented skbs are not used. The HCI layer's handling
2283 * of fragmented skbs is not compatible with ERTM's queueing.
2286 /* PDU size is derived from the HCI MTU */
2287 pdu_len = chan->conn->mtu;
2289 /* Constrain PDU size for BR/EDR connections */
2290 if (!chan->hs_hcon)
2291 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2293 /* Adjust for largest possible L2CAP overhead. */
2294 if (chan->fcs)
2295 pdu_len -= L2CAP_FCS_SIZE;
2297 pdu_len -= __ertm_hdr_size(chan);
2299 /* Remote device may have requested smaller PDUs */
2300 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2302 if (len <= pdu_len) {
2303 sar = L2CAP_SAR_UNSEGMENTED;
2304 sdu_len = 0;
2305 pdu_len = len;
2306 } else {
2307 sar = L2CAP_SAR_START;
2308 sdu_len = len;
2309 pdu_len -= L2CAP_SDULEN_SIZE;
2312 while (len > 0) {
2313 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2315 if (IS_ERR(skb)) {
2316 __skb_queue_purge(seg_queue);
2317 return PTR_ERR(skb);
2320 bt_cb(skb)->control.sar = sar;
2321 __skb_queue_tail(seg_queue, skb);
2323 len -= pdu_len;
2324 if (sdu_len) {
2325 sdu_len = 0;
2326 pdu_len += L2CAP_SDULEN_SIZE;
2329 if (len <= pdu_len) {
2330 sar = L2CAP_SAR_END;
2331 pdu_len = len;
2332 } else {
2333 sar = L2CAP_SAR_CONTINUE;
2337 return 0;
2340 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2341 u32 priority)
2343 struct sk_buff *skb;
2344 int err;
2345 struct sk_buff_head seg_queue;
2347 /* Connectionless channel */
2348 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2349 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2350 if (IS_ERR(skb))
2351 return PTR_ERR(skb);
2353 l2cap_do_send(chan, skb);
2354 return len;
2357 switch (chan->mode) {
2358 case L2CAP_MODE_BASIC:
2359 /* Check outgoing MTU */
2360 if (len > chan->omtu)
2361 return -EMSGSIZE;
2363 /* Create a basic PDU */
2364 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2365 if (IS_ERR(skb))
2366 return PTR_ERR(skb);
2368 l2cap_do_send(chan, skb);
2369 err = len;
2370 break;
2372 case L2CAP_MODE_ERTM:
2373 case L2CAP_MODE_STREAMING:
2374 /* Check outgoing MTU */
2375 if (len > chan->omtu) {
2376 err = -EMSGSIZE;
2377 break;
2380 __skb_queue_head_init(&seg_queue);
2382 /* Do segmentation before calling in to the state machine,
2383 * since it's possible to block while waiting for memory
2384 * allocation.
2386 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2388 /* The channel could have been closed while segmenting,
2389 * check that it is still connected.
2391 if (chan->state != BT_CONNECTED) {
2392 __skb_queue_purge(&seg_queue);
2393 err = -ENOTCONN;
2396 if (err)
2397 break;
2399 if (chan->mode == L2CAP_MODE_ERTM)
2400 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2401 else
2402 l2cap_streaming_send(chan, &seg_queue);
2404 err = len;
2406 /* If the skbs were not queued for sending, they'll still be in
2407 * seg_queue and need to be purged.
2409 __skb_queue_purge(&seg_queue);
2410 break;
2412 default:
2413 BT_DBG("bad state %1.1x", chan->mode);
2414 err = -EBADFD;
2417 return err;
2420 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2422 struct l2cap_ctrl control;
2423 u16 seq;
2425 BT_DBG("chan %p, txseq %u", chan, txseq);
2427 memset(&control, 0, sizeof(control));
2428 control.sframe = 1;
2429 control.super = L2CAP_SUPER_SREJ;
2431 for (seq = chan->expected_tx_seq; seq != txseq;
2432 seq = __next_seq(chan, seq)) {
2433 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2434 control.reqseq = seq;
2435 l2cap_send_sframe(chan, &control);
2436 l2cap_seq_list_append(&chan->srej_list, seq);
2440 chan->expected_tx_seq = __next_seq(chan, txseq);
2443 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2445 struct l2cap_ctrl control;
2447 BT_DBG("chan %p", chan);
2449 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2450 return;
2452 memset(&control, 0, sizeof(control));
2453 control.sframe = 1;
2454 control.super = L2CAP_SUPER_SREJ;
2455 control.reqseq = chan->srej_list.tail;
2456 l2cap_send_sframe(chan, &control);
2459 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2461 struct l2cap_ctrl control;
2462 u16 initial_head;
2463 u16 seq;
2465 BT_DBG("chan %p, txseq %u", chan, txseq);
2467 memset(&control, 0, sizeof(control));
2468 control.sframe = 1;
2469 control.super = L2CAP_SUPER_SREJ;
2471 /* Capture initial list head to allow only one pass through the list. */
2472 initial_head = chan->srej_list.head;
2474 do {
2475 seq = l2cap_seq_list_pop(&chan->srej_list);
2476 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2477 break;
2479 control.reqseq = seq;
2480 l2cap_send_sframe(chan, &control);
2481 l2cap_seq_list_append(&chan->srej_list, seq);
2482 } while (chan->srej_list.head != initial_head);
2485 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2487 struct sk_buff *acked_skb;
2488 u16 ackseq;
2490 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2492 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2493 return;
2495 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2496 chan->expected_ack_seq, chan->unacked_frames);
2498 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2499 ackseq = __next_seq(chan, ackseq)) {
2501 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2502 if (acked_skb) {
2503 skb_unlink(acked_skb, &chan->tx_q);
2504 kfree_skb(acked_skb);
2505 chan->unacked_frames--;
2509 chan->expected_ack_seq = reqseq;
2511 if (chan->unacked_frames == 0)
2512 __clear_retrans_timer(chan);
2514 BT_DBG("unacked_frames %u", chan->unacked_frames);
2517 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2519 BT_DBG("chan %p", chan);
2521 chan->expected_tx_seq = chan->buffer_seq;
2522 l2cap_seq_list_clear(&chan->srej_list);
2523 skb_queue_purge(&chan->srej_q);
2524 chan->rx_state = L2CAP_RX_STATE_RECV;
2527 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2528 struct l2cap_ctrl *control,
2529 struct sk_buff_head *skbs, u8 event)
2531 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2532 event);
2534 switch (event) {
2535 case L2CAP_EV_DATA_REQUEST:
2536 if (chan->tx_send_head == NULL)
2537 chan->tx_send_head = skb_peek(skbs);
2539 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2540 l2cap_ertm_send(chan);
2541 break;
2542 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2543 BT_DBG("Enter LOCAL_BUSY");
2544 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2546 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2547 /* The SREJ_SENT state must be aborted if we are to
2548 * enter the LOCAL_BUSY state.
2550 l2cap_abort_rx_srej_sent(chan);
2553 l2cap_send_ack(chan);
2555 break;
2556 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2557 BT_DBG("Exit LOCAL_BUSY");
2558 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2560 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2561 struct l2cap_ctrl local_control;
2563 memset(&local_control, 0, sizeof(local_control));
2564 local_control.sframe = 1;
2565 local_control.super = L2CAP_SUPER_RR;
2566 local_control.poll = 1;
2567 local_control.reqseq = chan->buffer_seq;
2568 l2cap_send_sframe(chan, &local_control);
2570 chan->retry_count = 1;
2571 __set_monitor_timer(chan);
2572 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2574 break;
2575 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2576 l2cap_process_reqseq(chan, control->reqseq);
2577 break;
2578 case L2CAP_EV_EXPLICIT_POLL:
2579 l2cap_send_rr_or_rnr(chan, 1);
2580 chan->retry_count = 1;
2581 __set_monitor_timer(chan);
2582 __clear_ack_timer(chan);
2583 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2584 break;
2585 case L2CAP_EV_RETRANS_TO:
2586 l2cap_send_rr_or_rnr(chan, 1);
2587 chan->retry_count = 1;
2588 __set_monitor_timer(chan);
2589 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2590 break;
2591 case L2CAP_EV_RECV_FBIT:
2592 /* Nothing to process */
2593 break;
2594 default:
2595 break;
2599 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2600 struct l2cap_ctrl *control,
2601 struct sk_buff_head *skbs, u8 event)
2603 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2604 event);
2606 switch (event) {
2607 case L2CAP_EV_DATA_REQUEST:
2608 if (chan->tx_send_head == NULL)
2609 chan->tx_send_head = skb_peek(skbs);
2610 /* Queue data, but don't send. */
2611 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2612 break;
2613 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2614 BT_DBG("Enter LOCAL_BUSY");
2615 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2617 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2618 /* The SREJ_SENT state must be aborted if we are to
2619 * enter the LOCAL_BUSY state.
2621 l2cap_abort_rx_srej_sent(chan);
2624 l2cap_send_ack(chan);
2626 break;
2627 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2628 BT_DBG("Exit LOCAL_BUSY");
2629 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2631 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2632 struct l2cap_ctrl local_control;
2633 memset(&local_control, 0, sizeof(local_control));
2634 local_control.sframe = 1;
2635 local_control.super = L2CAP_SUPER_RR;
2636 local_control.poll = 1;
2637 local_control.reqseq = chan->buffer_seq;
2638 l2cap_send_sframe(chan, &local_control);
2640 chan->retry_count = 1;
2641 __set_monitor_timer(chan);
2642 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2644 break;
2645 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2646 l2cap_process_reqseq(chan, control->reqseq);
2648 /* Fall through */
2650 case L2CAP_EV_RECV_FBIT:
2651 if (control && control->final) {
2652 __clear_monitor_timer(chan);
2653 if (chan->unacked_frames > 0)
2654 __set_retrans_timer(chan);
2655 chan->retry_count = 0;
2656 chan->tx_state = L2CAP_TX_STATE_XMIT;
2657 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2659 break;
2660 case L2CAP_EV_EXPLICIT_POLL:
2661 /* Ignore */
2662 break;
2663 case L2CAP_EV_MONITOR_TO:
2664 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2665 l2cap_send_rr_or_rnr(chan, 1);
2666 __set_monitor_timer(chan);
2667 chan->retry_count++;
2668 } else {
2669 l2cap_send_disconn_req(chan, ECONNABORTED);
2671 break;
2672 default:
2673 break;
2677 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2678 struct sk_buff_head *skbs, u8 event)
2680 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2681 chan, control, skbs, event, chan->tx_state);
2683 switch (chan->tx_state) {
2684 case L2CAP_TX_STATE_XMIT:
2685 l2cap_tx_state_xmit(chan, control, skbs, event);
2686 break;
2687 case L2CAP_TX_STATE_WAIT_F:
2688 l2cap_tx_state_wait_f(chan, control, skbs, event);
2689 break;
2690 default:
2691 /* Ignore event */
2692 break;
/* Feed an incoming frame's reqseq (and F bit) into the transmit state
 * machine so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
/* Feed only an incoming frame's F bit into the transmit state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2710 /* Copy frame to all raw sockets on that connection */
2711 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2713 struct sk_buff *nskb;
2714 struct l2cap_chan *chan;
2716 BT_DBG("conn %p", conn);
2718 mutex_lock(&conn->chan_lock);
2720 list_for_each_entry(chan, &conn->chan_l, list) {
2721 struct sock *sk = chan->sk;
2722 if (chan->chan_type != L2CAP_CHAN_RAW)
2723 continue;
2725 /* Don't send frame to the socket it came from */
2726 if (skb->sk == sk)
2727 continue;
2728 nskb = skb_clone(skb, GFP_KERNEL);
2729 if (!nskb)
2730 continue;
2732 if (chan->ops->recv(chan, nskb))
2733 kfree_skb(nskb);
2736 mutex_unlock(&conn->chan_lock);
2739 /* ---- L2CAP signalling commands ---- */
/* Allocate and build an skb carrying one L2CAP signalling command.
 *
 * The first skb holds the L2CAP header (signalling CID chosen by link
 * type) plus the command header and as much payload as fits within the
 * connection MTU; any remainder is chained as header-less continuation
 * fragments on frag_list.
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Total wire length: L2CAP header + command header + payload */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use their own fixed signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill whatever payload fits after the two headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb frees the head and all chained fragments */
	kfree_skb(skb);
	return NULL;
}
/* Parse one configuration option from a config request/response body.
 *
 * Advances *ptr past the option and stores the option type and length.
 * For 1/2/4-byte options *val receives the little-endian decoded value;
 * for any other length *val is a pointer to the raw option payload.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes straight off the wire and is not checked
 * against the bytes remaining in the buffer; callers only guarantee
 * L2CAP_CONF_OPT_SIZE bytes are left before calling.  An oversized
 * length makes the caller's remaining-length counter go negative (which
 * stops its loop), but *val/opt->val may then reference bytes beyond
 * the option — worth confirming an explicit bounds check upstream.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a raw pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * For len 1/2/4 'val' is the numeric value, serialized little-endian;
 * for any other len 'val' is treated as a pointer to 'len' raw bytes.
 *
 * NOTE(review): the output buffer size is not passed in — every caller
 * must guarantee enough space remains for L2CAP_CONF_OPT_SIZE + len
 * bytes; confirm callers' buffers (e.g. 64/128-byte stack arrays)
 * cannot be overrun by attacker-driven option sequences.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length payload: 'val' is a pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local service parameters.  ERTM channels advertise their
 * configured service type with default latency/flush values; streaming
 * channels advertise a fixed best-effort spec.  Other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		/* EFS is only meaningful for ERTM/streaming */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If I-frames have been received but not yet acknowledged when the
 * timer fires, send an RR/RNR S-frame (without poll) to ack them.
 * The final l2cap_chan_put() balances a reference held for this work
 * item — presumably taken where the timer was scheduled; confirm at
 * the arming site.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Distance between what we have received and what we last acked */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Initialize per-channel ERTM/streaming state before data transfer.
 *
 * Resets all sequence counters, SDU reassembly state and AMP move
 * state for any mode.  For ERTM proper it additionally sets the
 * initial RX/TX machine states, arms the three delayed-work timers and
 * allocates the SREJ and retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated (the first list is freed if the second fails).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Reset AMP channel-move bookkeeping */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2963 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2965 switch (mode) {
2966 case L2CAP_MODE_STREAMING:
2967 case L2CAP_MODE_ERTM:
2968 if (l2cap_mode_supported(mode, remote_feat_mask))
2969 return mode;
2970 /* fall through */
2971 default:
2972 return L2CAP_MODE_BASIC;
2976 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2978 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2981 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2983 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts.
 *
 * On an AMP (high-speed) link the timeouts are derived from the AMP
 * controller's best-effort flush timeout; on BR/EDR the spec defaults
 * are used.  Values are stored little-endian directly into *rfc.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		/* amp_be_flush_to is in microseconds here (converted to
		 * ms below) — confirm against the hdev field definition.
		 */
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field limit */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3024 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3026 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3027 __l2cap_ews_supported(chan)) {
3028 /* use extended control field */
3029 set_bit(FLAG_EXT_CTRL, &chan->flags);
3030 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3031 } else {
3032 chan->tx_win = min_t(u16, chan->tx_win,
3033 L2CAP_DEFAULT_TX_WINDOW);
3034 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3036 chan->ack_win = chan->tx_win;
/* Build the options payload of an outgoing Configuration Request.
 *
 * On the first request the channel mode may still be renegotiated via
 * l2cap_select_mode(); later requests keep the settled mode.  Options
 * emitted depend on the mode: MTU (if non-default), RFC, and for
 * ERTM/streaming optionally EFS, EWS and FCS.
 *
 * Returns the number of bytes written into 'data'.
 *
 * NOTE(review): 'data' is a caller-supplied buffer (typically a
 * 64/128-byte stack array) and no remaining-space bound is passed in;
 * confirm the worst-case option set always fits.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC if the peer
		 * understands ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for extended header, SDU length
		 * and FCS within the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Windows beyond 63 frames need the EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
/* Parse an accumulated Configuration Request and build the response.
 *
 * Walks the option list stored in chan->conf_req, records the peer's
 * proposed settings, and writes accept/reject options into the
 * response buffer 'data'.  Returns the response length, or
 * -ECONNREFUSED when the configuration cannot be negotiated.
 *
 * Fix: every option is now accepted only at its exact defined length.
 * Previously a wrong-size MTU/FLUSH_TO/EWS option made l2cap_get_conf_opt
 * hand back a *pointer* in 'val', which was then used as the numeric
 * value; and a wrong-size EFS option set remote_efs while leaving 'efs'
 * uninitialized, so stack bytes could later be echoed back to the peer.
 * 'efs' is also zero-initialized so a stale FLAG_EFS_ENABLE from an
 * earlier round can never expose uninitialized stack data.
 *
 * NOTE(review): unknown non-hint option types are appended to 'ptr'
 * with no bound on the response buffer — same caller contract issue as
 * l2cap_add_conf_opt; confirm callers' buffers are large enough.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs = { 0 };
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only trust a correctly sized EFS option; else
			 * 'efs' stays zeroed and remote_efs stays unset.
			 */
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* One mode-mismatch round is allowed; refuse after that */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's MPS to what fits in our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3371 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3372 void *data, u16 *result)
3374 struct l2cap_conf_req *req = data;
3375 void *ptr = req->data;
3376 int type, olen;
3377 unsigned long val;
3378 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3379 struct l2cap_conf_efs efs;
3381 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3383 while (len >= L2CAP_CONF_OPT_SIZE) {
3384 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3386 switch (type) {
3387 case L2CAP_CONF_MTU:
3388 if (val < L2CAP_DEFAULT_MIN_MTU) {
3389 *result = L2CAP_CONF_UNACCEPT;
3390 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3391 } else
3392 chan->imtu = val;
3393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3394 break;
3396 case L2CAP_CONF_FLUSH_TO:
3397 chan->flush_to = val;
3398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3399 2, chan->flush_to);
3400 break;
3402 case L2CAP_CONF_RFC:
3403 if (olen == sizeof(rfc))
3404 memcpy(&rfc, (void *)val, olen);
3406 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3407 rfc.mode != chan->mode)
3408 return -ECONNREFUSED;
3410 chan->fcs = 0;
3412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3413 sizeof(rfc), (unsigned long) &rfc);
3414 break;
3416 case L2CAP_CONF_EWS:
3417 chan->ack_win = min_t(u16, val, chan->ack_win);
3418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3419 chan->tx_win);
3420 break;
3422 case L2CAP_CONF_EFS:
3423 if (olen == sizeof(efs))
3424 memcpy(&efs, (void *)val, olen);
3426 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 efs.stype != chan->local_stype)
3429 return -ECONNREFUSED;
3431 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3432 (unsigned long) &efs);
3433 break;
3435 case L2CAP_CONF_FCS:
3436 if (*result == L2CAP_CONF_PENDING)
3437 if (val == L2CAP_FCS_NONE)
3438 set_bit(CONF_RECV_NO_FCS,
3439 &chan->conf_state);
3440 break;
3444 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3445 return -ECONNREFUSED;
3447 chan->mode = rfc.mode;
3449 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3450 switch (rfc.mode) {
3451 case L2CAP_MODE_ERTM:
3452 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3453 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3454 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3455 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3456 chan->ack_win = min_t(u16, chan->ack_win,
3457 rfc.txwin_size);
3459 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3460 chan->local_msdu = le16_to_cpu(efs.msdu);
3461 chan->local_sdu_itime =
3462 le32_to_cpu(efs.sdu_itime);
3463 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3464 chan->local_flush_to =
3465 le32_to_cpu(efs.flush_to);
3467 break;
3469 case L2CAP_MODE_STREAMING:
3470 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3474 req->dcid = cpu_to_le16(chan->dcid);
3475 req->flags = __constant_cpu_to_le16(0);
3477 return ptr - data;
3480 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3481 u16 result, u16 flags)
3483 struct l2cap_conf_rsp *rsp = data;
3484 void *ptr = rsp->data;
3486 BT_DBG("chan %p", chan);
3488 rsp->scid = cpu_to_le16(chan->dcid);
3489 rsp->result = cpu_to_le16(result);
3490 rsp->flags = cpu_to_le16(flags);
3492 return ptr - data;
3495 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3497 struct l2cap_conn_rsp rsp;
3498 struct l2cap_conn *conn = chan->conn;
3499 u8 buf[128];
3500 u8 rsp_code;
3502 rsp.scid = cpu_to_le16(chan->dcid);
3503 rsp.dcid = cpu_to_le16(chan->scid);
3504 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3505 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3507 if (chan->hs_hcon)
3508 rsp_code = L2CAP_CREATE_CHAN_RSP;
3509 else
3510 rsp_code = L2CAP_CONN_RSP;
3512 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3514 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3516 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3517 return;
3519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3520 l2cap_build_conf_req(chan, buf), buf);
3521 chan->num_conf_req++;
3524 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3526 int type, olen;
3527 unsigned long val;
3528 /* Use sane default values in case a misbehaving remote device
3529 * did not send an RFC or extended window size option.
3531 u16 txwin_ext = chan->ack_win;
3532 struct l2cap_conf_rfc rfc = {
3533 .mode = chan->mode,
3534 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3535 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3536 .max_pdu_size = cpu_to_le16(chan->imtu),
3537 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3540 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3542 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3543 return;
3545 while (len >= L2CAP_CONF_OPT_SIZE) {
3546 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3548 switch (type) {
3549 case L2CAP_CONF_RFC:
3550 if (olen == sizeof(rfc))
3551 memcpy(&rfc, (void *)val, olen);
3552 break;
3553 case L2CAP_CONF_EWS:
3554 txwin_ext = val;
3555 break;
3559 switch (rfc.mode) {
3560 case L2CAP_MODE_ERTM:
3561 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3562 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3563 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3564 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3565 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3566 else
3567 chan->ack_win = min_t(u16, chan->ack_win,
3568 rfc.txwin_size);
3569 break;
3570 case L2CAP_MODE_STREAMING:
3571 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3575 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3576 struct l2cap_cmd_hdr *cmd, u8 *data)
3578 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3580 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3581 return 0;
3583 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3584 cmd->ident == conn->info_ident) {
3585 cancel_delayed_work(&conn->info_timer);
3587 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3588 conn->info_ident = 0;
3590 l2cap_conn_start(conn);
3593 return 0;
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request): find a listening channel for the PSM, create a child
 * channel, run the security checks, and send the response.
 *
 * Locking: conn->chan_lock is taken before the parent socket lock and
 * both are dropped at 'response'; the no-listener path jumps straight
 * to 'sendresp' with neither held.
 *
 * Returns the newly created channel, or NULL if the request was
 * refused before a channel was allocated.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Keep the ACL alive while the channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace will accept/reject later */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still running */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not yet known; answer pending */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off feature-mask discovery if it has not started yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Immediately start configuration for accepted channels */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
/* Signalling handler for a Connection Request on a BR/EDR link.
 *
 * Notifies the management interface of the connected device the first
 * time (under hci_dev_lock), then delegates the actual channel setup
 * and response to l2cap_connect().  Always returns 0.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	hci_dev_lock(hdev);
	/* Report the device to mgmt only once per connection */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
/* Handle a Connection Response (or AMP Create Channel Response).
 *
 * Looks up the channel by our source CID when the peer echoed it, or
 * by the command identifier for a pure pending/refused answer.  On
 * success the channel moves to BT_CONFIG and a Configuration Request
 * is sent (unless one already was); on refusal it is deleted.
 *
 * Locking: conn->chan_lock protects the lookup, then the channel lock
 * is taken for the state change.
 *
 * Returns 0, or -EFAULT if no matching channel is found.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No source CID echoed: match on the request ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Refused: tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3814 static inline void set_default_fcs(struct l2cap_chan *chan)
3816 /* FCS is enabled only in ERTM or streaming mode, if one or both
3817 * sides request it.
3819 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3820 chan->fcs = L2CAP_FCS_NONE;
3821 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3822 chan->fcs = L2CAP_FCS_CRC16;
3825 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3826 u8 ident, u16 flags)
3828 struct l2cap_conn *conn = chan->conn;
3830 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3831 flags);
3833 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3834 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3836 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3837 l2cap_build_conf_rsp(chan, data,
3838 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configure Request. Options may arrive split over
 * several PDUs (continuation flag); they are accumulated in
 * chan->conf_req until the final fragment, then parsed and answered.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Peer's dcid is our scid; returns with the channel locked */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Configuration is only legal in these two states */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparseable options: tear the channel down */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not configured our side yet: send our own request once */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	 * Conf Rsp PENDING in the code above
	 */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP channel: respond once the logical link is up */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Configure Response for one of our requests.
 * Drives the remote (input) half of the configuration state machine.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: defer until logical link completes */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* Retry with the adjusted options */
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many rejected attempts: fall through and give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come; wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Disconnect Request: acknowledge it and tear down
 * the matching local channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid names our local (source) CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CIDs back from the peer's point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold a reference so the channel outlives l2cap_chan_del() until
	 * ops->close() has run, then drop it.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle a Disconnect Response to a request we sent: the peer has
 * acknowledged, so delete the channel with no error.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep the channel alive across del until ops->close() returns */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4141 static inline int l2cap_information_req(struct l2cap_conn *conn,
4142 struct l2cap_cmd_hdr *cmd, u8 *data)
4144 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4145 u16 type;
4147 type = __le16_to_cpu(req->type);
4149 BT_DBG("type 0x%4.4x", type);
4151 if (type == L2CAP_IT_FEAT_MASK) {
4152 u8 buf[8];
4153 u32 feat_mask = l2cap_feat_mask;
4154 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4155 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4156 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4157 if (!disable_ertm)
4158 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4159 | L2CAP_FEAT_FCS;
4160 if (enable_hs)
4161 feat_mask |= L2CAP_FEAT_EXT_FLOW
4162 | L2CAP_FEAT_EXT_WINDOW;
4164 put_unaligned_le32(feat_mask, rsp->data);
4165 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4166 buf);
4167 } else if (type == L2CAP_IT_FIXED_CHAN) {
4168 u8 buf[12];
4169 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4171 if (enable_hs)
4172 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4173 else
4174 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4176 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4177 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4178 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4179 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4180 buf);
4181 } else {
4182 struct l2cap_info_rsp rsp;
4183 rsp.type = cpu_to_le16(type);
4184 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4185 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4186 &rsp);
4189 return 0;
/* Handle an Information Response for our outstanding request; when the
 * information exchange completes, kick pending channels via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: treat the exchange as finished anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, follow up with a
		 * fixed-channel query before starting the connection.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
/* Handle an AMP Create Channel Request. Controller id 0 degrades to a
 * normal BR/EDR connect; otherwise the AMP controller id is validated
 * before creating the channel on it.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			/* No AMP physical link to the peer */
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used on the high-speed link */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4325 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4327 struct l2cap_move_chan_req req;
4328 u8 ident;
4330 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4332 ident = l2cap_get_ident(chan->conn);
4333 chan->ident = ident;
4335 req.icid = cpu_to_le16(chan->scid);
4336 req.dest_amp_id = dest_amp_id;
4338 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4339 &req);
4341 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4344 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4346 struct l2cap_move_chan_rsp rsp;
4348 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4350 rsp.icid = cpu_to_le16(chan->dcid);
4351 rsp.result = cpu_to_le16(result);
4353 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4354 sizeof(rsp), &rsp);
4357 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4359 struct l2cap_move_chan_cfm cfm;
4361 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4363 chan->ident = l2cap_get_ident(chan->conn);
4365 cfm.icid = cpu_to_le16(chan->scid);
4366 cfm.result = cpu_to_le16(result);
4368 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4369 sizeof(cfm), &cfm);
4371 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4374 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4376 struct l2cap_move_chan_cfm cfm;
4378 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4380 cfm.icid = cpu_to_le16(icid);
4381 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4383 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4384 sizeof(cfm), &cfm);
4387 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4388 u16 icid)
4390 struct l2cap_move_chan_cfm_rsp rsp;
4392 BT_DBG("icid 0x%4.4x", icid);
4394 rsp.icid = cpu_to_le16(icid);
4395 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its AMP logical link. */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
/* React to a failed logical-link setup, either during channel creation
 * or during a channel move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
/* Logical link came up for a channel being created on AMP: attach the
 * link, send the deferred config response and finish setup if the
 * input side is already configured.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* rsp is scratch space for building the response PDU */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
/* Logical link came up during a channel move: advance the move state
 * machine according to role and pending responses.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Wait until local receive side drains first */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
/* Call with chan locked */
/* Completion callback for logical-link setup: route to failure
 * handling, creation finish or move finish.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4515 void l2cap_move_start(struct l2cap_chan *chan)
4517 BT_DBG("chan %p", chan);
4519 if (chan->local_amp_id == HCI_BREDR_ID) {
4520 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4521 return;
4522 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4523 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4524 /* Placeholder - start physical link setup */
4525 } else {
4526 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4527 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4528 chan->move_id = 0;
4529 l2cap_move_setup(chan);
4530 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation once the AMP physical link result is known,
 * for both outgoing (BT_CONNECT) and incoming channels.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Enter config phase and send our first request */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
/* As move initiator: prepare the channel and send the Move Channel
 * Request towards the chosen remote controller.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
/* As move responder: answer based on logical-link availability.
 * NOTE(review): hchan is currently always NULL (placeholder below), so
 * this always replies NOT_ALLOWED until the amp lookup is wired up.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4621 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4623 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4624 u8 rsp_result;
4625 if (result == -EINVAL)
4626 rsp_result = L2CAP_MR_BAD_ID;
4627 else
4628 rsp_result = L2CAP_MR_NOT_ALLOWED;
4630 l2cap_send_move_chan_rsp(chan, rsp_result);
4633 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4634 chan->move_state = L2CAP_MOVE_STABLE;
4636 /* Restart data transmission */
4637 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical (AMP) link setup completed: continue channel creation or a
 * pending move depending on the channel state and move role.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		/* NOTE(review): this path drops the lock the caller took
		 * while the other paths return with it still held —
		 * confirm the caller's unlock expectations.
		 */
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
/* Handle a Move Channel Request from the peer: validate the channel,
 * the destination controller and move-collision rules, then respond.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* Returns with the channel locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown icid: reply directly, no channel state to touch */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle a SUCCESS/PEND Move Channel Response as the move initiator and
 * advance the move state machine.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns with the channel locked */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* No channel: still confirm (unconfirmed) per spec */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer asked for more time: re-arm with the extended timer */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
/* Handle a failed Move Channel Response: clean up the attempt and send
 * an UNCONFIRMED confirm (collisions demote us to responder instead).
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Returns with the channel locked */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: let the peer drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
4891 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4892 struct l2cap_cmd_hdr *cmd,
4893 u16 cmd_len, void *data)
4895 struct l2cap_move_chan_rsp *rsp = data;
4896 u16 icid, result;
4898 if (cmd_len != sizeof(*rsp))
4899 return -EPROTO;
4901 icid = le16_to_cpu(rsp->icid);
4902 result = le16_to_cpu(rsp->result);
4904 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4906 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4907 l2cap_move_continue(conn, icid, result);
4908 else
4909 l2cap_move_fail(conn, cmd->ident, icid, result);
4911 return 0;
/* Handle a Move Channel Confirm: commit or roll back the controller
 * switch and always acknowledge with a confirm-response.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns with the channel locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit: the move target becomes our controller */
			chan->local_amp_id = chan->move_id;
			if (!chan->local_amp_id)
				/* Moved back to BR/EDR: AMP link unused */
				__release_logical_link(chan);
		} else {
			/* Roll back to the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle the response to our Move Channel Confirm: the move handshake
 * is finished, commit the new controller id.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns with the channel locked */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: the AMP logical link is no longer used */
		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
4991 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4992 u16 to_multiplier)
4994 u16 max_latency;
4996 if (min > max || min < 6 || max > 3200)
4997 return -EINVAL;
4999 if (to_multiplier < 10 || to_multiplier > 3200)
5000 return -EINVAL;
5002 if (max >= to_multiplier * 8)
5003 return -EINVAL;
5005 max_latency = (to_multiplier * 8 / max) - 1;
5006 if (latency > 499 || latency > max_latency)
5007 return -EINVAL;
5009 return 0;
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the parameters, respond, and apply them via HCI on success.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may act on this request */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the accepted parameters on the LE link */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
/* Dispatch one BR/EDR signaling command to its handler. Returns 0 or a
 * negative error for malformed/unknown PDUs.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_connect_create_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back to the sender */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5135 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5136 struct l2cap_cmd_hdr *cmd, u8 *data)
5138 switch (cmd->code) {
5139 case L2CAP_COMMAND_REJ:
5140 return 0;
5142 case L2CAP_CONN_PARAM_UPDATE_REQ:
5143 return l2cap_conn_param_update_req(conn, cmd, data);
5145 case L2CAP_CONN_PARAM_UPDATE_RSP:
5146 return 0;
5148 default:
5149 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5150 return -EINVAL;
/* Process an skb received on the signaling channel: iterate over the
 * packed sequence of commands it carries, dispatching each to the LE
 * or BR/EDR handler.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A claimed length past the buffer, or ident 0 (reserved),
		 * means the packet is corrupted: stop parsing entirely.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the same skb */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5203 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5205 u16 our_fcs, rcv_fcs;
5206 int hdr_size;
5208 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5209 hdr_size = L2CAP_EXT_HDR_SIZE;
5210 else
5211 hdr_size = L2CAP_ENH_HDR_SIZE;
5213 if (chan->fcs == L2CAP_FCS_CRC16) {
5214 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5215 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5216 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5218 if (our_fcs != rcv_fcs)
5219 return -EBADMSG;
5221 return 0;
/* Answer a poll (P=1) from the peer with an F=1 frame: an RNR when we
 * are locally busy, otherwise pending I-frames, falling back to an RR
 * if no I-frame carried the F bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just came out of busy: restart retransmissions */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
/* Append @new_frag to @skb's frag_list and update the length/truesize
 * accounting on the head skb.  @last_frag tracks the list tail so the
 * append stays O(1) across calls.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
/* Reassemble segmented SDUs from incoming I-frames according to the
 * frame's SAR (segmentation and reassembly) bits, delivering complete
 * SDUs via chan->ops->recv().
 *
 * Ownership: on success the skb (or the assembled chan->sdu) is handed
 * off; @skb is set to NULL once queued so the error path below only
 * frees what this function still owns.  Returns 0 or a negative errno;
 * any error discards the partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU while one is in progress is a protocol error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First segment carries the total SDU length up front */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* Start segment may not already satisfy the full length */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start segment is a protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Running over the announced length is a protocol error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match the announced SDU length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard both this frame (if still owned) and any
		 * partially assembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
/* Re-segmenting the transmit queue after an MTU change (e.g. an AMP
 * channel move) is not implemented yet; report success unconditionally.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
/* Feed a local-busy transition into the ERTM TX state machine.
 * @busy: non-zero when the channel has just become busy, zero when the
 *        busy condition cleared.  No-op outside ERTM mode.
 */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}
/* Drain the SREJ queue of frames that have become in-sequence, feeding
 * them to reassembly.  Leaves SREJ_SENT state once the queue is empty.
 * Returns the first reassembly error, or 0.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled: back to normal receive operation */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
/* Handle a received SREJ S-frame: retransmit the single I-frame the
 * peer asked for, enforcing the retry limit, and track the F-bit
 * bookkeeping so a later F=1 response isn't retransmitted twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for the next (unsent) sequence number is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			/* Remember which seq we already retransmitted so a
			 * following F=1 SREJ for it can be suppressed.
			 */
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F=1 SREJ matches
			 * the one already answered while polling.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, enforcing the per-frame retry limit.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* REJ of the next (unsent) sequence number is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F=1 wasn't already answered */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Classify an incoming I-frame's txseq relative to the receive window
 * and any outstanding SREJ state.  Returns one of the L2CAP_TXSEQ_*
 * verdicts that drive the RX state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5591 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5592 struct l2cap_ctrl *control,
5593 struct sk_buff *skb, u8 event)
5595 int err = 0;
5596 bool skb_in_use = 0;
5598 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5599 event);
5601 switch (event) {
5602 case L2CAP_EV_RECV_IFRAME:
5603 switch (l2cap_classify_txseq(chan, control->txseq)) {
5604 case L2CAP_TXSEQ_EXPECTED:
5605 l2cap_pass_to_tx(chan, control);
5607 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5608 BT_DBG("Busy, discarding expected seq %d",
5609 control->txseq);
5610 break;
5613 chan->expected_tx_seq = __next_seq(chan,
5614 control->txseq);
5616 chan->buffer_seq = chan->expected_tx_seq;
5617 skb_in_use = 1;
5619 err = l2cap_reassemble_sdu(chan, skb, control);
5620 if (err)
5621 break;
5623 if (control->final) {
5624 if (!test_and_clear_bit(CONN_REJ_ACT,
5625 &chan->conn_state)) {
5626 control->final = 0;
5627 l2cap_retransmit_all(chan, control);
5628 l2cap_ertm_send(chan);
5632 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5633 l2cap_send_ack(chan);
5634 break;
5635 case L2CAP_TXSEQ_UNEXPECTED:
5636 l2cap_pass_to_tx(chan, control);
5638 /* Can't issue SREJ frames in the local busy state.
5639 * Drop this frame, it will be seen as missing
5640 * when local busy is exited.
5642 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5643 BT_DBG("Busy, discarding unexpected seq %d",
5644 control->txseq);
5645 break;
5648 /* There was a gap in the sequence, so an SREJ
5649 * must be sent for each missing frame. The
5650 * current frame is stored for later use.
5652 skb_queue_tail(&chan->srej_q, skb);
5653 skb_in_use = 1;
5654 BT_DBG("Queued %p (queue len %d)", skb,
5655 skb_queue_len(&chan->srej_q));
5657 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5658 l2cap_seq_list_clear(&chan->srej_list);
5659 l2cap_send_srej(chan, control->txseq);
5661 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5662 break;
5663 case L2CAP_TXSEQ_DUPLICATE:
5664 l2cap_pass_to_tx(chan, control);
5665 break;
5666 case L2CAP_TXSEQ_INVALID_IGNORE:
5667 break;
5668 case L2CAP_TXSEQ_INVALID:
5669 default:
5670 l2cap_send_disconn_req(chan, ECONNRESET);
5671 break;
5673 break;
5674 case L2CAP_EV_RECV_RR:
5675 l2cap_pass_to_tx(chan, control);
5676 if (control->final) {
5677 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5679 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5680 !__chan_is_moving(chan)) {
5681 control->final = 0;
5682 l2cap_retransmit_all(chan, control);
5685 l2cap_ertm_send(chan);
5686 } else if (control->poll) {
5687 l2cap_send_i_or_rr_or_rnr(chan);
5688 } else {
5689 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5690 &chan->conn_state) &&
5691 chan->unacked_frames)
5692 __set_retrans_timer(chan);
5694 l2cap_ertm_send(chan);
5696 break;
5697 case L2CAP_EV_RECV_RNR:
5698 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5699 l2cap_pass_to_tx(chan, control);
5700 if (control && control->poll) {
5701 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5702 l2cap_send_rr_or_rnr(chan, 0);
5704 __clear_retrans_timer(chan);
5705 l2cap_seq_list_clear(&chan->retrans_list);
5706 break;
5707 case L2CAP_EV_RECV_REJ:
5708 l2cap_handle_rej(chan, control);
5709 break;
5710 case L2CAP_EV_RECV_SREJ:
5711 l2cap_handle_srej(chan, control);
5712 break;
5713 default:
5714 break;
5717 if (skb && !skb_in_use) {
5718 BT_DBG("Freeing %p", skb);
5719 kfree_skb(skb);
5722 return err;
5725 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5726 struct l2cap_ctrl *control,
5727 struct sk_buff *skb, u8 event)
5729 int err = 0;
5730 u16 txseq = control->txseq;
5731 bool skb_in_use = 0;
5733 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5734 event);
5736 switch (event) {
5737 case L2CAP_EV_RECV_IFRAME:
5738 switch (l2cap_classify_txseq(chan, txseq)) {
5739 case L2CAP_TXSEQ_EXPECTED:
5740 /* Keep frame for reassembly later */
5741 l2cap_pass_to_tx(chan, control);
5742 skb_queue_tail(&chan->srej_q, skb);
5743 skb_in_use = 1;
5744 BT_DBG("Queued %p (queue len %d)", skb,
5745 skb_queue_len(&chan->srej_q));
5747 chan->expected_tx_seq = __next_seq(chan, txseq);
5748 break;
5749 case L2CAP_TXSEQ_EXPECTED_SREJ:
5750 l2cap_seq_list_pop(&chan->srej_list);
5752 l2cap_pass_to_tx(chan, control);
5753 skb_queue_tail(&chan->srej_q, skb);
5754 skb_in_use = 1;
5755 BT_DBG("Queued %p (queue len %d)", skb,
5756 skb_queue_len(&chan->srej_q));
5758 err = l2cap_rx_queued_iframes(chan);
5759 if (err)
5760 break;
5762 break;
5763 case L2CAP_TXSEQ_UNEXPECTED:
5764 /* Got a frame that can't be reassembled yet.
5765 * Save it for later, and send SREJs to cover
5766 * the missing frames.
5768 skb_queue_tail(&chan->srej_q, skb);
5769 skb_in_use = 1;
5770 BT_DBG("Queued %p (queue len %d)", skb,
5771 skb_queue_len(&chan->srej_q));
5773 l2cap_pass_to_tx(chan, control);
5774 l2cap_send_srej(chan, control->txseq);
5775 break;
5776 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5777 /* This frame was requested with an SREJ, but
5778 * some expected retransmitted frames are
5779 * missing. Request retransmission of missing
5780 * SREJ'd frames.
5782 skb_queue_tail(&chan->srej_q, skb);
5783 skb_in_use = 1;
5784 BT_DBG("Queued %p (queue len %d)", skb,
5785 skb_queue_len(&chan->srej_q));
5787 l2cap_pass_to_tx(chan, control);
5788 l2cap_send_srej_list(chan, control->txseq);
5789 break;
5790 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5791 /* We've already queued this frame. Drop this copy. */
5792 l2cap_pass_to_tx(chan, control);
5793 break;
5794 case L2CAP_TXSEQ_DUPLICATE:
5795 /* Expecting a later sequence number, so this frame
5796 * was already received. Ignore it completely.
5798 break;
5799 case L2CAP_TXSEQ_INVALID_IGNORE:
5800 break;
5801 case L2CAP_TXSEQ_INVALID:
5802 default:
5803 l2cap_send_disconn_req(chan, ECONNRESET);
5804 break;
5806 break;
5807 case L2CAP_EV_RECV_RR:
5808 l2cap_pass_to_tx(chan, control);
5809 if (control->final) {
5810 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5812 if (!test_and_clear_bit(CONN_REJ_ACT,
5813 &chan->conn_state)) {
5814 control->final = 0;
5815 l2cap_retransmit_all(chan, control);
5818 l2cap_ertm_send(chan);
5819 } else if (control->poll) {
5820 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5821 &chan->conn_state) &&
5822 chan->unacked_frames) {
5823 __set_retrans_timer(chan);
5826 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5827 l2cap_send_srej_tail(chan);
5828 } else {
5829 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5830 &chan->conn_state) &&
5831 chan->unacked_frames)
5832 __set_retrans_timer(chan);
5834 l2cap_send_ack(chan);
5836 break;
5837 case L2CAP_EV_RECV_RNR:
5838 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5839 l2cap_pass_to_tx(chan, control);
5840 if (control->poll) {
5841 l2cap_send_srej_tail(chan);
5842 } else {
5843 struct l2cap_ctrl rr_control;
5844 memset(&rr_control, 0, sizeof(rr_control));
5845 rr_control.sframe = 1;
5846 rr_control.super = L2CAP_SUPER_RR;
5847 rr_control.reqseq = chan->buffer_seq;
5848 l2cap_send_sframe(chan, &rr_control);
5851 break;
5852 case L2CAP_EV_RECV_REJ:
5853 l2cap_handle_rej(chan, control);
5854 break;
5855 case L2CAP_EV_RECV_SREJ:
5856 l2cap_handle_srej(chan, control);
5857 break;
5860 if (skb && !skb_in_use) {
5861 BT_DBG("Freeing %p", skb);
5862 kfree_skb(skb);
5865 return err;
/* Finalize an AMP channel move: return to normal receive state, adopt
 * the MTU of the link the channel now runs on, and resegment queued
 * transmit data for it.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	/* High-speed (AMP) links use the block MTU, BR/EDR the ACL MTU */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
/* WAIT_P state (channel move, waiting for the peer's poll): only a
 * P=1 frame is acceptable.  Resynchronize the transmit side to the
 * peer's reqseq, finish the move, then answer the poll.  S-frame
 * events are replayed through the RECV-state handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame cannot carry the poll we were waiting for */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
/* WAIT_F state (channel move, waiting for the peer's final bit): only
 * an F=1 frame is acceptable.  Resynchronize the transmit side to the
 * peer's reqseq, adopt the new link's MTU, resegment, then process the
 * frame through the RECV-state handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* High-speed (AMP) links use the block MTU, BR/EDR the ACL MTU */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
5958 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5960 /* Make sure reqseq is for a packet that has been sent but not acked */
5961 u16 unacked;
5963 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5964 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry: validate the frame's reqseq, then
 * route the event to the handler for the channel's current RX state.
 * An out-of-range reqseq is a protocol violation and disconnects.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
/* Streaming-mode receive: only the exactly-expected txseq is
 * reassembled; anything else discards both the frame and any partial
 * SDU (streaming mode has no retransmission).  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* A gap or duplicate: drop the partial SDU and the frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to whatever the peer actually sent */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
/* Common entry for ERTM/streaming data frames: unpack the control
 * field, verify FCS, validate lengths and F/P bits, then hand the
 * frame to the appropriate receive path.  Consumes the skb; always
 * returns 0 (protocol errors disconnect the channel instead).
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/* We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU-length prefix and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (0..3) to the RX event code */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Deliver a data frame to the channel bound to @cid, creating an A2MP
 * channel on demand for L2CAP_CID_A2MP.  Consumes the skb on every
 * path; recv() takes ownership when it returns 0.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
/* Deliver a connectionless (group) frame to the channel listening on
 * @psm.  Consumes the skb unless recv() takes it.
 *
 * NOTE(review): l2cap_global_chan_by_psm appears to return a channel
 * reference; no matching put is visible here — presumably handled by
 * the lookup's contract, but worth confirming against its definition.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
/* Deliver an LE attribute-protocol frame to the channel bound to the
 * fixed @cid.  Consumes the skb unless recv() takes it.
 *
 * NOTE(review): as with l2cap_conless_channel, the lookup appears to
 * return a channel reference with no visible put — confirm against
 * l2cap_global_chan_by_scid's contract.
 */
static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
			      struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
/* Demultiplex a reassembled L2CAP frame by channel id: signaling,
 * connectionless, LE ATT, SMP, or a regular data channel.  The length
 * in the basic header must match the skb exactly.  Consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* Header is still readable after skb_pull; only the offset moves */
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM after the header */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* An SMP failure tears down the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6281 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection from @bdaddr is pending.
 * Scan listening channels and build the link-mode mask (accept/master)
 * to return to the HCI core.  Channels bound to this adapter's own
 * address ("exact") take precedence over wildcard (BDADDR_ANY) binds.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
/* HCI callback: an outgoing/incoming link setup completed.  On success
 * attach (or create) the L2CAP connection state and kick it into the
 * ready path; on failure tear down any existing L2CAP state.
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else {
		l2cap_conn_del(hcon, bt_to_errno(status));
	}
}
/* HCI callback: report the reason code to use when disconnecting this
 * link.  Falls back to "remote user terminated" when no L2CAP state
 * exists for the connection.
 */
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}
/* HCI callback: the link went down — tear down all L2CAP state on it,
 * translating the HCI reason code to an errno for the channels.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer (medium security) or closes the
 * channel outright (high security); regaining it cancels the timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}
/* HCI callback: authentication/encryption state changed on the link.
 * Walk every channel on the connection and advance it according to its
 * state: wake suspended sockets, start pending connections, or answer
 * deferred incoming connect requests.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Encryption on LE means pairing succeeded: push keys out */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channels with a connect request in flight are handled by
		 * the connect response path instead.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was held for security: answer it
			 * now that the security procedure finished.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Entry point for ACL data from HCI: reassemble L2CAP PDUs that may arrive
 * split across several ACL fragments.
 *
 * @flags selects start-of-frame (ACL_START*, ACL_COMPLETE) vs continuation
 * (ACL_CONT). Partial frames are accumulated in conn->rx_skb with
 * conn->rx_len tracking the bytes still expected. Any protocol violation
 * (short header, over-long frame, unexpected start/continuation) marks the
 * connection unreliable and drops the fragment.
 *
 * Always returns 0; consumes @skb on every path (either handed to
 * l2cap_recv_frame, copied and freed, or dropped).
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means the
		 * previous frame was truncated: discard the partial data. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Remember how many bytes are still outstanding. */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the expected frame length. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
6580 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6582 struct l2cap_chan *c;
6584 read_lock(&chan_list_lock);
6586 list_for_each_entry(c, &chan_list, global_l) {
6587 struct sock *sk = c->sk;
6589 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6590 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6591 c->state, __le16_to_cpu(c->psm),
6592 c->scid, c->dcid, c->imtu, c->omtu,
6593 c->sec_level, c->mode);
6596 read_unlock(&chan_list_lock);
6598 return 0;
/* debugfs open hook: single_open() wires l2cap_debugfs_show into the
 * seq_file machinery for a one-shot dump. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the "l2cap" debugfs entry; read side is fully
 * delegated to the seq_file helpers. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
6615 int __init l2cap_init(void)
6617 int err;
6619 err = l2cap_init_sockets();
6620 if (err < 0)
6621 return err;
6623 if (bt_debugfs) {
6624 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6625 NULL, &l2cap_debugfs_fops);
6626 if (!l2cap_debugfs)
6627 BT_ERR("Failed to create L2CAP debug file");
6630 return 0;
/* Module teardown: remove the debugfs entry (a NULL dentry is a no-op)
 * and unregister the L2CAP socket interface. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
/* Expose the ERTM kill-switch as a writable module parameter
 * (/sys/module/.../parameters/disable_ertm, mode 0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");