/* net/bluetooth/l2cap_core.c — Bluetooth L2CAP core implementation */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
/* Module parameter: when true, ERTM and streaming modes are not advertised. */
43 bool disable_ertm;
/* Local feature mask advertised in L2CAP information responses. */
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; only the signalling channel is claimed by default. */
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all L2CAP channels, protected by chan_list_lock. */
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in this file. */
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
/* ERTM transmit state machine entry point (defined later). */
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
66 struct l2cap_chan *c;
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
72 return NULL;
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
78 struct l2cap_chan *c;
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
84 return NULL;
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
92 struct l2cap_chan *c;
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
100 return c;
103 /* Find channel with given DCID.
104 * Returns locked channel.
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
117 return c;
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
123 struct l2cap_chan *c;
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
129 return NULL;
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
143 return c;
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
154 return NULL;
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
159 int err;
161 write_lock(&chan_list_lock);
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 err = -EADDRINUSE;
165 goto done;
168 if (psm) {
169 chan->psm = psm;
170 chan->sport = psm;
171 err = 0;
172 } else {
173 u16 p;
175 err = -EINVAL;
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
180 err = 0;
181 break;
185 done:
186 write_unlock(&chan_list_lock);
187 return err;
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
192 write_lock(&chan_list_lock);
194 chan->scid = scid;
196 write_unlock(&chan_list_lock);
198 return 0;
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203 u16 cid = L2CAP_CID_DYN_START;
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
210 return 0;
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
218 chan->state = state;
219 chan->ops->state_change(chan, state);
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
224 struct sock *sk = chan->sk;
226 lock_sock(sk);
227 __l2cap_state_change(chan, state);
228 release_sock(sk);
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233 struct sock *sk = chan->sk;
235 sk->sk_err = err;
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
240 struct sock *sk = chan->sk;
242 lock_sock(sk);
243 __l2cap_chan_set_err(chan, err);
244 release_sock(sk);
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
268 struct sk_buff *skb;
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
275 return NULL;
278 /* ---- L2CAP sequence number lists ---- */
/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
309 return 0;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 u16 seq)
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
340 } else {
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
355 return seq;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
366 u16 i;
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Channel timer expiry: close the channel with a reason derived from its
 * current state, then drop the timer's channel reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	switch (chan->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		reason = ECONNREFUSED;
		break;
	case BT_CONNECT:
		/* A security-elevated connect that never completed is
		 * treated as refused rather than timed out.
		 */
		if (chan->sec_level != BT_SECURITY_SDP) {
			reason = ECONNREFUSED;
			break;
		}
		/* fall through */
	default:
		reason = ETIMEDOUT;
		break;
	}

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
451 return chan;
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
464 kfree(chan);
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
471 kref_get(&c->kref);
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
500 chan->conn = conn;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
505 /* LE connection */
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 chan->scid = L2CAP_CID_LE_DATA;
508 chan->dcid = L2CAP_CID_LE_DATA;
509 } else {
510 /* Alloc CID for connection-oriented socket */
511 chan->scid = l2cap_alloc_cid(conn);
512 chan->omtu = L2CAP_DEFAULT_MTU;
514 break;
516 case L2CAP_CHAN_CONN_LESS:
517 /* Connectionless socket */
518 chan->scid = L2CAP_CID_CONN_LESS;
519 chan->dcid = L2CAP_CID_CONN_LESS;
520 chan->omtu = L2CAP_DEFAULT_MTU;
521 break;
523 case L2CAP_CHAN_CONN_FIX_A2MP:
524 chan->scid = L2CAP_CID_A2MP;
525 chan->dcid = L2CAP_CID_A2MP;
526 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
528 break;
530 default:
531 /* Raw socket can send/recv signalling messages only */
532 chan->scid = L2CAP_CID_SIGNALING;
533 chan->dcid = L2CAP_CID_SIGNALING;
534 chan->omtu = L2CAP_DEFAULT_MTU;
537 chan->local_id = L2CAP_BESTEFFORT_ID;
538 chan->local_stype = L2CAP_SERV_BESTEFFORT;
539 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
540 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
541 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
542 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
544 l2cap_chan_hold(chan);
546 list_add(&chan->list, &conn->chan_l);
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
551 mutex_lock(&conn->chan_lock);
552 __l2cap_chan_add(conn, chan);
553 mutex_unlock(&conn->chan_lock);
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
558 struct l2cap_conn *conn = chan->conn;
560 __clear_chan_timer(chan);
562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
564 if (conn) {
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 /* Delete from channel list */
567 list_del(&chan->list);
569 l2cap_chan_put(chan);
571 chan->conn = NULL;
573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 hci_conn_put(conn->hcon);
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
580 if (chan->hs_hchan) {
581 struct hci_chan *hs_hchan = chan->hs_hchan;
583 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 amp_disconnect_logical_link(hs_hchan);
587 chan->ops->teardown(chan, err);
589 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
590 return;
592 switch(chan->mode) {
593 case L2CAP_MODE_BASIC:
594 break;
596 case L2CAP_MODE_ERTM:
597 __clear_retrans_timer(chan);
598 __clear_monitor_timer(chan);
599 __clear_ack_timer(chan);
601 skb_queue_purge(&chan->srej_q);
603 l2cap_seq_list_free(&chan->srej_list);
604 l2cap_seq_list_free(&chan->retrans_list);
606 /* fall through */
608 case L2CAP_MODE_STREAMING:
609 skb_queue_purge(&chan->tx_q);
610 break;
613 return;
616 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
618 struct l2cap_conn *conn = chan->conn;
619 struct sock *sk = chan->sk;
621 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
622 sk);
624 switch (chan->state) {
625 case BT_LISTEN:
626 chan->ops->teardown(chan, 0);
627 break;
629 case BT_CONNECTED:
630 case BT_CONFIG:
631 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
632 conn->hcon->type == ACL_LINK) {
633 __set_chan_timer(chan, sk->sk_sndtimeo);
634 l2cap_send_disconn_req(chan, reason);
635 } else
636 l2cap_chan_del(chan, reason);
637 break;
639 case BT_CONNECT2:
640 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
641 conn->hcon->type == ACL_LINK) {
642 struct l2cap_conn_rsp rsp;
643 __u16 result;
645 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
646 result = L2CAP_CR_SEC_BLOCK;
647 else
648 result = L2CAP_CR_BAD_PSM;
649 l2cap_state_change(chan, BT_DISCONN);
651 rsp.scid = cpu_to_le16(chan->dcid);
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.result = cpu_to_le16(result);
654 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
655 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
656 sizeof(rsp), &rsp);
659 l2cap_chan_del(chan, reason);
660 break;
662 case BT_CONNECT:
663 case BT_DISCONN:
664 l2cap_chan_del(chan, reason);
665 break;
667 default:
668 chan->ops->teardown(chan, 0);
669 break;
673 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
675 if (chan->chan_type == L2CAP_CHAN_RAW) {
676 switch (chan->sec_level) {
677 case BT_SECURITY_HIGH:
678 return HCI_AT_DEDICATED_BONDING_MITM;
679 case BT_SECURITY_MEDIUM:
680 return HCI_AT_DEDICATED_BONDING;
681 default:
682 return HCI_AT_NO_BONDING;
684 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
685 if (chan->sec_level == BT_SECURITY_LOW)
686 chan->sec_level = BT_SECURITY_SDP;
688 if (chan->sec_level == BT_SECURITY_HIGH)
689 return HCI_AT_NO_BONDING_MITM;
690 else
691 return HCI_AT_NO_BONDING;
692 } else {
693 switch (chan->sec_level) {
694 case BT_SECURITY_HIGH:
695 return HCI_AT_GENERAL_BONDING_MITM;
696 case BT_SECURITY_MEDIUM:
697 return HCI_AT_GENERAL_BONDING;
698 default:
699 return HCI_AT_NO_BONDING;
704 /* Service level security */
705 int l2cap_chan_check_security(struct l2cap_chan *chan)
707 struct l2cap_conn *conn = chan->conn;
708 __u8 auth_type;
710 auth_type = l2cap_get_auth_type(chan);
712 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
715 static u8 l2cap_get_ident(struct l2cap_conn *conn)
717 u8 id;
719 /* Get next available identificator.
720 * 1 - 128 are used by kernel.
721 * 129 - 199 are reserved.
722 * 200 - 254 are used by utilities like l2ping, etc.
725 spin_lock(&conn->lock);
727 if (++conn->tx_ident > 128)
728 conn->tx_ident = 1;
730 id = conn->tx_ident;
732 spin_unlock(&conn->lock);
734 return id;
737 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
738 void *data)
740 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
741 u8 flags;
743 BT_DBG("code 0x%2.2x", code);
745 if (!skb)
746 return;
748 if (lmp_no_flush_capable(conn->hcon->hdev))
749 flags = ACL_START_NO_FLUSH;
750 else
751 flags = ACL_START;
753 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
754 skb->priority = HCI_PRIO_MAX;
756 hci_send_acl(conn->hchan, skb, flags);
759 static bool __chan_is_moving(struct l2cap_chan *chan)
761 return chan->move_state != L2CAP_MOVE_STABLE &&
762 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
765 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
767 struct hci_conn *hcon = chan->conn->hcon;
768 u16 flags;
770 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
771 skb->priority);
773 if (chan->hs_hcon && !__chan_is_moving(chan)) {
774 if (chan->hs_hchan)
775 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
776 else
777 kfree_skb(skb);
779 return;
782 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
783 lmp_no_flush_capable(hcon->hdev))
784 flags = ACL_START_NO_FLUSH;
785 else
786 flags = ACL_START;
788 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
789 hci_send_acl(chan->conn->hchan, skb, flags);
792 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
794 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
795 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
797 if (enh & L2CAP_CTRL_FRAME_TYPE) {
798 /* S-Frame */
799 control->sframe = 1;
800 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
801 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
803 control->sar = 0;
804 control->txseq = 0;
805 } else {
806 /* I-Frame */
807 control->sframe = 0;
808 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
811 control->poll = 0;
812 control->super = 0;
816 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
818 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
821 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
822 /* S-Frame */
823 control->sframe = 1;
824 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
825 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
827 control->sar = 0;
828 control->txseq = 0;
829 } else {
830 /* I-Frame */
831 control->sframe = 0;
832 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
833 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
835 control->poll = 0;
836 control->super = 0;
840 static inline void __unpack_control(struct l2cap_chan *chan,
841 struct sk_buff *skb)
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
844 __unpack_extended_control(get_unaligned_le32(skb->data),
845 &bt_cb(skb)->control);
846 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
847 } else {
848 __unpack_enhanced_control(get_unaligned_le16(skb->data),
849 &bt_cb(skb)->control);
850 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
854 static u32 __pack_extended_control(struct l2cap_ctrl *control)
856 u32 packed;
858 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
859 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
861 if (control->sframe) {
862 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
863 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
864 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
865 } else {
866 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
867 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
870 return packed;
873 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
875 u16 packed;
877 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
878 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
880 if (control->sframe) {
881 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
882 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
883 packed |= L2CAP_CTRL_FRAME_TYPE;
884 } else {
885 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
886 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
889 return packed;
892 static inline void __pack_control(struct l2cap_chan *chan,
893 struct l2cap_ctrl *control,
894 struct sk_buff *skb)
896 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
897 put_unaligned_le32(__pack_extended_control(control),
898 skb->data + L2CAP_HDR_SIZE);
899 } else {
900 put_unaligned_le16(__pack_enhanced_control(control),
901 skb->data + L2CAP_HDR_SIZE);
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
907 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 return L2CAP_EXT_HDR_SIZE;
909 else
910 return L2CAP_ENH_HDR_SIZE;
913 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
914 u32 control)
916 struct sk_buff *skb;
917 struct l2cap_hdr *lh;
918 int hlen = __ertm_hdr_size(chan);
920 if (chan->fcs == L2CAP_FCS_CRC16)
921 hlen += L2CAP_FCS_SIZE;
923 skb = bt_skb_alloc(hlen, GFP_KERNEL);
925 if (!skb)
926 return ERR_PTR(-ENOMEM);
928 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
929 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
930 lh->cid = cpu_to_le16(chan->dcid);
932 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
933 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
934 else
935 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
937 if (chan->fcs == L2CAP_FCS_CRC16) {
938 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
939 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
942 skb->priority = HCI_PRIO_MAX;
943 return skb;
946 static void l2cap_send_sframe(struct l2cap_chan *chan,
947 struct l2cap_ctrl *control)
949 struct sk_buff *skb;
950 u32 control_field;
952 BT_DBG("chan %p, control %p", chan, control);
954 if (!control->sframe)
955 return;
957 if (__chan_is_moving(chan))
958 return;
960 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
961 !control->poll)
962 control->final = 1;
964 if (control->super == L2CAP_SUPER_RR)
965 clear_bit(CONN_RNR_SENT, &chan->conn_state);
966 else if (control->super == L2CAP_SUPER_RNR)
967 set_bit(CONN_RNR_SENT, &chan->conn_state);
969 if (control->super != L2CAP_SUPER_SREJ) {
970 chan->last_acked_seq = control->reqseq;
971 __clear_ack_timer(chan);
974 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
975 control->final, control->poll, control->super);
977 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
978 control_field = __pack_extended_control(control);
979 else
980 control_field = __pack_enhanced_control(control);
982 skb = l2cap_create_sframe_pdu(chan, control_field);
983 if (!IS_ERR(skb))
984 l2cap_do_send(chan, skb);
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
989 struct l2cap_ctrl control;
991 BT_DBG("chan %p, poll %d", chan, poll);
993 memset(&control, 0, sizeof(control));
994 control.sframe = 1;
995 control.poll = poll;
997 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
998 control.super = L2CAP_SUPER_RNR;
999 else
1000 control.super = L2CAP_SUPER_RR;
1002 control.reqseq = chan->buffer_seq;
1003 l2cap_send_sframe(chan, &control);
1006 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1008 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1011 static bool __amp_capable(struct l2cap_chan *chan)
1013 struct l2cap_conn *conn = chan->conn;
1015 if (enable_hs &&
1016 hci_amp_capable() &&
1017 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 return true;
1020 else
1021 return false;
1024 static bool l2cap_check_efs(struct l2cap_chan *chan)
1026 /* Check EFS parameters */
1027 return true;
1030 void l2cap_send_conn_req(struct l2cap_chan *chan)
1032 struct l2cap_conn *conn = chan->conn;
1033 struct l2cap_conn_req req;
1035 req.scid = cpu_to_le16(chan->scid);
1036 req.psm = chan->psm;
1038 chan->ident = l2cap_get_ident(conn);
1040 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1042 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1045 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1047 struct l2cap_create_chan_req req;
1048 req.scid = cpu_to_le16(chan->scid);
1049 req.psm = chan->psm;
1050 req.amp_id = amp_id;
1052 chan->ident = l2cap_get_ident(chan->conn);
1054 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1055 sizeof(req), &req);
1058 static void l2cap_move_setup(struct l2cap_chan *chan)
1060 struct sk_buff *skb;
1062 BT_DBG("chan %p", chan);
1064 if (chan->mode != L2CAP_MODE_ERTM)
1065 return;
1067 __clear_retrans_timer(chan);
1068 __clear_monitor_timer(chan);
1069 __clear_ack_timer(chan);
1071 chan->retry_count = 0;
1072 skb_queue_walk(&chan->tx_q, skb) {
1073 if (bt_cb(skb)->control.retries)
1074 bt_cb(skb)->control.retries = 1;
1075 else
1076 break;
1079 chan->expected_tx_seq = chan->buffer_seq;
1081 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1082 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1083 l2cap_seq_list_clear(&chan->retrans_list);
1084 l2cap_seq_list_clear(&chan->srej_list);
1085 skb_queue_purge(&chan->srej_q);
1087 chan->tx_state = L2CAP_TX_STATE_XMIT;
1088 chan->rx_state = L2CAP_RX_STATE_MOVE;
1090 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1093 static void l2cap_move_done(struct l2cap_chan *chan)
1095 u8 move_role = chan->move_role;
1096 BT_DBG("chan %p", chan);
1098 chan->move_state = L2CAP_MOVE_STABLE;
1099 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1101 if (chan->mode != L2CAP_MODE_ERTM)
1102 return;
1104 switch (move_role) {
1105 case L2CAP_MOVE_ROLE_INITIATOR:
1106 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1107 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1108 break;
1109 case L2CAP_MOVE_ROLE_RESPONDER:
1110 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1111 break;
1115 static void l2cap_chan_ready(struct l2cap_chan *chan)
1117 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 chan->conf_state = 0;
1119 __clear_chan_timer(chan);
1121 chan->state = BT_CONNECTED;
1123 chan->ops->ready(chan);
/* Begin channel establishment: discover AMP controllers first when the
 * channel prefers AMP, otherwise send a plain connection request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1136 static void l2cap_do_start(struct l2cap_chan *chan)
1138 struct l2cap_conn *conn = chan->conn;
1140 if (conn->hcon->type == LE_LINK) {
1141 l2cap_chan_ready(chan);
1142 return;
1145 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1146 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1147 return;
1149 if (l2cap_chan_check_security(chan) &&
1150 __l2cap_no_conn_pending(chan)) {
1151 l2cap_start_connection(chan);
1153 } else {
1154 struct l2cap_info_req req;
1155 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1157 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1158 conn->info_ident = l2cap_get_ident(conn);
1160 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1162 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1163 sizeof(req), &req);
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1169 u32 local_feat_mask = l2cap_feat_mask;
1170 if (!disable_ertm)
1171 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1173 switch (mode) {
1174 case L2CAP_MODE_ERTM:
1175 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 case L2CAP_MODE_STREAMING:
1177 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 default:
1179 return 0x00;
1183 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1185 struct sock *sk = chan->sk;
1186 struct l2cap_conn *conn = chan->conn;
1187 struct l2cap_disconn_req req;
1189 if (!conn)
1190 return;
1192 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1193 __clear_retrans_timer(chan);
1194 __clear_monitor_timer(chan);
1195 __clear_ack_timer(chan);
1198 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1199 l2cap_state_change(chan, BT_DISCONN);
1200 return;
1203 req.dcid = cpu_to_le16(chan->dcid);
1204 req.scid = cpu_to_le16(chan->scid);
1205 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1206 sizeof(req), &req);
1208 lock_sock(sk);
1209 __l2cap_state_change(chan, BT_DISCONN);
1210 __l2cap_chan_set_err(chan, err);
1211 release_sock(sk);
1214 /* ---- L2CAP connections ---- */
1215 static void l2cap_conn_start(struct l2cap_conn *conn)
1217 struct l2cap_chan *chan, *tmp;
1219 BT_DBG("conn %p", conn);
1221 mutex_lock(&conn->chan_lock);
1223 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1224 struct sock *sk = chan->sk;
1226 l2cap_chan_lock(chan);
1228 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1229 l2cap_chan_unlock(chan);
1230 continue;
1233 if (chan->state == BT_CONNECT) {
1234 if (!l2cap_chan_check_security(chan) ||
1235 !__l2cap_no_conn_pending(chan)) {
1236 l2cap_chan_unlock(chan);
1237 continue;
1240 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1241 && test_bit(CONF_STATE2_DEVICE,
1242 &chan->conf_state)) {
1243 l2cap_chan_close(chan, ECONNRESET);
1244 l2cap_chan_unlock(chan);
1245 continue;
1248 l2cap_start_connection(chan);
1250 } else if (chan->state == BT_CONNECT2) {
1251 struct l2cap_conn_rsp rsp;
1252 char buf[128];
1253 rsp.scid = cpu_to_le16(chan->dcid);
1254 rsp.dcid = cpu_to_le16(chan->scid);
1256 if (l2cap_chan_check_security(chan)) {
1257 lock_sock(sk);
1258 if (test_bit(BT_SK_DEFER_SETUP,
1259 &bt_sk(sk)->flags)) {
1260 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1261 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1262 chan->ops->defer(chan);
1264 } else {
1265 __l2cap_state_change(chan, BT_CONFIG);
1266 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1267 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1269 release_sock(sk);
1270 } else {
1271 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1272 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1275 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1276 sizeof(rsp), &rsp);
1278 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1279 rsp.result != L2CAP_CR_SUCCESS) {
1280 l2cap_chan_unlock(chan);
1281 continue;
1284 set_bit(CONF_REQ_SENT, &chan->conf_state);
1285 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1286 l2cap_build_conf_req(chan, buf), buf);
1287 chan->num_conf_req++;
1290 l2cap_chan_unlock(chan);
1293 mutex_unlock(&conn->chan_lock);
1296 /* Find socket with cid and source/destination bdaddr.
1297 * Returns closest match, locked.
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1300 bdaddr_t *src,
1301 bdaddr_t *dst)
1303 struct l2cap_chan *c, *c1 = NULL;
1305 read_lock(&chan_list_lock);
1307 list_for_each_entry(c, &chan_list, global_l) {
1308 struct sock *sk = c->sk;
1310 if (state && c->state != state)
1311 continue;
1313 if (c->scid == cid) {
1314 int src_match, dst_match;
1315 int src_any, dst_any;
1317 /* Exact match. */
1318 src_match = !bacmp(&bt_sk(sk)->src, src);
1319 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 if (src_match && dst_match) {
1321 read_unlock(&chan_list_lock);
1322 return c;
1325 /* Closest match */
1326 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 if ((src_match && dst_any) || (src_any && dst_match) ||
1329 (src_any && dst_any))
1330 c1 = c;
1334 read_unlock(&chan_list_lock);
1336 return c1;
1339 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1341 struct sock *parent, *sk;
1342 struct l2cap_chan *chan, *pchan;
1344 BT_DBG("");
1346 /* Check if we have socket listening on cid */
1347 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1348 conn->src, conn->dst);
1349 if (!pchan)
1350 return;
1352 parent = pchan->sk;
1354 lock_sock(parent);
1356 chan = pchan->ops->new_connection(pchan);
1357 if (!chan)
1358 goto clean;
1360 sk = chan->sk;
1362 hci_conn_hold(conn->hcon);
1363 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1365 bacpy(&bt_sk(sk)->src, conn->src);
1366 bacpy(&bt_sk(sk)->dst, conn->dst);
1368 l2cap_chan_add(conn, chan);
1370 l2cap_chan_ready(chan);
1372 clean:
1373 release_sock(parent);
1376 static void l2cap_conn_ready(struct l2cap_conn *conn)
1378 struct l2cap_chan *chan;
1379 struct hci_conn *hcon = conn->hcon;
1381 BT_DBG("conn %p", conn);
1383 if (!hcon->out && hcon->type == LE_LINK)
1384 l2cap_le_conn_ready(conn);
1386 if (hcon->out && hcon->type == LE_LINK)
1387 smp_conn_security(hcon, hcon->pending_sec_level);
1389 mutex_lock(&conn->chan_lock);
1391 list_for_each_entry(chan, &conn->chan_l, list) {
1393 l2cap_chan_lock(chan);
1395 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1396 l2cap_chan_unlock(chan);
1397 continue;
1400 if (hcon->type == LE_LINK) {
1401 if (smp_conn_security(hcon, chan->sec_level))
1402 l2cap_chan_ready(chan);
1404 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1405 struct sock *sk = chan->sk;
1406 __clear_chan_timer(chan);
1407 lock_sock(sk);
1408 __l2cap_state_change(chan, BT_CONNECTED);
1409 sk->sk_state_change(sk);
1410 release_sock(sk);
1412 } else if (chan->state == BT_CONNECT)
1413 l2cap_do_start(chan);
1415 l2cap_chan_unlock(chan);
1418 mutex_unlock(&conn->chan_lock);
1421 /* Notify sockets that we cannot guaranty reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1424 struct l2cap_chan *chan;
1426 BT_DBG("conn %p", conn);
1428 mutex_lock(&conn->chan_lock);
1430 list_for_each_entry(chan, &conn->chan_l, list) {
1431 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 l2cap_chan_set_err(chan, err);
1435 mutex_unlock(&conn->chan_lock);
1438 static void l2cap_info_timeout(struct work_struct *work)
1440 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1441 info_timer.work);
1443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 conn->info_ident = 0;
1446 l2cap_conn_start(conn);
1449 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1451 struct l2cap_conn *conn = hcon->l2cap_data;
1452 struct l2cap_chan *chan, *l;
1454 if (!conn)
1455 return;
1457 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1459 kfree_skb(conn->rx_skb);
1461 mutex_lock(&conn->chan_lock);
1463 /* Kill channels */
1464 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1465 l2cap_chan_hold(chan);
1466 l2cap_chan_lock(chan);
1468 l2cap_chan_del(chan, err);
1470 l2cap_chan_unlock(chan);
1472 chan->ops->close(chan);
1473 l2cap_chan_put(chan);
1476 mutex_unlock(&conn->chan_lock);
1478 hci_chan_del(conn->hchan);
1480 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1481 cancel_delayed_work_sync(&conn->info_timer);
1483 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1484 cancel_delayed_work_sync(&conn->security_timer);
1485 smp_chan_destroy(conn);
1488 hcon->l2cap_data = NULL;
1489 kfree(conn);
1492 static void security_timeout(struct work_struct *work)
1494 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1495 security_timer.work);
1497 BT_DBG("conn %p", conn);
1499 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1500 smp_chan_destroy(conn);
1501 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1505 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1507 struct l2cap_conn *conn = hcon->l2cap_data;
1508 struct hci_chan *hchan;
1510 if (conn || status)
1511 return conn;
1513 hchan = hci_chan_create(hcon);
1514 if (!hchan)
1515 return NULL;
1517 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1518 if (!conn) {
1519 hci_chan_del(hchan);
1520 return NULL;
1523 hcon->l2cap_data = conn;
1524 conn->hcon = hcon;
1525 conn->hchan = hchan;
1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1529 switch (hcon->type) {
1530 case LE_LINK:
1531 if (hcon->hdev->le_mtu) {
1532 conn->mtu = hcon->hdev->le_mtu;
1533 break;
1535 /* fall through */
1536 default:
1537 conn->mtu = hcon->hdev->acl_mtu;
1538 break;
1541 conn->src = &hcon->hdev->bdaddr;
1542 conn->dst = &hcon->dst;
1544 conn->feat_mask = 0;
1546 spin_lock_init(&conn->lock);
1547 mutex_init(&conn->chan_lock);
1549 INIT_LIST_HEAD(&conn->chan_l);
1551 if (hcon->type == LE_LINK)
1552 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1553 else
1554 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1556 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1558 return conn;
1561 /* ---- Socket interface ---- */
1563 /* Find socket with psm and source / destination bdaddr.
1564 * Returns closest match.
1566 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1567 bdaddr_t *src,
1568 bdaddr_t *dst)
1570 struct l2cap_chan *c, *c1 = NULL;
1572 read_lock(&chan_list_lock);
1574 list_for_each_entry(c, &chan_list, global_l) {
1575 struct sock *sk = c->sk;
1577 if (state && c->state != state)
1578 continue;
1580 if (c->psm == psm) {
1581 int src_match, dst_match;
1582 int src_any, dst_any;
1584 /* Exact match. */
1585 src_match = !bacmp(&bt_sk(sk)->src, src);
1586 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1587 if (src_match && dst_match) {
1588 read_unlock(&chan_list_lock);
1589 return c;
1592 /* Closest match */
1593 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1594 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1595 if ((src_match && dst_any) || (src_any && dst_match) ||
1596 (src_any && dst_any))
1597 c1 = c;
1601 read_unlock(&chan_list_lock);
1603 return c1;
1606 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1607 bdaddr_t *dst, u8 dst_type)
1609 struct sock *sk = chan->sk;
1610 bdaddr_t *src = &bt_sk(sk)->src;
1611 struct l2cap_conn *conn;
1612 struct hci_conn *hcon;
1613 struct hci_dev *hdev;
1614 __u8 auth_type;
1615 int err;
1617 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1618 dst_type, __le16_to_cpu(psm));
1620 hdev = hci_get_route(dst, src);
1621 if (!hdev)
1622 return -EHOSTUNREACH;
1624 hci_dev_lock(hdev);
1626 l2cap_chan_lock(chan);
1628 /* PSM must be odd and lsb of upper byte must be 0 */
1629 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1630 chan->chan_type != L2CAP_CHAN_RAW) {
1631 err = -EINVAL;
1632 goto done;
1635 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1636 err = -EINVAL;
1637 goto done;
1640 switch (chan->mode) {
1641 case L2CAP_MODE_BASIC:
1642 break;
1643 case L2CAP_MODE_ERTM:
1644 case L2CAP_MODE_STREAMING:
1645 if (!disable_ertm)
1646 break;
1647 /* fall through */
1648 default:
1649 err = -ENOTSUPP;
1650 goto done;
1653 switch (chan->state) {
1654 case BT_CONNECT:
1655 case BT_CONNECT2:
1656 case BT_CONFIG:
1657 /* Already connecting */
1658 err = 0;
1659 goto done;
1661 case BT_CONNECTED:
1662 /* Already connected */
1663 err = -EISCONN;
1664 goto done;
1666 case BT_OPEN:
1667 case BT_BOUND:
1668 /* Can connect */
1669 break;
1671 default:
1672 err = -EBADFD;
1673 goto done;
1676 /* Set destination address and psm */
1677 lock_sock(sk);
1678 bacpy(&bt_sk(sk)->dst, dst);
1679 release_sock(sk);
1681 chan->psm = psm;
1682 chan->dcid = cid;
1684 auth_type = l2cap_get_auth_type(chan);
1686 if (chan->dcid == L2CAP_CID_LE_DATA)
1687 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1688 chan->sec_level, auth_type);
1689 else
1690 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1691 chan->sec_level, auth_type);
1693 if (IS_ERR(hcon)) {
1694 err = PTR_ERR(hcon);
1695 goto done;
1698 conn = l2cap_conn_add(hcon, 0);
1699 if (!conn) {
1700 hci_conn_put(hcon);
1701 err = -ENOMEM;
1702 goto done;
1705 if (hcon->type == LE_LINK) {
1706 err = 0;
1708 if (!list_empty(&conn->chan_l)) {
1709 err = -EBUSY;
1710 hci_conn_put(hcon);
1713 if (err)
1714 goto done;
1717 /* Update source addr of the socket */
1718 bacpy(src, conn->src);
1720 l2cap_chan_unlock(chan);
1721 l2cap_chan_add(conn, chan);
1722 l2cap_chan_lock(chan);
1724 l2cap_state_change(chan, BT_CONNECT);
1725 __set_chan_timer(chan, sk->sk_sndtimeo);
1727 if (hcon->state == BT_CONNECTED) {
1728 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1729 __clear_chan_timer(chan);
1730 if (l2cap_chan_check_security(chan))
1731 l2cap_state_change(chan, BT_CONNECTED);
1732 } else
1733 l2cap_do_start(chan);
1736 err = 0;
1738 done:
1739 l2cap_chan_unlock(chan);
1740 hci_dev_unlock(hdev);
1741 hci_dev_put(hdev);
1742 return err;
1745 int __l2cap_wait_ack(struct sock *sk)
1747 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1748 DECLARE_WAITQUEUE(wait, current);
1749 int err = 0;
1750 int timeo = HZ/5;
1752 add_wait_queue(sk_sleep(sk), &wait);
1753 set_current_state(TASK_INTERRUPTIBLE);
1754 while (chan->unacked_frames > 0 && chan->conn) {
1755 if (!timeo)
1756 timeo = HZ/5;
1758 if (signal_pending(current)) {
1759 err = sock_intr_errno(timeo);
1760 break;
1763 release_sock(sk);
1764 timeo = schedule_timeout(timeo);
1765 lock_sock(sk);
1766 set_current_state(TASK_INTERRUPTIBLE);
1768 err = sock_error(sk);
1769 if (err)
1770 break;
1772 set_current_state(TASK_RUNNING);
1773 remove_wait_queue(sk_sleep(sk), &wait);
1774 return err;
1777 static void l2cap_monitor_timeout(struct work_struct *work)
1779 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1780 monitor_timer.work);
1782 BT_DBG("chan %p", chan);
1784 l2cap_chan_lock(chan);
1786 if (!chan->conn) {
1787 l2cap_chan_unlock(chan);
1788 l2cap_chan_put(chan);
1789 return;
1792 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1794 l2cap_chan_unlock(chan);
1795 l2cap_chan_put(chan);
1798 static void l2cap_retrans_timeout(struct work_struct *work)
1800 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1801 retrans_timer.work);
1803 BT_DBG("chan %p", chan);
1805 l2cap_chan_lock(chan);
1807 if (!chan->conn) {
1808 l2cap_chan_unlock(chan);
1809 l2cap_chan_put(chan);
1810 return;
1813 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1814 l2cap_chan_unlock(chan);
1815 l2cap_chan_put(chan);
1818 static void l2cap_streaming_send(struct l2cap_chan *chan,
1819 struct sk_buff_head *skbs)
1821 struct sk_buff *skb;
1822 struct l2cap_ctrl *control;
1824 BT_DBG("chan %p, skbs %p", chan, skbs);
1826 if (__chan_is_moving(chan))
1827 return;
1829 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1831 while (!skb_queue_empty(&chan->tx_q)) {
1833 skb = skb_dequeue(&chan->tx_q);
1835 bt_cb(skb)->control.retries = 1;
1836 control = &bt_cb(skb)->control;
1838 control->reqseq = 0;
1839 control->txseq = chan->next_tx_seq;
1841 __pack_control(chan, control, skb);
1843 if (chan->fcs == L2CAP_FCS_CRC16) {
1844 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1845 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1848 l2cap_do_send(chan, skb);
1850 BT_DBG("Sent txseq %u", control->txseq);
1852 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1853 chan->frames_sent++;
1857 static int l2cap_ertm_send(struct l2cap_chan *chan)
1859 struct sk_buff *skb, *tx_skb;
1860 struct l2cap_ctrl *control;
1861 int sent = 0;
1863 BT_DBG("chan %p", chan);
1865 if (chan->state != BT_CONNECTED)
1866 return -ENOTCONN;
1868 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1869 return 0;
1871 if (__chan_is_moving(chan))
1872 return 0;
1874 while (chan->tx_send_head &&
1875 chan->unacked_frames < chan->remote_tx_win &&
1876 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1878 skb = chan->tx_send_head;
1880 bt_cb(skb)->control.retries = 1;
1881 control = &bt_cb(skb)->control;
1883 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1884 control->final = 1;
1886 control->reqseq = chan->buffer_seq;
1887 chan->last_acked_seq = chan->buffer_seq;
1888 control->txseq = chan->next_tx_seq;
1890 __pack_control(chan, control, skb);
1892 if (chan->fcs == L2CAP_FCS_CRC16) {
1893 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1894 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1897 /* Clone after data has been modified. Data is assumed to be
1898 read-only (for locking purposes) on cloned sk_buffs.
1900 tx_skb = skb_clone(skb, GFP_KERNEL);
1902 if (!tx_skb)
1903 break;
1905 __set_retrans_timer(chan);
1907 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1908 chan->unacked_frames++;
1909 chan->frames_sent++;
1910 sent++;
1912 if (skb_queue_is_last(&chan->tx_q, skb))
1913 chan->tx_send_head = NULL;
1914 else
1915 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1917 l2cap_do_send(chan, tx_skb);
1918 BT_DBG("Sent txseq %u", control->txseq);
1921 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1922 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1924 return sent;
1927 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1929 struct l2cap_ctrl control;
1930 struct sk_buff *skb;
1931 struct sk_buff *tx_skb;
1932 u16 seq;
1934 BT_DBG("chan %p", chan);
1936 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1937 return;
1939 if (__chan_is_moving(chan))
1940 return;
1942 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1943 seq = l2cap_seq_list_pop(&chan->retrans_list);
1945 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1946 if (!skb) {
1947 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1948 seq);
1949 continue;
1952 bt_cb(skb)->control.retries++;
1953 control = bt_cb(skb)->control;
1955 if (chan->max_tx != 0 &&
1956 bt_cb(skb)->control.retries > chan->max_tx) {
1957 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1958 l2cap_send_disconn_req(chan, ECONNRESET);
1959 l2cap_seq_list_clear(&chan->retrans_list);
1960 break;
1963 control.reqseq = chan->buffer_seq;
1964 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1965 control.final = 1;
1966 else
1967 control.final = 0;
1969 if (skb_cloned(skb)) {
1970 /* Cloned sk_buffs are read-only, so we need a
1971 * writeable copy
1973 tx_skb = skb_copy(skb, GFP_KERNEL);
1974 } else {
1975 tx_skb = skb_clone(skb, GFP_KERNEL);
1978 if (!tx_skb) {
1979 l2cap_seq_list_clear(&chan->retrans_list);
1980 break;
1983 /* Update skb contents */
1984 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1985 put_unaligned_le32(__pack_extended_control(&control),
1986 tx_skb->data + L2CAP_HDR_SIZE);
1987 } else {
1988 put_unaligned_le16(__pack_enhanced_control(&control),
1989 tx_skb->data + L2CAP_HDR_SIZE);
1992 if (chan->fcs == L2CAP_FCS_CRC16) {
1993 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1994 put_unaligned_le16(fcs, skb_put(tx_skb,
1995 L2CAP_FCS_SIZE));
1998 l2cap_do_send(chan, tx_skb);
2000 BT_DBG("Resent txseq %d", control.txseq);
2002 chan->last_acked_seq = chan->buffer_seq;
2006 static void l2cap_retransmit(struct l2cap_chan *chan,
2007 struct l2cap_ctrl *control)
2009 BT_DBG("chan %p, control %p", chan, control);
2011 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2012 l2cap_ertm_resend(chan);
2015 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2016 struct l2cap_ctrl *control)
2018 struct sk_buff *skb;
2020 BT_DBG("chan %p, control %p", chan, control);
2022 if (control->poll)
2023 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2025 l2cap_seq_list_clear(&chan->retrans_list);
2027 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2028 return;
2030 if (chan->unacked_frames) {
2031 skb_queue_walk(&chan->tx_q, skb) {
2032 if (bt_cb(skb)->control.txseq == control->reqseq ||
2033 skb == chan->tx_send_head)
2034 break;
2037 skb_queue_walk_from(&chan->tx_q, skb) {
2038 if (skb == chan->tx_send_head)
2039 break;
2041 l2cap_seq_list_append(&chan->retrans_list,
2042 bt_cb(skb)->control.txseq);
2045 l2cap_ertm_resend(chan);
2049 static void l2cap_send_ack(struct l2cap_chan *chan)
2051 struct l2cap_ctrl control;
2052 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2053 chan->last_acked_seq);
2054 int threshold;
2056 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2057 chan, chan->last_acked_seq, chan->buffer_seq);
2059 memset(&control, 0, sizeof(control));
2060 control.sframe = 1;
2062 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2063 chan->rx_state == L2CAP_RX_STATE_RECV) {
2064 __clear_ack_timer(chan);
2065 control.super = L2CAP_SUPER_RNR;
2066 control.reqseq = chan->buffer_seq;
2067 l2cap_send_sframe(chan, &control);
2068 } else {
2069 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2070 l2cap_ertm_send(chan);
2071 /* If any i-frames were sent, they included an ack */
2072 if (chan->buffer_seq == chan->last_acked_seq)
2073 frames_to_ack = 0;
2076 /* Ack now if the window is 3/4ths full.
2077 * Calculate without mul or div
2079 threshold = chan->ack_win;
2080 threshold += threshold << 1;
2081 threshold >>= 2;
2083 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2084 threshold);
2086 if (frames_to_ack >= threshold) {
2087 __clear_ack_timer(chan);
2088 control.super = L2CAP_SUPER_RR;
2089 control.reqseq = chan->buffer_seq;
2090 l2cap_send_sframe(chan, &control);
2091 frames_to_ack = 0;
2094 if (frames_to_ack)
2095 __set_ack_timer(chan);
2099 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2100 struct msghdr *msg, int len,
2101 int count, struct sk_buff *skb)
2103 struct l2cap_conn *conn = chan->conn;
2104 struct sk_buff **frag;
2105 int sent = 0;
2107 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2108 return -EFAULT;
2110 sent += count;
2111 len -= count;
2113 /* Continuation fragments (no L2CAP header) */
2114 frag = &skb_shinfo(skb)->frag_list;
2115 while (len) {
2116 struct sk_buff *tmp;
2118 count = min_t(unsigned int, conn->mtu, len);
2120 tmp = chan->ops->alloc_skb(chan, count,
2121 msg->msg_flags & MSG_DONTWAIT);
2122 if (IS_ERR(tmp))
2123 return PTR_ERR(tmp);
2125 *frag = tmp;
2127 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2128 return -EFAULT;
2130 (*frag)->priority = skb->priority;
2132 sent += count;
2133 len -= count;
2135 skb->len += (*frag)->len;
2136 skb->data_len += (*frag)->len;
2138 frag = &(*frag)->next;
2141 return sent;
2144 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2145 struct msghdr *msg, size_t len,
2146 u32 priority)
2148 struct l2cap_conn *conn = chan->conn;
2149 struct sk_buff *skb;
2150 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2151 struct l2cap_hdr *lh;
2153 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2155 count = min_t(unsigned int, (conn->mtu - hlen), len);
2157 skb = chan->ops->alloc_skb(chan, count + hlen,
2158 msg->msg_flags & MSG_DONTWAIT);
2159 if (IS_ERR(skb))
2160 return skb;
2162 skb->priority = priority;
2164 /* Create L2CAP header */
2165 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2166 lh->cid = cpu_to_le16(chan->dcid);
2167 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2168 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2170 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2171 if (unlikely(err < 0)) {
2172 kfree_skb(skb);
2173 return ERR_PTR(err);
2175 return skb;
2178 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2179 struct msghdr *msg, size_t len,
2180 u32 priority)
2182 struct l2cap_conn *conn = chan->conn;
2183 struct sk_buff *skb;
2184 int err, count;
2185 struct l2cap_hdr *lh;
2187 BT_DBG("chan %p len %zu", chan, len);
2189 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2191 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2192 msg->msg_flags & MSG_DONTWAIT);
2193 if (IS_ERR(skb))
2194 return skb;
2196 skb->priority = priority;
2198 /* Create L2CAP header */
2199 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2200 lh->cid = cpu_to_le16(chan->dcid);
2201 lh->len = cpu_to_le16(len);
2203 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2204 if (unlikely(err < 0)) {
2205 kfree_skb(skb);
2206 return ERR_PTR(err);
2208 return skb;
2211 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2212 struct msghdr *msg, size_t len,
2213 u16 sdulen)
2215 struct l2cap_conn *conn = chan->conn;
2216 struct sk_buff *skb;
2217 int err, count, hlen;
2218 struct l2cap_hdr *lh;
2220 BT_DBG("chan %p len %zu", chan, len);
2222 if (!conn)
2223 return ERR_PTR(-ENOTCONN);
2225 hlen = __ertm_hdr_size(chan);
2227 if (sdulen)
2228 hlen += L2CAP_SDULEN_SIZE;
2230 if (chan->fcs == L2CAP_FCS_CRC16)
2231 hlen += L2CAP_FCS_SIZE;
2233 count = min_t(unsigned int, (conn->mtu - hlen), len);
2235 skb = chan->ops->alloc_skb(chan, count + hlen,
2236 msg->msg_flags & MSG_DONTWAIT);
2237 if (IS_ERR(skb))
2238 return skb;
2240 /* Create L2CAP header */
2241 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2242 lh->cid = cpu_to_le16(chan->dcid);
2243 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2245 /* Control header is populated later */
2246 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2247 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2248 else
2249 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2251 if (sdulen)
2252 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2254 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2255 if (unlikely(err < 0)) {
2256 kfree_skb(skb);
2257 return ERR_PTR(err);
2260 bt_cb(skb)->control.fcs = chan->fcs;
2261 bt_cb(skb)->control.retries = 0;
2262 return skb;
2265 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2266 struct sk_buff_head *seg_queue,
2267 struct msghdr *msg, size_t len)
2269 struct sk_buff *skb;
2270 u16 sdu_len;
2271 size_t pdu_len;
2272 u8 sar;
2274 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2276 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2277 * so fragmented skbs are not used. The HCI layer's handling
2278 * of fragmented skbs is not compatible with ERTM's queueing.
2281 /* PDU size is derived from the HCI MTU */
2282 pdu_len = chan->conn->mtu;
2284 /* Constrain PDU size for BR/EDR connections */
2285 if (!chan->hs_hcon)
2286 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2288 /* Adjust for largest possible L2CAP overhead. */
2289 if (chan->fcs)
2290 pdu_len -= L2CAP_FCS_SIZE;
2292 pdu_len -= __ertm_hdr_size(chan);
2294 /* Remote device may have requested smaller PDUs */
2295 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2297 if (len <= pdu_len) {
2298 sar = L2CAP_SAR_UNSEGMENTED;
2299 sdu_len = 0;
2300 pdu_len = len;
2301 } else {
2302 sar = L2CAP_SAR_START;
2303 sdu_len = len;
2304 pdu_len -= L2CAP_SDULEN_SIZE;
2307 while (len > 0) {
2308 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2310 if (IS_ERR(skb)) {
2311 __skb_queue_purge(seg_queue);
2312 return PTR_ERR(skb);
2315 bt_cb(skb)->control.sar = sar;
2316 __skb_queue_tail(seg_queue, skb);
2318 len -= pdu_len;
2319 if (sdu_len) {
2320 sdu_len = 0;
2321 pdu_len += L2CAP_SDULEN_SIZE;
2324 if (len <= pdu_len) {
2325 sar = L2CAP_SAR_END;
2326 pdu_len = len;
2327 } else {
2328 sar = L2CAP_SAR_CONTINUE;
2332 return 0;
2335 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2336 u32 priority)
2338 struct sk_buff *skb;
2339 int err;
2340 struct sk_buff_head seg_queue;
2342 /* Connectionless channel */
2343 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2344 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2345 if (IS_ERR(skb))
2346 return PTR_ERR(skb);
2348 l2cap_do_send(chan, skb);
2349 return len;
2352 switch (chan->mode) {
2353 case L2CAP_MODE_BASIC:
2354 /* Check outgoing MTU */
2355 if (len > chan->omtu)
2356 return -EMSGSIZE;
2358 /* Create a basic PDU */
2359 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2360 if (IS_ERR(skb))
2361 return PTR_ERR(skb);
2363 l2cap_do_send(chan, skb);
2364 err = len;
2365 break;
2367 case L2CAP_MODE_ERTM:
2368 case L2CAP_MODE_STREAMING:
2369 /* Check outgoing MTU */
2370 if (len > chan->omtu) {
2371 err = -EMSGSIZE;
2372 break;
2375 __skb_queue_head_init(&seg_queue);
2377 /* Do segmentation before calling in to the state machine,
2378 * since it's possible to block while waiting for memory
2379 * allocation.
2381 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2383 /* The channel could have been closed while segmenting,
2384 * check that it is still connected.
2386 if (chan->state != BT_CONNECTED) {
2387 __skb_queue_purge(&seg_queue);
2388 err = -ENOTCONN;
2391 if (err)
2392 break;
2394 if (chan->mode == L2CAP_MODE_ERTM)
2395 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2396 else
2397 l2cap_streaming_send(chan, &seg_queue);
2399 err = len;
2401 /* If the skbs were not queued for sending, they'll still be in
2402 * seg_queue and need to be purged.
2404 __skb_queue_purge(&seg_queue);
2405 break;
2407 default:
2408 BT_DBG("bad state %1.1x", chan->mode);
2409 err = -EBADFD;
2412 return err;
2415 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2417 struct l2cap_ctrl control;
2418 u16 seq;
2420 BT_DBG("chan %p, txseq %u", chan, txseq);
2422 memset(&control, 0, sizeof(control));
2423 control.sframe = 1;
2424 control.super = L2CAP_SUPER_SREJ;
2426 for (seq = chan->expected_tx_seq; seq != txseq;
2427 seq = __next_seq(chan, seq)) {
2428 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2429 control.reqseq = seq;
2430 l2cap_send_sframe(chan, &control);
2431 l2cap_seq_list_append(&chan->srej_list, seq);
2435 chan->expected_tx_seq = __next_seq(chan, txseq);
2438 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2440 struct l2cap_ctrl control;
2442 BT_DBG("chan %p", chan);
2444 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2445 return;
2447 memset(&control, 0, sizeof(control));
2448 control.sframe = 1;
2449 control.super = L2CAP_SUPER_SREJ;
2450 control.reqseq = chan->srej_list.tail;
2451 l2cap_send_sframe(chan, &control);
2454 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2456 struct l2cap_ctrl control;
2457 u16 initial_head;
2458 u16 seq;
2460 BT_DBG("chan %p, txseq %u", chan, txseq);
2462 memset(&control, 0, sizeof(control));
2463 control.sframe = 1;
2464 control.super = L2CAP_SUPER_SREJ;
2466 /* Capture initial list head to allow only one pass through the list. */
2467 initial_head = chan->srej_list.head;
2469 do {
2470 seq = l2cap_seq_list_pop(&chan->srej_list);
2471 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2472 break;
2474 control.reqseq = seq;
2475 l2cap_send_sframe(chan, &control);
2476 l2cap_seq_list_append(&chan->srej_list, seq);
2477 } while (chan->srej_list.head != initial_head);
2480 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2482 struct sk_buff *acked_skb;
2483 u16 ackseq;
2485 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2487 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2488 return;
2490 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2491 chan->expected_ack_seq, chan->unacked_frames);
2493 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2494 ackseq = __next_seq(chan, ackseq)) {
2496 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2497 if (acked_skb) {
2498 skb_unlink(acked_skb, &chan->tx_q);
2499 kfree_skb(acked_skb);
2500 chan->unacked_frames--;
2504 chan->expected_ack_seq = reqseq;
2506 if (chan->unacked_frames == 0)
2507 __clear_retrans_timer(chan);
2509 BT_DBG("unacked_frames %u", chan->unacked_frames);
2512 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2514 BT_DBG("chan %p", chan);
2516 chan->expected_tx_seq = chan->buffer_seq;
2517 l2cap_seq_list_clear(&chan->srej_list);
2518 skb_queue_purge(&chan->srej_q);
2519 chan->rx_state = L2CAP_RX_STATE_RECV;
2522 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2523 struct l2cap_ctrl *control,
2524 struct sk_buff_head *skbs, u8 event)
2526 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2527 event);
2529 switch (event) {
2530 case L2CAP_EV_DATA_REQUEST:
2531 if (chan->tx_send_head == NULL)
2532 chan->tx_send_head = skb_peek(skbs);
2534 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2535 l2cap_ertm_send(chan);
2536 break;
2537 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2538 BT_DBG("Enter LOCAL_BUSY");
2539 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2541 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2542 /* The SREJ_SENT state must be aborted if we are to
2543 * enter the LOCAL_BUSY state.
2545 l2cap_abort_rx_srej_sent(chan);
2548 l2cap_send_ack(chan);
2550 break;
2551 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2552 BT_DBG("Exit LOCAL_BUSY");
2553 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2555 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2556 struct l2cap_ctrl local_control;
2558 memset(&local_control, 0, sizeof(local_control));
2559 local_control.sframe = 1;
2560 local_control.super = L2CAP_SUPER_RR;
2561 local_control.poll = 1;
2562 local_control.reqseq = chan->buffer_seq;
2563 l2cap_send_sframe(chan, &local_control);
2565 chan->retry_count = 1;
2566 __set_monitor_timer(chan);
2567 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2569 break;
2570 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2571 l2cap_process_reqseq(chan, control->reqseq);
2572 break;
2573 case L2CAP_EV_EXPLICIT_POLL:
2574 l2cap_send_rr_or_rnr(chan, 1);
2575 chan->retry_count = 1;
2576 __set_monitor_timer(chan);
2577 __clear_ack_timer(chan);
2578 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2579 break;
2580 case L2CAP_EV_RETRANS_TO:
2581 l2cap_send_rr_or_rnr(chan, 1);
2582 chan->retry_count = 1;
2583 __set_monitor_timer(chan);
2584 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2585 break;
2586 case L2CAP_EV_RECV_FBIT:
2587 /* Nothing to process */
2588 break;
2589 default:
2590 break;
2594 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2595 struct l2cap_ctrl *control,
2596 struct sk_buff_head *skbs, u8 event)
2598 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2599 event);
2601 switch (event) {
2602 case L2CAP_EV_DATA_REQUEST:
2603 if (chan->tx_send_head == NULL)
2604 chan->tx_send_head = skb_peek(skbs);
2605 /* Queue data, but don't send. */
2606 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2607 break;
2608 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2609 BT_DBG("Enter LOCAL_BUSY");
2610 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2612 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2613 /* The SREJ_SENT state must be aborted if we are to
2614 * enter the LOCAL_BUSY state.
2616 l2cap_abort_rx_srej_sent(chan);
2619 l2cap_send_ack(chan);
2621 break;
2622 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2623 BT_DBG("Exit LOCAL_BUSY");
2624 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2626 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2627 struct l2cap_ctrl local_control;
2628 memset(&local_control, 0, sizeof(local_control));
2629 local_control.sframe = 1;
2630 local_control.super = L2CAP_SUPER_RR;
2631 local_control.poll = 1;
2632 local_control.reqseq = chan->buffer_seq;
2633 l2cap_send_sframe(chan, &local_control);
2635 chan->retry_count = 1;
2636 __set_monitor_timer(chan);
2637 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2639 break;
2640 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2641 l2cap_process_reqseq(chan, control->reqseq);
2643 /* Fall through */
2645 case L2CAP_EV_RECV_FBIT:
2646 if (control && control->final) {
2647 __clear_monitor_timer(chan);
2648 if (chan->unacked_frames > 0)
2649 __set_retrans_timer(chan);
2650 chan->retry_count = 0;
2651 chan->tx_state = L2CAP_TX_STATE_XMIT;
2652 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2654 break;
2655 case L2CAP_EV_EXPLICIT_POLL:
2656 /* Ignore */
2657 break;
2658 case L2CAP_EV_MONITOR_TO:
2659 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2660 l2cap_send_rr_or_rnr(chan, 1);
2661 __set_monitor_timer(chan);
2662 chan->retry_count++;
2663 } else {
2664 l2cap_send_disconn_req(chan, ECONNABORTED);
2666 break;
2667 default:
2668 break;
2672 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2673 struct sk_buff_head *skbs, u8 event)
2675 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2676 chan, control, skbs, event, chan->tx_state);
2678 switch (chan->tx_state) {
2679 case L2CAP_TX_STATE_XMIT:
2680 l2cap_tx_state_xmit(chan, control, skbs, event);
2681 break;
2682 case L2CAP_TX_STATE_WAIT_F:
2683 l2cap_tx_state_wait_f(chan, control, skbs, event);
2684 break;
2685 default:
2686 /* Ignore event */
2687 break;
/* Feed the reqseq and F bit of a received frame into the transmit
 * state machine (no data is queued).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
/* Feed only the F bit of a received frame into the transmit state
 * machine (reqseq processing is skipped).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2705 /* Copy frame to all raw sockets on that connection */
2706 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2708 struct sk_buff *nskb;
2709 struct l2cap_chan *chan;
2711 BT_DBG("conn %p", conn);
2713 mutex_lock(&conn->chan_lock);
2715 list_for_each_entry(chan, &conn->chan_l, list) {
2716 struct sock *sk = chan->sk;
2717 if (chan->chan_type != L2CAP_CHAN_RAW)
2718 continue;
2720 /* Don't send frame to the socket it came from */
2721 if (skb->sk == sk)
2722 continue;
2723 nskb = skb_clone(skb, GFP_KERNEL);
2724 if (!nskb)
2725 continue;
2727 if (chan->ops->recv(chan, nskb))
2728 kfree_skb(nskb);
2731 mutex_unlock(&conn->chan_lock);
2734 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header + command header + payload.
 * If the payload does not fit into one ACL buffer (conn->mtu), the
 * remainder is carried in continuation skbs chained on frag_list.
 * Returns the head skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment carries the headers, so it holds less
		 * payload than the later ones.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole frag_list chain as well */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *ptr and advance *ptr past it.
 * Returns the total size consumed (header + payload).
 *
 * Options of length 1/2/4 are returned by value in @val; any other
 * length returns a pointer to the payload in @val, which the caller
 * memcpy()s out after checking @olen.
 *
 * NOTE(review): opt->len comes straight from the peer and is NOT
 * bounds-checked here.  Callers must verify the returned length
 * against the remaining buffer (their "len" bookkeeping) before
 * trusting @val, or a crafted option can cause an out-of-bounds read.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-sized option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2832 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2834 struct l2cap_conf_opt *opt = *ptr;
2836 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2838 opt->type = type;
2839 opt->len = len;
2841 switch (len) {
2842 case 1:
2843 *((u8 *) opt->val) = val;
2844 break;
2846 case 2:
2847 put_unaligned_le16(val, opt->val);
2848 break;
2850 case 4:
2851 put_unaligned_le32(val, opt->val);
2852 break;
2854 default:
2855 memcpy(opt->val, (void *) val, len);
2856 break;
2859 *ptr += L2CAP_CONF_OPT_SIZE + len;
2862 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2864 struct l2cap_conf_efs efs;
2866 switch (chan->mode) {
2867 case L2CAP_MODE_ERTM:
2868 efs.id = chan->local_id;
2869 efs.stype = chan->local_stype;
2870 efs.msdu = cpu_to_le16(chan->local_msdu);
2871 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2872 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2873 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2874 break;
2876 case L2CAP_MODE_STREAMING:
2877 efs.id = 1;
2878 efs.stype = L2CAP_SERV_BESTEFFORT;
2879 efs.msdu = cpu_to_le16(chan->local_msdu);
2880 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2881 efs.acc_lat = 0;
2882 efs.flush_to = 0;
2883 break;
2885 default:
2886 return;
2889 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2890 (unsigned long) &efs);
2893 static void l2cap_ack_timeout(struct work_struct *work)
2895 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2896 ack_timer.work);
2897 u16 frames_to_ack;
2899 BT_DBG("chan %p", chan);
2901 l2cap_chan_lock(chan);
2903 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2904 chan->last_acked_seq);
2906 if (frames_to_ack)
2907 l2cap_send_rr_or_rnr(chan, 0);
2909 l2cap_chan_unlock(chan);
2910 l2cap_chan_put(chan);
/* Reset per-channel transmit/receive bookkeeping for a (re)configured
 * channel and, for ERTM mode, set up its timers, queues and sequence
 * lists.  Returns 0 on success or a negative errno from the sequence
 * list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	/* Sequence-number and SDU reassembly state starts from zero */
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state: stable on the BR/EDR controller */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming/basic mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2958 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2960 switch (mode) {
2961 case L2CAP_MODE_STREAMING:
2962 case L2CAP_MODE_ERTM:
2963 if (l2cap_mode_supported(mode, remote_feat_mask))
2964 return mode;
2965 /* fall through */
2966 default:
2967 return L2CAP_MODE_BASIC;
2971 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2973 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2976 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2978 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC retransmission and monitor timeouts.  AMP channels
 * derive them from the controller's best-effort flush timeout; BR/EDR
 * channels use the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3019 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3021 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3022 __l2cap_ews_supported(chan)) {
3023 /* use extended control field */
3024 set_bit(FLAG_EXT_CTRL, &chan->flags);
3025 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3026 } else {
3027 chan->tx_win = min_t(u16, chan->tx_win,
3028 L2CAP_DEFAULT_TX_WINDOW);
3029 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3031 chan->ack_win = chan->tx_win;
/* Build a Configuration Request for the channel into @data: select the
 * channel mode on the first request, then emit MTU, RFC, EFS, EWS and
 * FCS options as appropriate for that mode.  Returns the number of
 * bytes written (header + options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when differing from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC is only sent to peers that
		 * could otherwise assume ERTM/streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the extended header, SDU
		 * length and FCS within the ACL MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Windows beyond the RFC field's range go in EWS */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
/* Parse the peer's accumulated Configuration Request (chan->conf_req)
 * and build our Configuration Response into @data.  Returns the
 * response length, or -ECONNREFUSED when the request is unacceptable.
 *
 * Fixes over the previous version:
 *  - stop parsing when a crafted option length makes the remaining
 *    length go negative (out-of-bounds read otherwise);
 *  - only honour an EFS option of the correct size, so a truncated
 *    option can no longer leave 'efs' as uninitialized stack data
 *    that is later read and echoed back to the peer.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* The option length is peer-controlled; a bogus value
		 * steps past the end of the stored request.
		 */
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only a well-sized EFS option counts; flagging
			 * remote_efs for a short one would later read
			 * uninitialized bytes from 'efs'.
			 */
			if (olen == sizeof(efs)) {
				remote_efs = 1;
				memcpy(&efs, (void *) val, olen);
			}
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint options are reported back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3366 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3367 void *data, u16 *result)
3369 struct l2cap_conf_req *req = data;
3370 void *ptr = req->data;
3371 int type, olen;
3372 unsigned long val;
3373 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3374 struct l2cap_conf_efs efs;
3376 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3378 while (len >= L2CAP_CONF_OPT_SIZE) {
3379 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3381 switch (type) {
3382 case L2CAP_CONF_MTU:
3383 if (val < L2CAP_DEFAULT_MIN_MTU) {
3384 *result = L2CAP_CONF_UNACCEPT;
3385 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3386 } else
3387 chan->imtu = val;
3388 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3389 break;
3391 case L2CAP_CONF_FLUSH_TO:
3392 chan->flush_to = val;
3393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3394 2, chan->flush_to);
3395 break;
3397 case L2CAP_CONF_RFC:
3398 if (olen == sizeof(rfc))
3399 memcpy(&rfc, (void *)val, olen);
3401 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3402 rfc.mode != chan->mode)
3403 return -ECONNREFUSED;
3405 chan->fcs = 0;
3407 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3408 sizeof(rfc), (unsigned long) &rfc);
3409 break;
3411 case L2CAP_CONF_EWS:
3412 chan->ack_win = min_t(u16, val, chan->ack_win);
3413 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3414 chan->tx_win);
3415 break;
3417 case L2CAP_CONF_EFS:
3418 if (olen == sizeof(efs))
3419 memcpy(&efs, (void *)val, olen);
3421 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3422 efs.stype != L2CAP_SERV_NOTRAFIC &&
3423 efs.stype != chan->local_stype)
3424 return -ECONNREFUSED;
3426 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3427 (unsigned long) &efs);
3428 break;
3430 case L2CAP_CONF_FCS:
3431 if (*result == L2CAP_CONF_PENDING)
3432 if (val == L2CAP_FCS_NONE)
3433 set_bit(CONF_RECV_NO_FCS,
3434 &chan->conf_state);
3435 break;
3439 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3440 return -ECONNREFUSED;
3442 chan->mode = rfc.mode;
3444 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3445 switch (rfc.mode) {
3446 case L2CAP_MODE_ERTM:
3447 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3448 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3449 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3450 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3451 chan->ack_win = min_t(u16, chan->ack_win,
3452 rfc.txwin_size);
3454 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3455 chan->local_msdu = le16_to_cpu(efs.msdu);
3456 chan->local_sdu_itime =
3457 le32_to_cpu(efs.sdu_itime);
3458 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3459 chan->local_flush_to =
3460 le32_to_cpu(efs.flush_to);
3462 break;
3464 case L2CAP_MODE_STREAMING:
3465 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3469 req->dcid = cpu_to_le16(chan->dcid);
3470 req->flags = __constant_cpu_to_le16(0);
3472 return ptr - data;
3475 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3476 u16 result, u16 flags)
3478 struct l2cap_conf_rsp *rsp = data;
3479 void *ptr = rsp->data;
3481 BT_DBG("chan %p", chan);
3483 rsp->scid = cpu_to_le16(chan->dcid);
3484 rsp->result = cpu_to_le16(result);
3485 rsp->flags = cpu_to_le16(flags);
3487 return ptr - data;
3490 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3492 struct l2cap_conn_rsp rsp;
3493 struct l2cap_conn *conn = chan->conn;
3494 u8 buf[128];
3495 u8 rsp_code;
3497 rsp.scid = cpu_to_le16(chan->dcid);
3498 rsp.dcid = cpu_to_le16(chan->scid);
3499 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3500 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3502 if (chan->hs_hcon)
3503 rsp_code = L2CAP_CREATE_CHAN_RSP;
3504 else
3505 rsp_code = L2CAP_CONN_RSP;
3507 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3509 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3511 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3512 return;
3514 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3515 l2cap_build_conf_req(chan, buf), buf);
3516 chan->num_conf_req++;
3519 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3521 int type, olen;
3522 unsigned long val;
3523 /* Use sane default values in case a misbehaving remote device
3524 * did not send an RFC or extended window size option.
3526 u16 txwin_ext = chan->ack_win;
3527 struct l2cap_conf_rfc rfc = {
3528 .mode = chan->mode,
3529 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3530 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3531 .max_pdu_size = cpu_to_le16(chan->imtu),
3532 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3535 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3537 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3538 return;
3540 while (len >= L2CAP_CONF_OPT_SIZE) {
3541 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3543 switch (type) {
3544 case L2CAP_CONF_RFC:
3545 if (olen == sizeof(rfc))
3546 memcpy(&rfc, (void *)val, olen);
3547 break;
3548 case L2CAP_CONF_EWS:
3549 txwin_ext = val;
3550 break;
3554 switch (rfc.mode) {
3555 case L2CAP_MODE_ERTM:
3556 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3557 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3558 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3559 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3560 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3561 else
3562 chan->ack_win = min_t(u16, chan->ack_win,
3563 rfc.txwin_size);
3564 break;
3565 case L2CAP_MODE_STREAMING:
3566 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3570 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3571 struct l2cap_cmd_hdr *cmd, u8 *data)
3573 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3575 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3576 return 0;
3578 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3579 cmd->ident == conn->info_ident) {
3580 cancel_delayed_work(&conn->info_timer);
3582 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3583 conn->info_ident = 0;
3585 l2cap_conn_start(conn);
3588 return 0;
/* Handle an incoming Connect (or Create Channel) Request: find the
 * listening channel for the PSM, run security checks, create the new
 * child channel and send the response.  Returns the new channel, or
 * NULL when the connection was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Hold the ACL for the lifetime of the channel */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must authorize first */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}

				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not done yet */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask Information Request if this reply
	 * was pending only on that exchange.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3722 static int l2cap_connect_req(struct l2cap_conn *conn,
3723 struct l2cap_cmd_hdr *cmd, u8 *data)
3725 struct hci_dev *hdev = conn->hcon->hdev;
3726 struct hci_conn *hcon = conn->hcon;
3728 hci_dev_lock(hdev);
3729 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3730 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3731 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3732 hcon->dst_type, 0, NULL, 0,
3733 hcon->dev_class);
3734 hci_dev_unlock(hdev);
3736 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3737 return 0;
/* Handle a Connect/Create Channel Response: locate the local channel
 * (by source CID, or by command ident while still pending) and act on
 * the result — success starts configuration, pending just marks the
 * state, anything else tears the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No source CID yet: match by the request's ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3809 static inline void set_default_fcs(struct l2cap_chan *chan)
3811 /* FCS is enabled only in ERTM or streaming mode, if one or both
3812 * sides request it.
3814 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3815 chan->fcs = L2CAP_FCS_NONE;
3816 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3817 chan->fcs = L2CAP_FCS_CRC16;
3820 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3821 u8 ident, u16 flags)
3823 struct l2cap_conn *conn = chan->conn;
3825 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3826 flags);
3828 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3829 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3831 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3832 l2cap_build_conf_rsp(chan, data,
3833 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming L2CAP Configure Request (BR/EDR signaling).
 *
 * Accumulates option fragments across continuation packets in
 * chan->conf_req; once the full request has arrived it is parsed, a
 * Configure Response is sent, and - when both directions are done -
 * the channel is brought up (ERTM init + ready).
 *
 * @conn:    signaling connection the request arrived on
 * @cmd:     command header; cmd->ident is echoed in responses
 * @cmd_len: total command payload length, bounds the option data
 * @data:    raw l2cap_conf_req payload
 *
 * Returns 0, or -ENOENT when the addressed channel is unknown.
 * NOTE(review): l2cap_get_chan_by_scid appears to return with the
 * channel locked (this function only unlocks at 'unlock') - confirm
 * against the helper's definition.
 */
3836 static inline int l2cap_config_req(struct l2cap_conn *conn,
3837 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3838 u8 *data)
3840 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3841 u16 dcid, flags;
3842 u8 rsp[64];
3843 struct l2cap_chan *chan;
3844 int len, err = 0;
/* Wire fields are little-endian. */
3846 dcid = __le16_to_cpu(req->dcid);
3847 flags = __le16_to_cpu(req->flags);
3849 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3851 chan = l2cap_get_chan_by_scid(conn, dcid);
3852 if (!chan)
3853 return -ENOENT;
/* Configuration is only legal in BT_CONFIG/BT_CONNECT2; anything else
 * gets an explicit invalid-CID command reject.
 */
3855 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3856 struct l2cap_cmd_rej_cid rej;
3858 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3859 rej.scid = cpu_to_le16(chan->scid);
3860 rej.dcid = cpu_to_le16(chan->dcid);
3862 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3863 sizeof(rej), &rej);
3864 goto unlock;
3867 /* Reject if config buffer is too small. */
3868 len = cmd_len - sizeof(*req);
3869 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3870 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3871 l2cap_build_conf_rsp(chan, rsp,
3872 L2CAP_CONF_REJECT, flags), rsp);
3873 goto unlock;
3876 /* Store config. */
3877 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3878 chan->conf_len += len;
/* Continuation flag set: more option fragments will follow. */
3880 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3881 /* Incomplete config. Send empty response. */
3882 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3883 l2cap_build_conf_rsp(chan, rsp,
3884 L2CAP_CONF_SUCCESS, flags), rsp);
3885 goto unlock;
3888 /* Complete config. */
/* Parse the accumulated request; our response is built into rsp. */
3889 len = l2cap_parse_conf_req(chan, rsp);
3890 if (len < 0) {
3891 l2cap_send_disconn_req(chan, ECONNRESET);
3892 goto unlock;
3895 chan->ident = cmd->ident;
3896 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3897 chan->num_conf_rsp++;
3899 /* Reset config buffer. */
3900 chan->conf_len = 0;
3902 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3903 goto unlock;
/* Both directions configured: finish channel setup. */
3905 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3906 set_default_fcs(chan);
3908 if (chan->mode == L2CAP_MODE_ERTM ||
3909 chan->mode == L2CAP_MODE_STREAMING)
3910 err = l2cap_ertm_init(chan);
3912 if (err < 0)
3913 l2cap_send_disconn_req(chan, -err);
3914 else
3915 l2cap_chan_ready(chan);
3917 goto unlock;
/* We have not sent our own Configure Request yet - do it now. */
3920 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3921 u8 buf[64];
3922 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3923 l2cap_build_conf_req(chan, buf), buf);
3924 chan->num_conf_req++;
3927 /* Got Conf Rsp PENDING from remote side and assume we sent
3928 Conf Rsp PENDING in the code above */
3929 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3930 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3932 /* check compatibility */
3934 /* Send rsp for BR/EDR channel */
3935 if (!chan->hs_hcon)
3936 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3937 else
3938 chan->ident = cmd->ident;
3941 unlock:
3942 l2cap_chan_unlock(chan);
3943 return err;
3946 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3947 struct l2cap_cmd_hdr *cmd, u8 *data)
3949 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3950 u16 scid, flags, result;
3951 struct l2cap_chan *chan;
3952 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3953 int err = 0;
3955 scid = __le16_to_cpu(rsp->scid);
3956 flags = __le16_to_cpu(rsp->flags);
3957 result = __le16_to_cpu(rsp->result);
3959 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3960 result, len);
3962 chan = l2cap_get_chan_by_scid(conn, scid);
3963 if (!chan)
3964 return 0;
3966 switch (result) {
3967 case L2CAP_CONF_SUCCESS:
3968 l2cap_conf_rfc_get(chan, rsp->data, len);
3969 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3970 break;
3972 case L2CAP_CONF_PENDING:
3973 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3975 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3976 char buf[64];
3978 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3979 buf, &result);
3980 if (len < 0) {
3981 l2cap_send_disconn_req(chan, ECONNRESET);
3982 goto done;
3985 if (!chan->hs_hcon) {
3986 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3988 } else {
3989 if (l2cap_check_efs(chan)) {
3990 amp_create_logical_link(chan);
3991 chan->ident = cmd->ident;
3995 goto done;
3997 case L2CAP_CONF_UNACCEPT:
3998 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3999 char req[64];
4001 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4002 l2cap_send_disconn_req(chan, ECONNRESET);
4003 goto done;
4006 /* throw out any old stored conf requests */
4007 result = L2CAP_CONF_SUCCESS;
4008 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4009 req, &result);
4010 if (len < 0) {
4011 l2cap_send_disconn_req(chan, ECONNRESET);
4012 goto done;
4015 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4016 L2CAP_CONF_REQ, len, req);
4017 chan->num_conf_req++;
4018 if (result != L2CAP_CONF_SUCCESS)
4019 goto done;
4020 break;
4023 default:
4024 l2cap_chan_set_err(chan, ECONNRESET);
4026 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4027 l2cap_send_disconn_req(chan, ECONNRESET);
4028 goto done;
4031 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4032 goto done;
4034 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4036 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4037 set_default_fcs(chan);
4039 if (chan->mode == L2CAP_MODE_ERTM ||
4040 chan->mode == L2CAP_MODE_STREAMING)
4041 err = l2cap_ertm_init(chan);
4043 if (err < 0)
4044 l2cap_send_disconn_req(chan, -err);
4045 else
4046 l2cap_chan_ready(chan);
4049 done:
4050 l2cap_chan_unlock(chan);
4051 return err;
/* Handle an incoming Disconnect Request: acknowledge it, shut down the
 * owning socket, and remove the channel.
 *
 * Locking: conn->chan_lock is taken first, then the channel lock.  The
 * channel lock is dropped before ops->close() runs, with a temporary
 * reference (chan_hold/chan_put) keeping the struct alive across the
 * callback.  Always returns 0, even for an unknown CID.
 */
4054 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4055 struct l2cap_cmd_hdr *cmd, u8 *data)
4057 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4058 struct l2cap_disconn_rsp rsp;
4059 u16 dcid, scid;
4060 struct l2cap_chan *chan;
4061 struct sock *sk;
4063 scid = __le16_to_cpu(req->scid);
4064 dcid = __le16_to_cpu(req->dcid);
4066 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4068 mutex_lock(&conn->chan_lock);
/* The peer's dcid names our local (source) CID. */
4070 chan = __l2cap_get_chan_by_scid(conn, dcid);
4071 if (!chan) {
4072 mutex_unlock(&conn->chan_lock);
4073 return 0;
4076 l2cap_chan_lock(chan);
4078 sk = chan->sk;
/* Echo the CID pair back in the Disconnect Response. */
4080 rsp.dcid = cpu_to_le16(chan->scid);
4081 rsp.scid = cpu_to_le16(chan->dcid);
4082 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
/* Stop further socket I/O before tearing the channel down. */
4084 lock_sock(sk);
4085 sk->sk_shutdown = SHUTDOWN_MASK;
4086 release_sock(sk);
/* Hold a ref so chan survives until after ops->close(). */
4088 l2cap_chan_hold(chan);
4089 l2cap_chan_del(chan, ECONNRESET);
4091 l2cap_chan_unlock(chan);
4093 chan->ops->close(chan);
4094 l2cap_chan_put(chan);
4096 mutex_unlock(&conn->chan_lock);
4098 return 0;
/* Handle the peer's Disconnect Response: the disconnect we initiated
 * is complete, so remove the channel with no error and close it.
 * Uses the same hold/unlock/close/put sequence as the request handler
 * so ops->close() runs without the channel lock held.  Always returns
 * 0, including for an unknown scid.
 */
4101 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4102 struct l2cap_cmd_hdr *cmd, u8 *data)
4104 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4105 u16 dcid, scid;
4106 struct l2cap_chan *chan;
4108 scid = __le16_to_cpu(rsp->scid);
4109 dcid = __le16_to_cpu(rsp->dcid);
4111 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4113 mutex_lock(&conn->chan_lock);
4115 chan = __l2cap_get_chan_by_scid(conn, scid);
4116 if (!chan) {
4117 mutex_unlock(&conn->chan_lock);
4118 return 0;
4121 l2cap_chan_lock(chan);
/* Keep a reference across the unlock so ops->close() is safe. */
4123 l2cap_chan_hold(chan);
4124 l2cap_chan_del(chan, 0);
4126 l2cap_chan_unlock(chan);
4128 chan->ops->close(chan);
4129 l2cap_chan_put(chan);
4131 mutex_unlock(&conn->chan_lock);
4133 return 0;
4136 static inline int l2cap_information_req(struct l2cap_conn *conn,
4137 struct l2cap_cmd_hdr *cmd, u8 *data)
4139 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4140 u16 type;
4142 type = __le16_to_cpu(req->type);
4144 BT_DBG("type 0x%4.4x", type);
4146 if (type == L2CAP_IT_FEAT_MASK) {
4147 u8 buf[8];
4148 u32 feat_mask = l2cap_feat_mask;
4149 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4150 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4151 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4152 if (!disable_ertm)
4153 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4154 | L2CAP_FEAT_FCS;
4155 if (enable_hs)
4156 feat_mask |= L2CAP_FEAT_EXT_FLOW
4157 | L2CAP_FEAT_EXT_WINDOW;
4159 put_unaligned_le32(feat_mask, rsp->data);
4160 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4161 buf);
4162 } else if (type == L2CAP_IT_FIXED_CHAN) {
4163 u8 buf[12];
4164 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4166 if (enable_hs)
4167 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4168 else
4169 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4171 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4172 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4173 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4174 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4175 buf);
4176 } else {
4177 struct l2cap_info_rsp rsp;
4178 rsp.type = cpu_to_le16(type);
4179 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4180 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4181 &rsp);
4184 return 0;
/* Handle an Information Response during connection setup.
 * Validates that the response matches our outstanding request, then
 * records the peer's feature mask / fixed-channel mask; after the
 * feature mask a follow-up fixed-channel query may be issued.  Once
 * info gathering is done, pending channels are started via
 * l2cap_conn_start().  Always returns 0.
 */
4187 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4188 struct l2cap_cmd_hdr *cmd, u8 *data)
4190 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4191 u16 type, result;
4193 type = __le16_to_cpu(rsp->type);
4194 result = __le16_to_cpu(rsp->result);
4196 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4198 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4199 if (cmd->ident != conn->info_ident ||
4200 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4201 return 0;
4203 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: treat info gathering as finished. */
4205 if (result != L2CAP_IR_SUCCESS) {
4206 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4207 conn->info_ident = 0;
4209 l2cap_conn_start(conn);
4211 return 0;
4214 switch (type) {
4215 case L2CAP_IT_FEAT_MASK:
4216 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask which ones. */
4218 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4219 struct l2cap_info_req req;
4220 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4222 conn->info_ident = l2cap_get_ident(conn);
4224 l2cap_send_cmd(conn, conn->info_ident,
4225 L2CAP_INFO_REQ, sizeof(req), &req);
4226 } else {
4227 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4228 conn->info_ident = 0;
4230 l2cap_conn_start(conn);
4232 break;
4234 case L2CAP_IT_FIXED_CHAN:
4235 conn->fixed_chan_mask = rsp->data[0];
4236 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4237 conn->info_ident = 0;
4239 l2cap_conn_start(conn);
4240 break;
4243 return 0;
/* Handle a Create Channel Request (high-speed / AMP channel setup).
 * Controller id 0 falls back to a plain BR/EDR connect; otherwise the
 * AMP controller id is validated and, on success, the new channel is
 * bound to the AMP manager and its high-speed ACL link.
 *
 * Returns 0 on success, -EPROTO for a malformed request, -EINVAL when
 * high speed is disabled, and -EFAULT on AMP lookup failures (after
 * sending an L2CAP_CR_BAD_AMP response where appropriate).
 */
4246 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4247 struct l2cap_cmd_hdr *cmd,
4248 u16 cmd_len, void *data)
4250 struct l2cap_create_chan_req *req = data;
4251 struct l2cap_create_chan_rsp rsp;
4252 struct l2cap_chan *chan;
4253 struct hci_dev *hdev;
4254 u16 psm, scid;
4256 if (cmd_len != sizeof(*req))
4257 return -EPROTO;
4259 if (!enable_hs)
4260 return -EINVAL;
4262 psm = le16_to_cpu(req->psm);
4263 scid = le16_to_cpu(req->scid);
4265 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4267 /* For controller id 0 make BR/EDR connection */
4268 if (req->amp_id == HCI_BREDR_ID) {
4269 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4270 req->amp_id);
4271 return 0;
4274 /* Validate AMP controller id */
/* hci_dev_get() takes a device reference; every exit path below
 * must balance it with hci_dev_put().
 */
4275 hdev = hci_dev_get(req->amp_id);
4276 if (!hdev)
4277 goto error;
4279 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4280 hci_dev_put(hdev);
4281 goto error;
4284 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4285 req->amp_id);
4286 if (chan) {
4287 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4288 struct hci_conn *hs_hcon;
4290 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4291 if (!hs_hcon) {
4292 hci_dev_put(hdev);
4293 return -EFAULT;
4296 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the BR/EDR channel to the AMP manager and link; AMP
 * channels run without an L2CAP FCS and use the block MTU.
 */
4298 mgr->bredr_chan = chan;
4299 chan->hs_hcon = hs_hcon;
4300 chan->fcs = L2CAP_FCS_NONE;
4301 conn->mtu = hdev->block_mtu;
4304 hci_dev_put(hdev);
4306 return 0;
/* Invalid AMP controller: tell the peer and fail the request. */
4308 error:
4309 rsp.dcid = 0;
4310 rsp.scid = cpu_to_le16(scid);
4311 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4312 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4314 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4315 sizeof(rsp), &rsp);
4317 return -EFAULT;
4320 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4322 struct l2cap_move_chan_req req;
4323 u8 ident;
4325 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4327 ident = l2cap_get_ident(chan->conn);
4328 chan->ident = ident;
4330 req.icid = cpu_to_le16(chan->scid);
4331 req.dest_amp_id = dest_amp_id;
4333 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4334 &req);
4336 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4339 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4341 struct l2cap_move_chan_rsp rsp;
4343 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4345 rsp.icid = cpu_to_le16(chan->dcid);
4346 rsp.result = cpu_to_le16(result);
4348 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4349 sizeof(rsp), &rsp);
4352 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4354 struct l2cap_move_chan_cfm cfm;
4356 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4358 chan->ident = l2cap_get_ident(chan->conn);
4360 cfm.icid = cpu_to_le16(chan->scid);
4361 cfm.result = cpu_to_le16(result);
4363 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4364 sizeof(cfm), &cfm);
4366 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4369 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4371 struct l2cap_move_chan_cfm cfm;
4373 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4375 cfm.icid = cpu_to_le16(icid);
4376 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4378 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4379 sizeof(cfm), &cfm);
4382 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4383 u16 icid)
4385 struct l2cap_move_chan_cfm_rsp rsp;
4387 BT_DBG("icid 0x%4.4x", icid);
4389 rsp.icid = cpu_to_le16(icid);
4390 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4393 static void __release_logical_link(struct l2cap_chan *chan)
4395 chan->hs_hchan = NULL;
4396 chan->hs_hcon = NULL;
4398 /* Placeholder - release the logical link */
/* React to a failed AMP logical link setup.  A channel that never
 * reached BT_CONNECTED is simply disconnected; an established channel
 * in the middle of a move backs the move out according to its role.
 */
4401 static void l2cap_logical_fail(struct l2cap_chan *chan)
4403 /* Logical link setup failed */
4404 if (chan->state != BT_CONNECTED) {
4405 /* Create channel failure, disconnect */
4406 l2cap_send_disconn_req(chan, ECONNRESET);
4407 return;
4410 switch (chan->move_role) {
4411 case L2CAP_MOVE_ROLE_RESPONDER:
4412 l2cap_move_done(chan);
4413 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4414 break;
4415 case L2CAP_MOVE_ROLE_INITIATOR:
4416 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4417 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4418 /* Remote has only sent pending or
4419 * success responses, clean up
4421 l2cap_move_done(chan);
4424 /* Other amp move states imply that the move
4425 * has already aborted
4427 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4428 break;
/* Complete channel creation once the AMP logical link is up: record
 * the link, send the deferred EFS Configure Response, and - if the
 * peer's configuration already finished - bring the channel up.
 */
4432 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4433 struct hci_chan *hchan)
4435 struct l2cap_conf_rsp rsp;
4437 chan->hs_hchan = hchan;
4438 chan->hs_hcon->l2cap_data = chan->conn;
/* Answer the Configure Request that was parked in chan->ident. */
4440 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4442 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4443 int err;
4445 set_default_fcs(chan);
4447 err = l2cap_ertm_init(chan);
4448 if (err < 0)
4449 l2cap_send_disconn_req(chan, -err);
4450 else
4451 l2cap_chan_ready(chan);
/* Advance the channel-move state machine once the AMP logical link
 * for a move is up.  Depending on the current wait state and our role,
 * either keep waiting for the peer's success response, defer for local
 * busy, or emit the Move Confirmation / Response now.
 */
4455 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4456 struct hci_chan *hchan)
4458 chan->hs_hcon = hchan->conn;
4459 chan->hs_hcon->l2cap_data = chan->conn;
4461 BT_DBG("move_state %d", chan->move_state);
4463 switch (chan->move_state) {
4464 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4465 /* Move confirm will be sent after a success
4466 * response is received
4468 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4469 break;
4470 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers; otherwise confirm (initiator) or answer
 * success (responder).
 */
4471 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4472 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4473 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4474 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4475 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4476 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4477 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4478 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4480 break;
4481 default:
4482 /* Move was not in expected state, free the channel */
4483 __release_logical_link(chan);
4485 chan->move_state = L2CAP_MOVE_STABLE;
4489 /* Call with chan locked */
/* Logical link completion callback.  On failure, unwind via
 * l2cap_logical_fail() and drop the link references.  On success,
 * a not-yet-connected channel finishes creation (AMP channels only,
 * local_amp_id != 0), while a connected channel finishes its move.
 */
4490 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4491 u8 status)
4493 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4495 if (status) {
4496 l2cap_logical_fail(chan);
4497 __release_logical_link(chan);
4498 return;
4501 if (chan->state != BT_CONNECTED) {
4502 /* Ignore logical link if channel is on BR/EDR */
4503 if (chan->local_amp_id)
4504 l2cap_logical_finish_create(chan, hchan);
4505 } else {
4506 l2cap_logical_finish_move(chan, hchan);
4510 void l2cap_move_start(struct l2cap_chan *chan)
4512 BT_DBG("chan %p", chan);
4514 if (chan->local_amp_id == HCI_BREDR_ID) {
4515 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4516 return;
4517 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4518 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4519 /* Placeholder - start physical link setup */
4520 } else {
4521 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4522 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4523 chan->move_id = 0;
4524 l2cap_move_setup(chan);
4525 l2cap_send_move_chan_req(chan, 0);
/* Finish channel creation after an AMP physical link attempt.
 * An outgoing channel (BT_CONNECT) either proceeds with a Create
 * Channel Request on the AMP or falls back to a plain BR/EDR Connect
 * Request.  An incoming channel sends the Create Channel Response and,
 * on success, moves to BT_CONFIG and issues our Configure Request.
 */
4529 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4530 u8 local_amp_id, u8 remote_amp_id)
4532 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4533 local_amp_id, remote_amp_id);
/* AMP-created channels run without an L2CAP FCS. */
4535 chan->fcs = L2CAP_FCS_NONE;
4537 /* Outgoing channel on AMP */
4538 if (chan->state == BT_CONNECT) {
4539 if (result == L2CAP_CR_SUCCESS) {
4540 chan->local_amp_id = local_amp_id;
4541 l2cap_send_create_chan_req(chan, remote_amp_id);
4542 } else {
4543 /* Revert to BR/EDR connect */
4544 l2cap_send_conn_req(chan);
4547 return;
4550 /* Incoming channel on AMP */
4551 if (__l2cap_no_conn_pending(chan)) {
4552 struct l2cap_conn_rsp rsp;
4553 char buf[128];
4554 rsp.scid = cpu_to_le16(chan->dcid);
4555 rsp.dcid = cpu_to_le16(chan->scid);
4557 if (result == L2CAP_CR_SUCCESS) {
4558 /* Send successful response */
4559 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4560 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4561 } else {
4562 /* Send negative response */
4563 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4564 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4567 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4568 sizeof(rsp), &rsp);
/* Accepted: jump into configuration and send our request. */
4570 if (result == L2CAP_CR_SUCCESS) {
4571 __l2cap_state_change(chan, BT_CONFIG);
4572 set_bit(CONF_REQ_SENT, &chan->conf_state);
4573 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4574 L2CAP_CONF_REQ,
4575 l2cap_build_conf_req(chan, buf), buf);
4576 chan->num_conf_req++;
4581 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4582 u8 remote_amp_id)
4584 l2cap_move_setup(chan);
4585 chan->move_id = local_amp_id;
4586 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4588 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer the move based on logical link availability.
 * The hci_chan lookup is still a placeholder (hchan stays NULL), so
 * today this always takes the "not available" branch and refuses the
 * move with L2CAP_MR_NOT_ALLOWED.
 */
4591 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4593 struct hci_chan *hchan = NULL;
4595 /* Placeholder - get hci_chan for logical link */
4597 if (hchan) {
4598 if (hchan->state == BT_CONNECTED) {
4599 /* Logical link is ready to go */
4600 chan->hs_hcon = hchan->conn;
4601 chan->hs_hcon->l2cap_data = chan->conn;
4602 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4603 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4605 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4606 } else {
4607 /* Wait for logical link to be ready */
4608 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4610 } else {
4611 /* Logical link not available */
4612 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4616 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4618 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4619 u8 rsp_result;
4620 if (result == -EINVAL)
4621 rsp_result = L2CAP_MR_BAD_ID;
4622 else
4623 rsp_result = L2CAP_MR_NOT_ALLOWED;
4625 l2cap_send_move_chan_rsp(chan, rsp_result);
4628 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4629 chan->move_state = L2CAP_MOVE_STABLE;
4631 /* Restart data transmission */
4632 l2cap_ertm_send(chan);
4635 /* Invoke with locked chan */
/* Physical link completion: route the result to channel creation,
 * move initiation, move response, or cancellation depending on the
 * channel state and our role in the move.
 * NOTE(review): on BT_DISCONN/BT_CLOSED this unlocks the channel
 * before returning - callers appear to rely on that; confirm at the
 * call sites.
 */
4636 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4638 u8 local_amp_id = chan->local_amp_id;
4639 u8 remote_amp_id = chan->remote_amp_id;
4641 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4642 chan, result, local_amp_id, remote_amp_id);
4644 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4645 l2cap_chan_unlock(chan);
4646 return;
4649 if (chan->state != BT_CONNECTED) {
4650 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4651 } else if (result != L2CAP_MR_SUCCESS) {
4652 l2cap_do_move_cancel(chan, result);
4653 } else {
4654 switch (chan->move_role) {
4655 case L2CAP_MOVE_ROLE_INITIATOR:
4656 l2cap_do_move_initiate(chan, local_amp_id,
4657 remote_amp_id);
4658 break;
4659 case L2CAP_MOVE_ROLE_RESPONDER:
4660 l2cap_do_move_respond(chan, result);
4661 break;
4662 default:
4663 l2cap_do_move_cancel(chan, result);
4664 break;
/* Handle an incoming Move Channel Request.
 * Validates the channel (dynamic CID, ERTM/streaming mode, policy not
 * BR/EDR-only) and the requested destination controller, resolves
 * initiator/responder collisions by bd_addr comparison, then answers
 * with success, pending, or a refusal code.
 * Returns -EPROTO on a malformed request, -EINVAL when high speed is
 * disabled, otherwise 0.
 * NOTE(review): l2cap_get_chan_by_dcid appears to return the channel
 * locked - the unlock before return releases it; confirm.
 */
4669 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4670 struct l2cap_cmd_hdr *cmd,
4671 u16 cmd_len, void *data)
4673 struct l2cap_move_chan_req *req = data;
4674 struct l2cap_move_chan_rsp rsp;
4675 struct l2cap_chan *chan;
4676 u16 icid = 0;
4677 u16 result = L2CAP_MR_NOT_ALLOWED;
4679 if (cmd_len != sizeof(*req))
4680 return -EPROTO;
4682 icid = le16_to_cpu(req->icid);
4684 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4686 if (!enable_hs)
4687 return -EINVAL;
4689 chan = l2cap_get_chan_by_dcid(conn, icid);
4690 if (!chan) {
4691 rsp.icid = cpu_to_le16(icid);
4692 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4693 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4694 sizeof(rsp), &rsp);
4695 return 0;
/* Remember the ident so the Move Response can reuse it. */
4698 chan->ident = cmd->ident;
/* Only dynamic, ERTM/streaming channels that are not pinned to
 * BR/EDR may be moved.
 */
4700 if (chan->scid < L2CAP_CID_DYN_START ||
4701 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4702 (chan->mode != L2CAP_MODE_ERTM &&
4703 chan->mode != L2CAP_MODE_STREAMING)) {
4704 result = L2CAP_MR_NOT_ALLOWED;
4705 goto send_move_response;
4708 if (chan->local_amp_id == req->dest_amp_id) {
4709 result = L2CAP_MR_SAME_ID;
4710 goto send_move_response;
/* A non-zero destination must name an AMP controller that is up. */
4713 if (req->dest_amp_id) {
4714 struct hci_dev *hdev;
4715 hdev = hci_dev_get(req->dest_amp_id);
4716 if (!hdev || hdev->dev_type != HCI_AMP ||
4717 !test_bit(HCI_UP, &hdev->flags)) {
4718 if (hdev)
4719 hci_dev_put(hdev);
4721 result = L2CAP_MR_BAD_ID;
4722 goto send_move_response;
4724 hci_dev_put(hdev);
4727 /* Detect a move collision. Only send a collision response
4728 * if this side has "lost", otherwise proceed with the move.
4729 * The winner has the larger bd_addr.
4731 if ((__chan_is_moving(chan) ||
4732 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4733 bacmp(conn->src, conn->dst) > 0) {
4734 result = L2CAP_MR_COLLISION;
4735 goto send_move_response;
4738 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4739 l2cap_move_setup(chan);
4740 chan->move_id = req->dest_amp_id;
4741 icid = chan->dcid;
4743 if (!req->dest_amp_id) {
4744 /* Moving to BR/EDR */
4745 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4746 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4747 result = L2CAP_MR_PEND;
4748 } else {
4749 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4750 result = L2CAP_MR_SUCCESS;
4752 } else {
4753 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4754 /* Placeholder - uncomment when amp functions are available */
4755 /*amp_accept_physical(chan, req->dest_amp_id);*/
4756 result = L2CAP_MR_PEND;
4759 send_move_response:
4760 l2cap_send_move_chan_rsp(chan, result);
4762 l2cap_chan_unlock(chan);
4764 return 0;
/* Continue an initiator-side move after a success/pending Move
 * Response.  Advances the move state machine; if the channel is gone,
 * an "unconfirmed" confirmation is sent for the bare icid.
 * NOTE(review): l2cap_get_chan_by_scid appears to return the channel
 * locked - released by the final unlock; confirm.
 */
4767 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4769 struct l2cap_chan *chan;
4770 struct hci_chan *hchan = NULL;
4772 chan = l2cap_get_chan_by_scid(conn, icid);
4773 if (!chan) {
4774 l2cap_send_move_chan_cfm_icid(conn, icid);
4775 return;
/* Re-arm the guard timer while the peer reports "pending". */
4778 __clear_chan_timer(chan);
4779 if (result == L2CAP_MR_PEND)
4780 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4782 switch (chan->move_state) {
4783 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4784 /* Move confirm will be sent when logical link
4785 * is complete.
4787 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4788 break;
4789 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4790 if (result == L2CAP_MR_PEND) {
4791 break;
4792 } else if (test_bit(CONN_LOCAL_BUSY,
4793 &chan->conn_state)) {
4794 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4795 } else {
4796 /* Logical link is up or moving to BR/EDR,
4797 * proceed with move
4799 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4800 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4802 break;
4803 case L2CAP_MOVE_WAIT_RSP:
4804 /* Moving to AMP */
4805 if (result == L2CAP_MR_SUCCESS) {
4806 /* Remote is ready, send confirm immediately
4807 * after logical link is ready
4809 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4810 } else {
4811 /* Both logical link and move success
4812 * are required to confirm
4814 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4817 /* Placeholder - get hci_chan for logical link */
/* hchan is still NULL here (placeholder above), so today this
 * path always sends an unconfirmed confirmation.
 */
4818 if (!hchan) {
4819 /* Logical link not available */
4820 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4821 break;
4824 /* If the logical link is not yet connected, do not
4825 * send confirmation.
4827 if (hchan->state != BT_CONNECTED)
4828 break;
4830 /* Logical link is already ready to go */
4832 chan->hs_hcon = hchan->conn;
4833 chan->hs_hcon->l2cap_data = chan->conn;
4835 if (result == L2CAP_MR_SUCCESS) {
4836 /* Can confirm now */
4837 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4838 } else {
4839 /* Now only need move success
4840 * to confirm
4842 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4845 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4846 break;
4847 default:
4848 /* Any other amp move state means the move failed. */
4849 chan->move_id = chan->local_amp_id;
4850 l2cap_move_done(chan);
4851 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4854 l2cap_chan_unlock(chan);
/* Handle a refused Move Response.  A collision leaves us as the
 * responder for the peer's competing move; any other refusal cancels
 * the move.  In both cases an "unconfirmed" confirmation is sent.
 * If the channel cannot be found by ident, answer using the icid.
 */
4857 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4858 u16 result)
4860 struct l2cap_chan *chan;
4862 chan = l2cap_get_chan_by_ident(conn, ident);
4863 if (!chan) {
4864 /* Could not locate channel, icid is best guess */
4865 l2cap_send_move_chan_cfm_icid(conn, icid);
4866 return;
4869 __clear_chan_timer(chan);
4871 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4872 if (result == L2CAP_MR_COLLISION) {
/* We lost the collision: the peer's move proceeds. */
4873 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4874 } else {
4875 /* Cleanup - cancel move */
4876 chan->move_id = chan->local_amp_id;
4877 l2cap_move_done(chan);
4881 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4883 l2cap_chan_unlock(chan);
4886 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4887 struct l2cap_cmd_hdr *cmd,
4888 u16 cmd_len, void *data)
4890 struct l2cap_move_chan_rsp *rsp = data;
4891 u16 icid, result;
4893 if (cmd_len != sizeof(*rsp))
4894 return -EPROTO;
4896 icid = le16_to_cpu(rsp->icid);
4897 result = le16_to_cpu(rsp->result);
4899 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4901 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4902 l2cap_move_continue(conn, icid, result);
4903 else
4904 l2cap_move_fail(conn, cmd->ident, icid, result);
4906 return 0;
/* Handle a Move Channel Confirmation (responder side).  If we were
 * waiting for it, commit or roll back the controller switch, finish
 * the move, and always acknowledge with a Confirmation Response.
 * Returns -EPROTO for a malformed confirmation, otherwise 0.
 * NOTE(review): l2cap_get_chan_by_dcid appears to return the channel
 * locked - released by the final unlock; confirm.
 */
4909 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4910 struct l2cap_cmd_hdr *cmd,
4911 u16 cmd_len, void *data)
4913 struct l2cap_move_chan_cfm *cfm = data;
4914 struct l2cap_chan *chan;
4915 u16 icid, result;
4917 if (cmd_len != sizeof(*cfm))
4918 return -EPROTO;
4920 icid = le16_to_cpu(cfm->icid);
4921 result = le16_to_cpu(cfm->result);
4923 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4925 chan = l2cap_get_chan_by_dcid(conn, icid);
4926 if (!chan) {
4927 /* Spec requires a response even if the icid was not found */
4928 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4929 return 0;
4932 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4933 if (result == L2CAP_MC_CONFIRMED) {
/* Commit the move; a move back to BR/EDR (id 0) drops
 * the logical link references.
 */
4934 chan->local_amp_id = chan->move_id;
4935 if (!chan->local_amp_id)
4936 __release_logical_link(chan);
4937 } else {
4938 chan->move_id = chan->local_amp_id;
4941 l2cap_move_done(chan);
4944 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4946 l2cap_chan_unlock(chan);
4948 return 0;
/* Handle a Move Channel Confirmation Response (initiator side).  If
 * we were waiting for it, commit the controller switch and finish the
 * move; a move back to BR/EDR releases any logical link.  Returns
 * -EPROTO for a malformed response, otherwise 0.
 * NOTE(review): l2cap_get_chan_by_scid appears to return the channel
 * locked - released by the final unlock; confirm.
 */
4951 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4952 struct l2cap_cmd_hdr *cmd,
4953 u16 cmd_len, void *data)
4955 struct l2cap_move_chan_cfm_rsp *rsp = data;
4956 struct l2cap_chan *chan;
4957 u16 icid;
4959 if (cmd_len != sizeof(*rsp))
4960 return -EPROTO;
4962 icid = le16_to_cpu(rsp->icid);
4964 BT_DBG("icid 0x%4.4x", icid);
4966 chan = l2cap_get_chan_by_scid(conn, icid);
4967 if (!chan)
4968 return 0;
4970 __clear_chan_timer(chan);
4972 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4973 chan->local_amp_id = chan->move_id;
/* Moved back to BR/EDR: drop the now-unused logical link. */
4975 if (!chan->local_amp_id && chan->hs_hchan)
4976 __release_logical_link(chan);
4978 l2cap_move_done(chan);
4981 l2cap_chan_unlock(chan);
4983 return 0;
4986 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4987 u16 to_multiplier)
4989 u16 max_latency;
4991 if (min > max || min < 6 || max > 3200)
4992 return -EINVAL;
4994 if (to_multiplier < 10 || to_multiplier > 3200)
4995 return -EINVAL;
4997 if (max >= to_multiplier * 8)
4998 return -EINVAL;
5000 max_latency = (to_multiplier * 8 / max) - 1;
5001 if (latency > 499 || latency > max_latency)
5002 return -EINVAL;
5004 return 0;
5007 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5008 struct l2cap_cmd_hdr *cmd,
5009 u8 *data)
5011 struct hci_conn *hcon = conn->hcon;
5012 struct l2cap_conn_param_update_req *req;
5013 struct l2cap_conn_param_update_rsp rsp;
5014 u16 min, max, latency, to_multiplier, cmd_len;
5015 int err;
5017 if (!(hcon->link_mode & HCI_LM_MASTER))
5018 return -EINVAL;
5020 cmd_len = __le16_to_cpu(cmd->len);
5021 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5022 return -EPROTO;
5024 req = (struct l2cap_conn_param_update_req *) data;
5025 min = __le16_to_cpu(req->min);
5026 max = __le16_to_cpu(req->max);
5027 latency = __le16_to_cpu(req->latency);
5028 to_multiplier = __le16_to_cpu(req->to_multiplier);
5030 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5031 min, max, latency, to_multiplier);
5033 memset(&rsp, 0, sizeof(rsp));
5035 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5036 if (err)
5037 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5038 else
5039 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5041 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5042 sizeof(rsp), &rsp);
5044 if (!err)
5045 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5047 return 0;
/* Dispatch one parsed BR/EDR signaling command to its handler.
 * Returns 0 on success or a negative errno; -EINVAL for an unknown
 * command code, in which case the caller sends an L2CAP Command Reject.
 */
5050 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5051 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5052 u8 *data)
5054 int err = 0;
5056 switch (cmd->code) {
5057 case L2CAP_COMMAND_REJ:
5058 l2cap_command_rej(conn, cmd, data);
5059 break;
5061 case L2CAP_CONN_REQ:
5062 err = l2cap_connect_req(conn, cmd, data);
5063 break;
/* Connect and Create Channel responses share one handler */
5065 case L2CAP_CONN_RSP:
5066 case L2CAP_CREATE_CHAN_RSP:
5067 err = l2cap_connect_create_rsp(conn, cmd, data);
5068 break;
5070 case L2CAP_CONF_REQ:
5071 err = l2cap_config_req(conn, cmd, cmd_len, data);
5072 break;
5074 case L2CAP_CONF_RSP:
5075 err = l2cap_config_rsp(conn, cmd, data);
5076 break;
5078 case L2CAP_DISCONN_REQ:
5079 err = l2cap_disconnect_req(conn, cmd, data);
5080 break;
5082 case L2CAP_DISCONN_RSP:
5083 err = l2cap_disconnect_rsp(conn, cmd, data);
5084 break;
/* Echo requests are answered inline, mirroring the payload back */
5086 case L2CAP_ECHO_REQ:
5087 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5088 break;
/* Echo responses carry no state to process */
5090 case L2CAP_ECHO_RSP:
5091 break;
5093 case L2CAP_INFO_REQ:
5094 err = l2cap_information_req(conn, cmd, data);
5095 break;
5097 case L2CAP_INFO_RSP:
5098 err = l2cap_information_rsp(conn, cmd, data);
5099 break;
/* AMP channel-move signaling (Create/Move Channel family) */
5101 case L2CAP_CREATE_CHAN_REQ:
5102 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5103 break;
5105 case L2CAP_MOVE_CHAN_REQ:
5106 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5107 break;
5109 case L2CAP_MOVE_CHAN_RSP:
5110 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5111 break;
5113 case L2CAP_MOVE_CHAN_CFM:
5114 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5115 break;
5117 case L2CAP_MOVE_CHAN_CFM_RSP:
5118 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5119 break;
5121 default:
5122 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5123 err = -EINVAL;
5124 break;
5127 return err;
/* Dispatch one LE signaling command. Only the connection-parameter
 * update exchange is handled; rejects and update responses are simply
 * accepted. Returns 0 or a negative errno (-EINVAL for unknown codes,
 * which triggers a Command Reject in the caller).
 */
5130 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5131 struct l2cap_cmd_hdr *cmd, u8 *data)
5133 switch (cmd->code) {
5134 case L2CAP_COMMAND_REJ:
5135 return 0;
5137 case L2CAP_CONN_PARAM_UPDATE_REQ:
5138 return l2cap_conn_param_update_req(conn, cmd, data);
5140 case L2CAP_CONN_PARAM_UPDATE_RSP:
5141 return 0;
5143 default:
5144 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5145 return -EINVAL;
/* Parse and process every signaling command packed into one skb on the
 * signaling channel. Each command is copied into a local header, length
 * checked against the remaining payload, then dispatched to the LE or
 * BR/EDR handler depending on link type. A handler error results in a
 * Command Reject being sent back. Consumes the skb.
 */
5149 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5150 struct sk_buff *skb)
5152 u8 *data = skb->data;
5153 int len = skb->len;
5154 struct l2cap_cmd_hdr cmd;
5155 int err;
/* Give raw sockets a copy of the signaling traffic first */
5157 l2cap_raw_recv(conn, skb);
5159 while (len >= L2CAP_CMD_HDR_SIZE) {
5160 u16 cmd_len;
5161 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5162 data += L2CAP_CMD_HDR_SIZE;
5163 len -= L2CAP_CMD_HDR_SIZE;
5165 cmd_len = le16_to_cpu(cmd.len);
5167 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5168 cmd.ident);
/* Stop on a truncated command or an invalid (zero) identifier */
5170 if (cmd_len > len || !cmd.ident) {
5171 BT_DBG("corrupted command");
5172 break;
5175 if (conn->hcon->type == LE_LINK)
5176 err = l2cap_le_sig_cmd(conn, &cmd, data);
5177 else
5178 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5180 if (err) {
5181 struct l2cap_cmd_rej_unk rej;
5183 BT_ERR("Wrong link type (%d)", err);
5185 /* FIXME: Map err to a valid reason */
5186 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5187 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5188 sizeof(rej), &rej);
/* Advance past this command's payload to the next one */
5191 data += cmd_len;
5192 len -= cmd_len;
5195 kfree_skb(skb);
/* Verify (and strip) the CRC16 frame check sequence on a received
 * ERTM/streaming frame, when FCS is enabled on the channel. The CRC is
 * computed over the L2CAP header (which precedes skb->data by hdr_size)
 * plus the remaining payload. Returns 0 if valid or FCS is off,
 * -EBADMSG on a checksum mismatch.
 */
5198 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5200 u16 our_fcs, rcv_fcs;
5201 int hdr_size;
/* Extended control fields use the larger header size */
5203 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5204 hdr_size = L2CAP_EXT_HDR_SIZE;
5205 else
5206 hdr_size = L2CAP_ENH_HDR_SIZE;
5208 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the trailing FCS bytes; they remain readable just past len */
5209 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5210 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5211 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5213 if (our_fcs != rcv_fcs)
5214 return -EBADMSG;
5216 return 0;
/* Answer a poll: send the F-bit in an RNR (if locally busy), in pending
 * I-frames if any can be transmitted, or in a final RR s-frame as a
 * last resort. CONN_SEND_FBIT is set so whichever frame goes out first
 * carries the F-bit.
 */
5219 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5221 struct l2cap_ctrl control;
5223 BT_DBG("chan %p", chan);
5225 memset(&control, 0, sizeof(control));
5226 control.sframe = 1;
5227 control.final = 1;
5228 control.reqseq = chan->buffer_seq;
5229 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5231 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5232 control.super = L2CAP_SUPER_RNR;
5233 l2cap_send_sframe(chan, &control);
/* Remote just left busy: restart retransmission timing if frames
 * are still unacknowledged.
 */
5236 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5237 chan->unacked_frames > 0)
5238 __set_retrans_timer(chan);
5240 /* Send pending iframes */
5241 l2cap_ertm_send(chan);
5243 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5244 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5245 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5246 * send it now.
5248 control.super = L2CAP_SUPER_RR;
5249 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the current tail via
 * *last_frag so appends stay O(1). Updates skb's aggregate len,
 * data_len and truesize accounting to cover the new fragment.
 */
5253 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5254 struct sk_buff **last_frag)
5256 /* skb->len reflects data in skb as well as all fragments
5257 * skb->data_len reflects only data in fragments
5259 if (!skb_has_frag_list(skb))
5260 skb_shinfo(skb)->frag_list = new_frag;
5262 new_frag->next = NULL;
5264 (*last_frag)->next = new_frag;
5265 *last_frag = new_frag;
5267 skb->len += new_frag->len;
5268 skb->data_len += new_frag->len;
5269 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from segmented I-frames according to the SAR bits.
 * Unsegmented frames are delivered directly; START/CONTINUE/END frames
 * are accumulated in chan->sdu via append_skb_frag(). On any error the
 * partial SDU and the current skb are freed and reassembly state is
 * reset. Returns 0 on success (ownership of skb is taken), negative
 * errno on protocol violation, oversize SDU, or delivery failure.
 */
5272 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5273 struct l2cap_ctrl *control)
5275 int err = -EINVAL;
5277 switch (control->sar) {
5278 case L2CAP_SAR_UNSEGMENTED:
/* An unsegmented frame while reassembly is in progress is invalid */
5279 if (chan->sdu)
5280 break;
5282 err = chan->ops->recv(chan, skb);
5283 break;
5285 case L2CAP_SAR_START:
/* A new START while one is in progress is invalid */
5286 if (chan->sdu)
5287 break;
/* First two bytes of a START segment carry the total SDU length */
5289 chan->sdu_len = get_unaligned_le16(skb->data);
5290 skb_pull(skb, L2CAP_SDULEN_SIZE);
5292 if (chan->sdu_len > chan->imtu) {
5293 err = -EMSGSIZE;
5294 break;
/* A START segment must be shorter than the full SDU */
5297 if (skb->len >= chan->sdu_len)
5298 break;
5300 chan->sdu = skb;
5301 chan->sdu_last_frag = skb;
/* skb now owned by the reassembly buffer */
5303 skb = NULL;
5304 err = 0;
5305 break;
5307 case L2CAP_SAR_CONTINUE:
5308 if (!chan->sdu)
5309 break;
5311 append_skb_frag(chan->sdu, skb,
5312 &chan->sdu_last_frag);
5313 skb = NULL;
/* A CONTINUE must not complete (or overflow) the SDU */
5315 if (chan->sdu->len >= chan->sdu_len)
5316 break;
5318 err = 0;
5319 break;
5321 case L2CAP_SAR_END:
5322 if (!chan->sdu)
5323 break;
5325 append_skb_frag(chan->sdu, skb,
5326 &chan->sdu_last_frag);
5327 skb = NULL;
/* END must make the accumulated length match the advertised one */
5329 if (chan->sdu->len != chan->sdu_len)
5330 break;
5332 err = chan->ops->recv(chan, chan->sdu);
5334 if (!err) {
5335 /* Reassembly complete */
5336 chan->sdu = NULL;
5337 chan->sdu_last_frag = NULL;
5338 chan->sdu_len = 0;
5340 break;
5343 if (err) {
/* kfree_skb(NULL) is a no-op, so this is safe when ownership
 * was already transferred above.
 */
5344 kfree_skb(skb);
5345 kfree_skb(chan->sdu);
5346 chan->sdu = NULL;
5347 chan->sdu_last_frag = NULL;
5348 chan->sdu_len = 0;
5351 return err;
/* Re-segment queued outbound data after an AMP channel move changed the
 * MTU. Not implemented yet; always reports success.
 */
5354 static int l2cap_resegment(struct l2cap_chan *chan)
5356 /* Placeholder */
5357 return 0;
/* Feed a local-busy transition (busy != 0 means entering busy) into the
 * ERTM transmit state machine. No-op for non-ERTM channels.
 */
5360 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5362 u8 event;
5364 if (chan->mode != L2CAP_MODE_ERTM)
5365 return;
5367 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5368 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver consecutively-sequenced frames
 * starting at buffer_seq until a gap, local busy, or reassembly error.
 * If the queue empties, the channel returns to the normal RECV state
 * and an ack is sent. Returns the last reassembly error (0 if none).
 */
5371 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5373 int err = 0;
5374 /* Pass sequential frames to l2cap_reassemble_sdu()
5375 * until a gap is encountered.
5378 BT_DBG("chan %p", chan);
5380 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5381 struct sk_buff *skb;
5382 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5383 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5385 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5387 if (!skb)
5388 break;
5390 skb_unlink(skb, &chan->srej_q);
5391 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5392 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5393 if (err)
5394 break;
5397 if (skb_queue_empty(&chan->srej_q)) {
5398 chan->rx_state = L2CAP_RX_STATE_RECV;
5399 l2cap_send_ack(chan);
5402 return err;
/* Handle a received SREJ s-frame: retransmit the single requested
 * I-frame. Disconnects on an invalid reqseq or when the frame has
 * already hit its retry limit. P/F bit bookkeeping follows the ERTM
 * SREJ_ACT rules to avoid duplicate retransmissions.
 */
5405 static void l2cap_handle_srej(struct l2cap_chan *chan,
5406 struct l2cap_ctrl *control)
5408 struct sk_buff *skb;
5410 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq asks for a frame never sent */
5412 if (control->reqseq == chan->next_tx_seq) {
5413 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5414 l2cap_send_disconn_req(chan, ECONNRESET);
5415 return;
5418 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5420 if (skb == NULL) {
5421 BT_DBG("Seq %d not available for retransmission",
5422 control->reqseq);
5423 return;
/* max_tx of zero means unlimited retries */
5426 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5427 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5428 l2cap_send_disconn_req(chan, ECONNRESET);
5429 return;
5432 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5434 if (control->poll) {
5435 l2cap_pass_to_tx(chan, control);
/* Poll set: respond with F-bit and retransmit immediately */
5437 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5438 l2cap_retransmit(chan, control);
5439 l2cap_ertm_send(chan);
5441 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5442 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5443 chan->srej_save_reqseq = control->reqseq;
5445 } else {
5446 l2cap_pass_to_tx_fbit(chan, control);
5448 if (control->final) {
/* Skip the retransmit if this F-bit answers the SREJ we
 * already acted on for the same sequence number.
 */
5449 if (chan->srej_save_reqseq != control->reqseq ||
5450 !test_and_clear_bit(CONN_SREJ_ACT,
5451 &chan->conn_state))
5452 l2cap_retransmit(chan, control);
5453 } else {
5454 l2cap_retransmit(chan, control);
5455 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5456 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5457 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ s-frame: retransmit all unacked I-frames from
 * reqseq onward. Disconnects on invalid reqseq or exhausted retries;
 * the REJ_ACT flag suppresses a duplicate retransmit-all when the
 * matching F-bit arrives later.
 */
5463 static void l2cap_handle_rej(struct l2cap_chan *chan,
5464 struct l2cap_ctrl *control)
5466 struct sk_buff *skb;
5468 BT_DBG("chan %p, control %p", chan, control);
5470 if (control->reqseq == chan->next_tx_seq) {
5471 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5472 l2cap_send_disconn_req(chan, ECONNRESET);
5473 return;
5476 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5478 if (chan->max_tx && skb &&
5479 bt_cb(skb)->control.retries >= chan->max_tx) {
5480 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5481 l2cap_send_disconn_req(chan, ECONNRESET);
5482 return;
5485 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5487 l2cap_pass_to_tx(chan, control);
5489 if (control->final) {
5490 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5491 l2cap_retransmit_all(chan, control);
5492 } else {
5493 l2cap_retransmit_all(chan, control);
5494 l2cap_ertm_send(chan);
5495 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5496 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window
 * and any outstanding SREJ state. Returns one of the L2CAP_TXSEQ_*
 * classifications (EXPECTED, UNEXPECTED, DUPLICATE, the *_SREJ
 * variants, INVALID, or INVALID_IGNORE) that drive the rx state
 * machines.
 */
5500 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5502 BT_DBG("chan %p, txseq %d", chan, txseq);
5504 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5505 chan->expected_tx_seq);
/* While SREJs are outstanding, frames are matched against the SREJ
 * bookkeeping before the normal window checks.
 */
5507 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5508 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5509 chan->tx_win) {
5510 /* See notes below regarding "double poll" and
5511 * invalid packets.
5513 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5514 BT_DBG("Invalid/Ignore - after SREJ");
5515 return L2CAP_TXSEQ_INVALID_IGNORE;
5516 } else {
5517 BT_DBG("Invalid - in window after SREJ sent");
5518 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we want next */
5522 if (chan->srej_list.head == txseq) {
5523 BT_DBG("Expected SREJ");
5524 return L2CAP_TXSEQ_EXPECTED_SREJ;
5527 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5528 BT_DBG("Duplicate SREJ - txseq already stored");
5529 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5532 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5533 BT_DBG("Unexpected SREJ - not requested");
5534 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5538 if (chan->expected_tx_seq == txseq) {
5539 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5540 chan->tx_win) {
5541 BT_DBG("Invalid - txseq outside tx window");
5542 return L2CAP_TXSEQ_INVALID;
5543 } else {
5544 BT_DBG("Expected");
5545 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod window) was already received */
5549 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5550 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5551 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5552 return L2CAP_TXSEQ_DUPLICATE;
5555 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5556 /* A source of invalid packets is a "double poll" condition,
5557 * where delays cause us to send multiple poll packets. If
5558 * the remote stack receives and processes both polls,
5559 * sequence numbers can wrap around in such a way that a
5560 * resent frame has a sequence number that looks like new data
5561 * with a sequence gap. This would trigger an erroneous SREJ
5562 * request.
5564 * Fortunately, this is impossible with a tx window that's
5565 * less than half of the maximum sequence number, which allows
5566 * invalid frames to be safely ignored.
5568 * With tx window sizes greater than half of the tx window
5569 * maximum, the frame is invalid and cannot be ignored. This
5570 * causes a disconnect.
5573 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5574 BT_DBG("Invalid/Ignore - txseq outside tx window");
5575 return L2CAP_TXSEQ_INVALID_IGNORE;
5576 } else {
5577 BT_DBG("Invalid - txseq outside tx window");
5578 return L2CAP_TXSEQ_INVALID;
5580 } else {
5581 BT_DBG("Unexpected - txseq indicates missing frames");
5582 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, normal RECV state. Handles I-frames
 * (in-order delivery, gap detection that transitions to SREJ_SENT,
 * duplicates, invalid sequence numbers) and RR/RNR/REJ/SREJ s-frame
 * events. Frames not queued or consumed are freed before returning.
 * Returns 0 or a negative errno from SDU reassembly.
 */
5586 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5587 struct l2cap_ctrl *control,
5588 struct sk_buff *skb, u8 event)
5590 int err = 0;
/* Tracks whether skb ownership was transferred (queued/consumed) */
5591 bool skb_in_use = 0;
5593 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5594 event);
5596 switch (event) {
5597 case L2CAP_EV_RECV_IFRAME:
5598 switch (l2cap_classify_txseq(chan, control->txseq)) {
5599 case L2CAP_TXSEQ_EXPECTED:
5600 l2cap_pass_to_tx(chan, control);
5602 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5603 BT_DBG("Busy, discarding expected seq %d",
5604 control->txseq);
5605 break;
5608 chan->expected_tx_seq = __next_seq(chan,
5609 control->txseq);
5611 chan->buffer_seq = chan->expected_tx_seq;
5612 skb_in_use = 1;
5614 err = l2cap_reassemble_sdu(chan, skb, control);
5615 if (err)
5616 break;
5618 if (control->final) {
5619 if (!test_and_clear_bit(CONN_REJ_ACT,
5620 &chan->conn_state)) {
/* Clear F so retransmitted frames don't re-carry it */
5621 control->final = 0;
5622 l2cap_retransmit_all(chan, control);
5623 l2cap_ertm_send(chan);
5627 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5628 l2cap_send_ack(chan);
5629 break;
5630 case L2CAP_TXSEQ_UNEXPECTED:
5631 l2cap_pass_to_tx(chan, control);
5633 /* Can't issue SREJ frames in the local busy state.
5634 * Drop this frame, it will be seen as missing
5635 * when local busy is exited.
5637 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5638 BT_DBG("Busy, discarding unexpected seq %d",
5639 control->txseq);
5640 break;
5643 /* There was a gap in the sequence, so an SREJ
5644 * must be sent for each missing frame. The
5645 * current frame is stored for later use.
5647 skb_queue_tail(&chan->srej_q, skb);
5648 skb_in_use = 1;
5649 BT_DBG("Queued %p (queue len %d)", skb,
5650 skb_queue_len(&chan->srej_q));
5652 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5653 l2cap_seq_list_clear(&chan->srej_list);
5654 l2cap_send_srej(chan, control->txseq);
5656 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5657 break;
5658 case L2CAP_TXSEQ_DUPLICATE:
/* Already received; still process the piggybacked ack */
5659 l2cap_pass_to_tx(chan, control);
5660 break;
5661 case L2CAP_TXSEQ_INVALID_IGNORE:
5662 break;
5663 case L2CAP_TXSEQ_INVALID:
5664 default:
5665 l2cap_send_disconn_req(chan, ECONNRESET);
5666 break;
5668 break;
5669 case L2CAP_EV_RECV_RR:
5670 l2cap_pass_to_tx(chan, control);
5671 if (control->final) {
5672 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5674 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5675 !__chan_is_moving(chan)) {
5676 control->final = 0;
5677 l2cap_retransmit_all(chan, control);
5680 l2cap_ertm_send(chan);
5681 } else if (control->poll) {
5682 l2cap_send_i_or_rr_or_rnr(chan);
5683 } else {
5684 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5685 &chan->conn_state) &&
5686 chan->unacked_frames)
5687 __set_retrans_timer(chan);
5689 l2cap_ertm_send(chan);
5691 break;
5692 case L2CAP_EV_RECV_RNR:
5693 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5694 l2cap_pass_to_tx(chan, control);
5695 if (control && control->poll) {
5696 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5697 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers */
5699 __clear_retrans_timer(chan);
5700 l2cap_seq_list_clear(&chan->retrans_list);
5701 break;
5702 case L2CAP_EV_RECV_REJ:
5703 l2cap_handle_rej(chan, control);
5704 break;
5705 case L2CAP_EV_RECV_SREJ:
5706 l2cap_handle_srej(chan, control);
5707 break;
5708 default:
5709 break;
/* Free any skb whose ownership was not transferred above */
5712 if (skb && !skb_in_use) {
5713 BT_DBG("Freeing %p", skb);
5714 kfree_skb(skb);
5717 return err;
/* ERTM receive state machine, SREJ_SENT state (selective-reject
 * recovery in progress). Incoming I-frames are queued in srej_q and
 * matched against the outstanding SREJ list; completed runs are drained
 * via l2cap_rx_queued_iframes(). Also handles RR/RNR/REJ/SREJ events
 * while in recovery. Unconsumed skbs are freed. Returns 0 or a
 * negative errno.
 */
5720 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5721 struct l2cap_ctrl *control,
5722 struct sk_buff *skb, u8 event)
5724 int err = 0;
5725 u16 txseq = control->txseq;
5726 bool skb_in_use = 0;
5728 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5729 event);
5731 switch (event) {
5732 case L2CAP_EV_RECV_IFRAME:
5733 switch (l2cap_classify_txseq(chan, txseq)) {
5734 case L2CAP_TXSEQ_EXPECTED:
5735 /* Keep frame for reassembly later */
5736 l2cap_pass_to_tx(chan, control);
5737 skb_queue_tail(&chan->srej_q, skb);
5738 skb_in_use = 1;
5739 BT_DBG("Queued %p (queue len %d)", skb,
5740 skb_queue_len(&chan->srej_q));
5742 chan->expected_tx_seq = __next_seq(chan, txseq);
5743 break;
5744 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we asked for first; drop it
 * from the SREJ list and try to drain the hold queue.
 */
5745 l2cap_seq_list_pop(&chan->srej_list);
5747 l2cap_pass_to_tx(chan, control);
5748 skb_queue_tail(&chan->srej_q, skb);
5749 skb_in_use = 1;
5750 BT_DBG("Queued %p (queue len %d)", skb,
5751 skb_queue_len(&chan->srej_q));
5753 err = l2cap_rx_queued_iframes(chan);
5754 if (err)
5755 break;
5757 break;
5758 case L2CAP_TXSEQ_UNEXPECTED:
5759 /* Got a frame that can't be reassembled yet.
5760 * Save it for later, and send SREJs to cover
5761 * the missing frames.
5763 skb_queue_tail(&chan->srej_q, skb);
5764 skb_in_use = 1;
5765 BT_DBG("Queued %p (queue len %d)", skb,
5766 skb_queue_len(&chan->srej_q));
5768 l2cap_pass_to_tx(chan, control);
5769 l2cap_send_srej(chan, control->txseq);
5770 break;
5771 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5772 /* This frame was requested with an SREJ, but
5773 * some expected retransmitted frames are
5774 * missing. Request retransmission of missing
5775 * SREJ'd frames.
5777 skb_queue_tail(&chan->srej_q, skb);
5778 skb_in_use = 1;
5779 BT_DBG("Queued %p (queue len %d)", skb,
5780 skb_queue_len(&chan->srej_q));
5782 l2cap_pass_to_tx(chan, control);
5783 l2cap_send_srej_list(chan, control->txseq);
5784 break;
5785 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5786 /* We've already queued this frame. Drop this copy. */
5787 l2cap_pass_to_tx(chan, control);
5788 break;
5789 case L2CAP_TXSEQ_DUPLICATE:
5790 /* Expecting a later sequence number, so this frame
5791 * was already received. Ignore it completely.
5793 break;
5794 case L2CAP_TXSEQ_INVALID_IGNORE:
5795 break;
5796 case L2CAP_TXSEQ_INVALID:
5797 default:
5798 l2cap_send_disconn_req(chan, ECONNRESET);
5799 break;
5801 break;
5802 case L2CAP_EV_RECV_RR:
5803 l2cap_pass_to_tx(chan, control);
5804 if (control->final) {
5805 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5807 if (!test_and_clear_bit(CONN_REJ_ACT,
5808 &chan->conn_state)) {
5809 control->final = 0;
5810 l2cap_retransmit_all(chan, control);
5813 l2cap_ertm_send(chan);
5814 } else if (control->poll) {
5815 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5816 &chan->conn_state) &&
5817 chan->unacked_frames) {
5818 __set_retrans_timer(chan);
/* Answer the poll with the most recent SREJ, F-bit set */
5821 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5822 l2cap_send_srej_tail(chan);
5823 } else {
5824 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5825 &chan->conn_state) &&
5826 chan->unacked_frames)
5827 __set_retrans_timer(chan);
5829 l2cap_send_ack(chan);
5831 break;
5832 case L2CAP_EV_RECV_RNR:
5833 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5834 l2cap_pass_to_tx(chan, control);
5835 if (control->poll) {
5836 l2cap_send_srej_tail(chan);
5837 } else {
5838 struct l2cap_ctrl rr_control;
5839 memset(&rr_control, 0, sizeof(rr_control));
5840 rr_control.sframe = 1;
5841 rr_control.super = L2CAP_SUPER_RR;
5842 rr_control.reqseq = chan->buffer_seq;
5843 l2cap_send_sframe(chan, &rr_control);
5846 break;
5847 case L2CAP_EV_RECV_REJ:
5848 l2cap_handle_rej(chan, control);
5849 break;
5850 case L2CAP_EV_RECV_SREJ:
5851 l2cap_handle_srej(chan, control);
5852 break;
/* Free any skb whose ownership was not transferred above */
5855 if (skb && !skb_in_use) {
5856 BT_DBG("Freeing %p", skb);
5857 kfree_skb(skb);
5860 return err;
/* Complete an AMP channel move on the receive side: return to the RECV
 * state, switch the connection MTU to the new controller's (block MTU
 * for a high-speed link, ACL MTU otherwise), and re-segment queued
 * data. Returns the l2cap_resegment() result.
 */
5863 static int l2cap_finish_move(struct l2cap_chan *chan)
5865 BT_DBG("chan %p", chan);
5867 chan->rx_state = L2CAP_RX_STATE_RECV;
5869 if (chan->hs_hcon)
5870 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5871 else
5872 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5874 return l2cap_resegment(chan);
/* Receive state while waiting for a P-bit after a channel move. Only a
 * frame with the poll bit set is accepted (-EPROTO otherwise); the
 * transmit side is rewound to the peer's reqseq, the move is finished,
 * and the F-bit response is sent. Non-I-frame events are then replayed
 * through the normal RECV-state handler.
 */
5877 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
5878 struct l2cap_ctrl *control,
5879 struct sk_buff *skb, u8 event)
5881 int err;
5883 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5884 event);
5886 if (!control->poll)
5887 return -EPROTO;
5889 l2cap_process_reqseq(chan, control->reqseq);
5891 if (!skb_queue_empty(&chan->tx_q))
5892 chan->tx_send_head = skb_peek(&chan->tx_q);
5893 else
5894 chan->tx_send_head = NULL;
5896 /* Rewind next_tx_seq to the point expected
5897 * by the receiver.
5899 chan->next_tx_seq = control->reqseq;
5900 chan->unacked_frames = 0;
5902 err = l2cap_finish_move(chan);
5903 if (err)
5904 return err;
5906 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5907 l2cap_send_i_or_rr_or_rnr(chan);
/* An I-frame is not a valid poll in this state */
5909 if (event == L2CAP_EV_RECV_IFRAME)
5910 return -EPROTO;
5912 return l2cap_rx_state_recv(chan, control, NULL, event);
/* Receive state while waiting for an F-bit after a channel move. Only a
 * frame with the final bit set is accepted (-EPROTO otherwise); the
 * transmit side is rewound to the peer's reqseq, the MTU is switched to
 * the new controller, data is re-segmented, and the frame is then
 * processed by the normal RECV-state handler.
 */
5915 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
5916 struct l2cap_ctrl *control,
5917 struct sk_buff *skb, u8 event)
5919 int err;
5921 if (!control->final)
5922 return -EPROTO;
5924 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5926 chan->rx_state = L2CAP_RX_STATE_RECV;
5927 l2cap_process_reqseq(chan, control->reqseq);
5929 if (!skb_queue_empty(&chan->tx_q))
5930 chan->tx_send_head = skb_peek(&chan->tx_q);
5931 else
5932 chan->tx_send_head = NULL;
5934 /* Rewind next_tx_seq to the point expected
5935 * by the receiver.
5937 chan->next_tx_seq = control->reqseq;
5938 chan->unacked_frames = 0;
5940 if (chan->hs_hcon)
5941 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5942 else
5943 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5945 err = l2cap_resegment(chan);
5947 if (!err)
5948 err = l2cap_rx_state_recv(chan, control, skb, event);
5950 return err;
/* Check that reqseq acknowledges only frames that were actually sent
 * and are still unacked, using modular sequence-offset arithmetic.
 */
5953 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5955 /* Make sure reqseq is for a packet that has been sent but not acked */
5956 u16 unacked;
5958 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5959 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry: validate the frame's reqseq, then route
 * the event to the handler for the channel's current rx_state. An
 * invalid reqseq is a protocol violation and disconnects the channel.
 * Returns the state handler's result (0 if the state is unknown).
 */
5962 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5963 struct sk_buff *skb, u8 event)
5965 int err = 0;
5967 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5968 control, skb, event, chan->rx_state);
5970 if (__valid_reqseq(chan, control->reqseq)) {
5971 switch (chan->rx_state) {
5972 case L2CAP_RX_STATE_RECV:
5973 err = l2cap_rx_state_recv(chan, control, skb, event);
5974 break;
5975 case L2CAP_RX_STATE_SREJ_SENT:
5976 err = l2cap_rx_state_srej_sent(chan, control, skb,
5977 event);
5978 break;
5979 case L2CAP_RX_STATE_WAIT_P:
5980 err = l2cap_rx_state_wait_p(chan, control, skb, event);
5981 break;
5982 case L2CAP_RX_STATE_WAIT_F:
5983 err = l2cap_rx_state_wait_f(chan, control, skb, event);
5984 break;
5985 default:
5986 /* shut it down */
5987 break;
5989 } else {
5990 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5991 control->reqseq, chan->next_tx_seq,
5992 chan->expected_ack_seq);
5993 l2cap_send_disconn_req(chan, ECONNRESET);
5996 return err;
/* Streaming-mode receive: deliver only in-sequence frames; any gap or
 * duplicate discards the frame and abandons a partially reassembled
 * SDU (streaming mode has no retransmission). Sequence tracking is
 * resynchronized to the received txseq either way. Always returns 0.
 */
5999 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6000 struct sk_buff *skb)
6002 int err = 0;
6004 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6005 chan->rx_state);
6007 if (l2cap_classify_txseq(chan, control->txseq) ==
6008 L2CAP_TXSEQ_EXPECTED) {
6009 l2cap_pass_to_tx(chan, control);
6011 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6012 __next_seq(chan, chan->buffer_seq));
6014 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6016 l2cap_reassemble_sdu(chan, skb, control);
6017 } else {
/* Out of sequence: drop any partial SDU along with this frame */
6018 if (chan->sdu) {
6019 kfree_skb(chan->sdu);
6020 chan->sdu = NULL;
6022 chan->sdu_last_frag = NULL;
6023 chan->sdu_len = 0;
6025 if (skb) {
6026 BT_DBG("Freeing %p", skb);
6027 kfree_skb(skb);
/* Resync expectations to whatever the sender is at now */
6031 chan->last_acked_seq = control->txseq;
6032 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6034 return err;
/* Receive one ERTM/streaming data frame: unpack the control field,
 * verify FCS, validate payload length against MPS and the F/P bit
 * rules, then hand I-frames to l2cap_rx()/l2cap_stream_rx() and
 * s-frames to l2cap_rx() with the matching event. Invalid frames are
 * dropped (and may trigger a disconnect). Always returns 0; consumes
 * the skb on the drop paths.
 */
6037 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6039 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6040 u16 len;
6041 u8 event;
6043 __unpack_control(chan, skb);
6045 len = skb->len;
6048 * We can just drop the corrupted I-frame here.
6049 * Receiver will miss it and start proper recovery
6050 * procedures and ask for retransmission.
6052 if (l2cap_check_fcs(chan, skb))
6053 goto drop;
/* Exclude the SDU-length and FCS fields from the payload length */
6055 if (!control->sframe && control->sar == L2CAP_SAR_START)
6056 len -= L2CAP_SDULEN_SIZE;
6058 if (chan->fcs == L2CAP_FCS_CRC16)
6059 len -= L2CAP_FCS_SIZE;
6061 if (len > chan->mps) {
6062 l2cap_send_disconn_req(chan, ECONNRESET);
6063 goto drop;
6066 if (!control->sframe) {
6067 int err;
6069 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6070 control->sar, control->reqseq, control->final,
6071 control->txseq);
6073 /* Validate F-bit - F=0 always valid, F=1 only
6074 * valid in TX WAIT_F
6076 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6077 goto drop;
6079 if (chan->mode != L2CAP_MODE_STREAMING) {
6080 event = L2CAP_EV_RECV_IFRAME;
6081 err = l2cap_rx(chan, control, skb, event);
6082 } else {
6083 err = l2cap_stream_rx(chan, control, skb);
6086 if (err)
6087 l2cap_send_disconn_req(chan, ECONNRESET);
6088 } else {
/* Map the 2-bit supervisory field to the rx event code */
6089 const u8 rx_func_to_event[4] = {
6090 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6091 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6094 /* Only I-frames are expected in streaming mode */
6095 if (chan->mode == L2CAP_MODE_STREAMING)
6096 goto drop;
6098 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6099 control->reqseq, control->final, control->poll,
6100 control->super);
6102 if (len != 0) {
6103 BT_ERR("Trailing bytes: %d in sframe", len);
6104 l2cap_send_disconn_req(chan, ECONNRESET);
6105 goto drop;
6108 /* Validate F and P bits */
6109 if (control->final && (control->poll ||
6110 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6111 goto drop;
6113 event = rx_func_to_event[control->super];
6114 if (l2cap_rx(chan, control, skb, event))
6115 l2cap_send_disconn_req(chan, ECONNRESET);
6118 return 0;
6120 drop:
6121 kfree_skb(skb);
6122 return 0;
/* Route a data frame to the channel identified by its source CID.
 * Unknown CIDs are dropped, except the A2MP CID which may create the
 * AMP manager channel on demand. Basic mode delivers directly (bounded
 * by imtu); ERTM/streaming go through l2cap_data_rcv(). Consumes the
 * skb on all failure paths and unlocks the channel before returning.
 */
6125 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6126 struct sk_buff *skb)
6128 struct l2cap_chan *chan;
6130 chan = l2cap_get_chan_by_scid(conn, cid);
6131 if (!chan) {
6132 if (cid == L2CAP_CID_A2MP) {
6133 chan = a2mp_channel_create(conn, skb);
6134 if (!chan) {
6135 kfree_skb(skb);
6136 return;
6139 l2cap_chan_lock(chan);
6140 } else {
6141 BT_DBG("unknown cid 0x%4.4x", cid);
6142 /* Drop packet and return */
6143 kfree_skb(skb);
6144 return;
6148 BT_DBG("chan %p, len %d", chan, skb->len);
6150 if (chan->state != BT_CONNECTED)
6151 goto drop;
6153 switch (chan->mode) {
6154 case L2CAP_MODE_BASIC:
6155 /* If socket recv buffers overflows we drop data here
6156 * which is *bad* because L2CAP has to be reliable.
6157 * But we don't have any other choice. L2CAP doesn't
6158 * provide flow control mechanism. */
6160 if (chan->imtu < skb->len)
6161 goto drop;
6163 if (!chan->ops->recv(chan, skb))
6164 goto done;
6165 break;
6167 case L2CAP_MODE_ERTM:
6168 case L2CAP_MODE_STREAMING:
6169 l2cap_data_rcv(chan, skb);
6170 goto done;
6172 default:
6173 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6174 break;
6177 drop:
6178 kfree_skb(skb);
6180 done:
6181 l2cap_chan_unlock(chan);
/* Deliver a connectionless (group) frame to the channel listening on
 * the given PSM for this address pair. Dropped if no match, the channel
 * is not bound/connected, the frame exceeds imtu, or delivery fails.
 */
6184 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6185 struct sk_buff *skb)
6187 struct l2cap_chan *chan;
6189 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6190 if (!chan)
6191 goto drop;
6193 BT_DBG("chan %p, len %d", chan, skb->len);
6195 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6196 goto drop;
6198 if (chan->imtu < skb->len)
6199 goto drop;
6201 if (!chan->ops->recv(chan, skb))
6202 return;
6204 drop:
6205 kfree_skb(skb);
/* Deliver an LE attribute-protocol frame to the global channel
 * registered for the given fixed CID and this address pair. Dropped if
 * no match, wrong state, oversize, or delivery failure.
 */
6208 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
6209 struct sk_buff *skb)
6211 struct l2cap_chan *chan;
6213 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
6214 if (!chan)
6215 goto drop;
6217 BT_DBG("chan %p, len %d", chan, skb->len);
6219 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6220 goto drop;
6222 if (chan->imtu < skb->len)
6223 goto drop;
6225 if (!chan->ops->recv(chan, skb))
6226 return;
6228 drop:
6229 kfree_skb(skb);
/* Demultiplex one complete L2CAP frame by destination CID: signaling,
 * connectionless, LE attribute, SMP, or a dynamic data channel. Frames
 * whose header length disagrees with the skb length are dropped.
 */
6232 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6234 struct l2cap_hdr *lh = (void *) skb->data;
6235 u16 cid, len;
6236 __le16 psm;
6238 skb_pull(skb, L2CAP_HDR_SIZE);
6239 cid = __le16_to_cpu(lh->cid);
6240 len = __le16_to_cpu(lh->len);
/* Header-declared length must match the actual payload exactly */
6242 if (len != skb->len) {
6243 kfree_skb(skb);
6244 return;
6247 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6249 switch (cid) {
6250 case L2CAP_CID_LE_SIGNALING:
6251 case L2CAP_CID_SIGNALING:
6252 l2cap_sig_channel(conn, skb);
6253 break;
6255 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM before the payload */
6256 psm = get_unaligned((__le16 *) skb->data);
6257 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6258 l2cap_conless_channel(conn, psm, skb);
6259 break;
6261 case L2CAP_CID_LE_DATA:
6262 l2cap_att_channel(conn, cid, skb);
6263 break;
6265 case L2CAP_CID_SMP:
/* A failed SMP exchange tears down the whole connection */
6266 if (smp_sig_channel(conn, skb))
6267 l2cap_conn_del(conn->hcon, EACCES);
6268 break;
6270 default:
6271 l2cap_data_channel(conn, cid, skb);
6272 break;
6276 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * channels and build the link-mode mask (ACCEPT, optionally MASTER).
 * Channels bound to this adapter's exact address take precedence over
 * wildcard (BDADDR_ANY) listeners.
 */
6278 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6280 int exact = 0, lm1 = 0, lm2 = 0;
6281 struct l2cap_chan *c;
6283 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6285 /* Find listening sockets and check their link_mode */
6286 read_lock(&chan_list_lock);
6287 list_for_each_entry(c, &chan_list, global_l) {
6288 struct sock *sk = c->sk;
6290 if (c->state != BT_LISTEN)
6291 continue;
6293 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6294 lm1 |= HCI_LM_ACCEPT;
6295 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6296 lm1 |= HCI_LM_MASTER;
6297 exact++;
6298 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6299 lm2 |= HCI_LM_ACCEPT;
6300 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6301 lm2 |= HCI_LM_MASTER;
6304 read_unlock(&chan_list_lock);
6306 return exact ? lm1 : lm2;
/* HCI callback when a connection attempt completes: on success set up
 * the L2CAP connection and mark it ready; on failure tear it down with
 * the HCI status mapped to an errno.
 */
6309 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6311 struct l2cap_conn *conn;
6313 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6315 if (!status) {
6316 conn = l2cap_conn_add(hcon, status);
6317 if (conn)
6318 l2cap_conn_ready(conn);
6319 } else {
6320 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking for the disconnect reason to report to the peer;
 * defaults to "remote user terminated" when no L2CAP state exists.
 */
6324 int l2cap_disconn_ind(struct hci_conn *hcon)
6326 struct l2cap_conn *conn = hcon->l2cap_data;
6328 BT_DBG("hcon %p", hcon);
6330 if (!conn)
6331 return HCI_ERROR_REMOTE_USER_TERM;
6332 return conn->disc_reason;
/* HCI callback on link disconnection: tear down the L2CAP connection,
 * translating the HCI reason code to an errno.
 */
6335 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6337 BT_DBG("hcon %p reason %d", hcon, reason);
6339 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer for MEDIUM security and
 * closes HIGH-security channels outright; regaining it clears the
 * MEDIUM-security timer.
 */
6342 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6344 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6345 return;
6347 if (encrypt == 0x00) {
6348 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6349 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6350 } else if (chan->sec_level == BT_SECURITY_HIGH)
6351 l2cap_chan_close(chan, ECONNREFUSED);
6352 } else {
6353 if (chan->sec_level == BT_SECURITY_MEDIUM)
6354 __clear_chan_timer(chan);
/* HCI callback when an authentication/encryption procedure completes.
 * For LE links, successful encryption kicks off SMP key distribution.
 * For each channel on the connection: LE data channels become ready;
 * channels waiting on security (BT_CONNECT) either proceed with
 * connection setup or get a disconnect timer; BT_CONNECT2 channels send
 * the deferred Connect Response (success, pending-authorization, or
 * security-block) and start configuration when appropriate; already
 * connected/configured channels re-evaluate their encryption
 * requirements. Always returns 0.
 */
6358 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6360 struct l2cap_conn *conn = hcon->l2cap_data;
6361 struct l2cap_chan *chan;
6363 if (!conn)
6364 return 0;
6366 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6368 if (hcon->type == LE_LINK) {
6369 if (!status && encrypt)
6370 smp_distribute_keys(conn, 0);
6371 cancel_delayed_work(&conn->security_timer);
6374 mutex_lock(&conn->chan_lock);
6376 list_for_each_entry(chan, &conn->chan_l, list) {
6377 l2cap_chan_lock(chan);
6379 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6380 state_to_string(chan->state));
/* The A2MP fixed channel has no security state of its own */
6382 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6383 l2cap_chan_unlock(chan);
6384 continue;
6387 if (chan->scid == L2CAP_CID_LE_DATA) {
6388 if (!status && encrypt) {
6389 chan->sec_level = hcon->sec_level;
6390 l2cap_chan_ready(chan);
6393 l2cap_chan_unlock(chan);
6394 continue;
/* Skip channels with a connect request still outstanding */
6397 if (!__l2cap_no_conn_pending(chan)) {
6398 l2cap_chan_unlock(chan);
6399 continue;
6402 if (!status && (chan->state == BT_CONNECTED ||
6403 chan->state == BT_CONFIG)) {
6404 struct sock *sk = chan->sk;
6406 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6407 sk->sk_state_change(sk);
6409 l2cap_check_encryption(chan, encrypt);
6410 l2cap_chan_unlock(chan);
6411 continue;
6414 if (chan->state == BT_CONNECT) {
6415 if (!status) {
6416 l2cap_start_connection(chan);
6417 } else {
6418 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6420 } else if (chan->state == BT_CONNECT2) {
6421 struct sock *sk = chan->sk;
6422 struct l2cap_conn_rsp rsp;
6423 __u16 res, stat;
6425 lock_sock(sk);
6427 if (!status) {
6428 if (test_bit(BT_SK_DEFER_SETUP,
6429 &bt_sk(sk)->flags)) {
6430 res = L2CAP_CR_PEND;
6431 stat = L2CAP_CS_AUTHOR_PEND;
6432 chan->ops->defer(chan);
6433 } else {
6434 __l2cap_state_change(chan, BT_CONFIG);
6435 res = L2CAP_CR_SUCCESS;
6436 stat = L2CAP_CS_NO_INFO;
6438 } else {
6439 __l2cap_state_change(chan, BT_DISCONN);
6440 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6441 res = L2CAP_CR_SEC_BLOCK;
6442 stat = L2CAP_CS_NO_INFO;
6445 release_sock(sk);
6447 rsp.scid = cpu_to_le16(chan->dcid);
6448 rsp.dcid = cpu_to_le16(chan->scid);
6449 rsp.result = cpu_to_le16(res);
6450 rsp.status = cpu_to_le16(stat);
6451 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6452 sizeof(rsp), &rsp);
/* On a successful response, immediately start configuration */
6454 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6455 res == L2CAP_CR_SUCCESS) {
6456 char buf[128];
6457 set_bit(CONF_REQ_SENT, &chan->conf_state);
6458 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6459 L2CAP_CONF_REQ,
6460 l2cap_build_conf_req(chan, buf),
6461 buf);
6462 chan->num_conf_req++;
6466 l2cap_chan_unlock(chan);
6469 mutex_unlock(&conn->chan_lock);
6471 return 0;
/* HCI callback: one ACL data packet arrived on @hcon.  Reassembles
 * fragmented L2CAP frames: a start fragment (ACL_START*) carries the Basic
 * L2CAP header whose length field tells us the total frame size; subsequent
 * ACL_CONT fragments are appended to conn->rx_skb until conn->rx_len
 * reaches zero, at which point the complete frame is delivered to
 * l2cap_recv_frame().  Always consumes @skb and returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start while reassembly is in progress means the
		 * previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the expected frame: abort it */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

	/* NOTE: intentional fall-through after the switch — the source
	 * fragment has been copied (or rejected) and must be freed either way.
	 */
drop:
	kfree_skb(skb);
	return 0;
}
6575 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6577 struct l2cap_chan *c;
6579 read_lock(&chan_list_lock);
6581 list_for_each_entry(c, &chan_list, global_l) {
6582 struct sock *sk = c->sk;
6584 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6585 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6586 c->state, __le16_to_cpu(c->psm),
6587 c->scid, c->dcid, c->imtu, c->omtu,
6588 c->sec_level, c->mode);
6591 read_unlock(&chan_list_lock);
6593 return 0;
/* debugfs open hook: bind the single-record seq_file iterator to
 * l2cap_debugfs_show; inode->i_private is passed through as its data.
 */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* Read-only debugfs file backed by the seq_file single-show helpers */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* dentry of the "l2cap" debugfs file; created in l2cap_init(), removed in
 * l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
6610 int __init l2cap_init(void)
6612 int err;
6614 err = l2cap_init_sockets();
6615 if (err < 0)
6616 return err;
6618 if (bt_debugfs) {
6619 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6620 NULL, &l2cap_debugfs_fops);
6621 if (!l2cap_debugfs)
6622 BT_ERR("Failed to create L2CAP debug file");
6625 return 0;
/* Subsystem teardown: remove the debugfs entry first, then unregister the
 * L2CAP socket family (reverse of l2cap_init()).
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
/* "disable_ertm" module parameter, writable at runtime via sysfs (0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");