net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
59 bool disable_ertm;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 /* ---- L2CAP channels ---- */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 struct l2cap_chan *c;
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
85 return NULL;
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 struct l2cap_chan *c;
92 list_for_each_entry(c, &conn->chan_l, list) {
93 if (c->scid == cid)
94 return c;
96 return NULL;
99 /* Find channel with given SCID.
100 * Returns the matching channel, or NULL if none is found */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
109 return c;
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 struct l2cap_chan *c;
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
118 return c;
120 return NULL;
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 struct l2cap_chan *c;
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
131 return c;
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 return c;
142 return NULL;
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
147 int err;
149 write_lock(&chan_list_lock);
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
152 err = -EADDRINUSE;
153 goto done;
156 if (psm) {
157 chan->psm = psm;
158 chan->sport = psm;
159 err = 0;
160 } else {
161 u16 p;
163 err = -EINVAL;
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
168 err = 0;
169 break;
173 done:
174 write_unlock(&chan_list_lock);
175 return err;
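/*
 * When called with psm == 0, the loop above hands out a dynamic PSM from
 * the odd values in the 0x1001-0x10ff range (valid PSMs must have an odd
 * low octet); -EINVAL is returned only if every slot on this source
 * address is already taken.
 */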
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 write_lock(&chan_list_lock);
182 chan->scid = scid;
184 write_unlock(&chan_list_lock);
186 return 0;
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 u16 cid = L2CAP_CID_DYN_START;
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
195 return cid;
198 return 0;
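/*
 * Source CIDs for new connection-oriented channels come from the dynamic
 * range starting at L2CAP_CID_DYN_START (0x0040 in the spec); the CIDs
 * below it are reserved for signalling, connectionless and LE data.
 * A return of 0 means every dynamic CID on this connection is in use.
 */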
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
206 chan->state = state;
207 chan->ops->state_change(chan->data, state);
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 struct sock *sk = chan->sk;
214 lock_sock(sk);
215 __l2cap_state_change(chan, state);
216 release_sock(sk);
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 struct sock *sk = chan->sk;
223 sk->sk_err = err;
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 struct sock *sk = chan->sk;
230 lock_sock(sk);
231 __l2cap_chan_set_err(chan, err);
232 release_sock(sk);
235 static void l2cap_chan_timeout(struct work_struct *work)
237 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
238 chan_timer.work);
239 struct l2cap_conn *conn = chan->conn;
240 int reason;
242 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
244 mutex_lock(&conn->chan_lock);
245 l2cap_chan_lock(chan);
247 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
248 reason = ECONNREFUSED;
249 else if (chan->state == BT_CONNECT &&
250 chan->sec_level != BT_SECURITY_SDP)
251 reason = ECONNREFUSED;
252 else
253 reason = ETIMEDOUT;
255 l2cap_chan_close(chan, reason);
257 l2cap_chan_unlock(chan);
259 chan->ops->close(chan->data);
260 mutex_unlock(&conn->chan_lock);
262 l2cap_chan_put(chan);
265 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
267 struct l2cap_chan *chan;
269 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
270 if (!chan)
271 return NULL;
273 mutex_init(&chan->lock);
275 chan->sk = sk;
277 write_lock(&chan_list_lock);
278 list_add(&chan->global_l, &chan_list);
279 write_unlock(&chan_list_lock);
281 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
283 chan->state = BT_OPEN;
285 atomic_set(&chan->refcnt, 1);
287 BT_DBG("sk %p chan %p", sk, chan);
289 return chan;
292 void l2cap_chan_destroy(struct l2cap_chan *chan)
294 write_lock(&chan_list_lock);
295 list_del(&chan->global_l);
296 write_unlock(&chan_list_lock);
298 l2cap_chan_put(chan);
301 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
303 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
304 chan->psm, chan->dcid);
306 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
308 chan->conn = conn;
310 switch (chan->chan_type) {
311 case L2CAP_CHAN_CONN_ORIENTED:
312 if (conn->hcon->type == LE_LINK) {
313 /* LE connection */
314 chan->omtu = L2CAP_LE_DEFAULT_MTU;
315 chan->scid = L2CAP_CID_LE_DATA;
316 chan->dcid = L2CAP_CID_LE_DATA;
317 } else {
318 /* Alloc CID for connection-oriented socket */
319 chan->scid = l2cap_alloc_cid(conn);
320 chan->omtu = L2CAP_DEFAULT_MTU;
322 break;
324 case L2CAP_CHAN_CONN_LESS:
325 /* Connectionless socket */
326 chan->scid = L2CAP_CID_CONN_LESS;
327 chan->dcid = L2CAP_CID_CONN_LESS;
328 chan->omtu = L2CAP_DEFAULT_MTU;
329 break;
331 default:
332 /* Raw socket can send/recv signalling messages only */
333 chan->scid = L2CAP_CID_SIGNALING;
334 chan->dcid = L2CAP_CID_SIGNALING;
335 chan->omtu = L2CAP_DEFAULT_MTU;
338 chan->local_id = L2CAP_BESTEFFORT_ID;
339 chan->local_stype = L2CAP_SERV_BESTEFFORT;
340 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
341 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
342 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
343 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
345 l2cap_chan_hold(chan);
347 list_add(&chan->list, &conn->chan_l);
350 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
352 mutex_lock(&conn->chan_lock);
353 __l2cap_chan_add(conn, chan);
354 mutex_unlock(&conn->chan_lock);
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
363 __clear_chan_timer(chan);
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
367 if (conn) {
368 /* Delete from channel list */
369 list_del(&chan->list);
371 l2cap_chan_put(chan);
373 chan->conn = NULL;
374 hci_conn_put(conn->hcon);
377 lock_sock(sk);
379 __l2cap_state_change(chan, BT_CLOSED);
380 sock_set_flag(sk, SOCK_ZAPPED);
382 if (err)
383 __l2cap_chan_set_err(chan, err);
385 if (parent) {
386 bt_accept_unlink(sk);
387 parent->sk_data_ready(parent, 0);
388 } else
389 sk->sk_state_change(sk);
391 release_sock(sk);
393 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
394 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
395 return;
397 skb_queue_purge(&chan->tx_q);
399 if (chan->mode == L2CAP_MODE_ERTM) {
400 struct srej_list *l, *tmp;
402 __clear_retrans_timer(chan);
403 __clear_monitor_timer(chan);
404 __clear_ack_timer(chan);
406 skb_queue_purge(&chan->srej_q);
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
409 list_del(&l->list);
410 kfree(l);
415 static void l2cap_chan_cleanup_listen(struct sock *parent)
417 struct sock *sk;
419 BT_DBG("parent %p", parent);
421 /* Close not yet accepted channels */
422 while ((sk = bt_accept_dequeue(parent, NULL))) {
423 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
425 l2cap_chan_lock(chan);
426 __clear_chan_timer(chan);
427 l2cap_chan_close(chan, ECONNRESET);
428 l2cap_chan_unlock(chan);
430 chan->ops->close(chan->data);
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
439 BT_DBG("chan %p state %s sk %p", chan,
440 state_to_string(chan->state), sk);
442 switch (chan->state) {
443 case BT_LISTEN:
444 lock_sock(sk);
445 l2cap_chan_cleanup_listen(sk);
447 __l2cap_state_change(chan, BT_CLOSED);
448 sock_set_flag(sk, SOCK_ZAPPED);
449 release_sock(sk);
450 break;
452 case BT_CONNECTED:
453 case BT_CONFIG:
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
459 } else
460 l2cap_chan_del(chan, reason);
461 break;
463 case BT_CONNECT2:
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
467 __u16 result;
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
471 else
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
480 sizeof(rsp), &rsp);
483 l2cap_chan_del(chan, reason);
484 break;
486 case BT_CONNECT:
487 case BT_DISCONN:
488 l2cap_chan_del(chan, reason);
489 break;
491 default:
492 lock_sock(sk);
493 sock_set_flag(sk, SOCK_ZAPPED);
494 release_sock(sk);
495 break;
499 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
501 if (chan->chan_type == L2CAP_CHAN_RAW) {
502 switch (chan->sec_level) {
503 case BT_SECURITY_HIGH:
504 return HCI_AT_DEDICATED_BONDING_MITM;
505 case BT_SECURITY_MEDIUM:
506 return HCI_AT_DEDICATED_BONDING;
507 default:
508 return HCI_AT_NO_BONDING;
510 } else if (chan->psm == cpu_to_le16(0x0001)) {
511 if (chan->sec_level == BT_SECURITY_LOW)
512 chan->sec_level = BT_SECURITY_SDP;
514 if (chan->sec_level == BT_SECURITY_HIGH)
515 return HCI_AT_NO_BONDING_MITM;
516 else
517 return HCI_AT_NO_BONDING;
518 } else {
519 switch (chan->sec_level) {
520 case BT_SECURITY_HIGH:
521 return HCI_AT_GENERAL_BONDING_MITM;
522 case BT_SECURITY_MEDIUM:
523 return HCI_AT_GENERAL_BONDING;
524 default:
525 return HCI_AT_NO_BONDING;
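/*
 * Roughly: raw (signalling-only) channels request dedicated bonding at
 * medium/high security, PSM 0x0001 (SDP) never requests bonding, and all
 * other channels request general bonding at medium/high security, with
 * MITM protection added at BT_SECURITY_HIGH.
 */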
530 /* Service level security */
531 int l2cap_chan_check_security(struct l2cap_chan *chan)
533 struct l2cap_conn *conn = chan->conn;
534 __u8 auth_type;
536 auth_type = l2cap_get_auth_type(chan);
538 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
541 static u8 l2cap_get_ident(struct l2cap_conn *conn)
543 u8 id;
545 /* Get next available identifier.
546 * 1 - 128 are used by kernel.
547 * 129 - 199 are reserved.
548 * 200 - 254 are used by utilities like l2ping, etc.
549 */
551 spin_lock(&conn->lock);
553 if (++conn->tx_ident > 128)
554 conn->tx_ident = 1;
556 id = conn->tx_ident;
558 spin_unlock(&conn->lock);
560 return id;
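/*
 * The ident returned here ties a signalling request to its response;
 * callers usually pass it straight to l2cap_send_cmd(), e.g.
 * l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
 * sizeof(req), &req), and incoming responses are matched back to the
 * channel via __l2cap_get_chan_by_ident().
 */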
563 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
565 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
566 u8 flags;
568 BT_DBG("code 0x%2.2x", code);
570 if (!skb)
571 return;
573 if (lmp_no_flush_capable(conn->hcon->hdev))
574 flags = ACL_START_NO_FLUSH;
575 else
576 flags = ACL_START;
578 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
579 skb->priority = HCI_PRIO_MAX;
581 hci_send_acl(conn->hchan, skb, flags);
584 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
586 struct hci_conn *hcon = chan->conn->hcon;
587 u16 flags;
589 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
590 skb->priority);
592 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
593 lmp_no_flush_capable(hcon->hdev))
594 flags = ACL_START_NO_FLUSH;
595 else
596 flags = ACL_START;
598 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
599 hci_send_acl(chan->conn->hchan, skb, flags);
602 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
604 struct sk_buff *skb;
605 struct l2cap_hdr *lh;
606 struct l2cap_conn *conn = chan->conn;
607 int count, hlen;
609 if (chan->state != BT_CONNECTED)
610 return;
612 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
613 hlen = L2CAP_EXT_HDR_SIZE;
614 else
615 hlen = L2CAP_ENH_HDR_SIZE;
617 if (chan->fcs == L2CAP_FCS_CRC16)
618 hlen += L2CAP_FCS_SIZE;
620 BT_DBG("chan %p, control 0x%8.8x", chan, control);
622 count = min_t(unsigned int, conn->mtu, hlen);
624 control |= __set_sframe(chan);
626 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
627 control |= __set_ctrl_final(chan);
629 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
630 control |= __set_ctrl_poll(chan);
632 skb = bt_skb_alloc(count, GFP_ATOMIC);
633 if (!skb)
634 return;
636 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
637 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
638 lh->cid = cpu_to_le16(chan->dcid);
640 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
642 if (chan->fcs == L2CAP_FCS_CRC16) {
643 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
644 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
647 skb->priority = HCI_PRIO_MAX;
648 l2cap_do_send(chan, skb);
651 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
653 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
654 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
655 set_bit(CONN_RNR_SENT, &chan->conn_state);
656 } else
657 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
659 control |= __set_reqseq(chan, chan->buffer_seq);
661 l2cap_send_sframe(chan, control);
664 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
666 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
669 static void l2cap_send_conn_req(struct l2cap_chan *chan)
671 struct l2cap_conn *conn = chan->conn;
672 struct l2cap_conn_req req;
674 req.scid = cpu_to_le16(chan->scid);
675 req.psm = chan->psm;
677 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
684 static void l2cap_do_start(struct l2cap_chan *chan)
686 struct l2cap_conn *conn = chan->conn;
688 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
689 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
690 return;
692 if (l2cap_chan_check_security(chan) &&
693 __l2cap_no_conn_pending(chan))
694 l2cap_send_conn_req(chan);
695 } else {
696 struct l2cap_info_req req;
697 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
699 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
700 conn->info_ident = l2cap_get_ident(conn);
702 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
704 l2cap_send_cmd(conn, conn->info_ident,
705 L2CAP_INFO_REQ, sizeof(req), &req);
709 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
711 u32 local_feat_mask = l2cap_feat_mask;
712 if (!disable_ertm)
713 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
715 switch (mode) {
716 case L2CAP_MODE_ERTM:
717 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
718 case L2CAP_MODE_STREAMING:
719 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
720 default:
721 return 0x00;
725 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
727 struct sock *sk = chan->sk;
728 struct l2cap_disconn_req req;
730 if (!conn)
731 return;
733 if (chan->mode == L2CAP_MODE_ERTM) {
734 __clear_retrans_timer(chan);
735 __clear_monitor_timer(chan);
736 __clear_ack_timer(chan);
739 req.dcid = cpu_to_le16(chan->dcid);
740 req.scid = cpu_to_le16(chan->scid);
741 l2cap_send_cmd(conn, l2cap_get_ident(conn),
742 L2CAP_DISCONN_REQ, sizeof(req), &req);
744 lock_sock(sk);
745 __l2cap_state_change(chan, BT_DISCONN);
746 __l2cap_chan_set_err(chan, err);
747 release_sock(sk);
750 /* ---- L2CAP connections ---- */
751 static void l2cap_conn_start(struct l2cap_conn *conn)
753 struct l2cap_chan *chan, *tmp;
755 BT_DBG("conn %p", conn);
757 mutex_lock(&conn->chan_lock);
759 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
762 l2cap_chan_lock(chan);
764 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
765 l2cap_chan_unlock(chan);
766 continue;
769 if (chan->state == BT_CONNECT) {
770 if (!l2cap_chan_check_security(chan) ||
771 !__l2cap_no_conn_pending(chan)) {
772 l2cap_chan_unlock(chan);
773 continue;
776 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
777 && test_bit(CONF_STATE2_DEVICE,
778 &chan->conf_state)) {
779 l2cap_chan_close(chan, ECONNRESET);
780 l2cap_chan_unlock(chan);
781 continue;
784 l2cap_send_conn_req(chan);
786 } else if (chan->state == BT_CONNECT2) {
787 struct l2cap_conn_rsp rsp;
788 char buf[128];
789 rsp.scid = cpu_to_le16(chan->dcid);
790 rsp.dcid = cpu_to_le16(chan->scid);
792 if (l2cap_chan_check_security(chan)) {
793 lock_sock(sk);
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
798 if (parent)
799 parent->sk_data_ready(parent, 0);
801 } else {
802 __l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
806 release_sock(sk);
807 } else {
808 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
809 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
813 sizeof(rsp), &rsp);
815 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
816 rsp.result != L2CAP_CR_SUCCESS) {
817 l2cap_chan_unlock(chan);
818 continue;
821 set_bit(CONF_REQ_SENT, &chan->conf_state);
822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
823 l2cap_build_conf_req(chan, buf), buf);
824 chan->num_conf_req++;
827 l2cap_chan_unlock(chan);
830 mutex_unlock(&conn->chan_lock);
833 /* Find channel with given SCID and source bdaddr.
834 * Returns closest match: a BDADDR_ANY listener if no exact one exists.
835 */
836 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
838 struct l2cap_chan *c, *c1 = NULL;
840 read_lock(&chan_list_lock);
842 list_for_each_entry(c, &chan_list, global_l) {
843 struct sock *sk = c->sk;
845 if (state && c->state != state)
846 continue;
848 if (c->scid == cid) {
849 /* Exact match. */
850 if (!bacmp(&bt_sk(sk)->src, src)) {
851 read_unlock(&chan_list_lock);
852 return c;
855 /* Closest match */
856 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
857 c1 = c;
861 read_unlock(&chan_list_lock);
863 return c1;
866 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
868 struct sock *parent, *sk;
869 struct l2cap_chan *chan, *pchan;
871 BT_DBG("");
873 /* Check if we have a socket listening on this CID */
874 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
875 conn->src);
876 if (!pchan)
877 return;
879 parent = pchan->sk;
881 lock_sock(parent);
883 /* Check for backlog size */
884 if (sk_acceptq_is_full(parent)) {
885 BT_DBG("backlog full %d", parent->sk_ack_backlog);
886 goto clean;
889 chan = pchan->ops->new_connection(pchan->data);
890 if (!chan)
891 goto clean;
893 sk = chan->sk;
895 hci_conn_hold(conn->hcon);
897 bacpy(&bt_sk(sk)->src, conn->src);
898 bacpy(&bt_sk(sk)->dst, conn->dst);
900 bt_accept_enqueue(parent, sk);
902 l2cap_chan_add(conn, chan);
904 __set_chan_timer(chan, sk->sk_sndtimeo);
906 __l2cap_state_change(chan, BT_CONNECTED);
907 parent->sk_data_ready(parent, 0);
909 clean:
910 release_sock(parent);
913 static void l2cap_chan_ready(struct l2cap_chan *chan)
915 struct sock *sk = chan->sk;
916 struct sock *parent;
918 lock_sock(sk);
920 parent = bt_sk(sk)->parent;
922 BT_DBG("sk %p, parent %p", sk, parent);
924 chan->conf_state = 0;
925 __clear_chan_timer(chan);
927 __l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
930 if (parent)
931 parent->sk_data_ready(parent, 0);
933 release_sock(sk);
936 static void l2cap_conn_ready(struct l2cap_conn *conn)
938 struct l2cap_chan *chan;
940 BT_DBG("conn %p", conn);
942 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
943 l2cap_le_conn_ready(conn);
945 if (conn->hcon->out && conn->hcon->type == LE_LINK)
946 smp_conn_security(conn, conn->hcon->pending_sec_level);
948 mutex_lock(&conn->chan_lock);
950 list_for_each_entry(chan, &conn->chan_l, list) {
952 l2cap_chan_lock(chan);
954 if (conn->hcon->type == LE_LINK) {
955 if (smp_conn_security(conn, chan->sec_level))
956 l2cap_chan_ready(chan);
958 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
959 struct sock *sk = chan->sk;
960 __clear_chan_timer(chan);
961 lock_sock(sk);
962 __l2cap_state_change(chan, BT_CONNECTED);
963 sk->sk_state_change(sk);
964 release_sock(sk);
966 } else if (chan->state == BT_CONNECT)
967 l2cap_do_start(chan);
969 l2cap_chan_unlock(chan);
972 mutex_unlock(&conn->chan_lock);
975 /* Notify sockets that we cannot guarantee reliability anymore */
976 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
978 struct l2cap_chan *chan;
980 BT_DBG("conn %p", conn);
982 mutex_lock(&conn->chan_lock);
984 list_for_each_entry(chan, &conn->chan_l, list) {
985 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
986 __l2cap_chan_set_err(chan, err);
989 mutex_unlock(&conn->chan_lock);
992 static void l2cap_info_timeout(struct work_struct *work)
994 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
995 info_timer.work);
997 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
998 conn->info_ident = 0;
1000 l2cap_conn_start(conn);
1003 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1005 struct l2cap_conn *conn = hcon->l2cap_data;
1006 struct l2cap_chan *chan, *l;
1008 if (!conn)
1009 return;
1011 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1013 kfree_skb(conn->rx_skb);
1015 mutex_lock(&conn->chan_lock);
1017 /* Kill channels */
1018 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1019 l2cap_chan_lock(chan);
1021 l2cap_chan_del(chan, err);
1023 l2cap_chan_unlock(chan);
1025 chan->ops->close(chan->data);
1028 mutex_unlock(&conn->chan_lock);
1030 hci_chan_del(conn->hchan);
1032 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1033 cancel_delayed_work_sync(&conn->info_timer);
1035 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1036 cancel_delayed_work_sync(&conn->security_timer);
1037 smp_chan_destroy(conn);
1040 hcon->l2cap_data = NULL;
1041 kfree(conn);
1044 static void security_timeout(struct work_struct *work)
1046 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1047 security_timer.work);
1049 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1052 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1054 struct l2cap_conn *conn = hcon->l2cap_data;
1055 struct hci_chan *hchan;
1057 if (conn || status)
1058 return conn;
1060 hchan = hci_chan_create(hcon);
1061 if (!hchan)
1062 return NULL;
1064 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1065 if (!conn) {
1066 hci_chan_del(hchan);
1067 return NULL;
1070 hcon->l2cap_data = conn;
1071 conn->hcon = hcon;
1072 conn->hchan = hchan;
1074 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1076 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1077 conn->mtu = hcon->hdev->le_mtu;
1078 else
1079 conn->mtu = hcon->hdev->acl_mtu;
1081 conn->src = &hcon->hdev->bdaddr;
1082 conn->dst = &hcon->dst;
1084 conn->feat_mask = 0;
1086 spin_lock_init(&conn->lock);
1087 mutex_init(&conn->chan_lock);
1089 INIT_LIST_HEAD(&conn->chan_l);
1091 if (hcon->type == LE_LINK)
1092 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1093 else
1094 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1096 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1098 return conn;
1101 /* ---- Socket interface ---- */
1103 /* Find channel with given PSM and source bdaddr.
1104 * Returns closest match: a BDADDR_ANY listener if no exact one exists.
1105 */
1106 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1108 struct l2cap_chan *c, *c1 = NULL;
1110 read_lock(&chan_list_lock);
1112 list_for_each_entry(c, &chan_list, global_l) {
1113 struct sock *sk = c->sk;
1115 if (state && c->state != state)
1116 continue;
1118 if (c->psm == psm) {
1119 /* Exact match. */
1120 if (!bacmp(&bt_sk(sk)->src, src)) {
1121 read_unlock(&chan_list_lock);
1122 return c;
1125 /* Closest match */
1126 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1127 c1 = c;
1131 read_unlock(&chan_list_lock);
1133 return c1;
1136 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1138 struct sock *sk = chan->sk;
1139 bdaddr_t *src = &bt_sk(sk)->src;
1140 struct l2cap_conn *conn;
1141 struct hci_conn *hcon;
1142 struct hci_dev *hdev;
1143 __u8 auth_type;
1144 int err;
1146 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1147 chan->psm);
1149 hdev = hci_get_route(dst, src);
1150 if (!hdev)
1151 return -EHOSTUNREACH;
1153 hci_dev_lock(hdev);
1155 l2cap_chan_lock(chan);
1157 /* PSM must be odd and lsb of upper byte must be 0 */
1158 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1159 chan->chan_type != L2CAP_CHAN_RAW) {
1160 err = -EINVAL;
1161 goto done;
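/*
 * The 0x0101 mask checks both PSM rules at once: with psm in host order,
 * bit 0 must be 1 (odd least significant octet) and bit 8 must be 0.
 * So 0x0001 (SDP), 0x0003 (RFCOMM) or 0x1001 pass, while 0x0002 or
 * 0x0101 are rejected.
 */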
1164 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1165 err = -EINVAL;
1166 goto done;
1169 switch (chan->mode) {
1170 case L2CAP_MODE_BASIC:
1171 break;
1172 case L2CAP_MODE_ERTM:
1173 case L2CAP_MODE_STREAMING:
1174 if (!disable_ertm)
1175 break;
1176 /* fall through */
1177 default:
1178 err = -ENOTSUPP;
1179 goto done;
1182 lock_sock(sk);
1184 switch (sk->sk_state) {
1185 case BT_CONNECT:
1186 case BT_CONNECT2:
1187 case BT_CONFIG:
1188 /* Already connecting */
1189 err = 0;
1190 release_sock(sk);
1191 goto done;
1193 case BT_CONNECTED:
1194 /* Already connected */
1195 err = -EISCONN;
1196 release_sock(sk);
1197 goto done;
1199 case BT_OPEN:
1200 case BT_BOUND:
1201 /* Can connect */
1202 break;
1204 default:
1205 err = -EBADFD;
1206 release_sock(sk);
1207 goto done;
1210 /* Set destination address and psm */
1211 bacpy(&bt_sk(sk)->dst, dst);
1213 release_sock(sk);
1215 chan->psm = psm;
1216 chan->dcid = cid;
1218 auth_type = l2cap_get_auth_type(chan);
1220 if (chan->dcid == L2CAP_CID_LE_DATA)
1221 hcon = hci_connect(hdev, LE_LINK, dst,
1222 chan->sec_level, auth_type);
1223 else
1224 hcon = hci_connect(hdev, ACL_LINK, dst,
1225 chan->sec_level, auth_type);
1227 if (IS_ERR(hcon)) {
1228 err = PTR_ERR(hcon);
1229 goto done;
1232 conn = l2cap_conn_add(hcon, 0);
1233 if (!conn) {
1234 hci_conn_put(hcon);
1235 err = -ENOMEM;
1236 goto done;
1239 /* Update source addr of the socket */
1240 bacpy(src, conn->src);
1242 l2cap_chan_unlock(chan);
1243 l2cap_chan_add(conn, chan);
1244 l2cap_chan_lock(chan);
1246 l2cap_state_change(chan, BT_CONNECT);
1247 __set_chan_timer(chan, sk->sk_sndtimeo);
1249 if (hcon->state == BT_CONNECTED) {
1250 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1251 __clear_chan_timer(chan);
1252 if (l2cap_chan_check_security(chan))
1253 l2cap_state_change(chan, BT_CONNECTED);
1254 } else
1255 l2cap_do_start(chan);
1258 err = 0;
1260 done:
1261 l2cap_chan_unlock(chan);
1262 hci_dev_unlock(hdev);
1263 hci_dev_put(hdev);
1264 return err;
1267 int __l2cap_wait_ack(struct sock *sk)
1269 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1270 DECLARE_WAITQUEUE(wait, current);
1271 int err = 0;
1272 int timeo = HZ/5;
1274 add_wait_queue(sk_sleep(sk), &wait);
1275 set_current_state(TASK_INTERRUPTIBLE);
1276 while (chan->unacked_frames > 0 && chan->conn) {
1277 if (!timeo)
1278 timeo = HZ/5;
1280 if (signal_pending(current)) {
1281 err = sock_intr_errno(timeo);
1282 break;
1285 release_sock(sk);
1286 timeo = schedule_timeout(timeo);
1287 lock_sock(sk);
1288 set_current_state(TASK_INTERRUPTIBLE);
1290 err = sock_error(sk);
1291 if (err)
1292 break;
1294 set_current_state(TASK_RUNNING);
1295 remove_wait_queue(sk_sleep(sk), &wait);
1296 return err;
1299 static void l2cap_monitor_timeout(struct work_struct *work)
1301 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1302 monitor_timer.work);
1304 BT_DBG("chan %p", chan);
1306 l2cap_chan_lock(chan);
1308 if (chan->retry_count >= chan->remote_max_tx) {
1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 l2cap_chan_unlock(chan);
1311 l2cap_chan_put(chan);
1312 return;
1315 chan->retry_count++;
1316 __set_monitor_timer(chan);
1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1319 l2cap_chan_unlock(chan);
1320 l2cap_chan_put(chan);
1323 static void l2cap_retrans_timeout(struct work_struct *work)
1325 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1326 retrans_timer.work);
1328 BT_DBG("chan %p", chan);
1330 l2cap_chan_lock(chan);
1332 chan->retry_count = 1;
1333 __set_monitor_timer(chan);
1335 set_bit(CONN_WAIT_F, &chan->conn_state);
1337 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1339 l2cap_chan_unlock(chan);
1340 l2cap_chan_put(chan);
1343 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1345 struct sk_buff *skb;
1347 while ((skb = skb_peek(&chan->tx_q)) &&
1348 chan->unacked_frames) {
1349 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1350 break;
1352 skb = skb_dequeue(&chan->tx_q);
1353 kfree_skb(skb);
1355 chan->unacked_frames--;
1358 if (!chan->unacked_frames)
1359 __clear_retrans_timer(chan);
1362 static void l2cap_streaming_send(struct l2cap_chan *chan)
1364 struct sk_buff *skb;
1365 u32 control;
1366 u16 fcs;
1368 while ((skb = skb_dequeue(&chan->tx_q))) {
1369 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1370 control |= __set_txseq(chan, chan->next_tx_seq);
1371 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1373 if (chan->fcs == L2CAP_FCS_CRC16) {
1374 fcs = crc16(0, (u8 *)skb->data,
1375 skb->len - L2CAP_FCS_SIZE);
1376 put_unaligned_le16(fcs,
1377 skb->data + skb->len - L2CAP_FCS_SIZE);
1380 l2cap_do_send(chan, skb);
1382 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1386 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1388 struct sk_buff *skb, *tx_skb;
1389 u16 fcs;
1390 u32 control;
1392 skb = skb_peek(&chan->tx_q);
1393 if (!skb)
1394 return;
1396 while (bt_cb(skb)->tx_seq != tx_seq) {
1397 if (skb_queue_is_last(&chan->tx_q, skb))
1398 return;
1400 skb = skb_queue_next(&chan->tx_q, skb);
1403 if (chan->remote_max_tx &&
1404 bt_cb(skb)->retries == chan->remote_max_tx) {
1405 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1406 return;
1409 tx_skb = skb_clone(skb, GFP_ATOMIC);
1410 bt_cb(skb)->retries++;
1412 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1413 control &= __get_sar_mask(chan);
1415 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1416 control |= __set_ctrl_final(chan);
1418 control |= __set_reqseq(chan, chan->buffer_seq);
1419 control |= __set_txseq(chan, tx_seq);
1421 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1423 if (chan->fcs == L2CAP_FCS_CRC16) {
1424 fcs = crc16(0, (u8 *)tx_skb->data,
1425 tx_skb->len - L2CAP_FCS_SIZE);
1426 put_unaligned_le16(fcs,
1427 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1430 l2cap_do_send(chan, tx_skb);
1433 static int l2cap_ertm_send(struct l2cap_chan *chan)
1435 struct sk_buff *skb, *tx_skb;
1436 u16 fcs;
1437 u32 control;
1438 int nsent = 0;
1440 if (chan->state != BT_CONNECTED)
1441 return -ENOTCONN;
1443 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1445 if (chan->remote_max_tx &&
1446 bt_cb(skb)->retries == chan->remote_max_tx) {
1447 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1448 break;
1451 tx_skb = skb_clone(skb, GFP_ATOMIC);
1453 bt_cb(skb)->retries++;
1455 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1456 control &= __get_sar_mask(chan);
1458 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1459 control |= __set_ctrl_final(chan);
1461 control |= __set_reqseq(chan, chan->buffer_seq);
1462 control |= __set_txseq(chan, chan->next_tx_seq);
1464 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1466 if (chan->fcs == L2CAP_FCS_CRC16) {
1467 fcs = crc16(0, (u8 *)skb->data,
1468 tx_skb->len - L2CAP_FCS_SIZE);
1469 put_unaligned_le16(fcs, skb->data +
1470 tx_skb->len - L2CAP_FCS_SIZE);
1473 l2cap_do_send(chan, tx_skb);
1475 __set_retrans_timer(chan);
1477 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1479 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1481 if (bt_cb(skb)->retries == 1) {
1482 chan->unacked_frames++;
1484 if (!nsent++)
1485 __clear_ack_timer(chan);
1488 chan->frames_sent++;
1490 if (skb_queue_is_last(&chan->tx_q, skb))
1491 chan->tx_send_head = NULL;
1492 else
1493 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1496 return nsent;
1499 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1501 int ret;
1503 if (!skb_queue_empty(&chan->tx_q))
1504 chan->tx_send_head = chan->tx_q.next;
1506 chan->next_tx_seq = chan->expected_ack_seq;
1507 ret = l2cap_ertm_send(chan);
1508 return ret;
1511 static void __l2cap_send_ack(struct l2cap_chan *chan)
1513 u32 control = 0;
1515 control |= __set_reqseq(chan, chan->buffer_seq);
1517 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1518 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1519 set_bit(CONN_RNR_SENT, &chan->conn_state);
1520 l2cap_send_sframe(chan, control);
1521 return;
1524 if (l2cap_ertm_send(chan) > 0)
1525 return;
1527 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1528 l2cap_send_sframe(chan, control);
1531 static void l2cap_send_ack(struct l2cap_chan *chan)
1533 __clear_ack_timer(chan);
1534 __l2cap_send_ack(chan);
1537 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1539 struct srej_list *tail;
1540 u32 control;
1542 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1543 control |= __set_ctrl_final(chan);
1545 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1546 control |= __set_reqseq(chan, tail->tx_seq);
1548 l2cap_send_sframe(chan, control);
1551 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1552 struct msghdr *msg, int len,
1553 int count, struct sk_buff *skb)
1555 struct l2cap_conn *conn = chan->conn;
1556 struct sk_buff **frag;
1557 int err, sent = 0;
1559 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1560 return -EFAULT;
1562 sent += count;
1563 len -= count;
1565 /* Continuation fragments (no L2CAP header) */
1566 frag = &skb_shinfo(skb)->frag_list;
1567 while (len) {
1568 count = min_t(unsigned int, conn->mtu, len);
1570 *frag = chan->ops->alloc_skb(chan, count,
1571 msg->msg_flags & MSG_DONTWAIT,
1572 &err);
1574 if (!*frag)
1575 return err;
1576 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1577 return -EFAULT;
1579 (*frag)->priority = skb->priority;
1581 sent += count;
1582 len -= count;
1584 frag = &(*frag)->next;
1587 return sent;
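/*
 * The first 'count' bytes land in the skb passed in (which already
 * carries the L2CAP header); anything beyond that is copied into bare
 * skbs chained on skb_shinfo(skb)->frag_list, so one PDU can span
 * several ACL-MTU-sized fragments without repeating the header.
 */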
1590 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1591 struct msghdr *msg, size_t len,
1592 u32 priority)
1594 struct l2cap_conn *conn = chan->conn;
1595 struct sk_buff *skb;
1596 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1597 struct l2cap_hdr *lh;
1599 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1601 count = min_t(unsigned int, (conn->mtu - hlen), len);
1603 skb = chan->ops->alloc_skb(chan, count + hlen,
1604 msg->msg_flags & MSG_DONTWAIT, &err);
1606 if (!skb)
1607 return ERR_PTR(err);
1609 skb->priority = priority;
1611 /* Create L2CAP header */
1612 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1613 lh->cid = cpu_to_le16(chan->dcid);
1614 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1615 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1617 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1618 if (unlikely(err < 0)) {
1619 kfree_skb(skb);
1620 return ERR_PTR(err);
1622 return skb;
1625 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1626 struct msghdr *msg, size_t len,
1627 u32 priority)
1629 struct l2cap_conn *conn = chan->conn;
1630 struct sk_buff *skb;
1631 int err, count, hlen = L2CAP_HDR_SIZE;
1632 struct l2cap_hdr *lh;
1634 BT_DBG("chan %p len %d", chan, (int)len);
1636 count = min_t(unsigned int, (conn->mtu - hlen), len);
1638 skb = chan->ops->alloc_skb(chan, count + hlen,
1639 msg->msg_flags & MSG_DONTWAIT, &err);
1641 if (!skb)
1642 return ERR_PTR(err);
1644 skb->priority = priority;
1646 /* Create L2CAP header */
1647 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1648 lh->cid = cpu_to_le16(chan->dcid);
1649 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1651 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1652 if (unlikely(err < 0)) {
1653 kfree_skb(skb);
1654 return ERR_PTR(err);
1656 return skb;
1659 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1660 struct msghdr *msg, size_t len,
1661 u32 control, u16 sdulen)
1663 struct l2cap_conn *conn = chan->conn;
1664 struct sk_buff *skb;
1665 int err, count, hlen;
1666 struct l2cap_hdr *lh;
1668 BT_DBG("chan %p len %d", chan, (int)len);
1670 if (!conn)
1671 return ERR_PTR(-ENOTCONN);
1673 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1674 hlen = L2CAP_EXT_HDR_SIZE;
1675 else
1676 hlen = L2CAP_ENH_HDR_SIZE;
1678 if (sdulen)
1679 hlen += L2CAP_SDULEN_SIZE;
1681 if (chan->fcs == L2CAP_FCS_CRC16)
1682 hlen += L2CAP_FCS_SIZE;
1684 count = min_t(unsigned int, (conn->mtu - hlen), len);
1686 skb = chan->ops->alloc_skb(chan, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err);
1689 if (!skb)
1690 return ERR_PTR(err);
1692 /* Create L2CAP header */
1693 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1694 lh->cid = cpu_to_le16(chan->dcid);
1695 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1697 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1699 if (sdulen)
1700 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1702 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1703 if (unlikely(err < 0)) {
1704 kfree_skb(skb);
1705 return ERR_PTR(err);
1708 if (chan->fcs == L2CAP_FCS_CRC16)
1709 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1711 bt_cb(skb)->retries = 0;
1712 return skb;
1715 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1717 struct sk_buff *skb;
1718 struct sk_buff_head sar_queue;
1719 u32 control;
1720 size_t size = 0;
1722 skb_queue_head_init(&sar_queue);
1723 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1724 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1725 if (IS_ERR(skb))
1726 return PTR_ERR(skb);
1728 __skb_queue_tail(&sar_queue, skb);
1729 len -= chan->remote_mps;
1730 size += chan->remote_mps;
1732 while (len > 0) {
1733 size_t buflen;
1735 if (len > chan->remote_mps) {
1736 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1737 buflen = chan->remote_mps;
1738 } else {
1739 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1740 buflen = len;
1743 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1744 if (IS_ERR(skb)) {
1745 skb_queue_purge(&sar_queue);
1746 return PTR_ERR(skb);
1749 __skb_queue_tail(&sar_queue, skb);
1750 len -= buflen;
1751 size += buflen;
1753 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1754 if (chan->tx_send_head == NULL)
1755 chan->tx_send_head = sar_queue.next;
1757 return size;
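/*
 * Segmentation pattern built above, each payload capped at remote_mps:
 * the first I-frame is marked L2CAP_SAR_START and carries the total SDU
 * length, middle frames are L2CAP_SAR_CONTINUE, the last one is
 * L2CAP_SAR_END. The whole run is spliced onto tx_q in one go so ERTM or
 * streaming mode can transmit it as a unit.
 */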
1760 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1761 u32 priority)
1763 struct sk_buff *skb;
1764 u32 control;
1765 int err;
1767 /* Connectionless channel */
1768 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1769 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1770 if (IS_ERR(skb))
1771 return PTR_ERR(skb);
1773 l2cap_do_send(chan, skb);
1774 return len;
1777 switch (chan->mode) {
1778 case L2CAP_MODE_BASIC:
1779 /* Check outgoing MTU */
1780 if (len > chan->omtu)
1781 return -EMSGSIZE;
1783 /* Create a basic PDU */
1784 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1785 if (IS_ERR(skb))
1786 return PTR_ERR(skb);
1788 l2cap_do_send(chan, skb);
1789 err = len;
1790 break;
1792 case L2CAP_MODE_ERTM:
1793 case L2CAP_MODE_STREAMING:
1794 /* Entire SDU fits into one PDU */
1795 if (len <= chan->remote_mps) {
1796 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1797 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1799 if (IS_ERR(skb))
1800 return PTR_ERR(skb);
1802 __skb_queue_tail(&chan->tx_q, skb);
1804 if (chan->tx_send_head == NULL)
1805 chan->tx_send_head = skb;
1807 } else {
1808 /* Segment SDU into multiple PDUs */
1809 err = l2cap_sar_segment_sdu(chan, msg, len);
1810 if (err < 0)
1811 return err;
1814 if (chan->mode == L2CAP_MODE_STREAMING) {
1815 l2cap_streaming_send(chan);
1816 err = len;
1817 break;
1820 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1821 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1822 err = len;
1823 break;
1826 err = l2cap_ertm_send(chan);
1827 if (err >= 0)
1828 err = len;
1830 break;
1832 default:
1833 BT_DBG("bad mode %1.1x", chan->mode);
1834 err = -EBADFD;
1837 return err;
1840 /* Copy frame to all raw sockets on that connection */
1841 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1843 struct sk_buff *nskb;
1844 struct l2cap_chan *chan;
1846 BT_DBG("conn %p", conn);
1848 mutex_lock(&conn->chan_lock);
1850 list_for_each_entry(chan, &conn->chan_l, list) {
1851 struct sock *sk = chan->sk;
1852 if (chan->chan_type != L2CAP_CHAN_RAW)
1853 continue;
1855 /* Don't send frame to the socket it came from */
1856 if (skb->sk == sk)
1857 continue;
1858 nskb = skb_clone(skb, GFP_ATOMIC);
1859 if (!nskb)
1860 continue;
1862 if (chan->ops->recv(chan->data, nskb))
1863 kfree_skb(nskb);
1866 mutex_unlock(&conn->chan_lock);
1869 /* ---- L2CAP signalling commands ---- */
1870 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1871 u8 code, u8 ident, u16 dlen, void *data)
1873 struct sk_buff *skb, **frag;
1874 struct l2cap_cmd_hdr *cmd;
1875 struct l2cap_hdr *lh;
1876 int len, count;
1878 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1879 conn, code, ident, dlen);
1881 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1882 count = min_t(unsigned int, conn->mtu, len);
1884 skb = bt_skb_alloc(count, GFP_ATOMIC);
1885 if (!skb)
1886 return NULL;
1888 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1889 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1891 if (conn->hcon->type == LE_LINK)
1892 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1893 else
1894 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1896 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1897 cmd->code = code;
1898 cmd->ident = ident;
1899 cmd->len = cpu_to_le16(dlen);
1901 if (dlen) {
1902 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1903 memcpy(skb_put(skb, count), data, count);
1904 data += count;
1907 len -= skb->len;
1909 /* Continuation fragments (no L2CAP header) */
1910 frag = &skb_shinfo(skb)->frag_list;
1911 while (len) {
1912 count = min_t(unsigned int, conn->mtu, len);
1914 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1915 if (!*frag)
1916 goto fail;
1918 memcpy(skb_put(*frag, count), data, count);
1920 len -= count;
1921 data += count;
1923 frag = &(*frag)->next;
1926 return skb;
1928 fail:
1929 kfree_skb(skb);
1930 return NULL;
1933 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1935 struct l2cap_conf_opt *opt = *ptr;
1936 int len;
1938 len = L2CAP_CONF_OPT_SIZE + opt->len;
1939 *ptr += len;
1941 *type = opt->type;
1942 *olen = opt->len;
1944 switch (opt->len) {
1945 case 1:
1946 *val = *((u8 *) opt->val);
1947 break;
1949 case 2:
1950 *val = get_unaligned_le16(opt->val);
1951 break;
1953 case 4:
1954 *val = get_unaligned_le32(opt->val);
1955 break;
1957 default:
1958 *val = (unsigned long) opt->val;
1959 break;
1962 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1963 return len;
1966 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1968 struct l2cap_conf_opt *opt = *ptr;
1970 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1972 opt->type = type;
1973 opt->len = len;
1975 switch (len) {
1976 case 1:
1977 *((u8 *) opt->val) = val;
1978 break;
1980 case 2:
1981 put_unaligned_le16(val, opt->val);
1982 break;
1984 case 4:
1985 put_unaligned_le32(val, opt->val);
1986 break;
1988 default:
1989 memcpy(opt->val, (void *) val, len);
1990 break;
1993 *ptr += L2CAP_CONF_OPT_SIZE + len;
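/*
 * Configuration options are simple type/length/value triples on the
 * wire: a type octet and a length octet (together L2CAP_CONF_OPT_SIZE),
 * followed by 1, 2 or 4 bytes of little-endian value, or an opaque blob
 * for larger options such as RFC and EFS. l2cap_get_conf_opt() above
 * walks exactly the same layout when parsing a peer's request.
 */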
1996 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1998 struct l2cap_conf_efs efs;
2000 switch (chan->mode) {
2001 case L2CAP_MODE_ERTM:
2002 efs.id = chan->local_id;
2003 efs.stype = chan->local_stype;
2004 efs.msdu = cpu_to_le16(chan->local_msdu);
2005 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2006 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2007 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2008 break;
2010 case L2CAP_MODE_STREAMING:
2011 efs.id = 1;
2012 efs.stype = L2CAP_SERV_BESTEFFORT;
2013 efs.msdu = cpu_to_le16(chan->local_msdu);
2014 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2015 efs.acc_lat = 0;
2016 efs.flush_to = 0;
2017 break;
2019 default:
2020 return;
2023 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2024 (unsigned long) &efs);
2027 static void l2cap_ack_timeout(struct work_struct *work)
2029 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2030 ack_timer.work);
2032 BT_DBG("chan %p", chan);
2034 l2cap_chan_lock(chan);
2036 __l2cap_send_ack(chan);
2038 l2cap_chan_unlock(chan);
2040 l2cap_chan_put(chan);
2043 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2045 chan->expected_ack_seq = 0;
2046 chan->unacked_frames = 0;
2047 chan->buffer_seq = 0;
2048 chan->num_acked = 0;
2049 chan->frames_sent = 0;
2051 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2052 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2053 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2055 skb_queue_head_init(&chan->srej_q);
2057 INIT_LIST_HEAD(&chan->srej_l);
2060 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2062 switch (mode) {
2063 case L2CAP_MODE_STREAMING:
2064 case L2CAP_MODE_ERTM:
2065 if (l2cap_mode_supported(mode, remote_feat_mask))
2066 return mode;
2067 /* fall through */
2068 default:
2069 return L2CAP_MODE_BASIC;
2073 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2075 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2078 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2080 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2083 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2085 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2086 __l2cap_ews_supported(chan)) {
2087 /* use extended control field */
2088 set_bit(FLAG_EXT_CTRL, &chan->flags);
2089 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2090 } else {
2091 chan->tx_win = min_t(u16, chan->tx_win,
2092 L2CAP_DEFAULT_TX_WINDOW);
2093 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
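/*
 * The extended control field (and window sizes above
 * L2CAP_DEFAULT_TX_WINDOW) is only used when enable_hs is set locally
 * and the remote feature mask advertises L2CAP_FEAT_EXT_WINDOW;
 * otherwise tx_win is clamped to the classic default.
 */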
2097 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2099 struct l2cap_conf_req *req = data;
2100 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2101 void *ptr = req->data;
2102 u16 size;
2104 BT_DBG("chan %p", chan);
2106 if (chan->num_conf_req || chan->num_conf_rsp)
2107 goto done;
2109 switch (chan->mode) {
2110 case L2CAP_MODE_STREAMING:
2111 case L2CAP_MODE_ERTM:
2112 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2113 break;
2115 if (__l2cap_efs_supported(chan))
2116 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2118 /* fall through */
2119 default:
2120 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2121 break;
2124 done:
2125 if (chan->imtu != L2CAP_DEFAULT_MTU)
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2128 switch (chan->mode) {
2129 case L2CAP_MODE_BASIC:
2130 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2131 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2132 break;
2134 rfc.mode = L2CAP_MODE_BASIC;
2135 rfc.txwin_size = 0;
2136 rfc.max_transmit = 0;
2137 rfc.retrans_timeout = 0;
2138 rfc.monitor_timeout = 0;
2139 rfc.max_pdu_size = 0;
2141 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2142 (unsigned long) &rfc);
2143 break;
2145 case L2CAP_MODE_ERTM:
2146 rfc.mode = L2CAP_MODE_ERTM;
2147 rfc.max_transmit = chan->max_tx;
2148 rfc.retrans_timeout = 0;
2149 rfc.monitor_timeout = 0;
2151 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2152 L2CAP_EXT_HDR_SIZE -
2153 L2CAP_SDULEN_SIZE -
2154 L2CAP_FCS_SIZE);
2155 rfc.max_pdu_size = cpu_to_le16(size);
2157 l2cap_txwin_setup(chan);
2159 rfc.txwin_size = min_t(u16, chan->tx_win,
2160 L2CAP_DEFAULT_TX_WINDOW);
2162 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2163 (unsigned long) &rfc);
2165 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2166 l2cap_add_opt_efs(&ptr, chan);
2168 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2169 break;
2171 if (chan->fcs == L2CAP_FCS_NONE ||
2172 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2173 chan->fcs = L2CAP_FCS_NONE;
2174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2177 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2178 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2179 chan->tx_win);
2180 break;
2182 case L2CAP_MODE_STREAMING:
2183 rfc.mode = L2CAP_MODE_STREAMING;
2184 rfc.txwin_size = 0;
2185 rfc.max_transmit = 0;
2186 rfc.retrans_timeout = 0;
2187 rfc.monitor_timeout = 0;
2189 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2190 L2CAP_EXT_HDR_SIZE -
2191 L2CAP_SDULEN_SIZE -
2192 L2CAP_FCS_SIZE);
2193 rfc.max_pdu_size = cpu_to_le16(size);
2195 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2196 (unsigned long) &rfc);
2198 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2199 l2cap_add_opt_efs(&ptr, chan);
2201 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2202 break;
2204 if (chan->fcs == L2CAP_FCS_NONE ||
2205 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2206 chan->fcs = L2CAP_FCS_NONE;
2207 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2209 break;
2212 req->dcid = cpu_to_le16(chan->dcid);
2213 req->flags = cpu_to_le16(0);
2215 return ptr - data;
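/*
 * The value returned is the length of the request just built, which is
 * what l2cap_send_cmd() expects as dlen, e.g.
 * l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
 * l2cap_build_conf_req(chan, buf), buf);
 */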
2218 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2220 struct l2cap_conf_rsp *rsp = data;
2221 void *ptr = rsp->data;
2222 void *req = chan->conf_req;
2223 int len = chan->conf_len;
2224 int type, hint, olen;
2225 unsigned long val;
2226 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2227 struct l2cap_conf_efs efs;
2228 u8 remote_efs = 0;
2229 u16 mtu = L2CAP_DEFAULT_MTU;
2230 u16 result = L2CAP_CONF_SUCCESS;
2231 u16 size;
2233 BT_DBG("chan %p", chan);
2235 while (len >= L2CAP_CONF_OPT_SIZE) {
2236 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2238 hint = type & L2CAP_CONF_HINT;
2239 type &= L2CAP_CONF_MASK;
2241 switch (type) {
2242 case L2CAP_CONF_MTU:
2243 mtu = val;
2244 break;
2246 case L2CAP_CONF_FLUSH_TO:
2247 chan->flush_to = val;
2248 break;
2250 case L2CAP_CONF_QOS:
2251 break;
2253 case L2CAP_CONF_RFC:
2254 if (olen == sizeof(rfc))
2255 memcpy(&rfc, (void *) val, olen);
2256 break;
2258 case L2CAP_CONF_FCS:
2259 if (val == L2CAP_FCS_NONE)
2260 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2261 break;
2263 case L2CAP_CONF_EFS:
2264 remote_efs = 1;
2265 if (olen == sizeof(efs))
2266 memcpy(&efs, (void *) val, olen);
2267 break;
2269 case L2CAP_CONF_EWS:
2270 if (!enable_hs)
2271 return -ECONNREFUSED;
2273 set_bit(FLAG_EXT_CTRL, &chan->flags);
2274 set_bit(CONF_EWS_RECV, &chan->conf_state);
2275 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2276 chan->remote_tx_win = val;
2277 break;
2279 default:
2280 if (hint)
2281 break;
2283 result = L2CAP_CONF_UNKNOWN;
2284 *((u8 *) ptr++) = type;
2285 break;
2289 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2290 goto done;
2292 switch (chan->mode) {
2293 case L2CAP_MODE_STREAMING:
2294 case L2CAP_MODE_ERTM:
2295 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2296 chan->mode = l2cap_select_mode(rfc.mode,
2297 chan->conn->feat_mask);
2298 break;
2301 if (remote_efs) {
2302 if (__l2cap_efs_supported(chan))
2303 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2304 else
2305 return -ECONNREFUSED;
2308 if (chan->mode != rfc.mode)
2309 return -ECONNREFUSED;
2311 break;
2314 done:
2315 if (chan->mode != rfc.mode) {
2316 result = L2CAP_CONF_UNACCEPT;
2317 rfc.mode = chan->mode;
2319 if (chan->num_conf_rsp == 1)
2320 return -ECONNREFUSED;
2322 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2323 sizeof(rfc), (unsigned long) &rfc);
2326 if (result == L2CAP_CONF_SUCCESS) {
2327 /* Configure output options and let the other side know
2328 * which ones we don't like. */
2330 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2331 result = L2CAP_CONF_UNACCEPT;
2332 else {
2333 chan->omtu = mtu;
2334 set_bit(CONF_MTU_DONE, &chan->conf_state);
2336 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2338 if (remote_efs) {
2339 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2340 efs.stype != L2CAP_SERV_NOTRAFIC &&
2341 efs.stype != chan->local_stype) {
2343 result = L2CAP_CONF_UNACCEPT;
2345 if (chan->num_conf_req >= 1)
2346 return -ECONNREFUSED;
2348 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2349 sizeof(efs),
2350 (unsigned long) &efs);
2351 } else {
2352 /* Send PENDING Conf Rsp */
2353 result = L2CAP_CONF_PENDING;
2354 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2358 switch (rfc.mode) {
2359 case L2CAP_MODE_BASIC:
2360 chan->fcs = L2CAP_FCS_NONE;
2361 set_bit(CONF_MODE_DONE, &chan->conf_state);
2362 break;
2364 case L2CAP_MODE_ERTM:
2365 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2366 chan->remote_tx_win = rfc.txwin_size;
2367 else
2368 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2370 chan->remote_max_tx = rfc.max_transmit;
2372 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2373 chan->conn->mtu -
2374 L2CAP_EXT_HDR_SIZE -
2375 L2CAP_SDULEN_SIZE -
2376 L2CAP_FCS_SIZE);
2377 rfc.max_pdu_size = cpu_to_le16(size);
2378 chan->remote_mps = size;
2380 rfc.retrans_timeout =
2381 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2382 rfc.monitor_timeout =
2383 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2385 set_bit(CONF_MODE_DONE, &chan->conf_state);
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2388 sizeof(rfc), (unsigned long) &rfc);
2390 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2391 chan->remote_id = efs.id;
2392 chan->remote_stype = efs.stype;
2393 chan->remote_msdu = le16_to_cpu(efs.msdu);
2394 chan->remote_flush_to =
2395 le32_to_cpu(efs.flush_to);
2396 chan->remote_acc_lat =
2397 le32_to_cpu(efs.acc_lat);
2398 chan->remote_sdu_itime =
2399 le32_to_cpu(efs.sdu_itime);
2400 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2401 sizeof(efs), (unsigned long) &efs);
2403 break;
2405 case L2CAP_MODE_STREAMING:
2406 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2407 chan->conn->mtu -
2408 L2CAP_EXT_HDR_SIZE -
2409 L2CAP_SDULEN_SIZE -
2410 L2CAP_FCS_SIZE);
2411 rfc.max_pdu_size = cpu_to_le16(size);
2412 chan->remote_mps = size;
2414 set_bit(CONF_MODE_DONE, &chan->conf_state);
2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2417 sizeof(rfc), (unsigned long) &rfc);
2419 break;
2421 default:
2422 result = L2CAP_CONF_UNACCEPT;
2424 memset(&rfc, 0, sizeof(rfc));
2425 rfc.mode = chan->mode;
2428 if (result == L2CAP_CONF_SUCCESS)
2429 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2431 rsp->scid = cpu_to_le16(chan->dcid);
2432 rsp->result = cpu_to_le16(result);
2433 rsp->flags = cpu_to_le16(0x0000);
2435 return ptr - data;
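/* Illustrative sketch, not part of this file: every option that
 * l2cap_add_conf_opt() appends and l2cap_get_conf_opt() walks above is a
 * plain type/length/value triplet with a little-endian value, following the
 * L2CAP configuration option format. The standalone helpers below (the names
 * and the 2-byte-value restriction are made up for the example) show that
 * wire layout for the MTU option.
 */
#include <stdint.h>
#include <stdio.h>

#define CONF_OPT_MTU 0x01	/* option type, per the L2CAP spec */

/* Append one option with a 2-byte little-endian value; returns bytes used. */
static size_t put_conf_opt(uint8_t *buf, uint8_t type, uint16_t val)
{
	buf[0] = type;
	buf[1] = 2;		/* length of the value field */
	buf[2] = val & 0xff;
	buf[3] = val >> 8;
	return 4;
}

/* Parse one option (2-byte value assumed); returns bytes consumed. */
static size_t get_conf_opt(const uint8_t *buf, uint8_t *type, uint16_t *val)
{
	*type = buf[0];
	*val = buf[2] | (buf[3] << 8);
	return 2 + buf[1];
}

int main(void)
{
	uint8_t buf[16];
	uint8_t type;
	uint16_t val;
	size_t used = put_conf_opt(buf, CONF_OPT_MTU, 672);

	get_conf_opt(buf, &type, &val);
	printf("option 0x%02x value %u (%zu bytes)\n", type, val, used);
	return 0;
}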
2438 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2440 struct l2cap_conf_req *req = data;
2441 void *ptr = req->data;
2442 int type, olen;
2443 unsigned long val;
2444 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2445 struct l2cap_conf_efs efs;
2447 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2449 while (len >= L2CAP_CONF_OPT_SIZE) {
2450 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2452 switch (type) {
2453 case L2CAP_CONF_MTU:
2454 if (val < L2CAP_DEFAULT_MIN_MTU) {
2455 *result = L2CAP_CONF_UNACCEPT;
2456 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2457 } else
2458 chan->imtu = val;
2459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2460 break;
2462 case L2CAP_CONF_FLUSH_TO:
2463 chan->flush_to = val;
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2465 2, chan->flush_to);
2466 break;
2468 case L2CAP_CONF_RFC:
2469 if (olen == sizeof(rfc))
2470 memcpy(&rfc, (void *)val, olen);
2472 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2473 rfc.mode != chan->mode)
2474 return -ECONNREFUSED;
2476 chan->fcs = 0;
2478 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2479 sizeof(rfc), (unsigned long) &rfc);
2480 break;
2482 case L2CAP_CONF_EWS:
2483 chan->tx_win = min_t(u16, val,
2484 L2CAP_DEFAULT_EXT_WINDOW);
2485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2486 chan->tx_win);
2487 break;
2489 case L2CAP_CONF_EFS:
2490 if (olen == sizeof(efs))
2491 memcpy(&efs, (void *)val, olen);
2493 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2494 efs.stype != L2CAP_SERV_NOTRAFIC &&
2495 efs.stype != chan->local_stype)
2496 return -ECONNREFUSED;
2498 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2499 sizeof(efs), (unsigned long) &efs);
2500 break;
2504 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2505 return -ECONNREFUSED;
2507 chan->mode = rfc.mode;
2509 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2510 switch (rfc.mode) {
2511 case L2CAP_MODE_ERTM:
2512 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2513 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2514 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2516 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2517 chan->local_msdu = le16_to_cpu(efs.msdu);
2518 chan->local_sdu_itime =
2519 le32_to_cpu(efs.sdu_itime);
2520 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2521 chan->local_flush_to =
2522 le32_to_cpu(efs.flush_to);
2524 break;
2526 case L2CAP_MODE_STREAMING:
2527 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2531 req->dcid = cpu_to_le16(chan->dcid);
2532 req->flags = cpu_to_le16(0x0000);
2534 return ptr - data;
2537 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2539 struct l2cap_conf_rsp *rsp = data;
2540 void *ptr = rsp->data;
2542 BT_DBG("chan %p", chan);
2544 rsp->scid = cpu_to_le16(chan->dcid);
2545 rsp->result = cpu_to_le16(result);
2546 rsp->flags = cpu_to_le16(flags);
2548 return ptr - data;
2551 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2553 struct l2cap_conn_rsp rsp;
2554 struct l2cap_conn *conn = chan->conn;
2555 u8 buf[128];
2557 rsp.scid = cpu_to_le16(chan->dcid);
2558 rsp.dcid = cpu_to_le16(chan->scid);
2559 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2560 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2561 l2cap_send_cmd(conn, chan->ident,
2562 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2564 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2565 return;
2567 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2568 l2cap_build_conf_req(chan, buf), buf);
2569 chan->num_conf_req++;
2572 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2574 int type, olen;
2575 unsigned long val;
2576 struct l2cap_conf_rfc rfc;
2578 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2580 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2581 return;
2583 while (len >= L2CAP_CONF_OPT_SIZE) {
2584 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2586 switch (type) {
2587 case L2CAP_CONF_RFC:
2588 if (olen == sizeof(rfc))
2589 memcpy(&rfc, (void *)val, olen);
2590 goto done;
2594 /* Use sane default values in case a misbehaving remote device
2595 * did not send an RFC option.
2596 */
2597 rfc.mode = chan->mode;
2598 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2599 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2600 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2602 BT_ERR("Expected RFC option was not found, using defaults");
2604 done:
2605 switch (rfc.mode) {
2606 case L2CAP_MODE_ERTM:
2607 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2608 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2609 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2610 break;
2611 case L2CAP_MODE_STREAMING:
2612 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2616 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2618 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2620 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2621 return 0;
2623 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2624 cmd->ident == conn->info_ident) {
2625 cancel_delayed_work(&conn->info_timer);
2627 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2628 conn->info_ident = 0;
2630 l2cap_conn_start(conn);
2633 return 0;
2636 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2638 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2639 struct l2cap_conn_rsp rsp;
2640 struct l2cap_chan *chan = NULL, *pchan;
2641 struct sock *parent, *sk = NULL;
2642 int result, status = L2CAP_CS_NO_INFO;
2644 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2645 __le16 psm = req->psm;
2647 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2649 /* Check if we have a socket listening on this PSM */
2650 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2651 if (!pchan) {
2652 result = L2CAP_CR_BAD_PSM;
2653 goto sendresp;
2656 parent = pchan->sk;
2658 mutex_lock(&conn->chan_lock);
2659 lock_sock(parent);
2661 /* Check if the ACL is secure enough (if not SDP) */
2662 if (psm != cpu_to_le16(0x0001) &&
2663 !hci_conn_check_link_mode(conn->hcon)) {
2664 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2665 result = L2CAP_CR_SEC_BLOCK;
2666 goto response;
2669 result = L2CAP_CR_NO_MEM;
2671 /* Check for backlog size */
2672 if (sk_acceptq_is_full(parent)) {
2673 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2674 goto response;
2677 chan = pchan->ops->new_connection(pchan->data);
2678 if (!chan)
2679 goto response;
2681 sk = chan->sk;
2683 /* Check if we already have a channel with that dcid */
2684 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2685 sock_set_flag(sk, SOCK_ZAPPED);
2686 chan->ops->close(chan->data);
2687 goto response;
2690 hci_conn_hold(conn->hcon);
2692 bacpy(&bt_sk(sk)->src, conn->src);
2693 bacpy(&bt_sk(sk)->dst, conn->dst);
2694 chan->psm = psm;
2695 chan->dcid = scid;
2697 bt_accept_enqueue(parent, sk);
2699 __l2cap_chan_add(conn, chan);
2701 dcid = chan->scid;
2703 __set_chan_timer(chan, sk->sk_sndtimeo);
2705 chan->ident = cmd->ident;
2707 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2708 if (l2cap_chan_check_security(chan)) {
2709 if (bt_sk(sk)->defer_setup) {
2710 __l2cap_state_change(chan, BT_CONNECT2);
2711 result = L2CAP_CR_PEND;
2712 status = L2CAP_CS_AUTHOR_PEND;
2713 parent->sk_data_ready(parent, 0);
2714 } else {
2715 __l2cap_state_change(chan, BT_CONFIG);
2716 result = L2CAP_CR_SUCCESS;
2717 status = L2CAP_CS_NO_INFO;
2719 } else {
2720 __l2cap_state_change(chan, BT_CONNECT2);
2721 result = L2CAP_CR_PEND;
2722 status = L2CAP_CS_AUTHEN_PEND;
2724 } else {
2725 __l2cap_state_change(chan, BT_CONNECT2);
2726 result = L2CAP_CR_PEND;
2727 status = L2CAP_CS_NO_INFO;
2730 response:
2731 release_sock(parent);
2732 mutex_unlock(&conn->chan_lock);
2734 sendresp:
2735 rsp.scid = cpu_to_le16(scid);
2736 rsp.dcid = cpu_to_le16(dcid);
2737 rsp.result = cpu_to_le16(result);
2738 rsp.status = cpu_to_le16(status);
2739 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2741 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2742 struct l2cap_info_req info;
2743 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2745 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2746 conn->info_ident = l2cap_get_ident(conn);
2748 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2750 l2cap_send_cmd(conn, conn->info_ident,
2751 L2CAP_INFO_REQ, sizeof(info), &info);
2754 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2755 result == L2CAP_CR_SUCCESS) {
2756 u8 buf[128];
2757 set_bit(CONF_REQ_SENT, &chan->conf_state);
2758 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2759 l2cap_build_conf_req(chan, buf), buf);
2760 chan->num_conf_req++;
2763 return 0;
2766 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2768 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2769 u16 scid, dcid, result, status;
2770 struct l2cap_chan *chan;
2771 u8 req[128];
2772 int err;
2774 scid = __le16_to_cpu(rsp->scid);
2775 dcid = __le16_to_cpu(rsp->dcid);
2776 result = __le16_to_cpu(rsp->result);
2777 status = __le16_to_cpu(rsp->status);
2779 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2780 dcid, scid, result, status);
2782 mutex_lock(&conn->chan_lock);
2784 if (scid) {
2785 chan = __l2cap_get_chan_by_scid(conn, scid);
2786 if (!chan) {
2787 err = -EFAULT;
2788 goto unlock;
2790 } else {
2791 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2792 if (!chan) {
2793 err = -EFAULT;
2794 goto unlock;
2798 err = 0;
2800 l2cap_chan_lock(chan);
2802 switch (result) {
2803 case L2CAP_CR_SUCCESS:
2804 l2cap_state_change(chan, BT_CONFIG);
2805 chan->ident = 0;
2806 chan->dcid = dcid;
2807 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2809 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2810 break;
2812 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2813 l2cap_build_conf_req(chan, req), req);
2814 chan->num_conf_req++;
2815 break;
2817 case L2CAP_CR_PEND:
2818 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2819 break;
2821 default:
2822 l2cap_chan_del(chan, ECONNREFUSED);
2823 break;
2826 l2cap_chan_unlock(chan);
2828 unlock:
2829 mutex_unlock(&conn->chan_lock);
2831 return err;
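/* set_default_fcs() below settles the frame check sequence once
 * configuration finishes: basic mode frames never carry an FCS, while ERTM
 * and streaming channels default to CRC-16 unless the remote offered
 * L2CAP_FCS_NONE during configuration (recorded in CONF_NO_FCS_RECV by
 * l2cap_parse_conf_req() above).
 */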
2834 static inline void set_default_fcs(struct l2cap_chan *chan)
2836 /* FCS is enabled only in ERTM or streaming mode, if one or both
2837 * sides request it.
2838 */
2839 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2840 chan->fcs = L2CAP_FCS_NONE;
2841 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2842 chan->fcs = L2CAP_FCS_CRC16;
2845 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2847 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2848 u16 dcid, flags;
2849 u8 rsp[64];
2850 struct l2cap_chan *chan;
2851 int len;
2853 dcid = __le16_to_cpu(req->dcid);
2854 flags = __le16_to_cpu(req->flags);
2856 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2858 chan = l2cap_get_chan_by_scid(conn, dcid);
2859 if (!chan)
2860 return -ENOENT;
2862 l2cap_chan_lock(chan);
2864 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2865 struct l2cap_cmd_rej_cid rej;
2867 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2868 rej.scid = cpu_to_le16(chan->scid);
2869 rej.dcid = cpu_to_le16(chan->dcid);
2871 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2872 sizeof(rej), &rej);
2873 goto unlock;
2876 /* Reject if config buffer is too small. */
2877 len = cmd_len - sizeof(*req);
2878 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2879 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2880 l2cap_build_conf_rsp(chan, rsp,
2881 L2CAP_CONF_REJECT, flags), rsp);
2882 goto unlock;
2885 /* Store config. */
2886 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2887 chan->conf_len += len;
2889 if (flags & 0x0001) {
2890 /* Incomplete config. Send empty response. */
2891 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2892 l2cap_build_conf_rsp(chan, rsp,
2893 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2894 goto unlock;
2897 /* Complete config. */
2898 len = l2cap_parse_conf_req(chan, rsp);
2899 if (len < 0) {
2900 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2901 goto unlock;
2904 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2905 chan->num_conf_rsp++;
2907 /* Reset config buffer. */
2908 chan->conf_len = 0;
2910 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2911 goto unlock;
2913 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2914 set_default_fcs(chan);
2916 l2cap_state_change(chan, BT_CONNECTED);
2918 chan->next_tx_seq = 0;
2919 chan->expected_tx_seq = 0;
2920 skb_queue_head_init(&chan->tx_q);
2921 if (chan->mode == L2CAP_MODE_ERTM)
2922 l2cap_ertm_init(chan);
2924 l2cap_chan_ready(chan);
2925 goto unlock;
2928 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2929 u8 buf[64];
2930 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2931 l2cap_build_conf_req(chan, buf), buf);
2932 chan->num_conf_req++;
2935 /* Got Conf Rsp PENDING from the remote side and assume we sent
2936 Conf Rsp PENDING in the code above */
2937 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2938 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2940 /* check compatibility */
2942 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2943 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2945 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2946 l2cap_build_conf_rsp(chan, rsp,
2947 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2950 unlock:
2951 l2cap_chan_unlock(chan);
2952 return 0;
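/* Channel configuration is a two-way handshake: l2cap_config_req() above
 * answers the peer's Configure Request and ends up with CONF_OUTPUT_DONE set
 * once our response is final, while l2cap_config_rsp() below processes the
 * peer's response to our own request and sets CONF_INPUT_DONE. The channel
 * moves to BT_CONNECTED only when both bits are set, from whichever handler
 * completes last.
 */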
2955 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2957 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2958 u16 scid, flags, result;
2959 struct l2cap_chan *chan;
2960 int len = cmd->len - sizeof(*rsp);
2962 scid = __le16_to_cpu(rsp->scid);
2963 flags = __le16_to_cpu(rsp->flags);
2964 result = __le16_to_cpu(rsp->result);
2966 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2967 scid, flags, result);
2969 chan = l2cap_get_chan_by_scid(conn, scid);
2970 if (!chan)
2971 return 0;
2973 l2cap_chan_lock(chan);
2975 switch (result) {
2976 case L2CAP_CONF_SUCCESS:
2977 l2cap_conf_rfc_get(chan, rsp->data, len);
2978 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2979 break;
2981 case L2CAP_CONF_PENDING:
2982 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2984 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2985 char buf[64];
2987 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2988 buf, &result);
2989 if (len < 0) {
2990 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2991 goto done;
2994 /* check compatibility */
2996 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2997 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2999 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3000 l2cap_build_conf_rsp(chan, buf,
3001 L2CAP_CONF_SUCCESS, 0x0000), buf);
3003 goto done;
3005 case L2CAP_CONF_UNACCEPT:
3006 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3007 char req[64];
3009 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3010 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3011 goto done;
3014 /* throw out any old stored conf requests */
3015 result = L2CAP_CONF_SUCCESS;
3016 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3017 req, &result);
3018 if (len < 0) {
3019 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3020 goto done;
3023 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3024 L2CAP_CONF_REQ, len, req);
3025 chan->num_conf_req++;
3026 if (result != L2CAP_CONF_SUCCESS)
3027 goto done;
3028 break;
3031 default:
3032 l2cap_chan_set_err(chan, ECONNRESET);
3034 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3035 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3036 goto done;
3039 if (flags & 0x01)
3040 goto done;
3042 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3044 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3045 set_default_fcs(chan);
3047 l2cap_state_change(chan, BT_CONNECTED);
3048 chan->next_tx_seq = 0;
3049 chan->expected_tx_seq = 0;
3050 skb_queue_head_init(&chan->tx_q);
3051 if (chan->mode == L2CAP_MODE_ERTM)
3052 l2cap_ertm_init(chan);
3054 l2cap_chan_ready(chan);
3057 done:
3058 l2cap_chan_unlock(chan);
3059 return 0;
3062 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3064 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3065 struct l2cap_disconn_rsp rsp;
3066 u16 dcid, scid;
3067 struct l2cap_chan *chan;
3068 struct sock *sk;
3070 scid = __le16_to_cpu(req->scid);
3071 dcid = __le16_to_cpu(req->dcid);
3073 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3075 mutex_lock(&conn->chan_lock);
3077 chan = __l2cap_get_chan_by_scid(conn, dcid);
3078 if (!chan) {
3079 mutex_unlock(&conn->chan_lock);
3080 return 0;
3083 l2cap_chan_lock(chan);
3085 sk = chan->sk;
3087 rsp.dcid = cpu_to_le16(chan->scid);
3088 rsp.scid = cpu_to_le16(chan->dcid);
3089 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3091 lock_sock(sk);
3092 sk->sk_shutdown = SHUTDOWN_MASK;
3093 release_sock(sk);
3095 l2cap_chan_del(chan, ECONNRESET);
3097 l2cap_chan_unlock(chan);
3099 chan->ops->close(chan->data);
3101 mutex_unlock(&conn->chan_lock);
3103 return 0;
3106 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3108 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3109 u16 dcid, scid;
3110 struct l2cap_chan *chan;
3112 scid = __le16_to_cpu(rsp->scid);
3113 dcid = __le16_to_cpu(rsp->dcid);
3115 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3117 mutex_lock(&conn->chan_lock);
3119 chan = __l2cap_get_chan_by_scid(conn, scid);
3120 if (!chan) {
3121 mutex_unlock(&conn->chan_lock);
3122 return 0;
3125 l2cap_chan_lock(chan);
3127 l2cap_chan_del(chan, 0);
3129 l2cap_chan_unlock(chan);
3131 chan->ops->close(chan->data);
3133 mutex_unlock(&conn->chan_lock);
3135 return 0;
3138 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3140 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3141 u16 type;
3143 type = __le16_to_cpu(req->type);
3145 BT_DBG("type 0x%4.4x", type);
3147 if (type == L2CAP_IT_FEAT_MASK) {
3148 u8 buf[8];
3149 u32 feat_mask = l2cap_feat_mask;
3150 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3151 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3152 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3153 if (!disable_ertm)
3154 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3155 | L2CAP_FEAT_FCS;
3156 if (enable_hs)
3157 feat_mask |= L2CAP_FEAT_EXT_FLOW
3158 | L2CAP_FEAT_EXT_WINDOW;
3160 put_unaligned_le32(feat_mask, rsp->data);
3161 l2cap_send_cmd(conn, cmd->ident,
3162 L2CAP_INFO_RSP, sizeof(buf), buf);
3163 } else if (type == L2CAP_IT_FIXED_CHAN) {
3164 u8 buf[12];
3165 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3167 if (enable_hs)
3168 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3169 else
3170 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3172 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3173 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3174 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3175 l2cap_send_cmd(conn, cmd->ident,
3176 L2CAP_INFO_RSP, sizeof(buf), buf);
3177 } else {
3178 struct l2cap_info_rsp rsp;
3179 rsp.type = cpu_to_le16(type);
3180 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3181 l2cap_send_cmd(conn, cmd->ident,
3182 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3185 return 0;
3188 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3190 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3191 u16 type, result;
3193 type = __le16_to_cpu(rsp->type);
3194 result = __le16_to_cpu(rsp->result);
3196 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3198 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3199 if (cmd->ident != conn->info_ident ||
3200 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3201 return 0;
3203 cancel_delayed_work(&conn->info_timer);
3205 if (result != L2CAP_IR_SUCCESS) {
3206 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3207 conn->info_ident = 0;
3209 l2cap_conn_start(conn);
3211 return 0;
3214 switch (type) {
3215 case L2CAP_IT_FEAT_MASK:
3216 conn->feat_mask = get_unaligned_le32(rsp->data);
3218 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3219 struct l2cap_info_req req;
3220 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3222 conn->info_ident = l2cap_get_ident(conn);
3224 l2cap_send_cmd(conn, conn->info_ident,
3225 L2CAP_INFO_REQ, sizeof(req), &req);
3226 } else {
3227 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3228 conn->info_ident = 0;
3230 l2cap_conn_start(conn);
3232 break;
3234 case L2CAP_IT_FIXED_CHAN:
3235 conn->fixed_chan_mask = rsp->data[0];
3236 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3237 conn->info_ident = 0;
3239 l2cap_conn_start(conn);
3240 break;
3243 return 0;
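/* Illustrative sketch, not part of this file: the 4-byte value exchanged by
 * the information request/response handlers above is the little-endian
 * extended feature mask. The bit positions below follow the L2CAP
 * specification (the macro names only mirror the kernel's L2CAP_FEAT_*
 * constants).
 */
#include <stdint.h>
#include <stdio.h>

#define FEAT_FLOWCTL	0x00000001
#define FEAT_RETRANS	0x00000002
#define FEAT_BIDIR_QOS	0x00000004
#define FEAT_ERTM	0x00000008
#define FEAT_STREAMING	0x00000010
#define FEAT_FCS	0x00000020
#define FEAT_EXT_FLOW	0x00000040
#define FEAT_FIXED_CHAN	0x00000080
#define FEAT_EXT_WINDOW	0x00000100

int main(void)
{
	/* A mask a peer might report: ERTM, streaming, FCS, fixed channels. */
	uint32_t feat_mask = FEAT_ERTM | FEAT_STREAMING | FEAT_FCS |
			     FEAT_FIXED_CHAN;

	if (feat_mask & FEAT_ERTM)
		printf("peer supports enhanced retransmission mode\n");
	if (feat_mask & FEAT_FIXED_CHAN)
		printf("peer supports fixed channels; a second info request follows\n");
	return 0;
}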
3246 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3247 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3248 void *data)
3250 struct l2cap_create_chan_req *req = data;
3251 struct l2cap_create_chan_rsp rsp;
3252 u16 psm, scid;
3254 if (cmd_len != sizeof(*req))
3255 return -EPROTO;
3257 if (!enable_hs)
3258 return -EINVAL;
3260 psm = le16_to_cpu(req->psm);
3261 scid = le16_to_cpu(req->scid);
3263 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3265 /* Placeholder: Always reject */
3266 rsp.dcid = 0;
3267 rsp.scid = cpu_to_le16(scid);
3268 rsp.result = L2CAP_CR_NO_MEM;
3269 rsp.status = L2CAP_CS_NO_INFO;
3271 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3272 sizeof(rsp), &rsp);
3274 return 0;
3277 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3278 struct l2cap_cmd_hdr *cmd, void *data)
3280 BT_DBG("conn %p", conn);
3282 return l2cap_connect_rsp(conn, cmd, data);
3285 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3286 u16 icid, u16 result)
3288 struct l2cap_move_chan_rsp rsp;
3290 BT_DBG("icid %d, result %d", icid, result);
3292 rsp.icid = cpu_to_le16(icid);
3293 rsp.result = cpu_to_le16(result);
3295 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3298 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3299 struct l2cap_chan *chan, u16 icid, u16 result)
3301 struct l2cap_move_chan_cfm cfm;
3302 u8 ident;
3304 BT_DBG("icid %d, result %d", icid, result);
3306 ident = l2cap_get_ident(conn);
3307 if (chan)
3308 chan->ident = ident;
3310 cfm.icid = cpu_to_le16(icid);
3311 cfm.result = cpu_to_le16(result);
3313 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3316 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3317 u16 icid)
3319 struct l2cap_move_chan_cfm_rsp rsp;
3321 BT_DBG("icid %d", icid);
3323 rsp.icid = cpu_to_le16(icid);
3324 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3327 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3328 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3330 struct l2cap_move_chan_req *req = data;
3331 u16 icid = 0;
3332 u16 result = L2CAP_MR_NOT_ALLOWED;
3334 if (cmd_len != sizeof(*req))
3335 return -EPROTO;
3337 icid = le16_to_cpu(req->icid);
3339 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3341 if (!enable_hs)
3342 return -EINVAL;
3344 /* Placeholder: Always refuse */
3345 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3347 return 0;
3350 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3351 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3353 struct l2cap_move_chan_rsp *rsp = data;
3354 u16 icid, result;
3356 if (cmd_len != sizeof(*rsp))
3357 return -EPROTO;
3359 icid = le16_to_cpu(rsp->icid);
3360 result = le16_to_cpu(rsp->result);
3362 BT_DBG("icid %d, result %d", icid, result);
3364 /* Placeholder: Always unconfirmed */
3365 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3367 return 0;
3370 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3371 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3373 struct l2cap_move_chan_cfm *cfm = data;
3374 u16 icid, result;
3376 if (cmd_len != sizeof(*cfm))
3377 return -EPROTO;
3379 icid = le16_to_cpu(cfm->icid);
3380 result = le16_to_cpu(cfm->result);
3382 BT_DBG("icid %d, result %d", icid, result);
3384 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3386 return 0;
3389 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3390 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3392 struct l2cap_move_chan_cfm_rsp *rsp = data;
3393 u16 icid;
3395 if (cmd_len != sizeof(*rsp))
3396 return -EPROTO;
3398 icid = le16_to_cpu(rsp->icid);
3400 BT_DBG("icid %d", icid);
3402 return 0;
3405 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3406 u16 to_multiplier)
3408 u16 max_latency;
3410 if (min > max || min < 6 || max > 3200)
3411 return -EINVAL;
3413 if (to_multiplier < 10 || to_multiplier > 3200)
3414 return -EINVAL;
3416 if (max >= to_multiplier * 8)
3417 return -EINVAL;
3419 max_latency = (to_multiplier * 8 / max) - 1;
3420 if (latency > 499 || latency > max_latency)
3421 return -EINVAL;
3423 return 0;
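/* Illustrative sketch, not part of this file: the same validity check as
 * l2cap_check_conn_param() above, with the unit conversions spelled out.
 * min/max are connection intervals in 1.25 ms units, latency is counted in
 * connection events, and to_multiplier is the supervision timeout in 10 ms
 * units; the helper name and sample values are made up.
 */
#include <stdint.h>
#include <stdio.h>

static int check_conn_param(uint16_t min, uint16_t max, uint16_t latency,
			    uint16_t to_multiplier)
{
	uint16_t max_latency;

	if (min > max || min < 6 || max > 3200)
		return -1;
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -1;
	/* max * 1.25 ms must stay below to_multiplier * 10 ms */
	if (max >= to_multiplier * 8)
		return -1;
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -1;
	return 0;
}

int main(void)
{
	/* 30-50 ms interval, no slave latency, 4 s timeout: accepted (0). */
	printf("%d\n", check_conn_param(24, 40, 0, 400));
	/* 50-100 ms interval with a 100 ms timeout: rejected (-1), because
	 * the supervision timeout is not longer than the maximum interval. */
	printf("%d\n", check_conn_param(40, 80, 0, 10));
	return 0;
}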
3426 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3427 struct l2cap_cmd_hdr *cmd, u8 *data)
3429 struct hci_conn *hcon = conn->hcon;
3430 struct l2cap_conn_param_update_req *req;
3431 struct l2cap_conn_param_update_rsp rsp;
3432 u16 min, max, latency, to_multiplier, cmd_len;
3433 int err;
3435 if (!(hcon->link_mode & HCI_LM_MASTER))
3436 return -EINVAL;
3438 cmd_len = __le16_to_cpu(cmd->len);
3439 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3440 return -EPROTO;
3442 req = (struct l2cap_conn_param_update_req *) data;
3443 min = __le16_to_cpu(req->min);
3444 max = __le16_to_cpu(req->max);
3445 latency = __le16_to_cpu(req->latency);
3446 to_multiplier = __le16_to_cpu(req->to_multiplier);
3448 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3449 min, max, latency, to_multiplier);
3451 memset(&rsp, 0, sizeof(rsp));
3453 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3454 if (err)
3455 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3456 else
3457 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3459 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3460 sizeof(rsp), &rsp);
3462 if (!err)
3463 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3465 return 0;
3468 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3469 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3471 int err = 0;
3473 switch (cmd->code) {
3474 case L2CAP_COMMAND_REJ:
3475 l2cap_command_rej(conn, cmd, data);
3476 break;
3478 case L2CAP_CONN_REQ:
3479 err = l2cap_connect_req(conn, cmd, data);
3480 break;
3482 case L2CAP_CONN_RSP:
3483 err = l2cap_connect_rsp(conn, cmd, data);
3484 break;
3486 case L2CAP_CONF_REQ:
3487 err = l2cap_config_req(conn, cmd, cmd_len, data);
3488 break;
3490 case L2CAP_CONF_RSP:
3491 err = l2cap_config_rsp(conn, cmd, data);
3492 break;
3494 case L2CAP_DISCONN_REQ:
3495 err = l2cap_disconnect_req(conn, cmd, data);
3496 break;
3498 case L2CAP_DISCONN_RSP:
3499 err = l2cap_disconnect_rsp(conn, cmd, data);
3500 break;
3502 case L2CAP_ECHO_REQ:
3503 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3504 break;
3506 case L2CAP_ECHO_RSP:
3507 break;
3509 case L2CAP_INFO_REQ:
3510 err = l2cap_information_req(conn, cmd, data);
3511 break;
3513 case L2CAP_INFO_RSP:
3514 err = l2cap_information_rsp(conn, cmd, data);
3515 break;
3517 case L2CAP_CREATE_CHAN_REQ:
3518 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3519 break;
3521 case L2CAP_CREATE_CHAN_RSP:
3522 err = l2cap_create_channel_rsp(conn, cmd, data);
3523 break;
3525 case L2CAP_MOVE_CHAN_REQ:
3526 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3527 break;
3529 case L2CAP_MOVE_CHAN_RSP:
3530 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3531 break;
3533 case L2CAP_MOVE_CHAN_CFM:
3534 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3535 break;
3537 case L2CAP_MOVE_CHAN_CFM_RSP:
3538 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3539 break;
3541 default:
3542 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3543 err = -EINVAL;
3544 break;
3547 return err;
3550 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3551 struct l2cap_cmd_hdr *cmd, u8 *data)
3553 switch (cmd->code) {
3554 case L2CAP_COMMAND_REJ:
3555 return 0;
3557 case L2CAP_CONN_PARAM_UPDATE_REQ:
3558 return l2cap_conn_param_update_req(conn, cmd, data);
3560 case L2CAP_CONN_PARAM_UPDATE_RSP:
3561 return 0;
3563 default:
3564 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3565 return -EINVAL;
3569 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3570 struct sk_buff *skb)
3572 u8 *data = skb->data;
3573 int len = skb->len;
3574 struct l2cap_cmd_hdr cmd;
3575 int err;
3577 l2cap_raw_recv(conn, skb);
3579 while (len >= L2CAP_CMD_HDR_SIZE) {
3580 u16 cmd_len;
3581 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3582 data += L2CAP_CMD_HDR_SIZE;
3583 len -= L2CAP_CMD_HDR_SIZE;
3585 cmd_len = le16_to_cpu(cmd.len);
3587 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3589 if (cmd_len > len || !cmd.ident) {
3590 BT_DBG("corrupted command");
3591 break;
3594 if (conn->hcon->type == LE_LINK)
3595 err = l2cap_le_sig_cmd(conn, &cmd, data);
3596 else
3597 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3599 if (err) {
3600 struct l2cap_cmd_rej_unk rej;
3602 BT_ERR("Wrong link type (%d)", err);
3604 /* FIXME: Map err to a valid reason */
3605 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3606 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3609 data += cmd_len;
3610 len -= cmd_len;
3613 kfree_skb(skb);
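/* Illustrative sketch, not part of this file: the loop above peels 4-byte
 * command headers (code, ident, little-endian length) off the signaling
 * C-frame and hands each payload to the BR/EDR or LE dispatcher; several
 * commands may share one frame. The command codes below (0x08 echo request,
 * 0x09 echo response) follow the L2CAP specification; the payload bytes are
 * arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t frame[] = { 0x08, 0x01, 0x02, 0x00, 0xaa, 0xbb,
				  0x09, 0x02, 0x00, 0x00 };
	size_t off = 0;

	while (sizeof(frame) - off >= 4) {
		uint8_t code = frame[off];
		uint8_t ident = frame[off + 1];
		uint16_t len = frame[off + 2] | (frame[off + 3] << 8);

		off += 4;
		if (len > sizeof(frame) - off || !ident)
			break;	/* corrupted command, as in the loop above */

		printf("code 0x%02x ident %u len %u\n", code, ident, len);
		off += len;
	}
	return 0;
}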
3616 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3618 u16 our_fcs, rcv_fcs;
3619 int hdr_size;
3621 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3622 hdr_size = L2CAP_EXT_HDR_SIZE;
3623 else
3624 hdr_size = L2CAP_ENH_HDR_SIZE;
3626 if (chan->fcs == L2CAP_FCS_CRC16) {
3627 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3628 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3629 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3631 if (our_fcs != rcv_fcs)
3632 return -EBADMSG;
3634 return 0;
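/* Illustrative sketch, not part of this file: when the FCS option is in use,
 * the last two bytes of each ERTM/streaming PDU carry a CRC-16 computed over
 * the header and payload. l2cap_check_fcs() above trims those bytes and
 * recomputes the checksum with the kernel's crc16(); the bitwise routine
 * below uses the same 0xA001 (reflected 0x8005) polynomial as lib/crc16.c.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t crc16_update(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	/* An arbitrary header + payload; the sender appends the FCS (LE). */
	const uint8_t pdu[] = { 0x0a, 0x00, 0x40, 0x00, 'h', 'e', 'l', 'l', 'o' };
	uint16_t fcs = crc16_update(0, pdu, sizeof(pdu));
	uint8_t trailer[2] = { fcs & 0xff, fcs >> 8 };

	/* Receiver: recompute over everything before the trailer, compare. */
	uint16_t rcv_fcs = trailer[0] | (trailer[1] << 8);
	printf("fcs 0x%04x %s\n", fcs,
	       rcv_fcs == crc16_update(0, pdu, sizeof(pdu)) ? "ok" : "bad");
	return 0;
}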
3637 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3639 u32 control = 0;
3641 chan->frames_sent = 0;
3643 control |= __set_reqseq(chan, chan->buffer_seq);
3645 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3646 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3647 l2cap_send_sframe(chan, control);
3648 set_bit(CONN_RNR_SENT, &chan->conn_state);
3651 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3652 l2cap_retransmit_frames(chan);
3654 l2cap_ertm_send(chan);
3656 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3657 chan->frames_sent == 0) {
3658 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3659 l2cap_send_sframe(chan, control);
3663 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3665 struct sk_buff *next_skb;
3666 int tx_seq_offset, next_tx_seq_offset;
3668 bt_cb(skb)->tx_seq = tx_seq;
3669 bt_cb(skb)->sar = sar;
3671 next_skb = skb_peek(&chan->srej_q);
3673 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3675 while (next_skb) {
3676 if (bt_cb(next_skb)->tx_seq == tx_seq)
3677 return -EINVAL;
3679 next_tx_seq_offset = __seq_offset(chan,
3680 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3682 if (next_tx_seq_offset > tx_seq_offset) {
3683 __skb_queue_before(&chan->srej_q, next_skb, skb);
3684 return 0;
3687 if (skb_queue_is_last(&chan->srej_q, next_skb))
3688 next_skb = NULL;
3689 else
3690 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3693 __skb_queue_tail(&chan->srej_q, skb);
3695 return 0;
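/* Illustrative sketch, not part of this file: the SREJ queue above is kept
 * ordered by each frame's distance from buffer_seq, which __seq_offset()
 * computes modulo the sequence space (64 for the 6-bit enhanced control
 * field, 16384 for the 14-bit extended one). A minimal version of that
 * modular distance:
 */
#include <stdio.h>

static unsigned int seq_offset(unsigned int seq, unsigned int from,
			       unsigned int space)
{
	return (seq + space - from) % space;
}

int main(void)
{
	/* With buffer_seq == 60 and 6-bit sequence numbers, frame 2 is six
	 * frames ahead, not 58 behind, once the counter wraps. */
	printf("%u\n", seq_offset(2, 60, 64));	/* prints 6 */
	return 0;
}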
3698 static void append_skb_frag(struct sk_buff *skb,
3699 struct sk_buff *new_frag, struct sk_buff **last_frag)
3701 /* skb->len reflects data in skb as well as all fragments
3702 * skb->data_len reflects only data in fragments
3703 */
3704 if (!skb_has_frag_list(skb))
3705 skb_shinfo(skb)->frag_list = new_frag;
3707 new_frag->next = NULL;
3709 (*last_frag)->next = new_frag;
3710 *last_frag = new_frag;
3712 skb->len += new_frag->len;
3713 skb->data_len += new_frag->len;
3714 skb->truesize += new_frag->truesize;
3717 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3719 int err = -EINVAL;
3721 switch (__get_ctrl_sar(chan, control)) {
3722 case L2CAP_SAR_UNSEGMENTED:
3723 if (chan->sdu)
3724 break;
3726 err = chan->ops->recv(chan->data, skb);
3727 break;
3729 case L2CAP_SAR_START:
3730 if (chan->sdu)
3731 break;
3733 chan->sdu_len = get_unaligned_le16(skb->data);
3734 skb_pull(skb, L2CAP_SDULEN_SIZE);
3736 if (chan->sdu_len > chan->imtu) {
3737 err = -EMSGSIZE;
3738 break;
3741 if (skb->len >= chan->sdu_len)
3742 break;
3744 chan->sdu = skb;
3745 chan->sdu_last_frag = skb;
3747 skb = NULL;
3748 err = 0;
3749 break;
3751 case L2CAP_SAR_CONTINUE:
3752 if (!chan->sdu)
3753 break;
3755 append_skb_frag(chan->sdu, skb,
3756 &chan->sdu_last_frag);
3757 skb = NULL;
3759 if (chan->sdu->len >= chan->sdu_len)
3760 break;
3762 err = 0;
3763 break;
3765 case L2CAP_SAR_END:
3766 if (!chan->sdu)
3767 break;
3769 append_skb_frag(chan->sdu, skb,
3770 &chan->sdu_last_frag);
3771 skb = NULL;
3773 if (chan->sdu->len != chan->sdu_len)
3774 break;
3776 err = chan->ops->recv(chan->data, chan->sdu);
3778 if (!err) {
3779 /* Reassembly complete */
3780 chan->sdu = NULL;
3781 chan->sdu_last_frag = NULL;
3782 chan->sdu_len = 0;
3784 break;
3787 if (err) {
3788 kfree_skb(skb);
3789 kfree_skb(chan->sdu);
3790 chan->sdu = NULL;
3791 chan->sdu_last_frag = NULL;
3792 chan->sdu_len = 0;
3795 return err;
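/* Illustrative sketch, not part of this file: l2cap_reassemble_sdu() above
 * acts on the SAR bits of each I-frame. UNSEGMENTED frames are delivered
 * directly; a START frame carries a 2-byte little-endian SDU length and
 * opens reassembly; CONTINUE frames append; an END frame appends and
 * delivers once the collected length matches. The flat-buffer version below
 * (names and sizes made up) mirrors that state machine and its error paths.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum sar { SAR_UNSEGMENTED, SAR_START, SAR_CONTINUE, SAR_END };

struct reasm {
	uint8_t buf[1024];
	size_t  have;		/* bytes collected so far */
	size_t  sdu_len;	/* total announced by the START segment */
	int     active;
};

/* 1: complete SDU in r->buf, 0: need more, -1: protocol error. */
static int reassemble(struct reasm *r, enum sar sar,
		      const uint8_t *data, size_t len)
{
	switch (sar) {
	case SAR_UNSEGMENTED:
		if (r->active || len > sizeof(r->buf))
			return -1;
		memcpy(r->buf, data, len);
		r->have = len;
		return 1;
	case SAR_START:
		if (r->active || len < 2)
			return -1;
		r->sdu_len = data[0] | (data[1] << 8);
		data += 2;
		len -= 2;
		if (r->sdu_len > sizeof(r->buf) || len >= r->sdu_len)
			return -1;
		memcpy(r->buf, data, len);
		r->have = len;
		r->active = 1;
		return 0;
	case SAR_CONTINUE:
	case SAR_END:
		if (!r->active || r->have + len > r->sdu_len)
			return -1;
		memcpy(r->buf + r->have, data, len);
		r->have += len;
		if (sar == SAR_CONTINUE)
			return r->have >= r->sdu_len ? -1 : 0;
		r->active = 0;
		return r->have == r->sdu_len ? 1 : -1;
	}
	return -1;
}

int main(void)
{
	struct reasm r = { .active = 0 };
	const uint8_t start[] = { 5, 0, 'h', 'e' };	/* SDU length 5 */
	const uint8_t end[] = { 'l', 'l', 'o' };

	reassemble(&r, SAR_START, start, sizeof(start));
	if (reassemble(&r, SAR_END, end, sizeof(end)) == 1)
		printf("SDU: %.*s\n", (int)r.have, r.buf);
	return 0;
}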
3798 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3800 BT_DBG("chan %p, Enter local busy", chan);
3802 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3804 __set_ack_timer(chan);
3807 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3809 u32 control;
3811 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3812 goto done;
3814 control = __set_reqseq(chan, chan->buffer_seq);
3815 control |= __set_ctrl_poll(chan);
3816 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3817 l2cap_send_sframe(chan, control);
3818 chan->retry_count = 1;
3820 __clear_retrans_timer(chan);
3821 __set_monitor_timer(chan);
3823 set_bit(CONN_WAIT_F, &chan->conn_state);
3825 done:
3826 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3827 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3829 BT_DBG("chan %p, Exit local busy", chan);
3832 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3834 if (chan->mode == L2CAP_MODE_ERTM) {
3835 if (busy)
3836 l2cap_ertm_enter_local_busy(chan);
3837 else
3838 l2cap_ertm_exit_local_busy(chan);
3842 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3844 struct sk_buff *skb;
3845 u32 control;
3847 while ((skb = skb_peek(&chan->srej_q)) &&
3848 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3849 int err;
3851 if (bt_cb(skb)->tx_seq != tx_seq)
3852 break;
3854 skb = skb_dequeue(&chan->srej_q);
3855 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3856 err = l2cap_reassemble_sdu(chan, skb, control);
3858 if (err < 0) {
3859 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3860 break;
3863 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3864 tx_seq = __next_seq(chan, tx_seq);
3868 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3870 struct srej_list *l, *tmp;
3871 u32 control;
3873 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3874 if (l->tx_seq == tx_seq) {
3875 list_del(&l->list);
3876 kfree(l);
3877 return;
3879 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3880 control |= __set_reqseq(chan, l->tx_seq);
3881 l2cap_send_sframe(chan, control);
3882 list_del(&l->list);
3883 list_add_tail(&l->list, &chan->srej_l);
3887 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3889 struct srej_list *new;
3890 u32 control;
3892 while (tx_seq != chan->expected_tx_seq) {
3893 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3894 control |= __set_reqseq(chan, chan->expected_tx_seq);
3895 l2cap_send_sframe(chan, control);
3897 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3898 if (!new)
3899 return -ENOMEM;
3901 new->tx_seq = chan->expected_tx_seq;
3903 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3905 list_add_tail(&new->list, &chan->srej_l);
3908 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3910 return 0;
3913 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3915 u16 tx_seq = __get_txseq(chan, rx_control);
3916 u16 req_seq = __get_reqseq(chan, rx_control);
3917 u8 sar = __get_ctrl_sar(chan, rx_control);
3918 int tx_seq_offset, expected_tx_seq_offset;
3919 int num_to_ack = (chan->tx_win/6) + 1;
3920 int err = 0;
3922 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3923 tx_seq, rx_control);
3925 if (__is_ctrl_final(chan, rx_control) &&
3926 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3927 __clear_monitor_timer(chan);
3928 if (chan->unacked_frames > 0)
3929 __set_retrans_timer(chan);
3930 clear_bit(CONN_WAIT_F, &chan->conn_state);
3933 chan->expected_ack_seq = req_seq;
3934 l2cap_drop_acked_frames(chan);
3936 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3938 /* invalid tx_seq */
3939 if (tx_seq_offset >= chan->tx_win) {
3940 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3941 goto drop;
3944 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3945 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3946 l2cap_send_ack(chan);
3947 goto drop;
3950 if (tx_seq == chan->expected_tx_seq)
3951 goto expected;
3953 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3954 struct srej_list *first;
3956 first = list_first_entry(&chan->srej_l,
3957 struct srej_list, list);
3958 if (tx_seq == first->tx_seq) {
3959 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3960 l2cap_check_srej_gap(chan, tx_seq);
3962 list_del(&first->list);
3963 kfree(first);
3965 if (list_empty(&chan->srej_l)) {
3966 chan->buffer_seq = chan->buffer_seq_srej;
3967 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3968 l2cap_send_ack(chan);
3969 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3971 } else {
3972 struct srej_list *l;
3974 /* duplicated tx_seq */
3975 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3976 goto drop;
3978 list_for_each_entry(l, &chan->srej_l, list) {
3979 if (l->tx_seq == tx_seq) {
3980 l2cap_resend_srejframe(chan, tx_seq);
3981 return 0;
3985 err = l2cap_send_srejframe(chan, tx_seq);
3986 if (err < 0) {
3987 l2cap_send_disconn_req(chan->conn, chan, -err);
3988 return err;
3991 } else {
3992 expected_tx_seq_offset = __seq_offset(chan,
3993 chan->expected_tx_seq, chan->buffer_seq);
3995 /* duplicated tx_seq */
3996 if (tx_seq_offset < expected_tx_seq_offset)
3997 goto drop;
3999 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4001 BT_DBG("chan %p, Enter SREJ", chan);
4003 INIT_LIST_HEAD(&chan->srej_l);
4004 chan->buffer_seq_srej = chan->buffer_seq;
4006 __skb_queue_head_init(&chan->srej_q);
4007 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4009 /* Set P-bit only if there are some I-frames to ack. */
4010 if (__clear_ack_timer(chan))
4011 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4013 err = l2cap_send_srejframe(chan, tx_seq);
4014 if (err < 0) {
4015 l2cap_send_disconn_req(chan->conn, chan, -err);
4016 return err;
4019 return 0;
4021 expected:
4022 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4024 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4025 bt_cb(skb)->tx_seq = tx_seq;
4026 bt_cb(skb)->sar = sar;
4027 __skb_queue_tail(&chan->srej_q, skb);
4028 return 0;
4031 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4032 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4034 if (err < 0) {
4035 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4036 return err;
4039 if (__is_ctrl_final(chan, rx_control)) {
4040 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4041 l2cap_retransmit_frames(chan);
4045 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4046 if (chan->num_acked == num_to_ack - 1)
4047 l2cap_send_ack(chan);
4048 else
4049 __set_ack_timer(chan);
4051 return 0;
4053 drop:
4054 kfree_skb(skb);
4055 return 0;
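/* The four supervisory (S-frame) handlers below cover the ERTM control
 * protocol: RR acknowledges received I-frames (and answers polls), REJ asks
 * the peer to go back and retransmit everything from the rejected sequence
 * number, SREJ asks for a single missing frame, and RNR signals that the
 * peer's receiver is busy so transmission must pause.
 * l2cap_data_channel_sframe() dispatches on the supervisory bits of the
 * control field.
 */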
4058 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4060 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4061 __get_reqseq(chan, rx_control), rx_control);
4063 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4064 l2cap_drop_acked_frames(chan);
4066 if (__is_ctrl_poll(chan, rx_control)) {
4067 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4068 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4069 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4070 (chan->unacked_frames > 0))
4071 __set_retrans_timer(chan);
4073 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4074 l2cap_send_srejtail(chan);
4075 } else {
4076 l2cap_send_i_or_rr_or_rnr(chan);
4079 } else if (__is_ctrl_final(chan, rx_control)) {
4080 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4082 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4083 l2cap_retransmit_frames(chan);
4085 } else {
4086 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4087 (chan->unacked_frames > 0))
4088 __set_retrans_timer(chan);
4090 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4091 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4092 l2cap_send_ack(chan);
4093 else
4094 l2cap_ertm_send(chan);
4098 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4100 u16 tx_seq = __get_reqseq(chan, rx_control);
4102 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4104 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4106 chan->expected_ack_seq = tx_seq;
4107 l2cap_drop_acked_frames(chan);
4109 if (__is_ctrl_final(chan, rx_control)) {
4110 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4111 l2cap_retransmit_frames(chan);
4112 } else {
4113 l2cap_retransmit_frames(chan);
4115 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4116 set_bit(CONN_REJ_ACT, &chan->conn_state);
4119 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4121 u16 tx_seq = __get_reqseq(chan, rx_control);
4123 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4125 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4127 if (__is_ctrl_poll(chan, rx_control)) {
4128 chan->expected_ack_seq = tx_seq;
4129 l2cap_drop_acked_frames(chan);
4131 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4132 l2cap_retransmit_one_frame(chan, tx_seq);
4134 l2cap_ertm_send(chan);
4136 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4137 chan->srej_save_reqseq = tx_seq;
4138 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4140 } else if (__is_ctrl_final(chan, rx_control)) {
4141 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4142 chan->srej_save_reqseq == tx_seq)
4143 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4144 else
4145 l2cap_retransmit_one_frame(chan, tx_seq);
4146 } else {
4147 l2cap_retransmit_one_frame(chan, tx_seq);
4148 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4149 chan->srej_save_reqseq = tx_seq;
4150 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4155 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4157 u16 tx_seq = __get_reqseq(chan, rx_control);
4159 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4161 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4162 chan->expected_ack_seq = tx_seq;
4163 l2cap_drop_acked_frames(chan);
4165 if (__is_ctrl_poll(chan, rx_control))
4166 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4168 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4169 __clear_retrans_timer(chan);
4170 if (__is_ctrl_poll(chan, rx_control))
4171 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4172 return;
4175 if (__is_ctrl_poll(chan, rx_control)) {
4176 l2cap_send_srejtail(chan);
4177 } else {
4178 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4179 l2cap_send_sframe(chan, rx_control);
4183 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4185 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4187 if (__is_ctrl_final(chan, rx_control) &&
4188 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4189 __clear_monitor_timer(chan);
4190 if (chan->unacked_frames > 0)
4191 __set_retrans_timer(chan);
4192 clear_bit(CONN_WAIT_F, &chan->conn_state);
4195 switch (__get_ctrl_super(chan, rx_control)) {
4196 case L2CAP_SUPER_RR:
4197 l2cap_data_channel_rrframe(chan, rx_control);
4198 break;
4200 case L2CAP_SUPER_REJ:
4201 l2cap_data_channel_rejframe(chan, rx_control);
4202 break;
4204 case L2CAP_SUPER_SREJ:
4205 l2cap_data_channel_srejframe(chan, rx_control);
4206 break;
4208 case L2CAP_SUPER_RNR:
4209 l2cap_data_channel_rnrframe(chan, rx_control);
4210 break;
4213 kfree_skb(skb);
4214 return 0;
4217 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4219 u32 control;
4220 u16 req_seq;
4221 int len, next_tx_seq_offset, req_seq_offset;
4223 control = __get_control(chan, skb->data);
4224 skb_pull(skb, __ctrl_size(chan));
4225 len = skb->len;
2227 /*
2228 * We can just drop the corrupted I-frame here.
2229 * The receiver will miss it, start the proper recovery
2230 * procedures and ask for retransmission.
2231 */
2232 if (l2cap_check_fcs(chan, skb))
4233 goto drop;
4235 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4236 len -= L2CAP_SDULEN_SIZE;
4238 if (chan->fcs == L2CAP_FCS_CRC16)
4239 len -= L2CAP_FCS_SIZE;
4241 if (len > chan->mps) {
4242 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4243 goto drop;
4246 req_seq = __get_reqseq(chan, control);
4248 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4250 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4251 chan->expected_ack_seq);
4253 /* check for invalid req-seq */
4254 if (req_seq_offset > next_tx_seq_offset) {
4255 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4256 goto drop;
4259 if (!__is_sframe(chan, control)) {
4260 if (len < 0) {
4261 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4262 goto drop;
4265 l2cap_data_channel_iframe(chan, control, skb);
4266 } else {
4267 if (len != 0) {
4268 BT_ERR("Unexpected S-frame payload length %d", len);
4269 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4270 goto drop;
4273 l2cap_data_channel_sframe(chan, control, skb);
4276 return 0;
4278 drop:
4279 kfree_skb(skb);
4280 return 0;
4283 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4285 struct l2cap_chan *chan;
4286 u32 control;
4287 u16 tx_seq;
4288 int len;
4290 chan = l2cap_get_chan_by_scid(conn, cid);
4291 if (!chan) {
4292 BT_DBG("unknown cid 0x%4.4x", cid);
4293 /* Drop packet and return */
4294 kfree_skb(skb);
4295 return 0;
4298 l2cap_chan_lock(chan);
4300 BT_DBG("chan %p, len %d", chan, skb->len);
4302 if (chan->state != BT_CONNECTED)
4303 goto drop;
4305 switch (chan->mode) {
4306 case L2CAP_MODE_BASIC:
4307 /* If the socket recv buffer overflows we drop data here,
4308 * which is *bad* because L2CAP has to be reliable.
4309 * But we don't have any other choice: L2CAP doesn't
4310 * provide a flow control mechanism. */
4312 if (chan->imtu < skb->len)
4313 goto drop;
4315 if (!chan->ops->recv(chan->data, skb))
4316 goto done;
4317 break;
4319 case L2CAP_MODE_ERTM:
4320 l2cap_ertm_data_rcv(chan, skb);
4322 goto done;
4324 case L2CAP_MODE_STREAMING:
4325 control = __get_control(chan, skb->data);
4326 skb_pull(skb, __ctrl_size(chan));
4327 len = skb->len;
4329 if (l2cap_check_fcs(chan, skb))
4330 goto drop;
4332 if (__is_sar_start(chan, control))
4333 len -= L2CAP_SDULEN_SIZE;
4335 if (chan->fcs == L2CAP_FCS_CRC16)
4336 len -= L2CAP_FCS_SIZE;
4338 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4339 goto drop;
4341 tx_seq = __get_txseq(chan, control);
4343 if (chan->expected_tx_seq != tx_seq) {
4344 /* Frame(s) missing - must discard partial SDU */
4345 kfree_skb(chan->sdu);
4346 chan->sdu = NULL;
4347 chan->sdu_last_frag = NULL;
4348 chan->sdu_len = 0;
4350 /* TODO: Notify userland of missing data */
4353 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4355 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4356 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4358 goto done;
4360 default:
4361 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4362 break;
4365 drop:
4366 kfree_skb(skb);
4368 done:
4369 l2cap_chan_unlock(chan);
4371 return 0;
4374 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4376 struct l2cap_chan *chan;
4378 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4379 if (!chan)
4380 goto drop;
4382 BT_DBG("chan %p, len %d", chan, skb->len);
4384 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4385 goto drop;
4387 if (chan->imtu < skb->len)
4388 goto drop;
4390 if (!chan->ops->recv(chan->data, skb))
4391 return 0;
4393 drop:
4394 kfree_skb(skb);
4396 return 0;
4399 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4401 struct l2cap_chan *chan;
4403 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4404 if (!chan)
4405 goto drop;
4407 BT_DBG("chan %p, len %d", chan, skb->len);
4409 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4410 goto drop;
4412 if (chan->imtu < skb->len)
4413 goto drop;
4415 if (!chan->ops->recv(chan->data, skb))
4416 return 0;
4418 drop:
4419 kfree_skb(skb);
4421 return 0;
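/* l2cap_recv_frame() below dispatches a reassembled frame on its CID. The
 * fixed CIDs (per the Bluetooth core specification) are 0x0001 BR/EDR
 * signaling, 0x0002 connectionless data, 0x0004 the ATT channel on LE,
 * 0x0005 LE signaling and 0x0006 the Security Manager Protocol; anything
 * else is treated as a dynamically allocated connection-oriented channel
 * and handed to l2cap_data_channel().
 */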
4424 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4426 struct l2cap_hdr *lh = (void *) skb->data;
4427 u16 cid, len;
4428 __le16 psm;
4430 skb_pull(skb, L2CAP_HDR_SIZE);
4431 cid = __le16_to_cpu(lh->cid);
4432 len = __le16_to_cpu(lh->len);
4434 if (len != skb->len) {
4435 kfree_skb(skb);
4436 return;
4439 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4441 switch (cid) {
4442 case L2CAP_CID_LE_SIGNALING:
4443 case L2CAP_CID_SIGNALING:
4444 l2cap_sig_channel(conn, skb);
4445 break;
4447 case L2CAP_CID_CONN_LESS:
4448 psm = get_unaligned_le16(skb->data);
4449 skb_pull(skb, 2);
4450 l2cap_conless_channel(conn, psm, skb);
4451 break;
4453 case L2CAP_CID_LE_DATA:
4454 l2cap_att_channel(conn, cid, skb);
4455 break;
4457 case L2CAP_CID_SMP:
4458 if (smp_sig_channel(conn, skb))
4459 l2cap_conn_del(conn->hcon, EACCES);
4460 break;
4462 default:
4463 l2cap_data_channel(conn, cid, skb);
4464 break;
4468 /* ---- L2CAP interface with lower layer (HCI) ---- */
4470 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4472 int exact = 0, lm1 = 0, lm2 = 0;
4473 struct l2cap_chan *c;
4475 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4477 /* Find listening sockets and check their link_mode */
4478 read_lock(&chan_list_lock);
4479 list_for_each_entry(c, &chan_list, global_l) {
4480 struct sock *sk = c->sk;
4482 if (c->state != BT_LISTEN)
4483 continue;
4485 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4486 lm1 |= HCI_LM_ACCEPT;
4487 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4488 lm1 |= HCI_LM_MASTER;
4489 exact++;
4490 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4491 lm2 |= HCI_LM_ACCEPT;
4492 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4493 lm2 |= HCI_LM_MASTER;
4496 read_unlock(&chan_list_lock);
4498 return exact ? lm1 : lm2;
4501 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4503 struct l2cap_conn *conn;
4505 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4507 if (!status) {
4508 conn = l2cap_conn_add(hcon, status);
4509 if (conn)
4510 l2cap_conn_ready(conn);
4511 } else
4512 l2cap_conn_del(hcon, bt_to_errno(status));
4514 return 0;
4517 int l2cap_disconn_ind(struct hci_conn *hcon)
4519 struct l2cap_conn *conn = hcon->l2cap_data;
4521 BT_DBG("hcon %p", hcon);
4523 if (!conn)
4524 return HCI_ERROR_REMOTE_USER_TERM;
4525 return conn->disc_reason;
4528 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4530 BT_DBG("hcon %p reason %d", hcon, reason);
4532 l2cap_conn_del(hcon, bt_to_errno(reason));
4533 return 0;
4536 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4538 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4539 return;
4541 if (encrypt == 0x00) {
4542 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4543 __clear_chan_timer(chan);
4544 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4545 } else if (chan->sec_level == BT_SECURITY_HIGH)
4546 l2cap_chan_close(chan, ECONNREFUSED);
4547 } else {
4548 if (chan->sec_level == BT_SECURITY_MEDIUM)
4549 __clear_chan_timer(chan);
4553 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4555 struct l2cap_conn *conn = hcon->l2cap_data;
4556 struct l2cap_chan *chan;
4558 if (!conn)
4559 return 0;
4561 BT_DBG("conn %p", conn);
4563 if (hcon->type == LE_LINK) {
4564 smp_distribute_keys(conn, 0);
4565 cancel_delayed_work(&conn->security_timer);
4568 mutex_lock(&conn->chan_lock);
4570 list_for_each_entry(chan, &conn->chan_l, list) {
4571 l2cap_chan_lock(chan);
4573 BT_DBG("chan->scid %d", chan->scid);
4575 if (chan->scid == L2CAP_CID_LE_DATA) {
4576 if (!status && encrypt) {
4577 chan->sec_level = hcon->sec_level;
4578 l2cap_chan_ready(chan);
4581 l2cap_chan_unlock(chan);
4582 continue;
4585 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4586 l2cap_chan_unlock(chan);
4587 continue;
4590 if (!status && (chan->state == BT_CONNECTED ||
4591 chan->state == BT_CONFIG)) {
4592 l2cap_check_encryption(chan, encrypt);
4593 l2cap_chan_unlock(chan);
4594 continue;
4597 if (chan->state == BT_CONNECT) {
4598 if (!status) {
4599 l2cap_send_conn_req(chan);
4600 } else {
4601 __clear_chan_timer(chan);
4602 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4604 } else if (chan->state == BT_CONNECT2) {
4605 struct sock *sk = chan->sk;
4606 struct l2cap_conn_rsp rsp;
4607 __u16 res, stat;
4609 lock_sock(sk);
4611 if (!status) {
4612 if (bt_sk(sk)->defer_setup) {
4613 struct sock *parent = bt_sk(sk)->parent;
4614 res = L2CAP_CR_PEND;
4615 stat = L2CAP_CS_AUTHOR_PEND;
4616 if (parent)
4617 parent->sk_data_ready(parent, 0);
4618 } else {
4619 __l2cap_state_change(chan, BT_CONFIG);
4620 res = L2CAP_CR_SUCCESS;
4621 stat = L2CAP_CS_NO_INFO;
4623 } else {
4624 __l2cap_state_change(chan, BT_DISCONN);
4625 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4626 res = L2CAP_CR_SEC_BLOCK;
4627 stat = L2CAP_CS_NO_INFO;
4630 release_sock(sk);
4632 rsp.scid = cpu_to_le16(chan->dcid);
4633 rsp.dcid = cpu_to_le16(chan->scid);
4634 rsp.result = cpu_to_le16(res);
4635 rsp.status = cpu_to_le16(stat);
4636 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4637 sizeof(rsp), &rsp);
4640 l2cap_chan_unlock(chan);
4643 mutex_unlock(&conn->chan_lock);
4645 return 0;
4648 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4650 struct l2cap_conn *conn = hcon->l2cap_data;
4652 if (!conn)
4653 conn = l2cap_conn_add(hcon, 0);
4655 if (!conn)
4656 goto drop;
4658 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4660 if (!(flags & ACL_CONT)) {
4661 struct l2cap_hdr *hdr;
4662 struct l2cap_chan *chan;
4663 u16 cid;
4664 int len;
4666 if (conn->rx_len) {
4667 BT_ERR("Unexpected start frame (len %d)", skb->len);
4668 kfree_skb(conn->rx_skb);
4669 conn->rx_skb = NULL;
4670 conn->rx_len = 0;
4671 l2cap_conn_unreliable(conn, ECOMM);
4674 /* A start fragment always begins with the Basic L2CAP header */
4675 if (skb->len < L2CAP_HDR_SIZE) {
4676 BT_ERR("Frame is too short (len %d)", skb->len);
4677 l2cap_conn_unreliable(conn, ECOMM);
4678 goto drop;
4681 hdr = (struct l2cap_hdr *) skb->data;
4682 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4683 cid = __le16_to_cpu(hdr->cid);
4685 if (len == skb->len) {
4686 /* Complete frame received */
4687 l2cap_recv_frame(conn, skb);
4688 return 0;
4691 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4693 if (skb->len > len) {
4694 BT_ERR("Frame is too long (len %d, expected len %d)",
4695 skb->len, len);
4696 l2cap_conn_unreliable(conn, ECOMM);
4697 goto drop;
4700 chan = l2cap_get_chan_by_scid(conn, cid);
4702 if (chan && chan->sk) {
4703 struct sock *sk = chan->sk;
4704 lock_sock(sk);
4706 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4707 BT_ERR("Frame exceeding recv MTU (len %d, "
4708 "MTU %d)", len,
4709 chan->imtu);
4710 release_sock(sk);
4711 l2cap_conn_unreliable(conn, ECOMM);
4712 goto drop;
4714 release_sock(sk);
4717 /* Allocate skb for the complete frame (with header) */
4718 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4719 if (!conn->rx_skb)
4720 goto drop;
4722 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4723 skb->len);
4724 conn->rx_len = len - skb->len;
4725 } else {
4726 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4728 if (!conn->rx_len) {
4729 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4730 l2cap_conn_unreliable(conn, ECOMM);
4731 goto drop;
4734 if (skb->len > conn->rx_len) {
4735 BT_ERR("Fragment is too long (len %d, expected %d)",
4736 skb->len, conn->rx_len);
4737 kfree_skb(conn->rx_skb);
4738 conn->rx_skb = NULL;
4739 conn->rx_len = 0;
4740 l2cap_conn_unreliable(conn, ECOMM);
4741 goto drop;
4744 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4745 skb->len);
4746 conn->rx_len -= skb->len;
4748 if (!conn->rx_len) {
4749 /* Complete frame received */
4750 l2cap_recv_frame(conn, conn->rx_skb);
4751 conn->rx_skb = NULL;
4755 drop:
4756 kfree_skb(skb);
4757 return 0;
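/* Illustrative sketch, not part of this file: HCI may deliver one L2CAP
 * frame as a start fragment followed by continuation fragments (ACL_CONT).
 * The start fragment carries the basic header, whose length field tells
 * l2cap_recv_acldata() above how many more bytes to collect in conn->rx_skb
 * before the frame is complete. A flat-buffer version of that accounting
 * (names and sizes made up):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_SIZE 4	/* basic L2CAP header: len (2, LE) + cid (2, LE) */

struct acl_reasm {
	uint8_t frame[256];
	size_t  have;
	size_t  want;	/* total frame size announced by the start fragment */
};

static void rx_fragment(struct acl_reasm *r, int start,
			const uint8_t *data, size_t len)
{
	if (start) {
		if (len < HDR_SIZE)
			return;	/* frame too short, as above */
		r->want = (data[0] | (data[1] << 8)) + HDR_SIZE;
		r->have = 0;
	} else if (!r->want) {
		printf("unexpected continuation frame\n");
		return;
	}
	if (r->want > sizeof(r->frame) || r->have + len > r->want) {
		printf("fragment too long\n");
		r->want = r->have = 0;
		return;
	}
	memcpy(r->frame + r->have, data, len);
	r->have += len;
	if (r->have == r->want) {
		printf("complete frame: %zu bytes on cid 0x%04x\n", r->have,
		       r->frame[2] | (r->frame[3] << 8));
		r->want = r->have = 0;
	}
}

int main(void)
{
	struct acl_reasm r = { .want = 0 };
	/* A 6-byte payload on CID 0x0040, split across two ACL fragments. */
	const uint8_t frag1[] = { 0x06, 0x00, 0x40, 0x00, 'a', 'b' };
	const uint8_t frag2[] = { 'c', 'd', 'e', 'f' };

	rx_fragment(&r, 1, frag1, sizeof(frag1));
	rx_fragment(&r, 0, frag2, sizeof(frag2));
	return 0;
}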
4760 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4762 struct l2cap_chan *c;
4764 read_lock(&chan_list_lock);
4766 list_for_each_entry(c, &chan_list, global_l) {
4767 struct sock *sk = c->sk;
4769 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4770 batostr(&bt_sk(sk)->src),
4771 batostr(&bt_sk(sk)->dst),
4772 c->state, __le16_to_cpu(c->psm),
4773 c->scid, c->dcid, c->imtu, c->omtu,
4774 c->sec_level, c->mode);
4777 read_unlock(&chan_list_lock);
4779 return 0;
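/* The debugfs file prints one line per channel in chan_list, in the column
 * order of the format string above: source and destination addresses,
 * state, PSM, source CID, destination CID, incoming MTU, outgoing MTU,
 * security level and channel mode.
 */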
4782 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4784 return single_open(file, l2cap_debugfs_show, inode->i_private);
4787 static const struct file_operations l2cap_debugfs_fops = {
4788 .open = l2cap_debugfs_open,
4789 .read = seq_read,
4790 .llseek = seq_lseek,
4791 .release = single_release,
4794 static struct dentry *l2cap_debugfs;
4796 int __init l2cap_init(void)
4798 int err;
4800 err = l2cap_init_sockets();
4801 if (err < 0)
4802 return err;
4804 if (bt_debugfs) {
4805 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4806 bt_debugfs, NULL, &l2cap_debugfs_fops);
4807 if (!l2cap_debugfs)
4808 BT_ERR("Failed to create L2CAP debug file");
4811 return 0;
4814 void l2cap_exit(void)
4816 debugfs_remove(l2cap_debugfs);
4817 l2cap_cleanup_sockets();
4820 module_param(disable_ertm, bool, 0644);
4821 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");