/* Source: net/bluetooth/l2cap_core.c (linux-2.6/btrfs-unstable.git)
 * Commit: "Bluetooth: Move srej and busy queues to struct l2cap_chan"
 * Blob: 06c505b1476d8720a38dc398d59b7168b11776c5
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Set non-zero to disable ERTM/streaming mode support (module option). */
58 int disable_ertm;
/* Locally supported feature mask and fixed-channel bitmap advertised to peers. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue servicing deferred ERTM "local busy" handling (l2cap_busy_work). */
63 static struct workqueue_struct *_busy_wq;
/* Global registry of L2CAP sockets, protected by its own rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Find the channel on @conn whose destination CID matches @cid, or NULL.
 * NOTE(review): walks conn->chan_l unlocked — caller presumably holds
 * conn->chan_lock (the locked wrappers below do); confirm at call sites. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
80 struct l2cap_chan *c;
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
85 return c;
87 return NULL;
/* Find the channel on @conn whose source CID matches @cid, or NULL.
 * Same locking convention as __l2cap_get_chan_by_dcid(). */
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 struct l2cap_chan *c;
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
98 return c;
100 return NULL;
103 /* Find channel with given SCID.
104 * Returns locked socket */
/* Takes conn->chan_lock for the lookup and, on success, returns with the
 * channel's socket bh-locked; caller must bh_unlock_sock(c->sk). */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
111 if (c)
112 bh_lock_sock(c->sk);
113 read_unlock(&conn->chan_lock);
114 return c;
/* Find the channel on @conn with a pending command identifier @ident, or NULL.
 * Caller presumably holds conn->chan_lock (see locked wrapper below). */
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 struct l2cap_chan *c;
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
123 return c;
125 return NULL;
/* Locked variant of __l2cap_get_chan_by_ident(): on success returns with the
 * channel's socket bh-locked; caller must unlock it. */
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
134 if (c)
135 bh_lock_sock(c->sk);
136 read_unlock(&conn->chan_lock);
137 return c;
/* Allocate the first free dynamic source CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Returns 0 when exhausted. */
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
142 u16 cid = L2CAP_CID_DYN_START;
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
146 return cid;
149 return 0;
/* Allocate a zeroed l2cap_chan bound to @sk. GFP_ATOMIC because callers run
 * in softirq/locked context. Returns NULL on allocation failure; freed in
 * l2cap_chan_del(). */
152 static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
154 struct l2cap_chan *chan;
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
157 if (!chan)
158 return NULL;
160 chan->sk = sk;
162 return chan;
/* Attach @chan to @conn: pick CIDs/MTU per socket type, take a socket ref,
 * and link the channel into conn->chan_l. Caller holds conn->chan_lock
 * for writing (see l2cap_chan_add()). */
165 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
167 struct sock *sk = chan->sk;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = remote-user-terminated; default disconnect reason until overridden. */
172 conn->disc_reason = 0x13;
174 l2cap_pi(sk)->conn = conn;
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
177 if (conn->hcon->type == LE_LINK) {
178 /* LE connection */
179 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
182 } else {
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
192 } else {
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* Hold a socket reference for the channel list; dropped in l2cap_chan_del(). */
199 sock_hold(sk);
201 list_add(&chan->list, &conn->chan_l);
204 /* Delete channel.
205 * Must be called on the locked socket. */
/* Unlinks @chan from its connection, marks the socket BT_CLOSED/ZAPPED,
 * stops all ERTM timers and purges the tx/srej/busy queues, then frees
 * the channel. @err (errno) is reported through sk->sk_err if non-zero. */
206 void l2cap_chan_del(struct l2cap_chan *chan, int err)
208 struct sock *sk = chan->sk;
209 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
210 struct sock *parent = bt_sk(sk)->parent;
212 l2cap_sock_clear_timer(sk);
214 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
216 if (conn) {
217 /* Delete from channel list */
218 write_lock_bh(&conn->chan_lock);
219 list_del(&chan->list);
220 write_unlock_bh(&conn->chan_lock);
/* Drop the ref taken by sock_hold() in __l2cap_chan_add(). */
221 __sock_put(sk);
223 l2cap_pi(sk)->conn = NULL;
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
230 if (err)
231 sk->sk_err = err;
233 if (parent) {
/* Pending accept: detach from the listener and wake it. */
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
236 } else
237 sk->sk_state_change(sk);
239 skb_queue_purge(TX_QUEUE(sk));
241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
242 struct srej_list *l, *tmp;
244 del_timer(&chan->retrans_timer);
245 del_timer(&chan->monitor_timer);
246 del_timer(&chan->ack_timer);
248 skb_queue_purge(&chan->srej_q);
249 skb_queue_purge(&chan->busy_q);
/* Free every outstanding SREJ bookkeeping entry. */
251 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
252 list_del(&l->list);
253 kfree(l);
257 kfree(chan);
/* Map socket type / PSM / security level to the HCI authentication
 * requirement used when establishing the ACL link. Side effect: SDP
 * sockets (PSM 0x0001) at BT_SECURITY_LOW are promoted to BT_SECURITY_SDP. */
260 static inline u8 l2cap_get_auth_type(struct sock *sk)
262 if (sk->sk_type == SOCK_RAW) {
263 switch (l2cap_pi(sk)->sec_level) {
264 case BT_SECURITY_HIGH:
265 return HCI_AT_DEDICATED_BONDING_MITM;
266 case BT_SECURITY_MEDIUM:
267 return HCI_AT_DEDICATED_BONDING;
268 default:
269 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never require bonding for it. */
271 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
272 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
273 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
275 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
276 return HCI_AT_NO_BONDING_MITM;
277 else
278 return HCI_AT_NO_BONDING;
279 } else {
280 switch (l2cap_pi(sk)->sec_level) {
281 case BT_SECURITY_HIGH:
282 return HCI_AT_GENERAL_BONDING_MITM;
283 case BT_SECURITY_MEDIUM:
284 return HCI_AT_GENERAL_BONDING;
285 default:
286 return HCI_AT_NO_BONDING;
291 /* Service level security */
/* Ask the HCI layer to enforce the socket's security level on the
 * underlying ACL link; returns the hci_conn_security() result. */
292 static inline int l2cap_check_security(struct sock *sk)
294 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
295 __u8 auth_type;
297 auth_type = l2cap_get_auth_type(sk);
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
300 auth_type);
/* Return the next signalling-command identifier for @conn, wrapping within
 * the kernel-reserved range 1..128 (see comment below). Serialized by
 * conn->lock. */
303 u8 l2cap_get_ident(struct l2cap_conn *conn)
305 u8 id;
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
315 if (++conn->tx_ident > 128)
316 conn->tx_ident = 1;
318 id = conn->tx_ident;
320 spin_unlock_bh(&conn->lock);
322 return id;
/* Build and transmit an L2CAP signalling command on @conn. Silently drops
 * the command if the skb cannot be built. Uses a non-flushable ACL start
 * fragment when the controller supports it. */
325 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 u8 flags;
330 BT_DBG("code 0x%2.2x", code);
332 if (!skb)
333 return;
335 if (lmp_no_flush_capable(conn->hcon->hdev))
336 flags = ACL_START_NO_FLUSH;
337 else
338 flags = ACL_START;
340 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM S-frame with the given @control field. Consumes
 * the pending F-bit/P-bit flags from chan->conn_state, appends FCS when
 * the channel uses CRC16, and no-ops unless the socket is BT_CONNECTED. */
343 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
345 struct sk_buff *skb;
346 struct l2cap_hdr *lh;
347 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
348 struct l2cap_conn *conn = pi->conn;
/* l2cap_pinfo embeds the sock, so pi casts back to the socket. */
349 struct sock *sk = (struct sock *)pi;
350 int count, hlen = L2CAP_HDR_SIZE + 2;
351 u8 flags;
353 if (sk->sk_state != BT_CONNECTED)
354 return;
356 if (pi->fcs == L2CAP_FCS_CRC16)
357 hlen += 2;
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
364 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
369 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
375 if (!skb)
376 return;
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
383 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header + control (everything before the FCS field itself). */
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 if (lmp_no_flush_capable(conn->hcon->hdev))
389 flags = ACL_START_NO_FLUSH;
390 else
391 flags = ACL_START;
393 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send a Receiver-Ready, or Receiver-Not-Ready when the local side is busy
 * (also recording that an RNR was sent), acknowledging up to buffer_seq. */
396 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
398 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY;
400 chan->conn_state |= L2CAP_CONN_RNR_SENT;
401 } else
402 control |= L2CAP_SUPER_RCV_READY;
404 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
406 l2cap_send_sframe(chan, control);
/* True when no Connect Request is already outstanding for this socket. */
409 static inline int __l2cap_no_conn_pending(struct sock *sk)
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a Connect Request (subject to security and no pending
 * connect); otherwise first issue an Information Request for the feature
 * mask and arm the info timer. */
414 static void l2cap_do_start(struct l2cap_chan *chan)
416 struct sock *sk = chan->sk;
417 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
419 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange in flight: l2cap_conn_start() resumes us when done. */
420 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
421 return;
423 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
424 struct l2cap_conn_req req;
425 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
426 req.psm = l2cap_pi(sk)->psm;
428 chan->ident = l2cap_get_ident(conn);
429 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
431 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
432 sizeof(req), &req);
434 } else {
435 struct l2cap_info_req req;
436 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
438 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
439 conn->info_ident = l2cap_get_ident(conn);
441 mod_timer(&conn->info_timer, jiffies +
442 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
444 l2cap_send_cmd(conn, conn->info_ident,
445 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero iff @mode (ERTM or streaming) is supported by both the remote
 * feature mask and our local one (which includes ERTM/streaming unless
 * disable_ertm is set). Basic mode falls through to 0. */
449 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
451 u32 local_feat_mask = l2cap_feat_mask;
452 if (!disable_ertm)
453 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
455 switch (mode) {
456 case L2CAP_MODE_ERTM:
457 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
458 case L2CAP_MODE_STREAMING:
459 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
460 default:
461 return 0x00;
/* Tear down @chan: purge the tx queue, stop ERTM timers, send a Disconnect
 * Request, and move the socket to BT_DISCONN with sk_err = @err. No-op when
 * @conn is NULL. */
465 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
467 struct sock *sk;
468 struct l2cap_disconn_req req;
470 if (!conn)
471 return;
473 sk = chan->sk;
475 skb_queue_purge(TX_QUEUE(sk));
477 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
478 del_timer(&chan->retrans_timer);
479 del_timer(&chan->monitor_timer);
480 del_timer(&chan->ack_timer);
483 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
484 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
485 l2cap_send_cmd(conn, l2cap_get_ident(conn),
486 L2CAP_DISCONN_REQ, sizeof(req), &req);
488 sk->sk_state = BT_DISCONN;
489 sk->sk_err = err;
492 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn after the feature exchange completes:
 * BT_CONNECT channels get a Connect Request (or are closed if their mode
 * is unsupported and STATE2_DEVICE is set); BT_CONNECT2 channels get a
 * Connect Response and, when ready, the first Configure Request. */
493 static void l2cap_conn_start(struct l2cap_conn *conn)
495 struct l2cap_chan *chan, *tmp;
497 BT_DBG("conn %p", conn);
499 read_lock(&conn->chan_lock);
501 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
502 struct sock *sk = chan->sk;
504 bh_lock_sock(sk);
506 if (sk->sk_type != SOCK_SEQPACKET &&
507 sk->sk_type != SOCK_STREAM) {
508 bh_unlock_sock(sk);
509 continue;
512 if (sk->sk_state == BT_CONNECT) {
513 struct l2cap_conn_req req;
515 if (!l2cap_check_security(sk) ||
516 !__l2cap_no_conn_pending(sk)) {
517 bh_unlock_sock(sk);
518 continue;
521 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
522 conn->feat_mask)
523 && l2cap_pi(sk)->conf_state &
524 L2CAP_CONF_STATE2_DEVICE) {
525 /* __l2cap_sock_close() calls list_del(chan)
526 * so release the lock */
/* NOTE(review): lock taken with read_lock() at entry but dropped and
 * re-taken here with the _bh variants — mismatched; verify against the
 * upstream history before touching this locking. */
527 read_unlock_bh(&conn->chan_lock);
528 __l2cap_sock_close(sk, ECONNRESET);
529 read_lock_bh(&conn->chan_lock);
530 bh_unlock_sock(sk);
531 continue;
534 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
535 req.psm = l2cap_pi(sk)->psm;
537 chan->ident = l2cap_get_ident(conn);
538 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
540 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
541 sizeof(req), &req);
543 } else if (sk->sk_state == BT_CONNECT2) {
544 struct l2cap_conn_rsp rsp;
545 char buf[128];
546 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
547 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
549 if (l2cap_check_security(sk)) {
550 if (bt_sk(sk)->defer_setup) {
/* Deferred setup: answer "pending/authorization" and wake the listener. */
551 struct sock *parent = bt_sk(sk)->parent;
552 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
553 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
554 parent->sk_data_ready(parent, 0);
556 } else {
557 sk->sk_state = BT_CONFIG;
558 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
559 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
561 } else {
562 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
563 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
566 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
567 sizeof(rsp), &rsp);
569 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
570 rsp.result != L2CAP_CR_SUCCESS) {
571 bh_unlock_sock(sk);
572 continue;
575 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
576 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
577 l2cap_build_conf_req(chan, buf), buf);
578 chan->num_conf_req++;
581 bh_unlock_sock(sk);
584 read_unlock(&conn->chan_lock);
587 /* Find socket with cid and source bdaddr.
588 * Returns closest match, locked.
/* Exact source-address match wins; otherwise a BDADDR_ANY socket in the
 * right state is the fallback. The returned socket is bh-locked. */
590 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
592 struct sock *s, *sk = NULL, *sk1 = NULL;
593 struct hlist_node *node;
595 read_lock(&l2cap_sk_list.lock);
597 sk_for_each(sk, node, &l2cap_sk_list.head) {
598 if (state && sk->sk_state != state)
599 continue;
601 if (l2cap_pi(sk)->scid == cid) {
602 /* Exact match. */
603 if (!bacmp(&bt_sk(sk)->src, src))
604 break;
606 /* Closest match */
607 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
608 sk1 = sk;
/* node != NULL means the loop broke on an exact match. */
611 s = node ? sk : sk1;
612 if (s)
613 bh_lock_sock(s);
614 read_unlock(&l2cap_sk_list.lock);
616 return s;
/* Incoming LE connection: find the listener on the LE data CID, spawn a
 * child socket + channel, attach it to @conn and mark it BT_CONNECTED.
 * The success path deliberately falls through to "clean:" to release the
 * bh-locked parent returned by l2cap_get_sock_by_scid(). */
619 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
621 struct sock *parent, *uninitialized_var(sk);
622 struct l2cap_chan *chan;
624 BT_DBG("");
626 /* Check if we have socket listening on cid */
627 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
628 conn->src);
629 if (!parent)
630 return;
632 /* Check for backlog size */
633 if (sk_acceptq_is_full(parent)) {
634 BT_DBG("backlog full %d", parent->sk_ack_backlog);
635 goto clean;
638 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
639 if (!sk)
640 goto clean;
642 chan = l2cap_chan_alloc(sk);
643 if (!chan) {
644 l2cap_sock_kill(sk);
645 goto clean;
648 write_lock_bh(&conn->chan_lock);
650 hci_conn_hold(conn->hcon);
652 l2cap_sock_init(sk, parent);
654 bacpy(&bt_sk(sk)->src, conn->src);
655 bacpy(&bt_sk(sk)->dst, conn->dst);
657 bt_accept_enqueue(parent, sk);
659 __l2cap_chan_add(conn, chan);
661 l2cap_pi(sk)->chan = chan;
663 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
665 sk->sk_state = BT_CONNECTED;
666 parent->sk_data_ready(parent, 0);
668 write_unlock_bh(&conn->chan_lock);
670 clean:
671 bh_unlock_sock(parent);
/* ACL/LE link is up: accept incoming LE channels, mark LE and
 * non-connection-oriented sockets connected, and start establishment for
 * channels waiting in BT_CONNECT. */
674 static void l2cap_conn_ready(struct l2cap_conn *conn)
676 struct l2cap_chan *chan;
678 BT_DBG("conn %p", conn);
680 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
681 l2cap_le_conn_ready(conn);
683 read_lock(&conn->chan_lock);
685 list_for_each_entry(chan, &conn->chan_l, list) {
686 struct sock *sk = chan->sk;
688 bh_lock_sock(sk);
690 if (conn->hcon->type == LE_LINK) {
691 l2cap_sock_clear_timer(sk);
692 sk->sk_state = BT_CONNECTED;
693 sk->sk_state_change(sk);
696 if (sk->sk_type != SOCK_SEQPACKET &&
697 sk->sk_type != SOCK_STREAM) {
/* Raw/dgram sockets need no L2CAP-level handshake. */
698 l2cap_sock_clear_timer(sk);
699 sk->sk_state = BT_CONNECTED;
700 sk->sk_state_change(sk);
701 } else if (sk->sk_state == BT_CONNECT)
702 l2cap_do_start(chan);
704 bh_unlock_sock(sk);
707 read_unlock(&conn->chan_lock);
710 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel on @conn that asked for reliability
 * (force_reliable), e.g. after an ACL data error. */
711 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
713 struct l2cap_chan *chan;
715 BT_DBG("conn %p", conn);
717 read_lock(&conn->chan_lock);
719 list_for_each_entry(chan, &conn->chan_l, list) {
720 struct sock *sk = chan->sk;
722 if (l2cap_pi(sk)->force_reliable)
723 sk->sk_err = err;
726 read_unlock(&conn->chan_lock);
/* Info-request timer: give up on the feature exchange, mark it done and
 * resume any channels waiting on it. Runs in timer (softirq) context. */
729 static void l2cap_info_timeout(unsigned long arg)
731 struct l2cap_conn *conn = (void *) arg;
733 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
734 conn->info_ident = 0;
736 l2cap_conn_start(conn);
/* Create (or return the existing) l2cap_conn for @hcon. Returns NULL on
 * allocation failure, or the existing conn unchanged when one is already
 * attached or @status is non-zero. */
739 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
741 struct l2cap_conn *conn = hcon->l2cap_data;
743 if (conn || status)
744 return conn;
746 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
747 if (!conn)
748 return NULL;
750 hcon->l2cap_data = conn;
751 conn->hcon = hcon;
753 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links use the controller's LE MTU when one is reported. */
755 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
756 conn->mtu = hcon->hdev->le_mtu;
757 else
758 conn->mtu = hcon->hdev->acl_mtu;
760 conn->src = &hcon->hdev->bdaddr;
761 conn->dst = &hcon->dst;
763 conn->feat_mask = 0;
765 spin_lock_init(&conn->lock);
766 rwlock_init(&conn->chan_lock);
768 INIT_LIST_HEAD(&conn->chan_l);
/* LE has no information request/response, so no info timer there. */
770 if (hcon->type != LE_LINK)
771 setup_timer(&conn->info_timer, l2cap_info_timeout,
772 (unsigned long) conn);
/* 0x13 = remote-user-terminated default disconnect reason. */
774 conn->disc_reason = 0x13;
776 return conn;
/* Destroy the l2cap_conn attached to @hcon: free any partial rx skb, kill
 * every channel (propagating @err), stop the info timer and free the conn. */
779 static void l2cap_conn_del(struct hci_conn *hcon, int err)
781 struct l2cap_conn *conn = hcon->l2cap_data;
782 struct l2cap_chan *chan, *l;
783 struct sock *sk;
785 if (!conn)
786 return;
788 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
790 kfree_skb(conn->rx_skb);
792 /* Kill channels */
793 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
794 sk = chan->sk;
795 bh_lock_sock(sk);
796 l2cap_chan_del(chan, err);
797 bh_unlock_sock(sk);
798 l2cap_sock_kill(sk);
801 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
802 del_timer_sync(&conn->info_timer);
804 hcon->l2cap_data = NULL;
805 kfree(conn);
/* Locked wrapper: add @chan to @conn under conn->chan_lock (write, bh). */
808 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
810 write_lock_bh(&conn->chan_lock);
811 __l2cap_chan_add(conn, chan);
812 write_unlock_bh(&conn->chan_lock);
815 /* ---- Socket interface ---- */
817 /* Find socket with psm and source bdaddr.
818 * Returns closest match.
/* Like l2cap_get_sock_by_scid() but keyed on PSM, and the result is
 * returned UNLOCKED — callers must lock it themselves if needed. */
820 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
822 struct sock *sk = NULL, *sk1 = NULL;
823 struct hlist_node *node;
825 read_lock(&l2cap_sk_list.lock);
827 sk_for_each(sk, node, &l2cap_sk_list.head) {
828 if (state && sk->sk_state != state)
829 continue;
831 if (l2cap_pi(sk)->psm == psm) {
832 /* Exact match. */
833 if (!bacmp(&bt_sk(sk)->src, src))
834 break;
836 /* Closest match */
837 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
838 sk1 = sk;
842 read_unlock(&l2cap_sk_list.lock);
844 return node ? sk : sk1;
/* Establish an outgoing L2CAP connection for @sk: route to an HCI device,
 * create/reuse the ACL or LE link, attach a channel, and either start the
 * L2CAP handshake or (for raw sockets) mark connected once secure.
 * Returns 0 on success or a negative errno. */
847 int l2cap_do_connect(struct sock *sk)
849 bdaddr_t *src = &bt_sk(sk)->src;
850 bdaddr_t *dst = &bt_sk(sk)->dst;
851 struct l2cap_conn *conn;
852 struct l2cap_chan *chan;
853 struct hci_conn *hcon;
854 struct hci_dev *hdev;
855 __u8 auth_type;
856 int err;
858 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
859 l2cap_pi(sk)->psm);
861 hdev = hci_get_route(dst, src);
862 if (!hdev)
863 return -EHOSTUNREACH;
865 hci_dev_lock_bh(hdev);
867 auth_type = l2cap_get_auth_type(sk);
/* dcid == LE_DATA marks this socket as targeting an LE link. */
869 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
870 hcon = hci_connect(hdev, LE_LINK, dst,
871 l2cap_pi(sk)->sec_level, auth_type);
872 else
873 hcon = hci_connect(hdev, ACL_LINK, dst,
874 l2cap_pi(sk)->sec_level, auth_type);
876 if (IS_ERR(hcon)) {
877 err = PTR_ERR(hcon);
878 goto done;
881 conn = l2cap_conn_add(hcon, 0);
882 if (!conn) {
883 hci_conn_put(hcon);
884 err = -ENOMEM;
885 goto done;
888 chan = l2cap_chan_alloc(sk);
889 if (!chan) {
890 hci_conn_put(hcon);
891 err = -ENOMEM;
892 goto done;
895 /* Update source addr of the socket */
896 bacpy(src, conn->src);
898 l2cap_chan_add(conn, chan);
900 l2cap_pi(sk)->chan = chan;
902 sk->sk_state = BT_CONNECT;
903 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
905 if (hcon->state == BT_CONNECTED) {
906 if (sk->sk_type != SOCK_SEQPACKET &&
907 sk->sk_type != SOCK_STREAM) {
908 l2cap_sock_clear_timer(sk);
909 if (l2cap_check_security(sk))
910 sk->sk_state = BT_CONNECTED;
911 } else
912 l2cap_do_start(chan);
915 err = 0;
917 done:
918 hci_dev_unlock_bh(hdev);
919 hci_dev_put(hdev);
920 return err;
/* Block (interruptibly, releasing the socket lock while sleeping) until all
 * ERTM frames have been acknowledged or the connection goes away. Returns
 * 0, a signal errno, or a pending socket error. */
923 int __l2cap_wait_ack(struct sock *sk)
925 DECLARE_WAITQUEUE(wait, current);
926 int err = 0;
927 int timeo = HZ/5;
929 add_wait_queue(sk_sleep(sk), &wait);
930 while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
931 set_current_state(TASK_INTERRUPTIBLE);
933 if (!timeo)
934 timeo = HZ/5;
936 if (signal_pending(current)) {
937 err = sock_intr_errno(timeo);
938 break;
941 release_sock(sk);
942 timeo = schedule_timeout(timeo);
943 lock_sock(sk);
945 err = sock_error(sk);
946 if (err)
947 break;
949 set_current_state(TASK_RUNNING);
950 remove_wait_queue(sk_sleep(sk), &wait);
951 return err;
/* ERTM monitor timer: poll the peer again (RR/RNR with P-bit) until
 * remote_max_tx retries are exhausted, then force a disconnect. Timer
 * (softirq) context; protects the socket with bh_lock_sock. */
954 static void l2cap_monitor_timeout(unsigned long arg)
956 struct l2cap_chan *chan = (void *) arg;
957 struct sock *sk = chan->sk;
959 BT_DBG("chan %p", chan);
961 bh_lock_sock(sk);
962 if (chan->retry_count >= chan->remote_max_tx) {
963 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
964 bh_unlock_sock(sk);
965 return;
968 chan->retry_count++;
969 __mod_monitor_timer();
971 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
972 bh_unlock_sock(sk);
/* ERTM retransmission timer: switch to the monitor phase — start counting
 * retries, set WAIT_F (expect an F-bit response) and poll the peer. */
975 static void l2cap_retrans_timeout(unsigned long arg)
977 struct l2cap_chan *chan = (void *) arg;
978 struct sock *sk = chan->sk;
980 BT_DBG("sk %p", sk);
982 bh_lock_sock(sk);
983 chan->retry_count = 1;
984 __mod_monitor_timer();
986 chan->conn_state |= L2CAP_CONN_WAIT_F;
988 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
989 bh_unlock_sock(sk);
/* Free frames from the head of the tx queue that the peer has acknowledged,
 * stopping at expected_ack_seq; cancel the retransmission timer once no
 * unacked frames remain. */
992 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
994 struct sock *sk = chan->sk;
995 struct sk_buff *skb;
997 while ((skb = skb_peek(TX_QUEUE(sk))) &&
998 chan->unacked_frames) {
999 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1000 break;
1002 skb = skb_dequeue(TX_QUEUE(sk));
1003 kfree_skb(skb);
1005 chan->unacked_frames--;
1008 if (!chan->unacked_frames)
1009 del_timer(&chan->retrans_timer);
/* Hand a fully-built L2CAP frame to the HCI layer, choosing the
 * non-flushable start flag when the socket isn't flushable and the
 * controller supports it. */
1012 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1014 struct l2cap_pinfo *pi = l2cap_pi(sk);
1015 struct hci_conn *hcon = pi->conn->hcon;
1016 u16 flags;
1018 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1020 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1021 flags = ACL_START_NO_FLUSH;
1022 else
1023 flags = ACL_START;
1025 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain the tx queue, stamping each frame with the next
 * tx_seq (modulo-64) and a CRC16 FCS when enabled. No retransmission. */
1028 void l2cap_streaming_send(struct l2cap_chan *chan)
1030 struct sock *sk = chan->sk;
1031 struct sk_buff *skb;
1032 struct l2cap_pinfo *pi = l2cap_pi(sk);
1033 u16 control, fcs;
1035 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1036 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1037 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1038 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1040 if (pi->fcs == L2CAP_FCS_CRC16) {
1041 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1042 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1045 l2cap_do_send(sk, skb);
1047 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single queued I-frame whose tx_seq matches @tx_seq (used
 * when answering an SREJ). Disconnects if the frame already hit
 * remote_max_tx retries. */
1051 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1053 struct sock *sk = chan->sk;
1054 struct l2cap_pinfo *pi = l2cap_pi(sk);
1055 struct sk_buff *skb, *tx_skb;
1056 u16 control, fcs;
1058 skb = skb_peek(TX_QUEUE(sk));
1059 if (!skb)
1060 return;
1062 do {
1063 if (bt_cb(skb)->tx_seq == tx_seq)
1064 break;
1066 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1067 return;
1069 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1071 if (chan->remote_max_tx &&
1072 bt_cb(skb)->retries == chan->remote_max_tx) {
1073 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1074 return;
/* NOTE(review): skb_clone(GFP_ATOMIC) may return NULL; tx_skb is
 * dereferenced below without a check — potential NULL deref under
 * memory pressure. Worth fixing against upstream. */
1077 tx_skb = skb_clone(skb, GFP_ATOMIC);
1078 bt_cb(skb)->retries++;
1079 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1081 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1082 control |= L2CAP_CTRL_FINAL;
1083 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1086 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1087 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1089 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1091 if (pi->fcs == L2CAP_FCS_CRC16) {
1092 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1093 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1096 l2cap_do_send(sk, tx_skb);
/* ERTM transmit loop: clone and send queued I-frames from sk_send_head
 * until the tx window fills or the queue drains, stamping req-seq/tx-seq,
 * the F-bit when pending, and the FCS. Arms the retransmission timer per
 * frame. Returns frames sent, or -ENOTCONN if the socket isn't connected. */
1099 int l2cap_ertm_send(struct l2cap_chan *chan)
1101 struct sk_buff *skb, *tx_skb;
1102 struct sock *sk = chan->sk;
1103 struct l2cap_pinfo *pi = l2cap_pi(sk);
1104 u16 control, fcs;
1105 int nsent = 0;
1107 if (sk->sk_state != BT_CONNECTED)
1108 return -ENOTCONN;
1110 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(chan))) {
1112 if (chan->remote_max_tx &&
1113 bt_cb(skb)->retries == chan->remote_max_tx) {
1114 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1115 break;
/* NOTE(review): clone result is not NULL-checked before use below. */
1118 tx_skb = skb_clone(skb, GFP_ATOMIC);
1120 bt_cb(skb)->retries++;
1122 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1123 control &= L2CAP_CTRL_SAR;
1125 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1126 control |= L2CAP_CTRL_FINAL;
1127 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1129 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1130 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1131 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/written via skb->data with tx_skb->len.
 * It happens to work because skb_clone() shares the data buffer, but it
 * is inconsistent with l2cap_retransmit_one_frame() (which uses
 * tx_skb->data) and would break if the clone ever became a copy. */
1134 if (pi->fcs == L2CAP_FCS_CRC16) {
1135 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1136 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1139 l2cap_do_send(sk, tx_skb);
1141 __mod_retrans_timer();
1143 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1144 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission of this frame: it now counts as unacked. */
1146 if (bt_cb(skb)->retries == 1)
1147 chan->unacked_frames++;
1149 chan->frames_sent++;
1151 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1152 sk->sk_send_head = NULL;
1153 else
1154 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1156 nsent++;
1159 return nsent;
/* Rewind transmission to the last acknowledged frame and resend everything
 * from there via l2cap_ertm_send(). Returns its result. */
1162 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1164 struct sock *sk = chan->sk;
1165 int ret;
1167 if (!skb_queue_empty(TX_QUEUE(sk)))
1168 sk->sk_send_head = TX_QUEUE(sk)->next;
1170 chan->next_tx_seq = chan->expected_ack_seq;
1171 ret = l2cap_ertm_send(chan);
1172 return ret;
/* Acknowledge received frames: send RNR when locally busy; otherwise try to
 * piggyback the ack on pending I-frames, falling back to an explicit RR. */
1175 static void l2cap_send_ack(struct l2cap_chan *chan)
1177 u16 control = 0;
1179 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1181 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1182 control |= L2CAP_SUPER_RCV_NOT_READY;
1183 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1184 l2cap_send_sframe(chan, control);
1185 return;
/* I-frames sent carry the req-seq, so no separate RR is needed. */
1188 if (l2cap_ertm_send(chan) > 0)
1189 return;
1191 control |= L2CAP_SUPER_RCV_READY;
1192 l2cap_send_sframe(chan, control);
/* Send a final (F-bit) SREJ for the last entry on the channel's SREJ list.
 * NOTE(review): assumes the SREJ list is non-empty — verify callers. */
1195 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1197 struct srej_list *tail;
1198 u16 control;
1200 control = L2CAP_SUPER_SELECT_REJECT;
1201 control |= L2CAP_CTRL_FINAL;
1203 tail = list_entry(SREJ_LIST(chan->sk)->prev, struct srej_list, list);
1204 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1206 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: @count bytes into the
 * linear area already reserved, the remainder into frag_list skbs of at
 * most conn->mtu each. Returns bytes consumed or a negative errno; on
 * failure the caller frees the partially-built skb chain. */
1209 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1211 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1212 struct sk_buff **frag;
1213 int err, sent = 0;
1215 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1216 return -EFAULT;
1218 sent += count;
1219 len -= count;
1221 /* Continuation fragments (no L2CAP header) */
1222 frag = &skb_shinfo(skb)->frag_list;
1223 while (len) {
1224 count = min_t(unsigned int, conn->mtu, len);
1226 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1227 if (!*frag)
1228 return err;
1229 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1230 return -EFAULT;
1232 sent += count;
1233 len -= count;
1235 frag = &(*frag)->next;
1238 return sent;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * payload copied from the user iovec. Returns the skb or an ERR_PTR. */
1241 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1244 struct sk_buff *skb;
1245 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1246 struct l2cap_hdr *lh;
1248 BT_DBG("sk %p len %d", sk, (int)len);
1250 count = min_t(unsigned int, (conn->mtu - hlen), len);
1251 skb = bt_skb_send_alloc(sk, count + hlen,
1252 msg->msg_flags & MSG_DONTWAIT, &err);
1253 if (!skb)
1254 return ERR_PTR(err);
1256 /* Create L2CAP header */
1257 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1258 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1259 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1260 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1262 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1263 if (unlikely(err < 0)) {
1264 kfree_skb(skb);
1265 return ERR_PTR(err);
1267 return skb;
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload copied
 * from the user iovec. Returns the skb or an ERR_PTR. */
1270 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1272 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1273 struct sk_buff *skb;
1274 int err, count, hlen = L2CAP_HDR_SIZE;
1275 struct l2cap_hdr *lh;
1277 BT_DBG("sk %p len %d", sk, (int)len);
1279 count = min_t(unsigned int, (conn->mtu - hlen), len);
1280 skb = bt_skb_send_alloc(sk, count + hlen,
1281 msg->msg_flags & MSG_DONTWAIT, &err);
1282 if (!skb)
1283 return ERR_PTR(err);
1285 /* Create L2CAP header */
1286 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1287 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1288 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1290 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1291 if (unlikely(err < 0)) {
1292 kfree_skb(skb);
1293 return ERR_PTR(err);
1295 return skb;
/* Build an ERTM/streaming I-frame PDU: header + 2-byte control, optional
 * 2-byte SDU length (@sdulen, for SAR start frames), payload, and a
 * zero-filled FCS placeholder when CRC16 is enabled (filled in at send
 * time). Returns the skb or an ERR_PTR; -ENOTCONN when unattached. */
1298 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1300 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1301 struct sk_buff *skb;
1302 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1303 struct l2cap_hdr *lh;
1305 BT_DBG("sk %p len %d", sk, (int)len);
1307 if (!conn)
1308 return ERR_PTR(-ENOTCONN);
1310 if (sdulen)
1311 hlen += 2;
1313 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1314 hlen += 2;
1316 count = min_t(unsigned int, (conn->mtu - hlen), len);
1317 skb = bt_skb_send_alloc(sk, count + hlen,
1318 msg->msg_flags & MSG_DONTWAIT, &err);
1319 if (!skb)
1320 return ERR_PTR(err);
1322 /* Create L2CAP header */
1323 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1324 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1325 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1326 put_unaligned_le16(control, skb_put(skb, 2));
1327 if (sdulen)
1328 put_unaligned_le16(sdulen, skb_put(skb, 2));
1330 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1331 if (unlikely(err < 0)) {
1332 kfree_skb(skb);
1333 return ERR_PTR(err);
1336 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1337 put_unaligned_le16(0, skb_put(skb, 2));
1339 bt_cb(skb)->retries = 0;
1340 return skb;
/* Segment an SDU larger than the remote MPS into a start/continue/end
 * I-frame sequence and splice it onto the channel's TX queue.
 * Frames are staged on a local queue so a mid-segmentation allocation
 * failure leaves the TX queue untouched (all-or-nothing).
 * Returns the number of payload bytes queued, or a negative errno. */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* Start frame carries the total SDU length as sdulen */
	skb = l2cap_create_iframe_pdu(sk, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop everything staged so far */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
/* Mark a channel's configuration as complete and wake whoever is
 * waiting on it: the connecting task for an outgoing channel, or the
 * listening parent socket for an incoming one. */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* Configuration finished; stop the setup timer */
	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Walk the connection's channel list under the read lock;
	 * clone failures are silently skipped (best effort). */
	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Receive queue full (or filtered): drop the clone */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb (L2CAP header + command header +
 * payload).  Payload larger than the connection MTU is chained as
 * continuation fragments on frag_list.  Returns NULL on allocation
 * failure; any partially built chain is freed. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a different fixed signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment: whatever fits after the two headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb() also frees the fragments chained so far */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte options are returned by value in *val; any other length
 * returns a pointer to the raw option payload instead.
 * Returns the total number of bytes consumed. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a value */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  For len other than 1/2/4, val is treated as a pointer
 * to len bytes of raw option payload (mirrors l2cap_get_conf_opt). */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* val is a pointer to the option payload in this case */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* ERTM ack timer callback: send a pending acknowledgement for the
 * channel.  Runs in timer (softirq) context, hence bh_lock_sock. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;

	bh_lock_sock(chan->sk);
	l2cap_send_ack(chan);
	bh_unlock_sock(chan->sk);
}
/* Reset a channel's ERTM sequence state and set up the timers, the
 * SREJ/busy receive queues and the busy work item.  Called once a
 * channel finishes configuration in ERTM mode. */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
			(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
			(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	/* ERTM frames queued on the backlog must go through the ERTM
	 * receive path, not the default one */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1599 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1601 switch (mode) {
1602 case L2CAP_MODE_STREAMING:
1603 case L2CAP_MODE_ERTM:
1604 if (l2cap_mode_supported(mode, remote_feat_mask))
1605 return mode;
1606 /* fall through */
1607 default:
1608 return L2CAP_MODE_BASIC;
/* Build an outgoing Configure Request for the channel into @data.
 * On the first request the desired mode may be downgraded via
 * l2cap_select_mode() based on the remote feature mask.  Emits MTU,
 * RFC and (when the remote supports it) FCS options as appropriate.
 * Returns the number of bytes written. */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct sock *sk = chan->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode is mandated; keep it */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise basic mode explicitly when the remote
		 * knows about ERTM/streaming at all */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Leave room for the 10 bytes of per-PDU overhead */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1711 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1713 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1714 struct l2cap_conf_rsp *rsp = data;
1715 void *ptr = rsp->data;
1716 void *req = chan->conf_req;
1717 int len = chan->conf_len;
1718 int type, hint, olen;
1719 unsigned long val;
1720 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1721 u16 mtu = L2CAP_DEFAULT_MTU;
1722 u16 result = L2CAP_CONF_SUCCESS;
1724 BT_DBG("chan %p", chan);
1726 while (len >= L2CAP_CONF_OPT_SIZE) {
1727 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1729 hint = type & L2CAP_CONF_HINT;
1730 type &= L2CAP_CONF_MASK;
1732 switch (type) {
1733 case L2CAP_CONF_MTU:
1734 mtu = val;
1735 break;
1737 case L2CAP_CONF_FLUSH_TO:
1738 pi->flush_to = val;
1739 break;
1741 case L2CAP_CONF_QOS:
1742 break;
1744 case L2CAP_CONF_RFC:
1745 if (olen == sizeof(rfc))
1746 memcpy(&rfc, (void *) val, olen);
1747 break;
1749 case L2CAP_CONF_FCS:
1750 if (val == L2CAP_FCS_NONE)
1751 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1753 break;
1755 default:
1756 if (hint)
1757 break;
1759 result = L2CAP_CONF_UNKNOWN;
1760 *((u8 *) ptr++) = type;
1761 break;
1765 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1766 goto done;
1768 switch (pi->mode) {
1769 case L2CAP_MODE_STREAMING:
1770 case L2CAP_MODE_ERTM:
1771 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1772 pi->mode = l2cap_select_mode(rfc.mode,
1773 pi->conn->feat_mask);
1774 break;
1777 if (pi->mode != rfc.mode)
1778 return -ECONNREFUSED;
1780 break;
1783 done:
1784 if (pi->mode != rfc.mode) {
1785 result = L2CAP_CONF_UNACCEPT;
1786 rfc.mode = pi->mode;
1788 if (chan->num_conf_rsp == 1)
1789 return -ECONNREFUSED;
1791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1792 sizeof(rfc), (unsigned long) &rfc);
1796 if (result == L2CAP_CONF_SUCCESS) {
1797 /* Configure output options and let the other side know
1798 * which ones we don't like. */
1800 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1801 result = L2CAP_CONF_UNACCEPT;
1802 else {
1803 pi->omtu = mtu;
1804 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1806 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1808 switch (rfc.mode) {
1809 case L2CAP_MODE_BASIC:
1810 pi->fcs = L2CAP_FCS_NONE;
1811 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1812 break;
1814 case L2CAP_MODE_ERTM:
1815 chan->remote_tx_win = rfc.txwin_size;
1816 chan->remote_max_tx = rfc.max_transmit;
1818 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1819 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1821 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1823 rfc.retrans_timeout =
1824 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1825 rfc.monitor_timeout =
1826 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1828 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1830 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1831 sizeof(rfc), (unsigned long) &rfc);
1833 break;
1835 case L2CAP_MODE_STREAMING:
1836 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1837 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1839 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1841 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1843 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1844 sizeof(rfc), (unsigned long) &rfc);
1846 break;
1848 default:
1849 result = L2CAP_CONF_UNACCEPT;
1851 memset(&rfc, 0, sizeof(rfc));
1852 rfc.mode = pi->mode;
1855 if (result == L2CAP_CONF_SUCCESS)
1856 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1858 rsp->scid = cpu_to_le16(pi->dcid);
1859 rsp->result = cpu_to_le16(result);
1860 rsp->flags = cpu_to_le16(0x0000);
1862 return ptr - data;
1865 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1867 struct l2cap_pinfo *pi = l2cap_pi(sk);
1868 struct l2cap_conf_req *req = data;
1869 void *ptr = req->data;
1870 int type, olen;
1871 unsigned long val;
1872 struct l2cap_conf_rfc rfc;
1874 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1876 while (len >= L2CAP_CONF_OPT_SIZE) {
1877 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1879 switch (type) {
1880 case L2CAP_CONF_MTU:
1881 if (val < L2CAP_DEFAULT_MIN_MTU) {
1882 *result = L2CAP_CONF_UNACCEPT;
1883 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1884 } else
1885 pi->imtu = val;
1886 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1887 break;
1889 case L2CAP_CONF_FLUSH_TO:
1890 pi->flush_to = val;
1891 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1892 2, pi->flush_to);
1893 break;
1895 case L2CAP_CONF_RFC:
1896 if (olen == sizeof(rfc))
1897 memcpy(&rfc, (void *)val, olen);
1899 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1900 rfc.mode != pi->mode)
1901 return -ECONNREFUSED;
1903 pi->fcs = 0;
1905 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1906 sizeof(rfc), (unsigned long) &rfc);
1907 break;
1911 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1912 return -ECONNREFUSED;
1914 pi->mode = rfc.mode;
1916 if (*result == L2CAP_CONF_SUCCESS) {
1917 switch (rfc.mode) {
1918 case L2CAP_MODE_ERTM:
1919 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1920 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1921 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1922 break;
1923 case L2CAP_MODE_STREAMING:
1924 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1928 req->dcid = cpu_to_le16(pi->dcid);
1929 req->flags = cpu_to_le16(0x0000);
1931 return ptr - data;
1934 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1936 struct l2cap_conf_rsp *rsp = data;
1937 void *ptr = rsp->data;
1939 BT_DBG("sk %p", sk);
1941 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1942 rsp->result = cpu_to_le16(result);
1943 rsp->flags = cpu_to_le16(flags);
1945 return ptr - data;
/* Complete a deferred incoming connection: send the delayed
 * Connect Response (using the ident saved at request time) and, if we
 * haven't already, kick off configuration with a Configure Request. */
void __l2cap_connect_rsp_defer(struct sock *sk)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u8 buf[128];

	sk->sk_state = BT_CONFIG;

	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Configuration already started earlier: nothing more to do */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
		return;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
1973 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1975 struct l2cap_pinfo *pi = l2cap_pi(sk);
1976 int type, olen;
1977 unsigned long val;
1978 struct l2cap_conf_rfc rfc;
1980 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1982 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1983 return;
1985 while (len >= L2CAP_CONF_OPT_SIZE) {
1986 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1988 switch (type) {
1989 case L2CAP_CONF_RFC:
1990 if (olen == sizeof(rfc))
1991 memcpy(&rfc, (void *)val, olen);
1992 goto done;
1996 done:
1997 switch (rfc.mode) {
1998 case L2CAP_MODE_ERTM:
1999 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2000 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2001 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2002 break;
2003 case L2CAP_MODE_STREAMING:
2004 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2008 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2010 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2012 if (rej->reason != 0x0000)
2013 return 0;
2015 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2016 cmd->ident == conn->info_ident) {
2017 del_timer(&conn->info_timer);
2019 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2020 conn->info_ident = 0;
2022 l2cap_conn_start(conn);
2025 return 0;
/* Handle an incoming Connection Request: find a listening socket for
 * the PSM, allocate and attach a new channel, and reply with success,
 * pending (security/defer/info exchange outstanding) or a failure
 * code.  On success the configuration phase is started immediately. */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	/* Our local CID becomes the peer's destination CID */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Remember the ident for a possibly deferred response */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept first */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange still pending: answer "pending" */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Pending with no info: trigger the feature-mask exchange now */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Accepted straight away: start configuration */
	if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
			result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
/* Handle a Connection Response for one of our outgoing requests.
 * Success moves the channel to BT_CONFIG and starts configuration,
 * "pending" just records the state, anything else tears the channel
 * down (deferred via a short timer when the socket is user-locked). */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Look the channel up by our CID, or by command ident when the
	 * response carries no source CID yet */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2232 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2234 /* FCS is enabled only in ERTM or streaming mode, if one or both
2235 * sides request it.
2237 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2238 pi->fcs = L2CAP_FCS_NONE;
2239 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2240 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Partial requests (the
 * continuation flag set) are accumulated in chan->conf_req; once
 * complete, the request is parsed, a response sent, and — when both
 * directions are done — the channel is brought up. */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* 0x0002 = invalid CID in request */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: channel is ready */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We haven't sent our own request yet: do it now */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2337 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2339 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2340 u16 scid, flags, result;
2341 struct l2cap_chan *chan;
2342 struct sock *sk;
2343 int len = cmd->len - sizeof(*rsp);
2345 scid = __le16_to_cpu(rsp->scid);
2346 flags = __le16_to_cpu(rsp->flags);
2347 result = __le16_to_cpu(rsp->result);
2349 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2350 scid, flags, result);
2352 chan = l2cap_get_chan_by_scid(conn, scid);
2353 if (!chan)
2354 return 0;
2356 sk = chan->sk;
2358 switch (result) {
2359 case L2CAP_CONF_SUCCESS:
2360 l2cap_conf_rfc_get(sk, rsp->data, len);
2361 break;
2363 case L2CAP_CONF_UNACCEPT:
2364 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2365 char req[64];
2367 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2368 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2369 goto done;
2372 /* throw out any old stored conf requests */
2373 result = L2CAP_CONF_SUCCESS;
2374 len = l2cap_parse_conf_rsp(sk, rsp->data,
2375 len, req, &result);
2376 if (len < 0) {
2377 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2378 goto done;
2381 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2382 L2CAP_CONF_REQ, len, req);
2383 chan->num_conf_req++;
2384 if (result != L2CAP_CONF_SUCCESS)
2385 goto done;
2386 break;
2389 default:
2390 sk->sk_err = ECONNRESET;
2391 l2cap_sock_set_timer(sk, HZ * 5);
2392 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2393 goto done;
2396 if (flags & 0x01)
2397 goto done;
2399 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2401 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2402 set_default_fcs(l2cap_pi(sk));
2404 sk->sk_state = BT_CONNECTED;
2405 chan->next_tx_seq = 0;
2406 chan->expected_tx_seq = 0;
2407 __skb_queue_head_init(TX_QUEUE(sk));
2408 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2409 l2cap_ertm_init(chan);
2411 l2cap_chan_ready(sk);
2414 done:
2415 bh_unlock_sock(sk);
2416 return 0;
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * socket down, and delete the channel (deferred via a short timer if
 * userspace currently owns the socket lock). */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle a Disconnection Response to our own request: the peer has
 * confirmed, so delete the channel (deferred via a short timer if
 * userspace currently owns the socket lock). */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Information Request: answer the feature-mask and
 * fixed-channels queries, reject anything else as not supported. */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Only advertise ERTM/streaming/FCS when not disabled
		 * via the module parameter */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* 8-byte fixed channel bitmap follows the 4-byte header */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an Information Response for our pending request.  After the
 * feature mask (and, if supported, the fixed-channel map) is learned,
 * mark the exchange done and start the waiting channels. */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer: finish the exchange anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channels query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2587 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2588 u16 to_multiplier)
2590 u16 max_latency;
2592 if (min > max || min < 6 || max > 3200)
2593 return -EINVAL;
2595 if (to_multiplier < 10 || to_multiplier > 3200)
2596 return -EINVAL;
2598 if (max >= to_multiplier * 8)
2599 return -EINVAL;
2601 max_latency = (to_multiplier * 8 / max) - 1;
2602 if (latency > 499 || latency > max_latency)
2603 return -EINVAL;
2605 return 0;
/* Handle an LE Connection Parameter Update Request from the slave.
 *
 * Only valid when we are link master.  The requested parameters are
 * validated with l2cap_check_conn_param(); the peer gets an
 * accept/reject response and, on accept, the controller is asked to
 * apply the new parameters.
 *
 * Returns 0 on completion, -EINVAL when we are not master, -EPROTO on
 * a malformed (wrong-length) request.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* Fixed-size command: anything else is a protocol violation */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only apply parameters that passed validation */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2650 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2651 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2653 int err = 0;
2655 switch (cmd->code) {
2656 case L2CAP_COMMAND_REJ:
2657 l2cap_command_rej(conn, cmd, data);
2658 break;
2660 case L2CAP_CONN_REQ:
2661 err = l2cap_connect_req(conn, cmd, data);
2662 break;
2664 case L2CAP_CONN_RSP:
2665 err = l2cap_connect_rsp(conn, cmd, data);
2666 break;
2668 case L2CAP_CONF_REQ:
2669 err = l2cap_config_req(conn, cmd, cmd_len, data);
2670 break;
2672 case L2CAP_CONF_RSP:
2673 err = l2cap_config_rsp(conn, cmd, data);
2674 break;
2676 case L2CAP_DISCONN_REQ:
2677 err = l2cap_disconnect_req(conn, cmd, data);
2678 break;
2680 case L2CAP_DISCONN_RSP:
2681 err = l2cap_disconnect_rsp(conn, cmd, data);
2682 break;
2684 case L2CAP_ECHO_REQ:
2685 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2686 break;
2688 case L2CAP_ECHO_RSP:
2689 break;
2691 case L2CAP_INFO_REQ:
2692 err = l2cap_information_req(conn, cmd, data);
2693 break;
2695 case L2CAP_INFO_RSP:
2696 err = l2cap_information_rsp(conn, cmd, data);
2697 break;
2699 default:
2700 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2701 err = -EINVAL;
2702 break;
2705 return err;
2708 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2709 struct l2cap_cmd_hdr *cmd, u8 *data)
2711 switch (cmd->code) {
2712 case L2CAP_COMMAND_REJ:
2713 return 0;
2715 case L2CAP_CONN_PARAM_UPDATE_REQ:
2716 return l2cap_conn_param_update_req(conn, cmd, data);
2718 case L2CAP_CONN_PARAM_UPDATE_RSP:
2719 return 0;
2721 default:
2722 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2723 return -EINVAL;
/* Parse and dispatch every signaling command carried in one C-frame.
 *
 * Each command header is copied out, bounds-checked against the
 * remaining payload, and routed to the BR/EDR or LE handler depending
 * on the link type.  A handler error is answered with a Command
 * Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw listeners a look at the signaling traffic */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Stop on a truncated command or the invalid ident 0 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same frame */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
/* Verify and strip the FCS trailer of an ERTM/streaming frame.
 *
 * With CRC16 enabled the last two bytes of the skb carry a checksum
 * computed over the basic L2CAP header + control field (which sit just
 * before skb->data) and the payload.
 *
 * Returns 0 when the FCS matches (or FCS is disabled), -EBADMSG on a
 * mismatch.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	/* basic header + 2-byte control field precede skb->data */
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* Trim first: skb->data + skb->len then points exactly at
		 * the received FCS bytes that were just cut off. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
/* Answer a poll (P-bit) from the peer: send an RNR if we are locally
 * busy, flush pending I-frames, and fall back to a plain RR if nothing
 * else was transmitted.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Tell the peer to stop sending: Receiver Not Ready */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* The poll still needs an answer if no frame went out and we are
	 * not busy: send a plain Receiver Ready. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by the frame's distance from buffer_seq in the
 * modulo-64 sequence space.
 *
 * Returns 0 on success, -EINVAL when a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Compare offsets relative to buffer_seq so that wrap-around in
	 * the 6-bit sequence space still orders correctly. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		if (next_tx_seq_offset > tx_seq_offset) {
			/* First queued frame that is later: insert here */
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Later than everything queued so far: append at the tail */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
/* Reassemble ERTM I-frames into SDUs according to their SAR bits and
 * deliver complete SDUs to the socket receive queue.
 *
 * Returns 0 when the frame was consumed (delivered, dropped, or the
 * connection reset); returns -ENOMEM / the socket-queue error from the
 * START and END phases when the caller must keep the frame and retry
 * later (local-busy handling in l2cap_push_rx_skb()).
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* A segmented reassembly is in progress: protocol error */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > pi->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry after failed delivery the payload has already
		 * been appended; only the queueing is re-attempted. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > pi->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* NOTE(review): "drop" falls straight through into "disconnect",
	 * so every dropped frame also resets the connection.  Confirm this
	 * is the intended policy and not a missing return. */
disconnect:
	l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
/* Flush frames parked in busy_q while the channel was locally busy.
 *
 * Returns 0 once the queue is fully drained — clearing the local-busy
 * flags and, if an RNR had been sent, polling the peer with RR+P — or
 * -EBUSY when a frame still cannot be delivered (it is requeued at the
 * head to preserve ordering).
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for later */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it to resume transmission
	 * and wait for the F-bit answer under the monitor timer. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3006 static void l2cap_busy_work(struct work_struct *work)
3008 DECLARE_WAITQUEUE(wait, current);
3009 struct l2cap_pinfo *pi =
3010 container_of(work, struct l2cap_pinfo, busy_work);
3011 struct sock *sk = (struct sock *)pi;
3012 int n_tries = 0, timeo = HZ/5, err;
3013 struct sk_buff *skb;
3015 lock_sock(sk);
3017 add_wait_queue(sk_sleep(sk), &wait);
3018 while ((skb = skb_peek(&pi->chan->busy_q))) {
3019 set_current_state(TASK_INTERRUPTIBLE);
3021 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3022 err = -EBUSY;
3023 l2cap_send_disconn_req(pi->conn, pi->chan, EBUSY);
3024 break;
3027 if (!timeo)
3028 timeo = HZ/5;
3030 if (signal_pending(current)) {
3031 err = sock_intr_errno(timeo);
3032 break;
3035 release_sock(sk);
3036 timeo = schedule_timeout(timeo);
3037 lock_sock(sk);
3039 err = sock_error(sk);
3040 if (err)
3041 break;
3043 if (l2cap_try_push_rx_skb(l2cap_pi(sk)->chan) == 0)
3044 break;
3047 set_current_state(TASK_RUNNING);
3048 remove_wait_queue(sk_sleep(sk), &wait);
3050 release_sock(sk);
/* Deliver one in-sequence I-frame, entering the local-busy state when
 * delivery fails.
 *
 * While already busy, frames are appended to busy_q (preserving order)
 * and a flush is attempted.  On entering busy state the peer is sent
 * an RNR, the ack timer is stopped, and the busy worker is scheduled.
 * Returns 0 on success or the negative reassembly error.
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sock *sk = chan->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Keep ordering: queue behind frames already waiting */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);
	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	/* Tell the peer to stop transmitting until we recover */
	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3093 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3095 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3096 struct sk_buff *_skb;
3097 int err = -EINVAL;
3100 * TODO: We have to notify the userland if some data is lost with the
3101 * Streaming Mode.
3104 switch (control & L2CAP_CTRL_SAR) {
3105 case L2CAP_SDU_UNSEGMENTED:
3106 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3107 kfree_skb(chan->sdu);
3108 break;
3111 err = sock_queue_rcv_skb(chan->sk, skb);
3112 if (!err)
3113 return 0;
3115 break;
3117 case L2CAP_SDU_START:
3118 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3119 kfree_skb(chan->sdu);
3120 break;
3123 chan->sdu_len = get_unaligned_le16(skb->data);
3124 skb_pull(skb, 2);
3126 if (chan->sdu_len > pi->imtu) {
3127 err = -EMSGSIZE;
3128 break;
3131 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3132 if (!chan->sdu) {
3133 err = -ENOMEM;
3134 break;
3137 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3139 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3140 chan->partial_sdu_len = skb->len;
3141 err = 0;
3142 break;
3144 case L2CAP_SDU_CONTINUE:
3145 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3146 break;
3148 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3150 chan->partial_sdu_len += skb->len;
3151 if (chan->partial_sdu_len > chan->sdu_len)
3152 kfree_skb(chan->sdu);
3153 else
3154 err = 0;
3156 break;
3158 case L2CAP_SDU_END:
3159 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3160 break;
3162 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3164 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3165 chan->partial_sdu_len += skb->len;
3167 if (chan->partial_sdu_len > pi->imtu)
3168 goto drop;
3170 if (chan->partial_sdu_len == chan->sdu_len) {
3171 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3172 err = sock_queue_rcv_skb(chan->sk, _skb);
3173 if (err < 0)
3174 kfree_skb(_skb);
3176 err = 0;
3178 drop:
3179 kfree_skb(chan->sdu);
3180 break;
3183 kfree_skb(skb);
3184 return err;
/* Deliver the run of consecutive frames, starting at tx_seq, that have
 * accumulated in the (ordered) SREJ queue now that a missing frame has
 * arrived.  Stops at the first remaining gap.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q))) {
		/* Queue is ordered: a mismatch marks the next gap */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(chan, skb, control);
		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
/* A duplicate of an already-SREJ'ed frame arrived: re-send the SREJ
 * S-frame for every sequence number still missing, up to — and
 * removing — the list entry for tx_seq itself.  Each re-sent entry is
 * rotated to the tail so the pending list stays ordered.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sock *sk = chan->sk;
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* The frame for this entry has now been received */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3225 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3227 struct sock *sk = chan->sk;
3228 struct srej_list *new;
3229 u16 control;
3231 while (tx_seq != chan->expected_tx_seq) {
3232 control = L2CAP_SUPER_SELECT_REJECT;
3233 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3234 l2cap_send_sframe(chan, control);
3236 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3237 new->tx_seq = chan->expected_tx_seq;
3238 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3239 list_add_tail(&new->list, SREJ_LIST(sk));
3241 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3244 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3246 struct sock *sk = chan->sk;
3247 struct l2cap_pinfo *pi = l2cap_pi(sk);
3248 u8 tx_seq = __get_txseq(rx_control);
3249 u8 req_seq = __get_reqseq(rx_control);
3250 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3251 int tx_seq_offset, expected_tx_seq_offset;
3252 int num_to_ack = (pi->tx_win/6) + 1;
3253 int err = 0;
3255 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3256 tx_seq, rx_control);
3258 if (L2CAP_CTRL_FINAL & rx_control &&
3259 chan->conn_state & L2CAP_CONN_WAIT_F) {
3260 del_timer(&chan->monitor_timer);
3261 if (chan->unacked_frames > 0)
3262 __mod_retrans_timer();
3263 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3266 chan->expected_ack_seq = req_seq;
3267 l2cap_drop_acked_frames(chan);
3269 if (tx_seq == chan->expected_tx_seq)
3270 goto expected;
3272 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3273 if (tx_seq_offset < 0)
3274 tx_seq_offset += 64;
3276 /* invalid tx_seq */
3277 if (tx_seq_offset >= pi->tx_win) {
3278 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3279 goto drop;
3282 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3283 goto drop;
3285 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3286 struct srej_list *first;
3288 first = list_first_entry(SREJ_LIST(sk),
3289 struct srej_list, list);
3290 if (tx_seq == first->tx_seq) {
3291 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3292 l2cap_check_srej_gap(chan, tx_seq);
3294 list_del(&first->list);
3295 kfree(first);
3297 if (list_empty(SREJ_LIST(sk))) {
3298 chan->buffer_seq = chan->buffer_seq_srej;
3299 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3300 l2cap_send_ack(chan);
3301 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3303 } else {
3304 struct srej_list *l;
3306 /* duplicated tx_seq */
3307 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3308 goto drop;
3310 list_for_each_entry(l, SREJ_LIST(sk), list) {
3311 if (l->tx_seq == tx_seq) {
3312 l2cap_resend_srejframe(chan, tx_seq);
3313 return 0;
3316 l2cap_send_srejframe(chan, tx_seq);
3318 } else {
3319 expected_tx_seq_offset =
3320 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3321 if (expected_tx_seq_offset < 0)
3322 expected_tx_seq_offset += 64;
3324 /* duplicated tx_seq */
3325 if (tx_seq_offset < expected_tx_seq_offset)
3326 goto drop;
3328 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3330 BT_DBG("sk %p, Enter SREJ", sk);
3332 INIT_LIST_HEAD(SREJ_LIST(sk));
3333 chan->buffer_seq_srej = chan->buffer_seq;
3335 __skb_queue_head_init(&chan->srej_q);
3336 __skb_queue_head_init(&chan->busy_q);
3337 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3339 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3341 l2cap_send_srejframe(chan, tx_seq);
3343 del_timer(&chan->ack_timer);
3345 return 0;
3347 expected:
3348 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3350 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3351 bt_cb(skb)->tx_seq = tx_seq;
3352 bt_cb(skb)->sar = sar;
3353 __skb_queue_tail(&chan->srej_q, skb);
3354 return 0;
3357 err = l2cap_push_rx_skb(chan, skb, rx_control);
3358 if (err < 0)
3359 return 0;
3361 if (rx_control & L2CAP_CTRL_FINAL) {
3362 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3363 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3364 else
3365 l2cap_retransmit_frames(chan);
3368 __mod_ack_timer();
3370 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3371 if (chan->num_acked == num_to_ack - 1)
3372 l2cap_send_ack(chan);
3374 return 0;
3376 drop:
3377 kfree_skb(skb);
3378 return 0;
/* Handle a Receiver Ready S-frame: acknowledgment bookkeeping plus
 * poll/final processing.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	struct sock *sk = chan->sk;

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F-bit */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Answer to our own poll */
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Retransmit only if a REJ was not already acted upon */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
/* Handle a Reject S-frame: the peer requests a go-back-N
 * retransmission starting at req_seq.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* The F-bit answers our poll: retransmit only if this REJ
		 * was not already acted upon while polling. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* While waiting for the F-bit, remember the REJ was
		 * handled so the answer does not retransmit again. */
		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Select Reject S-frame: the peer requests retransmission of
 * the single frame req_seq, with poll/final bookkeeping analogous to
 * the REJ case.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		/* Remember which SREJ we serviced while awaiting F-bit */
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if this SREJ was already acted
		 * upon during the poll exchange. */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a Receiver Not Ready S-frame: the peer is busy, so stop
 * (re)transmission until it recovers.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: keep asking for the missing frames */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3510 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3512 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3514 if (L2CAP_CTRL_FINAL & rx_control &&
3515 chan->conn_state & L2CAP_CONN_WAIT_F) {
3516 del_timer(&chan->monitor_timer);
3517 if (chan->unacked_frames > 0)
3518 __mod_retrans_timer();
3519 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3522 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3523 case L2CAP_SUPER_RCV_READY:
3524 l2cap_data_channel_rrframe(chan, rx_control);
3525 break;
3527 case L2CAP_SUPER_REJECT:
3528 l2cap_data_channel_rejframe(chan, rx_control);
3529 break;
3531 case L2CAP_SUPER_SELECT_REJECT:
3532 l2cap_data_channel_srejframe(chan, rx_control);
3533 break;
3535 case L2CAP_SUPER_RCV_NOT_READY:
3536 l2cap_data_channel_rnrframe(chan, rx_control);
3537 break;
3540 kfree_skb(skb);
3541 return 0;
/* Validate one incoming ERTM frame (FCS, payload length, req_seq
 * window) and route it to the I-frame or S-frame handler.  Consumes
 * the skb.  Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* The 2-byte SDU length of a SAR start frame and the CRC16
	 * trailer do not count towards the payload length. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	/* req_seq must fall inside [expected_ack_seq, next_tx_seq],
	 * computed as mod-64 offsets to handle wrap-around. */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* A negative len means the frame was too short to carry
		 * the fields its control word claims. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3615 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3617 struct l2cap_chan *chan;
3618 struct sock *sk;
3619 struct l2cap_pinfo *pi;
3620 u16 control;
3621 u8 tx_seq;
3622 int len;
3624 chan = l2cap_get_chan_by_scid(conn, cid);
3625 if (!chan) {
3626 BT_DBG("unknown cid 0x%4.4x", cid);
3627 goto drop;
3630 sk = chan->sk;
3631 pi = l2cap_pi(sk);
3633 BT_DBG("sk %p, len %d", sk, skb->len);
3635 if (sk->sk_state != BT_CONNECTED)
3636 goto drop;
3638 switch (pi->mode) {
3639 case L2CAP_MODE_BASIC:
3640 /* If socket recv buffers overflows we drop data here
3641 * which is *bad* because L2CAP has to be reliable.
3642 * But we don't have any other choice. L2CAP doesn't
3643 * provide flow control mechanism. */
3645 if (pi->imtu < skb->len)
3646 goto drop;
3648 if (!sock_queue_rcv_skb(sk, skb))
3649 goto done;
3650 break;
3652 case L2CAP_MODE_ERTM:
3653 if (!sock_owned_by_user(sk)) {
3654 l2cap_ertm_data_rcv(sk, skb);
3655 } else {
3656 if (sk_add_backlog(sk, skb))
3657 goto drop;
3660 goto done;
3662 case L2CAP_MODE_STREAMING:
3663 control = get_unaligned_le16(skb->data);
3664 skb_pull(skb, 2);
3665 len = skb->len;
3667 if (l2cap_check_fcs(pi, skb))
3668 goto drop;
3670 if (__is_sar_start(control))
3671 len -= 2;
3673 if (pi->fcs == L2CAP_FCS_CRC16)
3674 len -= 2;
3676 if (len > pi->mps || len < 0 || __is_sframe(control))
3677 goto drop;
3679 tx_seq = __get_txseq(control);
3681 if (chan->expected_tx_seq == tx_seq)
3682 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3683 else
3684 chan->expected_tx_seq = (tx_seq + 1) % 64;
3686 l2cap_streaming_reassembly_sdu(chan, skb, control);
3688 goto done;
3690 default:
3691 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3692 break;
3695 drop:
3696 kfree_skb(skb);
3698 done:
3699 if (sk)
3700 bh_unlock_sock(sk);
3702 return 0;
/* Deliver a connectionless (G-frame) payload to the socket bound to
 * the given PSM, honoring its receive MTU.  Consumes the skb.  Always
 * returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	/* sk is NULL only on the no-socket path, where it was never
	 * locked; otherwise the unlock pairs with bh_lock_sock() above */
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3735 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3737 struct l2cap_hdr *lh = (void *) skb->data;
3738 u16 cid, len;
3739 __le16 psm;
3741 skb_pull(skb, L2CAP_HDR_SIZE);
3742 cid = __le16_to_cpu(lh->cid);
3743 len = __le16_to_cpu(lh->len);
3745 if (len != skb->len) {
3746 kfree_skb(skb);
3747 return;
3750 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3752 switch (cid) {
3753 case L2CAP_CID_LE_SIGNALING:
3754 case L2CAP_CID_SIGNALING:
3755 l2cap_sig_channel(conn, skb);
3756 break;
3758 case L2CAP_CID_CONN_LESS:
3759 psm = get_unaligned_le16(skb->data);
3760 skb_pull(skb, 2);
3761 l2cap_conless_channel(conn, psm, skb);
3762 break;
3764 default:
3765 l2cap_data_channel(conn, cid, skb);
3766 break;
3770 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection request from bdaddr.
 *
 * Scans the listening L2CAP sockets to decide whether to accept the
 * link and whether to request the master role.  Listeners bound to the
 * exact local adapter address take precedence over wildcard
 * (BDADDR_ANY) listeners.  Returns the accumulated HCI_LM_* link-mode
 * flags, or -EINVAL for non-ACL link types.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Exact match on the local adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener: used only if no exact match */
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
3805 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3807 struct l2cap_conn *conn;
3809 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3811 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3812 return -EINVAL;
3814 if (!status) {
3815 conn = l2cap_conn_add(hcon, status);
3816 if (conn)
3817 l2cap_conn_ready(conn);
3818 } else
3819 l2cap_conn_del(hcon, bt_err(status));
3821 return 0;
3824 static int l2cap_disconn_ind(struct hci_conn *hcon)
3826 struct l2cap_conn *conn = hcon->l2cap_data;
3828 BT_DBG("hcon %p", hcon);
3830 if (hcon->type != ACL_LINK || !conn)
3831 return 0x13;
3833 return conn->disc_reason;
3836 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3838 BT_DBG("hcon %p reason %d", hcon, reason);
3840 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3841 return -EINVAL;
3843 l2cap_conn_del(hcon, bt_err(reason));
3845 return 0;
3848 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3850 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3851 return;
3853 if (encrypt == 0x00) {
3854 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3855 l2cap_sock_clear_timer(sk);
3856 l2cap_sock_set_timer(sk, HZ * 5);
3857 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3858 __l2cap_sock_close(sk, ECONNREFUSED);
3859 } else {
3860 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3861 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption completed on the link.
 *
 * Walks every channel on the connection under chan_lock: established
 * channels get the encryption check, channels in BT_CONNECT send their
 * deferred Connection Request on success, and channels in BT_CONNECT2
 * answer the pending incoming request with success or a security
 * block.  Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel;
		 * its response handler will take it from here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * Connection Request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				chan->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer to
				 * tear the channel down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
3938 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3940 struct l2cap_conn *conn = hcon->l2cap_data;
3942 if (!conn)
3943 conn = l2cap_conn_add(hcon, 0);
3945 if (!conn)
3946 goto drop;
3948 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3950 if (!(flags & ACL_CONT)) {
3951 struct l2cap_hdr *hdr;
3952 struct l2cap_chan *chan;
3953 u16 cid;
3954 int len;
3956 if (conn->rx_len) {
3957 BT_ERR("Unexpected start frame (len %d)", skb->len);
3958 kfree_skb(conn->rx_skb);
3959 conn->rx_skb = NULL;
3960 conn->rx_len = 0;
3961 l2cap_conn_unreliable(conn, ECOMM);
3964 /* Start fragment always begin with Basic L2CAP header */
3965 if (skb->len < L2CAP_HDR_SIZE) {
3966 BT_ERR("Frame is too short (len %d)", skb->len);
3967 l2cap_conn_unreliable(conn, ECOMM);
3968 goto drop;
3971 hdr = (struct l2cap_hdr *) skb->data;
3972 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3973 cid = __le16_to_cpu(hdr->cid);
3975 if (len == skb->len) {
3976 /* Complete frame received */
3977 l2cap_recv_frame(conn, skb);
3978 return 0;
3981 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3983 if (skb->len > len) {
3984 BT_ERR("Frame is too long (len %d, expected len %d)",
3985 skb->len, len);
3986 l2cap_conn_unreliable(conn, ECOMM);
3987 goto drop;
3990 chan = l2cap_get_chan_by_scid(conn, cid);
3992 if (chan && chan->sk) {
3993 struct sock *sk = chan->sk;
3995 if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
3996 BT_ERR("Frame exceeding recv MTU (len %d, "
3997 "MTU %d)", len,
3998 l2cap_pi(sk)->imtu);
3999 bh_unlock_sock(sk);
4000 l2cap_conn_unreliable(conn, ECOMM);
4001 goto drop;
4003 bh_unlock_sock(sk);
4006 /* Allocate skb for the complete frame (with header) */
4007 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4008 if (!conn->rx_skb)
4009 goto drop;
4011 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4012 skb->len);
4013 conn->rx_len = len - skb->len;
4014 } else {
4015 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4017 if (!conn->rx_len) {
4018 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4019 l2cap_conn_unreliable(conn, ECOMM);
4020 goto drop;
4023 if (skb->len > conn->rx_len) {
4024 BT_ERR("Fragment is too long (len %d, expected %d)",
4025 skb->len, conn->rx_len);
4026 kfree_skb(conn->rx_skb);
4027 conn->rx_skb = NULL;
4028 conn->rx_len = 0;
4029 l2cap_conn_unreliable(conn, ECOMM);
4030 goto drop;
4033 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4034 skb->len);
4035 conn->rx_len -= skb->len;
4037 if (!conn->rx_len) {
4038 /* Complete frame received */
4039 l2cap_recv_frame(conn, conn->rx_skb);
4040 conn->rx_skb = NULL;
4044 drop:
4045 kfree_skb(skb);
4046 return 0;
4049 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4051 struct sock *sk;
4052 struct hlist_node *node;
4054 read_lock_bh(&l2cap_sk_list.lock);
4056 sk_for_each(sk, node, &l2cap_sk_list.head) {
4057 struct l2cap_pinfo *pi = l2cap_pi(sk);
4059 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4060 batostr(&bt_sk(sk)->src),
4061 batostr(&bt_sk(sk)->dst),
4062 sk->sk_state, __le16_to_cpu(pi->psm),
4063 pi->scid, pi->dcid,
4064 pi->imtu, pi->omtu, pi->sec_level,
4065 pi->mode);
4068 read_unlock_bh(&l2cap_sk_list.lock);
4070 return 0;
4073 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4075 return single_open(file, l2cap_debugfs_show, inode->i_private);
4078 static const struct file_operations l2cap_debugfs_fops = {
4079 .open = l2cap_debugfs_open,
4080 .read = seq_read,
4081 .llseek = seq_lseek,
4082 .release = single_release,
4085 static struct dentry *l2cap_debugfs;
4087 static struct hci_proto l2cap_hci_proto = {
4088 .name = "L2CAP",
4089 .id = HCI_PROTO_L2CAP,
4090 .connect_ind = l2cap_connect_ind,
4091 .connect_cfm = l2cap_connect_cfm,
4092 .disconn_ind = l2cap_disconn_ind,
4093 .disconn_cfm = l2cap_disconn_cfm,
4094 .security_cfm = l2cap_security_cfm,
4095 .recv_acldata = l2cap_recv_acldata
4098 int __init l2cap_init(void)
4100 int err;
4102 err = l2cap_init_sockets();
4103 if (err < 0)
4104 return err;
4106 _busy_wq = create_singlethread_workqueue("l2cap");
4107 if (!_busy_wq) {
4108 err = -ENOMEM;
4109 goto error;
4112 err = hci_register_proto(&l2cap_hci_proto);
4113 if (err < 0) {
4114 BT_ERR("L2CAP protocol registration failed");
4115 bt_sock_unregister(BTPROTO_L2CAP);
4116 goto error;
4119 if (bt_debugfs) {
4120 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4121 bt_debugfs, NULL, &l2cap_debugfs_fops);
4122 if (!l2cap_debugfs)
4123 BT_ERR("Failed to create L2CAP debug file");
4126 return 0;
4128 error:
4129 destroy_workqueue(_busy_wq);
4130 l2cap_cleanup_sockets();
4131 return err;
4134 void l2cap_exit(void)
4136 debugfs_remove(l2cap_debugfs);
4138 flush_workqueue(_busy_wq);
4139 destroy_workqueue(_busy_wq);
4141 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4142 BT_ERR("L2CAP protocol unregistration failed");
4144 l2cap_cleanup_sockets();
4147 module_param(disable_ertm, bool, 0644);
4148 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");