Bluetooth: Improve error message on wrong link type
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap_core.c
blobb5a1ce06e1c0da4309ba0027c468e65b3aeb9c44
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Module parameter: non-zero disables ERTM and streaming channel modes. */
58 int disable_ertm;
/* Locally supported L2CAP feature mask advertised in info responses. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; 0x02 presumably marks the signalling
 * channel — TODO confirm against the L2CAP fixed-channels table. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue used to defer "local busy" handling out of atomic context. */
63 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, protected by its rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 /* ---- L2CAP channels ---- */
77 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
79 struct sock *s;
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
81 if (l2cap_pi(s)->dcid == cid)
82 break;
84 return s;
87 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
89 struct sock *s;
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
91 if (l2cap_pi(s)->scid == cid)
92 break;
94 return s;
97 /* Find channel with given SCID.
98 * Returns locked socket */
99 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
101 struct sock *s;
102 read_lock(&l->lock);
103 s = __l2cap_get_chan_by_scid(l, cid);
104 if (s)
105 bh_lock_sock(s);
106 read_unlock(&l->lock);
107 return s;
110 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
112 struct sock *s;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->ident == ident)
115 break;
117 return s;
120 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
122 struct sock *s;
123 read_lock(&l->lock);
124 s = __l2cap_get_chan_by_ident(l, ident);
125 if (s)
126 bh_lock_sock(s);
127 read_unlock(&l->lock);
128 return s;
131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
133 u16 cid = L2CAP_CID_DYN_START;
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
137 return cid;
140 return 0;
143 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
145 sock_hold(sk);
147 if (l->head)
148 l2cap_pi(l->head)->prev_c = sk;
150 l2cap_pi(sk)->next_c = l->head;
151 l2cap_pi(sk)->prev_c = NULL;
152 l->head = sk;
155 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
157 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
159 write_lock_bh(&l->lock);
160 if (sk == l->head)
161 l->head = next;
163 if (next)
164 l2cap_pi(next)->prev_c = prev;
165 if (prev)
166 l2cap_pi(prev)->next_c = next;
167 write_unlock_bh(&l->lock);
169 __sock_put(sk);
/* Attach a channel socket to the connection and assign its CIDs/MTU
 * according to socket type. Caller holds the channel-list write lock. */
172 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
174 struct l2cap_chan_list *l = &conn->chan_list;
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: default disconnect reason — presumably "remote user terminated
 * connection"; TODO confirm against the HCI error code table. */
179 conn->disc_reason = 0x13;
181 l2cap_pi(sk)->conn = conn;
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 if (conn->hcon->type == LE_LINK) {
185 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
189 } else {
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
194 } else if (sk->sk_type == SOCK_DGRAM) {
195 /* Connectionless socket */
196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
197 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
198 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
199 } else {
200 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
202 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
203 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* Link into the connection's list and notify a listening parent. */
206 __l2cap_chan_link(l, sk);
208 if (parent)
209 bt_accept_enqueue(parent, sk);
212 /* Delete channel.
213 * Must be called on the locked socket. */
214 void l2cap_chan_del(struct sock *sk, int err)
216 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
217 struct sock *parent = bt_sk(sk)->parent;
219 l2cap_sock_clear_timer(sk);
221 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
223 if (conn) {
224 /* Unlink from channel list */
225 l2cap_chan_unlink(&conn->chan_list, sk);
226 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL reference taken when the channel was added. */
227 hci_conn_put(conn->hcon);
/* Mark the socket dead; err (if non-zero) is reported to the owner. */
230 sk->sk_state = BT_CLOSED;
231 sock_set_flag(sk, SOCK_ZAPPED);
233 if (err)
234 sk->sk_err = err;
/* Pending accept: detach from parent and wake the listener;
 * otherwise wake whoever sleeps on this socket's state. */
236 if (parent) {
237 bt_accept_unlink(sk);
238 parent->sk_data_ready(parent, 0);
239 } else
240 sk->sk_state_change(sk);
242 skb_queue_purge(TX_QUEUE(sk));
/* ERTM teardown: stop all timers and flush retransmission state. */
244 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
245 struct srej_list *l, *tmp;
247 del_timer(&l2cap_pi(sk)->retrans_timer);
248 del_timer(&l2cap_pi(sk)->monitor_timer);
249 del_timer(&l2cap_pi(sk)->ack_timer);
251 skb_queue_purge(SREJ_QUEUE(sk));
252 skb_queue_purge(BUSY_QUEUE(sk));
/* Free any outstanding SREJ bookkeeping entries. */
254 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
255 list_del(&l->list);
256 kfree(l);
261 static inline u8 l2cap_get_auth_type(struct sock *sk)
263 if (sk->sk_type == SOCK_RAW) {
264 switch (l2cap_pi(sk)->sec_level) {
265 case BT_SECURITY_HIGH:
266 return HCI_AT_DEDICATED_BONDING_MITM;
267 case BT_SECURITY_MEDIUM:
268 return HCI_AT_DEDICATED_BONDING;
269 default:
270 return HCI_AT_NO_BONDING;
272 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
274 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
276 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
277 return HCI_AT_NO_BONDING_MITM;
278 else
279 return HCI_AT_NO_BONDING;
280 } else {
281 switch (l2cap_pi(sk)->sec_level) {
282 case BT_SECURITY_HIGH:
283 return HCI_AT_GENERAL_BONDING_MITM;
284 case BT_SECURITY_MEDIUM:
285 return HCI_AT_GENERAL_BONDING;
286 default:
287 return HCI_AT_NO_BONDING;
292 /* Service level security */
293 static inline int l2cap_check_security(struct sock *sk)
295 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
296 __u8 auth_type;
298 auth_type = l2cap_get_auth_type(sk);
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
301 auth_type);
304 u8 l2cap_get_ident(struct l2cap_conn *conn)
306 u8 id;
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
314 spin_lock_bh(&conn->lock);
316 if (++conn->tx_ident > 128)
317 conn->tx_ident = 1;
319 id = conn->tx_ident;
321 spin_unlock_bh(&conn->lock);
323 return id;
326 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 u8 flags;
331 BT_DBG("code 0x%2.2x", code);
333 if (!skb)
334 return;
336 if (lmp_no_flush_capable(conn->hcon->hdev))
337 flags = ACL_START_NO_FLUSH;
338 else
339 flags = ACL_START;
341 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM supervisory frame (S-frame) carrying the
 * given control field, adding F/P bits and optional FCS as configured. */
344 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
346 struct sk_buff *skb;
347 struct l2cap_hdr *lh;
348 struct l2cap_conn *conn = pi->conn;
349 struct sock *sk = (struct sock *)pi;
/* hlen = L2CAP header + 2-byte control field (+2 below if FCS in use). */
350 int count, hlen = L2CAP_HDR_SIZE + 2;
351 u8 flags;
353 if (sk->sk_state != BT_CONNECTED)
354 return;
356 if (pi->fcs == L2CAP_FCS_CRC16)
357 hlen += 2;
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume any pending Final bit. */
364 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Consume any pending Poll bit. */
369 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
375 if (!skb)
376 return;
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything before the FCS itself). */
383 if (pi->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 if (lmp_no_flush_capable(conn->hcon->hdev))
389 flags = ACL_START_NO_FLUSH;
390 else
391 flags = ACL_START;
393 hci_send_acl(pi->conn->hcon, skb, flags);
396 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
398 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY;
400 pi->conn_state |= L2CAP_CONN_RNR_SENT;
401 } else
402 control |= L2CAP_SUPER_RCV_READY;
404 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
406 l2cap_send_sframe(pi, control);
409 static inline int __l2cap_no_conn_pending(struct sock *sk)
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Begin establishing this channel: if the remote feature mask is known
 * send a Connect Request (security permitting), otherwise first issue
 * an Information Request for the feature mask. */
414 static void l2cap_do_start(struct sock *sk)
416 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
418 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange started but not finished yet: wait for it. */
419 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
420 return;
421 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
423 struct l2cap_conn_req req;
424 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
425 req.psm = l2cap_pi(sk)->psm;
427 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
428 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
430 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
431 L2CAP_CONN_REQ, sizeof(req), &req);
433 } else {
/* First channel on this link: query remote features and arm the
 * info timer so we do not wait forever for the response. */
434 struct l2cap_info_req req;
435 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
437 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
438 conn->info_ident = l2cap_get_ident(conn);
440 mod_timer(&conn->info_timer, jiffies +
441 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
443 l2cap_send_cmd(conn, conn->info_ident,
444 L2CAP_INFO_REQ, sizeof(req), &req);
448 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
450 u32 local_feat_mask = l2cap_feat_mask;
451 if (!disable_ertm)
452 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
454 switch (mode) {
455 case L2CAP_MODE_ERTM:
456 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
457 case L2CAP_MODE_STREAMING:
458 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
459 default:
460 return 0x00;
464 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
466 struct l2cap_disconn_req req;
468 if (!conn)
469 return;
471 skb_queue_purge(TX_QUEUE(sk));
473 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
474 del_timer(&l2cap_pi(sk)->retrans_timer);
475 del_timer(&l2cap_pi(sk)->monitor_timer);
476 del_timer(&l2cap_pi(sk)->ack_timer);
479 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
480 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
481 l2cap_send_cmd(conn, l2cap_get_ident(conn),
482 L2CAP_DISCONN_REQ, sizeof(req), &req);
484 sk->sk_state = BT_DISCONN;
485 sk->sk_err = err;
488 /* ---- L2CAP connections ---- */
489 static void l2cap_conn_start(struct l2cap_conn *conn)
491 struct l2cap_chan_list *l = &conn->chan_list;
492 struct sock_del_list del, *tmp1, *tmp2;
493 struct sock *sk;
495 BT_DBG("conn %p", conn);
497 INIT_LIST_HEAD(&del.list);
499 read_lock(&l->lock);
501 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
502 bh_lock_sock(sk);
504 if (sk->sk_type != SOCK_SEQPACKET &&
505 sk->sk_type != SOCK_STREAM) {
506 bh_unlock_sock(sk);
507 continue;
510 if (sk->sk_state == BT_CONNECT) {
511 struct l2cap_conn_req req;
513 if (!l2cap_check_security(sk) ||
514 !__l2cap_no_conn_pending(sk)) {
515 bh_unlock_sock(sk);
516 continue;
519 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
520 conn->feat_mask)
521 && l2cap_pi(sk)->conf_state &
522 L2CAP_CONF_STATE2_DEVICE) {
523 tmp1 = kzalloc(sizeof(struct sock_del_list),
524 GFP_ATOMIC);
525 tmp1->sk = sk;
526 list_add_tail(&tmp1->list, &del.list);
527 bh_unlock_sock(sk);
528 continue;
531 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
532 req.psm = l2cap_pi(sk)->psm;
534 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
535 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_REQ, sizeof(req), &req);
540 } else if (sk->sk_state == BT_CONNECT2) {
541 struct l2cap_conn_rsp rsp;
542 char buf[128];
543 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
544 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
546 if (l2cap_check_security(sk)) {
547 if (bt_sk(sk)->defer_setup) {
548 struct sock *parent = bt_sk(sk)->parent;
549 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
550 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
551 parent->sk_data_ready(parent, 0);
553 } else {
554 sk->sk_state = BT_CONFIG;
555 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
556 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
558 } else {
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
563 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
564 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
566 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
567 rsp.result != L2CAP_CR_SUCCESS) {
568 bh_unlock_sock(sk);
569 continue;
572 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 l2cap_build_conf_req(sk, buf), buf);
575 l2cap_pi(sk)->num_conf_req++;
578 bh_unlock_sock(sk);
581 read_unlock(&l->lock);
583 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
584 bh_lock_sock(tmp1->sk);
585 __l2cap_sock_close(tmp1->sk, ECONNRESET);
586 bh_unlock_sock(tmp1->sk);
587 list_del(&tmp1->list);
588 kfree(tmp1);
592 /* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
/* Prefers an exact source-address match; falls back to a BDADDR_ANY
 * listener. On success the socket is returned bh_locked. */
595 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
597 struct sock *s, *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
600 read_lock(&l2cap_sk_list.lock);
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
604 continue;
606 if (l2cap_pi(sk)->scid == cid) {
607 /* Exact match. */
608 if (!bacmp(&bt_sk(sk)->src, src))
609 break;
611 /* Closest match */
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
613 sk1 = sk;
/* node is non-NULL only when the loop broke on an exact match. */
616 s = node ? sk : sk1;
617 if (s)
618 bh_lock_sock(s);
619 read_unlock(&l2cap_sk_list.lock);
621 return s;
/* Handle an incoming LE connection: find the LE-data listener, clone a
 * child socket, attach it to the connection and wake the listener.
 * Note: the success path falls through to 'clean', which only unlocks
 * the parent socket taken by l2cap_get_sock_by_scid(). */
624 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
626 struct l2cap_chan_list *list = &conn->chan_list;
627 struct sock *parent, *uninitialized_var(sk);
629 BT_DBG("");
631 /* Check if we have socket listening on cid */
632 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
633 conn->src);
634 if (!parent)
635 return;
637 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog);
640 goto clean;
643 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
644 if (!sk)
645 goto clean;
647 write_lock_bh(&list->lock);
/* Hold the ACL while this channel exists (released in chan_del). */
649 hci_conn_hold(conn->hcon);
651 l2cap_sock_init(sk, parent);
652 bacpy(&bt_sk(sk)->src, conn->src);
653 bacpy(&bt_sk(sk)->dst, conn->dst);
655 __l2cap_chan_add(conn, sk, parent);
657 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
659 sk->sk_state = BT_CONNECTED;
660 parent->sk_data_ready(parent, 0);
662 write_unlock_bh(&list->lock);
664 clean:
665 bh_unlock_sock(parent);
/* Called when the underlying ACL/LE link is fully established: promote
 * waiting channels to BT_CONNECTED or start L2CAP-level setup. */
668 static void l2cap_conn_ready(struct l2cap_conn *conn)
670 struct l2cap_chan_list *l = &conn->chan_list;
671 struct sock *sk;
673 BT_DBG("conn %p", conn);
/* Incoming LE link: accept it on the LE data listener first. */
675 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
676 l2cap_le_conn_ready(conn);
678 read_lock(&l->lock);
680 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
681 bh_lock_sock(sk);
/* LE channels need no L2CAP connect/config exchange. */
683 if (conn->hcon->type == LE_LINK) {
684 l2cap_sock_clear_timer(sk);
685 sk->sk_state = BT_CONNECTED;
686 sk->sk_state_change(sk);
/* Raw/datagram sockets are ready as soon as the link is up;
 * connection-oriented ones must run the L2CAP state machine. */
689 if (sk->sk_type != SOCK_SEQPACKET &&
690 sk->sk_type != SOCK_STREAM) {
691 l2cap_sock_clear_timer(sk);
692 sk->sk_state = BT_CONNECTED;
693 sk->sk_state_change(sk);
694 } else if (sk->sk_state == BT_CONNECT)
695 l2cap_do_start(sk);
697 bh_unlock_sock(sk);
700 read_unlock(&l->lock);
703 /* Notify sockets that we cannot guaranty reliability anymore */
704 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
706 struct l2cap_chan_list *l = &conn->chan_list;
707 struct sock *sk;
709 BT_DBG("conn %p", conn);
711 read_lock(&l->lock);
713 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
714 if (l2cap_pi(sk)->force_reliable)
715 sk->sk_err = err;
718 read_unlock(&l->lock);
721 static void l2cap_info_timeout(unsigned long arg)
723 struct l2cap_conn *conn = (void *) arg;
725 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
726 conn->info_ident = 0;
728 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * or LE link. Returns NULL on allocation failure or non-zero status. */
731 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
733 struct l2cap_conn *conn = hcon->l2cap_data;
735 if (conn || status)
736 return conn;
738 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)
739 if (!conn)
740 return NULL;
742 hcon->l2cap_data = conn;
743 conn->hcon = hcon;
745 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links use the controller's LE MTU when one is reported. */
747 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
748 conn->mtu = hcon->hdev->le_mtu;
749 else
750 conn->mtu = hcon->hdev->acl_mtu;
752 conn->src = &hcon->hdev->bdaddr;
753 conn->dst = &hcon->dst;
755 conn->feat_mask = 0;
757 spin_lock_init(&conn->lock);
758 rwlock_init(&conn->chan_list.lock);
/* LE links skip the feature-mask exchange, so no info timer needed. */
760 if (hcon->type != LE_LINK)
761 setup_timer(&conn->info_timer, l2cap_info_timeout,
762 (unsigned long) conn);
/* 0x13: default disconnect reason — TODO confirm HCI error meaning. */
764 conn->disc_reason = 0x13;
766 return conn;
/* Destroy the L2CAP connection object: kill every channel with 'err',
 * stop the info timer and free the per-link state. */
769 static void l2cap_conn_del(struct hci_conn *hcon, int err)
771 struct l2cap_conn *conn = hcon->l2cap_data;
772 struct sock *sk;
774 if (!conn)
775 return;
777 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Any partially reassembled frame is dropped with the link. */
779 kfree_skb(conn->rx_skb);
781 /* Kill channels */
782 while ((sk = conn->chan_list.head)) {
783 bh_lock_sock(sk);
784 l2cap_chan_del(sk, err);
785 bh_unlock_sock(sk);
786 l2cap_sock_kill(sk);
/* The timer only exists once a feature-mask request was sent. */
789 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
790 del_timer_sync(&conn->info_timer);
792 hcon->l2cap_data = NULL;
793 kfree(conn);
796 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
798 struct l2cap_chan_list *l = &conn->chan_list;
799 write_lock_bh(&l->lock);
800 __l2cap_chan_add(conn, sk, parent);
801 write_unlock_bh(&l->lock);
804 /* ---- Socket interface ---- */
806 /* Find socket with psm and source bdaddr.
807 * Returns closest match.
/* Prefers an exact source-address match; falls back to a BDADDR_ANY
 * listener. Unlike the scid variant, the socket is NOT locked. */
809 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
811 struct sock *sk = NULL, *sk1 = NULL;
812 struct hlist_node *node;
814 read_lock(&l2cap_sk_list.lock);
816 sk_for_each(sk, node, &l2cap_sk_list.head) {
817 if (state && sk->sk_state != state)
818 continue;
820 if (l2cap_pi(sk)->psm == psm) {
821 /* Exact match. */
822 if (!bacmp(&bt_sk(sk)->src, src))
823 break;
825 /* Closest match */
826 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
827 sk1 = sk;
831 read_unlock(&l2cap_sk_list.lock);
/* node is non-NULL only when the loop broke on an exact match. */
833 return node ? sk : sk1;
/* Connect this socket: route to an HCI device, create/reuse the ACL or
 * LE link, attach the channel and start L2CAP-level setup.
 * Returns 0 on success or a negative errno. */
836 int l2cap_do_connect(struct sock *sk)
838 bdaddr_t *src = &bt_sk(sk)->src;
839 bdaddr_t *dst = &bt_sk(sk)->dst;
840 struct l2cap_conn *conn;
841 struct hci_conn *hcon;
842 struct hci_dev *hdev;
843 __u8 auth_type;
844 int err;
846 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
847 l2cap_pi(sk)->psm);
/* hci_get_route() takes a reference on hdev; dropped in 'done'. */
849 hdev = hci_get_route(dst, src);
850 if (!hdev)
851 return -EHOSTUNREACH;
853 hci_dev_lock_bh(hdev);
855 auth_type = l2cap_get_auth_type(sk);
/* The LE data CID selects an LE link; everything else uses ACL. */
857 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
858 hcon = hci_connect(hdev, LE_LINK, dst,
859 l2cap_pi(sk)->sec_level, auth_type);
860 else
861 hcon = hci_connect(hdev, ACL_LINK, dst,
862 l2cap_pi(sk)->sec_level, auth_type);
864 if (IS_ERR(hcon)) {
865 err = PTR_ERR(hcon);
866 goto done;
869 conn = l2cap_conn_add(hcon, 0);
870 if (!conn) {
871 hci_conn_put(hcon);
872 err = -ENOMEM;
873 goto done;
876 /* Update source addr of the socket */
877 bacpy(src, conn->src);
879 l2cap_chan_add(conn, sk, NULL);
881 sk->sk_state = BT_CONNECT;
882 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link may already be up (shared ACL): finish setup immediately. */
884 if (hcon->state == BT_CONNECTED) {
885 if (sk->sk_type != SOCK_SEQPACKET &&
886 sk->sk_type != SOCK_STREAM) {
887 l2cap_sock_clear_timer(sk);
888 if (l2cap_check_security(sk))
889 sk->sk_state = BT_CONNECTED;
890 } else
891 l2cap_do_start(sk);
894 err = 0;
896 done:
897 hci_dev_unlock_bh(hdev);
898 hci_dev_put(hdev);
899 return err;
/* Sleep (interruptibly, socket lock released) until all transmitted
 * ERTM I-frames have been acknowledged or the connection goes away.
 * Returns 0, a socket error, or an interrupt errno. */
902 int __l2cap_wait_ack(struct sock *sk)
904 DECLARE_WAITQUEUE(wait, current);
905 int err = 0;
/* Poll interval: re-check roughly five times a second. */
906 int timeo = HZ/5;
908 add_wait_queue(sk_sleep(sk), &wait);
909 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
910 set_current_state(TASK_INTERRUPTIBLE);
912 if (!timeo)
913 timeo = HZ/5;
915 if (signal_pending(current)) {
916 err = sock_intr_errno(timeo);
917 break;
/* Drop the socket lock while sleeping so RX can make progress. */
920 release_sock(sk);
921 timeo = schedule_timeout(timeo);
922 lock_sock(sk);
924 err = sock_error(sk);
925 if (err)
926 break;
928 set_current_state(TASK_RUNNING);
929 remove_wait_queue(sk_sleep(sk), &wait);
930 return err;
/* ERTM monitor timer: the peer failed to answer our poll. Retry up to
 * remote_max_tx times, then drop the channel. */
933 static void l2cap_monitor_timeout(unsigned long arg)
935 struct sock *sk = (void *) arg;
937 BT_DBG("sk %p", sk);
939 bh_lock_sock(sk);
/* Retry budget exhausted: tear the channel down. */
940 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
941 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
942 bh_unlock_sock(sk);
943 return;
946 l2cap_pi(sk)->retry_count++;
947 __mod_monitor_timer();
/* Re-poll the peer with an RR/RNR carrying the P bit. */
949 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
950 bh_unlock_sock(sk);
/* ERTM retransmission timer: no ack arrived in time. Enter the WAIT_F
 * state and poll the peer; the monitor timer takes over from here. */
953 static void l2cap_retrans_timeout(unsigned long arg)
955 struct sock *sk = (void *) arg;
957 BT_DBG("sk %p", sk);
959 bh_lock_sock(sk);
960 l2cap_pi(sk)->retry_count = 1;
961 __mod_monitor_timer();
963 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
965 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
966 bh_unlock_sock(sk);
969 static void l2cap_drop_acked_frames(struct sock *sk)
971 struct sk_buff *skb;
973 while ((skb = skb_peek(TX_QUEUE(sk))) &&
974 l2cap_pi(sk)->unacked_frames) {
975 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
976 break;
978 skb = skb_dequeue(TX_QUEUE(sk));
979 kfree_skb(skb);
981 l2cap_pi(sk)->unacked_frames--;
984 if (!l2cap_pi(sk)->unacked_frames)
985 del_timer(&l2cap_pi(sk)->retrans_timer);
988 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
990 struct l2cap_pinfo *pi = l2cap_pi(sk);
991 struct hci_conn *hcon = pi->conn->hcon;
992 u16 flags;
994 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
996 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
997 flags = ACL_START_NO_FLUSH;
998 else
999 flags = ACL_START;
1001 hci_send_acl(hcon, skb, flags);
/* Streaming mode TX: drain the whole queue, stamping each frame with
 * the next TxSeq and (optionally) a CRC16 FCS. No retransmission. */
1004 void l2cap_streaming_send(struct sock *sk)
1006 struct sk_buff *skb;
1007 struct l2cap_pinfo *pi = l2cap_pi(sk);
1008 u16 control, fcs;
1010 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
/* Patch the TxSeq into the control field already in the frame. */
1011 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1012 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1013 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything except its own trailing two bytes. */
1015 if (pi->fcs == L2CAP_FCS_CRC16) {
1016 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1017 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1020 l2cap_do_send(sk, skb);
/* TxSeq is modulo-64 per the ERTM/streaming sequence space. */
1022 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Retransmit the single queued I-frame with the given TxSeq (SREJ
 * handling). Disconnects if the frame already hit remote_max_tx. */
1026 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1028 struct l2cap_pinfo *pi = l2cap_pi(sk);
1029 struct sk_buff *skb, *tx_skb;
1030 u16 control, fcs;
1032 skb = skb_peek(TX_QUEUE(sk));
1033 if (!skb)
1034 return;
/* Walk the queue looking for the requested sequence number. */
1036 do {
1037 if (bt_cb(skb)->tx_seq == tx_seq)
1038 break;
1040 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1041 return;
1043 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1045 if (pi->remote_max_tx &&
1046 bt_cb(skb)->retries == pi->remote_max_tx) {
1047 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1048 return;
/* Clone shares the data buffer; the original stays queued. */
1051 tx_skb = skb_clone(skb, GFP_ATOMIC);
1052 bt_cb(skb)->retries++;
1053 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1055 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1056 control |= L2CAP_CTRL_FINAL;
1057 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Refresh ReqSeq and restore the original TxSeq in the control. */
1060 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1061 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1063 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1065 if (pi->fcs == L2CAP_FCS_CRC16) {
1066 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1067 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1070 l2cap_do_send(sk, tx_skb);
/* ERTM TX path: transmit frames from sk_send_head while the TX window
 * has room. Returns the number of frames sent or a negative errno. */
1073 int l2cap_ertm_send(struct sock *sk)
1075 struct sk_buff *skb, *tx_skb;
1076 struct l2cap_pinfo *pi = l2cap_pi(sk);
1077 u16 control, fcs;
1078 int nsent = 0;
1080 if (sk->sk_state != BT_CONNECTED)
1081 return -ENOTCONN;
1083 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
/* A frame that already reached the retry limit kills the channel. */
1085 if (pi->remote_max_tx &&
1086 bt_cb(skb)->retries == pi->remote_max_tx) {
1087 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1088 break;
/* Clone for transmission; the original stays queued for retransmit.
 * NOTE(review): clone and original share the data buffer, so the
 * skb->data writes below also update tx_skb — confirm intended. */
1091 tx_skb = skb_clone(skb, GFP_ATOMIC);
1093 bt_cb(skb)->retries++;
/* Keep only the SAR bits; rebuild ReqSeq/TxSeq/F below. */
1095 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1096 control &= L2CAP_CTRL_SAR;
1098 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1099 control |= L2CAP_CTRL_FINAL;
1100 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1102 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1103 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1104 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1107 if (pi->fcs == L2CAP_FCS_CRC16) {
1108 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1109 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1112 l2cap_do_send(sk, tx_skb);
1114 __mod_retrans_timer();
1116 bt_cb(skb)->tx_seq = pi->next_tx_seq;
/* Modulo-64 ERTM sequence space. */
1117 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1119 pi->unacked_frames++;
1120 pi->frames_sent++;
1122 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1123 sk->sk_send_head = NULL;
1124 else
1125 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1127 nsent++;
1130 return nsent;
1133 static int l2cap_retransmit_frames(struct sock *sk)
1135 struct l2cap_pinfo *pi = l2cap_pi(sk);
1136 int ret;
1138 if (!skb_queue_empty(TX_QUEUE(sk)))
1139 sk->sk_send_head = TX_QUEUE(sk)->next;
1141 pi->next_tx_seq = pi->expected_ack_seq;
1142 ret = l2cap_ertm_send(sk);
1143 return ret;
1146 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1148 struct sock *sk = (struct sock *)pi;
1149 u16 control = 0;
1151 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1153 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1154 control |= L2CAP_SUPER_RCV_NOT_READY;
1155 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1156 l2cap_send_sframe(pi, control);
1157 return;
1160 if (l2cap_ertm_send(sk) > 0)
1161 return;
1163 control |= L2CAP_SUPER_RCV_READY;
1164 l2cap_send_sframe(pi, control);
1167 static void l2cap_send_srejtail(struct sock *sk)
1169 struct srej_list *tail;
1170 u16 control;
1172 control = L2CAP_SUPER_SELECT_REJECT;
1173 control |= L2CAP_CTRL_FINAL;
1175 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1176 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1178 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy 'len' bytes of user data into skb: 'count' bytes into the head,
 * the rest into MTU-sized fragments on the frag list.
 * Returns bytes consumed or a negative errno. */
1181 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1183 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1184 struct sk_buff **frag;
1185 int err, sent = 0;
1187 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1188 return -EFAULT;
1190 sent += count;
1191 len -= count;
1193 /* Continuation fragments (no L2CAP header) */
1194 frag = &skb_shinfo(skb)->frag_list;
1195 while (len) {
1196 count = min_t(unsigned int, conn->mtu, len);
/* bt_skb_send_alloc may block per MSG_DONTWAIT; err set on failure. */
1198 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1199 if (!*frag)
1200 return err;
1201 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1202 return -EFAULT;
1204 sent += count;
1205 len -= count;
1207 frag = &(*frag)->next;
1210 return sent;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * user data from msg. Returns the skb or an ERR_PTR. */
1213 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1215 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1216 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte PSM field. */
1217 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1218 struct l2cap_hdr *lh;
1220 BT_DBG("sk %p len %d", sk, (int)len);
/* Head skb holds at most one link MTU; rest goes to fragments. */
1222 count = min_t(unsigned int, (conn->mtu - hlen), len);
1223 skb = bt_skb_send_alloc(sk, count + hlen,
1224 msg->msg_flags & MSG_DONTWAIT, &err);
1225 if (!skb)
1226 return ERR_PTR(err);
1228 /* Create L2CAP header */
1229 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1230 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1231 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1232 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1234 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1235 if (unlikely(err < 0)) {
1236 kfree_skb(skb);
1237 return ERR_PTR(err);
1239 return skb;
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + user data.
 * Returns the skb or an ERR_PTR. */
1242 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1245 struct sk_buff *skb;
1246 int err, count, hlen = L2CAP_HDR_SIZE;
1247 struct l2cap_hdr *lh;
1249 BT_DBG("sk %p len %d", sk, (int)len);
1251 count = min_t(unsigned int, (conn->mtu - hlen), len);
1252 skb = bt_skb_send_alloc(sk, count + hlen,
1253 msg->msg_flags & MSG_DONTWAIT, &err);
1254 if (!skb)
1255 return ERR_PTR(err);
1257 /* Create L2CAP header */
1258 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1259 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1260 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1262 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1263 if (unlikely(err < 0)) {
1264 kfree_skb(skb);
1265 return ERR_PTR(err);
1267 return skb;
/* Build an ERTM/streaming I-frame PDU: header + control field +
 * optional SDU-length field + data + optional FCS placeholder.
 * The real control/FCS values are patched in at transmit time.
 * Returns the skb or an ERR_PTR. */
1270 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1272 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1273 struct sk_buff *skb;
/* hlen = header + 2-byte control (+2 for SDU len, +2 for FCS below). */
1274 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1275 struct l2cap_hdr *lh;
1277 BT_DBG("sk %p len %d", sk, (int)len);
1279 if (!conn)
1280 return ERR_PTR(-ENOTCONN);
1282 if (sdulen)
1283 hlen += 2;
1285 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1286 hlen += 2;
1288 count = min_t(unsigned int, (conn->mtu - hlen), len);
1289 skb = bt_skb_send_alloc(sk, count + hlen,
1290 msg->msg_flags & MSG_DONTWAIT, &err);
1291 if (!skb)
1292 return ERR_PTR(err);
1294 /* Create L2CAP header */
1295 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1296 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1297 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1298 put_unaligned_le16(control, skb_put(skb, 2));
/* Only SDU-start frames carry the total SDU length. */
1299 if (sdulen)
1300 put_unaligned_le16(sdulen, skb_put(skb, 2));
1302 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1303 if (unlikely(err < 0)) {
1304 kfree_skb(skb);
1305 return ERR_PTR(err);
/* Reserve space for the FCS; value filled in on transmit. */
1308 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1309 put_unaligned_le16(0, skb_put(skb, 2));
1311 bt_cb(skb)->retries = 0;
1312 return skb;
/* Segment one SDU that exceeds the remote MPS into a chain of I-frames
 * (SAR: one START frame carrying the total SDU length, then CONTINUE
 * frames, then a final END frame) and splice them onto the socket's TX
 * queue.
 *
 * On any per-segment allocation error the partially built queue is purged
 * and the error returned; on success returns the number of bytes queued.
 */
int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* First segment: SDU_START control, sdulen = total SDU length */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		/* sdulen is 0 for CONTINUE/END frames */
		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	/* Point sk_send_head at the first newly queued frame if TX was idle */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
/* Mark a channel as fully configured and wake whoever is waiting on it:
 * the connecting task for an outgoing channel, or the listening parent
 * socket for an incoming one.
 */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* Configuration is complete: clear config state and the setup timer */
	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
1384 /* Copy frame to all raw sockets on that connection */
1385 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1387 struct l2cap_chan_list *l = &conn->chan_list;
1388 struct sk_buff *nskb;
1389 struct sock *sk;
1391 BT_DBG("conn %p", conn);
1393 read_lock(&l->lock);
1394 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1395 if (sk->sk_type != SOCK_RAW)
1396 continue;
1398 /* Don't send frame to the socket it came from */
1399 if (skb->sk == sk)
1400 continue;
1401 nskb = skb_clone(skb, GFP_ATOMIC);
1402 if (!nskb)
1403 continue;
1405 if (sock_queue_rcv_skb(sk, nskb))
1406 kfree_skb(nskb);
1408 read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */

/* Build a signalling PDU (L2CAP header + command header + payload) on the
 * proper signalling CID (LE vs BR/EDR), fragmenting payloads larger than
 * the connection MTU into a frag_list of header-less continuation skbs.
 *
 * Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use the LE signalling channel, BR/EDR the classic one */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first fragment */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head and every fragment queued so far */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * For 1/2/4-byte options the value itself is returned in *val; for any
 * other length *val is a pointer into the option buffer (caller must
 * interpret it, e.g. as a struct l2cap_conf_rfc).
 *
 * NOTE(review): opt->len comes straight off the wire and is not checked
 * against the caller's remaining buffer length here — callers bound the
 * loop with the value this function returns; verify no option can run
 * past the end of the received config data.
 *
 * Returns the total number of bytes consumed (header + payload).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Opaque option: hand back a pointer to the raw bytes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option (type, len, value) at *ptr and advance
 * *ptr past it — the mirror of l2cap_get_conf_opt().  For lengths other
 * than 1/2/4 @val is interpreted as a pointer to the raw option payload.
 * The caller guarantees the output buffer is large enough.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Opaque option: val is a pointer to len bytes of payload */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* ERTM ack timer callback: send a pending acknowledgement for the socket
 * stored in the timer argument.  Runs in timer (softirq) context, hence
 * the bh socket lock.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
/* Reset all ERTM per-channel state for a freshly configured channel:
 * sequence counters, the retransmission/monitor/ack timers, the SREJ and
 * busy queues, the busy workqueue item, and the ERTM backlog receive
 * handler.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	/* ERTM frames queued on the backlog need ERTM-aware processing */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1570 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1572 switch (mode) {
1573 case L2CAP_MODE_STREAMING:
1574 case L2CAP_MODE_ERTM:
1575 if (l2cap_mode_supported(mode, remote_feat_mask))
1576 return mode;
1577 /* fall through */
1578 default:
1579 return L2CAP_MODE_BASIC;
/* Build an outgoing Configuration Request for a channel into @data.
 *
 * On the first request only, the channel mode may be renegotiated down via
 * l2cap_select_mode() unless the mode was pinned by the user
 * (L2CAP_CONF_STATE2_DEVICE).  Then MTU, RFC and (for ERTM/streaming) FCS
 * options are appended as applicable.
 *
 * Returns the number of bytes written to @data.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Only re-select the mode on the very first config exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode fixed by the user: do not downgrade */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when different from the default */
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Peers without ERTM/streaming support don't need an
		 * explicit basic-mode RFC option.
		 */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		/* Timeouts are dictated by the receiving side: leave 0 */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a full I-frame fits in the ACL MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request no FCS if we or the peer opted out of it */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1681 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1683 struct l2cap_pinfo *pi = l2cap_pi(sk);
1684 struct l2cap_conf_rsp *rsp = data;
1685 void *ptr = rsp->data;
1686 void *req = pi->conf_req;
1687 int len = pi->conf_len;
1688 int type, hint, olen;
1689 unsigned long val;
1690 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1691 u16 mtu = L2CAP_DEFAULT_MTU;
1692 u16 result = L2CAP_CONF_SUCCESS;
1694 BT_DBG("sk %p", sk);
1696 while (len >= L2CAP_CONF_OPT_SIZE) {
1697 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1699 hint = type & L2CAP_CONF_HINT;
1700 type &= L2CAP_CONF_MASK;
1702 switch (type) {
1703 case L2CAP_CONF_MTU:
1704 mtu = val;
1705 break;
1707 case L2CAP_CONF_FLUSH_TO:
1708 pi->flush_to = val;
1709 break;
1711 case L2CAP_CONF_QOS:
1712 break;
1714 case L2CAP_CONF_RFC:
1715 if (olen == sizeof(rfc))
1716 memcpy(&rfc, (void *) val, olen);
1717 break;
1719 case L2CAP_CONF_FCS:
1720 if (val == L2CAP_FCS_NONE)
1721 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1723 break;
1725 default:
1726 if (hint)
1727 break;
1729 result = L2CAP_CONF_UNKNOWN;
1730 *((u8 *) ptr++) = type;
1731 break;
1735 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1736 goto done;
1738 switch (pi->mode) {
1739 case L2CAP_MODE_STREAMING:
1740 case L2CAP_MODE_ERTM:
1741 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1742 pi->mode = l2cap_select_mode(rfc.mode,
1743 pi->conn->feat_mask);
1744 break;
1747 if (pi->mode != rfc.mode)
1748 return -ECONNREFUSED;
1750 break;
1753 done:
1754 if (pi->mode != rfc.mode) {
1755 result = L2CAP_CONF_UNACCEPT;
1756 rfc.mode = pi->mode;
1758 if (pi->num_conf_rsp == 1)
1759 return -ECONNREFUSED;
1761 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1762 sizeof(rfc), (unsigned long) &rfc);
1766 if (result == L2CAP_CONF_SUCCESS) {
1767 /* Configure output options and let the other side know
1768 * which ones we don't like. */
1770 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1771 result = L2CAP_CONF_UNACCEPT;
1772 else {
1773 pi->omtu = mtu;
1774 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1776 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1778 switch (rfc.mode) {
1779 case L2CAP_MODE_BASIC:
1780 pi->fcs = L2CAP_FCS_NONE;
1781 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1782 break;
1784 case L2CAP_MODE_ERTM:
1785 pi->remote_tx_win = rfc.txwin_size;
1786 pi->remote_max_tx = rfc.max_transmit;
1788 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1789 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1791 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1793 rfc.retrans_timeout =
1794 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1795 rfc.monitor_timeout =
1796 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1798 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1800 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1801 sizeof(rfc), (unsigned long) &rfc);
1803 break;
1805 case L2CAP_MODE_STREAMING:
1806 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1807 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1809 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1811 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1813 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1814 sizeof(rfc), (unsigned long) &rfc);
1816 break;
1818 default:
1819 result = L2CAP_CONF_UNACCEPT;
1821 memset(&rfc, 0, sizeof(rfc));
1822 rfc.mode = pi->mode;
1825 if (result == L2CAP_CONF_SUCCESS)
1826 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1828 rsp->scid = cpu_to_le16(pi->dcid);
1829 rsp->result = cpu_to_le16(result);
1830 rsp->flags = cpu_to_le16(0x0000);
1832 return ptr - data;
1835 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1837 struct l2cap_pinfo *pi = l2cap_pi(sk);
1838 struct l2cap_conf_req *req = data;
1839 void *ptr = req->data;
1840 int type, olen;
1841 unsigned long val;
1842 struct l2cap_conf_rfc rfc;
1844 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1846 while (len >= L2CAP_CONF_OPT_SIZE) {
1847 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1849 switch (type) {
1850 case L2CAP_CONF_MTU:
1851 if (val < L2CAP_DEFAULT_MIN_MTU) {
1852 *result = L2CAP_CONF_UNACCEPT;
1853 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1854 } else
1855 pi->imtu = val;
1856 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1857 break;
1859 case L2CAP_CONF_FLUSH_TO:
1860 pi->flush_to = val;
1861 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1862 2, pi->flush_to);
1863 break;
1865 case L2CAP_CONF_RFC:
1866 if (olen == sizeof(rfc))
1867 memcpy(&rfc, (void *)val, olen);
1869 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1870 rfc.mode != pi->mode)
1871 return -ECONNREFUSED;
1873 pi->fcs = 0;
1875 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1876 sizeof(rfc), (unsigned long) &rfc);
1877 break;
1881 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1882 return -ECONNREFUSED;
1884 pi->mode = rfc.mode;
1886 if (*result == L2CAP_CONF_SUCCESS) {
1887 switch (rfc.mode) {
1888 case L2CAP_MODE_ERTM:
1889 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1890 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1891 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1892 break;
1893 case L2CAP_MODE_STREAMING:
1894 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1898 req->dcid = cpu_to_le16(pi->dcid);
1899 req->flags = cpu_to_le16(0x0000);
1901 return ptr - data;
1904 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1906 struct l2cap_conf_rsp *rsp = data;
1907 void *ptr = rsp->data;
1909 BT_DBG("sk %p", sk);
1911 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1912 rsp->result = cpu_to_le16(result);
1913 rsp->flags = cpu_to_le16(flags);
1915 return ptr - data;
1918 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1920 struct l2cap_pinfo *pi = l2cap_pi(sk);
1921 int type, olen;
1922 unsigned long val;
1923 struct l2cap_conf_rfc rfc;
1925 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1927 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1928 return;
1930 while (len >= L2CAP_CONF_OPT_SIZE) {
1931 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1933 switch (type) {
1934 case L2CAP_CONF_RFC:
1935 if (olen == sizeof(rfc))
1936 memcpy(&rfc, (void *)val, olen);
1937 goto done;
1941 done:
1942 switch (rfc.mode) {
1943 case L2CAP_MODE_ERTM:
1944 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1945 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1946 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1947 break;
1948 case L2CAP_MODE_STREAMING:
1949 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  Only reason 0x0000 ("command not
 * understood") matters here: if it rejects our outstanding Information
 * Request, treat feature discovery as finished and kick off the pending
 * channels.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Handle an incoming Connection Request.
 *
 * Finds a listening socket for the PSM, performs security and backlog
 * checks, allocates a child socket, registers the new channel and replies
 * with a Connection Response (success, pending, or a rejection reason).
 * If our feature-mask exchange with this peer has not happened yet, the
 * response is PEND and an Information Request is sent first.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* authentication failure */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	/* Our local CID for this channel is the dcid from the peer's view */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept before we proceed */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Waiting for link-level security to complete */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer PEND and ask for it */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Channel accepted outright: start configuration immediately */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
/* Handle an incoming Connection Response for a channel we initiated.
 *
 * Looks up the channel by scid (or, for a pending response without one, by
 * the command ident); on success moves to BT_CONFIG and sends our first
 * Configuration Request, on PEND just records the state, and on any other
 * result tears the channel down.  The channel lookup returns with the
 * socket bh-locked, released at the end.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* Pending responses may not carry a source CID yet */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Avoid sending a duplicate config request */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			/* Retry teardown shortly, once userspace lets go */
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2163 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2165 /* FCS is enabled only in ERTM or streaming mode, if one or both
2166 * sides request it.
2168 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2169 pi->fcs = L2CAP_FCS_NONE;
2170 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2171 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request.
 *
 * Accumulates (possibly multi-fragment) config data into pi->conf_req;
 * once the final fragment arrives, parses it, sends our response, and —
 * when both directions are configured — finishes channel setup (sequence
 * counters, TX queue, ERTM init) and marks the channel ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the socket bh-locked on success */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);	/* invalid CID */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Peer configured first: now send our own config request */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2265 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2267 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2268 u16 scid, flags, result;
2269 struct sock *sk;
2270 int len = cmd->len - sizeof(*rsp);
2272 scid = __le16_to_cpu(rsp->scid);
2273 flags = __le16_to_cpu(rsp->flags);
2274 result = __le16_to_cpu(rsp->result);
2276 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2277 scid, flags, result);
2279 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2280 if (!sk)
2281 return 0;
2283 switch (result) {
2284 case L2CAP_CONF_SUCCESS:
2285 l2cap_conf_rfc_get(sk, rsp->data, len);
2286 break;
2288 case L2CAP_CONF_UNACCEPT:
2289 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2290 char req[64];
2292 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2293 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2294 goto done;
2297 /* throw out any old stored conf requests */
2298 result = L2CAP_CONF_SUCCESS;
2299 len = l2cap_parse_conf_rsp(sk, rsp->data,
2300 len, req, &result);
2301 if (len < 0) {
2302 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2303 goto done;
2306 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2307 L2CAP_CONF_REQ, len, req);
2308 l2cap_pi(sk)->num_conf_req++;
2309 if (result != L2CAP_CONF_SUCCESS)
2310 goto done;
2311 break;
2314 default:
2315 sk->sk_err = ECONNRESET;
2316 l2cap_sock_set_timer(sk, HZ * 5);
2317 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2318 goto done;
2321 if (flags & 0x01)
2322 goto done;
2324 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2326 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2327 set_default_fcs(l2cap_pi(sk));
2329 sk->sk_state = BT_CONNECTED;
2330 l2cap_pi(sk)->next_tx_seq = 0;
2331 l2cap_pi(sk)->expected_tx_seq = 0;
2332 __skb_queue_head_init(TX_QUEUE(sk));
2333 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2334 l2cap_ertm_init(sk);
2336 l2cap_chan_ready(sk);
2339 done:
2340 bh_unlock_sock(sk);
2341 return 0;
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down, and remove the channel —
 * unless userspace currently owns the socket, in which case teardown is
 * deferred via a short timer.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; lookup returns with sk bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Disconnection Response to a request we sent:
 * remove the channel (or defer teardown via a timer if userspace owns
 * the socket).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns with sk bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* Clean disconnect: no error reported to the socket */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Information Request: answer with our feature mask
 * (ERTM/streaming/FCS unless disabled), our fixed-channel map, or
 * NOTSUPP for any other information type.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming Information Response.
 *
 * Records the peer's feature mask, optionally chains a fixed-channel
 * query if the peer advertises fixed channels, and once discovery is
 * finished (or failed) marks it done and starts the pending channels.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Discovery failed: proceed without the extra info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2501 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2502 u16 to_multiplier)
2504 u16 max_latency;
2506 if (min > max || min < 6 || max > 3200)
2507 return -EINVAL;
2509 if (to_multiplier < 10 || to_multiplier > 3200)
2510 return -EINVAL;
2512 if (max >= to_multiplier * 8)
2513 return -EINVAL;
2515 max_latency = (to_multiplier * 8 / max) - 1;
2516 if (latency > 499 || latency > max_latency)
2517 return -EINVAL;
2519 return 0;
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the link master.  Validates the proposed
 * parameters, always answers with an accept/reject response, and on
 * accept actually issues the HCI connection update.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may apply the update */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min           = __le16_to_cpu(req->min);
	max           = __le16_to_cpu(req->max);
	latency       = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the new parameters only after accepting them */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline (payload mirrored back); Echo
 * responses are silently consumed. Unknown opcodes return -EINVAL so
 * the caller sends a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the received payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2622 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2623 struct l2cap_cmd_hdr *cmd, u8 *data)
2625 switch (cmd->code) {
2626 case L2CAP_COMMAND_REJ:
2627 return 0;
2629 case L2CAP_CONN_PARAM_UPDATE_REQ:
2630 return l2cap_conn_param_update_req(conn, cmd, data);
2632 case L2CAP_CONN_PARAM_UPDATE_RSP:
2633 return 0;
2635 default:
2636 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2637 return -EINVAL;
/* Process every signaling command packed into one C-frame.
 *
 * Walks the skb payload command by command, dispatching to the LE or
 * BR/EDR handler depending on the underlying link type. A handler
 * error triggers a Command Reject back to the peer. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before parsing */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Claimed length must fit the remaining payload, and a zero
		 * identifier is illegal for signaling commands */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2688 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2690 u16 our_fcs, rcv_fcs;
2691 int hdr_size = L2CAP_HDR_SIZE + 2;
2693 if (pi->fcs == L2CAP_FCS_CRC16) {
2694 skb_trim(skb, skb->len - 2);
2695 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2696 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2698 if (our_fcs != rcv_fcs)
2699 return -EBADMSG;
2701 return 0;
/* Answer a poll (P=1) by sending pending I-frames, or an RR/RNR
 * supervisory frame when there is nothing (or we are busy).
 *
 * The final-bit context is set up by the caller; this only decides
 * what kind of frame carries the response.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	/* Locally busy: tell the peer to stop with an RNR */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	/* Try to carry the answer on pending I-frames */
	l2cap_ertm_send(sk);

	/* Nothing was sent and we are not busy: fall back to an RR so the
	 * poll still gets its final-bit response */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (modulo-64 window).
 *
 * Returns 0 on insertion, -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing info in the skb control block for reassembly */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the receive window base */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* Found the first queued frame that belongs after us */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
/* Reassemble one ERTM I-frame into the current SDU according to its
 * SAR bits and deliver complete SDUs to the socket.
 *
 * Returns 0 when the frame was consumed (delivered, buffered or the
 * channel was torn down), or a negative error (-ENOMEM / queue error)
 * that signals a local-busy condition to the caller; in that case the
 * skb is NOT consumed and the END frame will be retried later
 * (L2CAP_CONN_SAR_RETRY). Protocol violations disconnect the channel.
 *
 * Note: the drop label deliberately falls through into disconnect —
 * an SDU that must be dropped in ERTM is unrecoverable, so the
 * channel is reset either way.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU while a segmented one is in flight is
		 * a protocol violation */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		/* Receive buffer full: report busy without freeing skb */
		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* Peer sent more than it announced in the START frame */
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry (previous delivery failed with local busy) the
		 * payload was already appended — only requeue the clone */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			/* Total must match the announced SDU length exactly */
			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
/* Try to drain the local-busy queue into the socket.
 *
 * Returns -EBUSY (frame requeued at the head) while the receive buffer
 * is still full; returns 0 once the backlog is empty, in which case
 * the local-busy condition is cleared and — if an RNR had been sent —
 * an RR with the poll bit is issued to restart the peer.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for the next try */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it back to life with RR(P=1)
	 * and start waiting for the final bit */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
/* Workqueue handler for the ERTM local-busy condition.
 *
 * Periodically retries pushing buffered frames to the socket until it
 * succeeds, the retry budget (L2CAP_LOCAL_BUSY_TRIES) is exhausted, a
 * signal arrives, or a socket error shows up. Giving up disconnects
 * the channel. Sleeps with the socket lock released.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* l2cap_pinfo embeds the sock as its first member */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock while sleeping so the receive path can make
		 * progress and drain the socket buffer */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
/* Deliver an in-sequence I-frame to the socket, entering the
 * local-busy state when the receive buffer is full.
 *
 * While already busy the frame is appended to the busy queue and a
 * drain is attempted. On the first failure we send RNR, start the
 * busy worker, and return the negative error from reassembly.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending until we recover */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	/* The worker retries draining the busy queue in the background */
	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3008 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3010 struct l2cap_pinfo *pi = l2cap_pi(sk);
3011 struct sk_buff *_skb;
3012 int err = -EINVAL;
3015 * TODO: We have to notify the userland if some data is lost with the
3016 * Streaming Mode.
3019 switch (control & L2CAP_CTRL_SAR) {
3020 case L2CAP_SDU_UNSEGMENTED:
3021 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3022 kfree_skb(pi->sdu);
3023 break;
3026 err = sock_queue_rcv_skb(sk, skb);
3027 if (!err)
3028 return 0;
3030 break;
3032 case L2CAP_SDU_START:
3033 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3034 kfree_skb(pi->sdu);
3035 break;
3038 pi->sdu_len = get_unaligned_le16(skb->data);
3039 skb_pull(skb, 2);
3041 if (pi->sdu_len > pi->imtu) {
3042 err = -EMSGSIZE;
3043 break;
3046 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3047 if (!pi->sdu) {
3048 err = -ENOMEM;
3049 break;
3052 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3054 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3055 pi->partial_sdu_len = skb->len;
3056 err = 0;
3057 break;
3059 case L2CAP_SDU_CONTINUE:
3060 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3061 break;
3063 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3065 pi->partial_sdu_len += skb->len;
3066 if (pi->partial_sdu_len > pi->sdu_len)
3067 kfree_skb(pi->sdu);
3068 else
3069 err = 0;
3071 break;
3073 case L2CAP_SDU_END:
3074 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3075 break;
3077 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3079 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3080 pi->partial_sdu_len += skb->len;
3082 if (pi->partial_sdu_len > pi->imtu)
3083 goto drop;
3085 if (pi->partial_sdu_len == pi->sdu_len) {
3086 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3087 err = sock_queue_rcv_skb(sk, _skb);
3088 if (err < 0)
3089 kfree_skb(_skb);
3091 err = 0;
3093 drop:
3094 kfree_skb(pi->sdu);
3095 break;
3098 kfree_skb(skb);
3099 return err;
/* After a missing frame arrived, flush the now-contiguous run of
 * frames from the head of the SREJ queue up to the next gap,
 * reassembling each and advancing the SREJ buffer sequence.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Stop at the first hole in the sequence */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
/* Re-send SREJ frames for every sequence number still outstanding
 * before @tx_seq, rotating each entry to the tail of the SREJ list;
 * the entry matching @tx_seq itself is satisfied and removed.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* This gap is now filled: drop its bookkeeping */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		/* Move to the tail so the list stays in request order */
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3140 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3142 struct l2cap_pinfo *pi = l2cap_pi(sk);
3143 struct srej_list *new;
3144 u16 control;
3146 while (tx_seq != pi->expected_tx_seq) {
3147 control = L2CAP_SUPER_SELECT_REJECT;
3148 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3149 l2cap_send_sframe(pi, control);
3151 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3152 new->tx_seq = pi->expected_tx_seq;
3153 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3154 list_add_tail(&new->list, SREJ_LIST(sk));
3156 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3159 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3161 struct l2cap_pinfo *pi = l2cap_pi(sk);
3162 u8 tx_seq = __get_txseq(rx_control);
3163 u8 req_seq = __get_reqseq(rx_control);
3164 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3165 int tx_seq_offset, expected_tx_seq_offset;
3166 int num_to_ack = (pi->tx_win/6) + 1;
3167 int err = 0;
3169 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3170 rx_control);
3172 if (L2CAP_CTRL_FINAL & rx_control &&
3173 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3174 del_timer(&pi->monitor_timer);
3175 if (pi->unacked_frames > 0)
3176 __mod_retrans_timer();
3177 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3180 pi->expected_ack_seq = req_seq;
3181 l2cap_drop_acked_frames(sk);
3183 if (tx_seq == pi->expected_tx_seq)
3184 goto expected;
3186 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3187 if (tx_seq_offset < 0)
3188 tx_seq_offset += 64;
3190 /* invalid tx_seq */
3191 if (tx_seq_offset >= pi->tx_win) {
3192 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3193 goto drop;
3196 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3197 goto drop;
3199 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3200 struct srej_list *first;
3202 first = list_first_entry(SREJ_LIST(sk),
3203 struct srej_list, list);
3204 if (tx_seq == first->tx_seq) {
3205 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3206 l2cap_check_srej_gap(sk, tx_seq);
3208 list_del(&first->list);
3209 kfree(first);
3211 if (list_empty(SREJ_LIST(sk))) {
3212 pi->buffer_seq = pi->buffer_seq_srej;
3213 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3214 l2cap_send_ack(pi);
3215 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3217 } else {
3218 struct srej_list *l;
3220 /* duplicated tx_seq */
3221 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3222 goto drop;
3224 list_for_each_entry(l, SREJ_LIST(sk), list) {
3225 if (l->tx_seq == tx_seq) {
3226 l2cap_resend_srejframe(sk, tx_seq);
3227 return 0;
3230 l2cap_send_srejframe(sk, tx_seq);
3232 } else {
3233 expected_tx_seq_offset =
3234 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3235 if (expected_tx_seq_offset < 0)
3236 expected_tx_seq_offset += 64;
3238 /* duplicated tx_seq */
3239 if (tx_seq_offset < expected_tx_seq_offset)
3240 goto drop;
3242 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3244 BT_DBG("sk %p, Enter SREJ", sk);
3246 INIT_LIST_HEAD(SREJ_LIST(sk));
3247 pi->buffer_seq_srej = pi->buffer_seq;
3249 __skb_queue_head_init(SREJ_QUEUE(sk));
3250 __skb_queue_head_init(BUSY_QUEUE(sk));
3251 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3253 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3255 l2cap_send_srejframe(sk, tx_seq);
3257 del_timer(&pi->ack_timer);
3259 return 0;
3261 expected:
3262 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3264 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3265 bt_cb(skb)->tx_seq = tx_seq;
3266 bt_cb(skb)->sar = sar;
3267 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3268 return 0;
3271 err = l2cap_push_rx_skb(sk, skb, rx_control);
3272 if (err < 0)
3273 return 0;
3275 if (rx_control & L2CAP_CTRL_FINAL) {
3276 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3277 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3278 else
3279 l2cap_retransmit_frames(sk);
3282 __mod_ack_timer();
3284 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3285 if (pi->num_acked == num_to_ack - 1)
3286 l2cap_send_ack(pi);
3288 return 0;
3290 drop:
3291 kfree_skb(skb);
3292 return 0;
/* Handle a Receiver Ready (RR) supervisory frame: acknowledge frames,
 * answer a poll, react to a final bit, or simply resume sending.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the final bit */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* A final bit already consumed by a REJ means this one is
		 * stale; otherwise it triggers retransmission */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
/* Handle a Reject (REJ) supervisory frame: the peer asks for a go-back
 * retransmission starting at req_seq.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* Only retransmit once per poll/final exchange */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember that this REJ was already acted on, so the
		 * matching final bit does not retransmit again */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) supervisory frame: retransmit
 * exactly the frame the peer asks for, honouring poll/final rules.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with poll also acknowledges everything before it */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if this final answers an SREJ we
		 * already acted on for the same sequence number */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a Receiver Not Ready (RNR) supervisory frame: the peer is
 * busy, so stop retransmitting and answer a poll appropriately.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer can't receive: no point running the retransmit timer */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery, a poll is answered with the SREJ tail */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
/* Dispatch a supervisory (S) frame to its handler after servicing a
 * final bit that answers our poll. Consumes @skb. Returns 0.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* Final bit answers our poll: stop the monitor timer */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	/* S-frames carry no payload to keep; the skb served its purpose */
	kfree_skb(skb);
	return 0;
}
3461 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3463 struct l2cap_pinfo *pi = l2cap_pi(sk);
3464 u16 control;
3465 u8 req_seq;
3466 int len, next_tx_seq_offset, req_seq_offset;
3468 control = get_unaligned_le16(skb->data);
3469 skb_pull(skb, 2);
3470 len = skb->len;
3473 * We can just drop the corrupted I-frame here.
3474 * Receiver will miss it and start proper recovery
3475 * procedures and ask retransmission.
3477 if (l2cap_check_fcs(pi, skb))
3478 goto drop;
3480 if (__is_sar_start(control) && __is_iframe(control))
3481 len -= 2;
3483 if (pi->fcs == L2CAP_FCS_CRC16)
3484 len -= 2;
3486 if (len > pi->mps) {
3487 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3488 goto drop;
3491 req_seq = __get_reqseq(control);
3492 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3493 if (req_seq_offset < 0)
3494 req_seq_offset += 64;
3496 next_tx_seq_offset =
3497 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3498 if (next_tx_seq_offset < 0)
3499 next_tx_seq_offset += 64;
3501 /* check for invalid req-seq */
3502 if (req_seq_offset > next_tx_seq_offset) {
3503 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3504 goto drop;
3507 if (__is_iframe(control)) {
3508 if (len < 0) {
3509 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3510 goto drop;
3513 l2cap_data_channel_iframe(sk, control, skb);
3514 } else {
3515 if (len != 0) {
3516 BT_ERR("%d", len);
3517 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3518 goto drop;
3521 l2cap_data_channel_sframe(sk, control, skb);
3524 return 0;
3526 drop:
3527 kfree_skb(skb);
3528 return 0;
/* Route an incoming data frame on @cid to its channel, handling it
 * according to the channel's operating mode (Basic/ERTM/Streaming).
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked — the done label unlocks it; confirm against its
 * definition. Consumes @skb on every path. Returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process inline if possible, otherwise defer via backlog
		 * to the lock owner */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SAR-start frames carry a 2-byte SDU length field */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming never uses S-frames */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming is lossy: just resynchronize past any gap */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless (G-frame) payload to the socket bound to
 * @psm, if any. Consumes @skb on every path. Returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	/* sk is NULL only when the PSM lookup failed before locking */
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Parse the Basic L2CAP header of a complete frame and route it to
 * the signaling, connectionless, or data channel path by CID.
 * Consumes @skb (directly or via the called handler).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames start with a 2-byte PSM */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3684 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * @bdaddr? Scans listening L2CAP sockets and returns the combined
 * link-mode flags (HCI_LM_ACCEPT, optionally HCI_LM_MASTER).
 * Sockets bound to the exact local address take precedence over
 * wildcard (BDADDR_ANY) listeners.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	/* lm1: flags from exact-address listeners, lm2: from wildcards */
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
3719 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3721 struct l2cap_conn *conn;
3723 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3725 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3726 return -EINVAL;
3728 if (!status) {
3729 conn = l2cap_conn_add(hcon, status);
3730 if (conn)
3731 l2cap_conn_ready(conn);
3732 } else
3733 l2cap_conn_del(hcon, bt_err(status));
3735 return 0;
3738 static int l2cap_disconn_ind(struct hci_conn *hcon)
3740 struct l2cap_conn *conn = hcon->l2cap_data;
3742 BT_DBG("hcon %p", hcon);
3744 if (hcon->type != ACL_LINK || !conn)
3745 return 0x13;
3747 return conn->disc_reason;
3750 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3752 BT_DBG("hcon %p reason %d", hcon, reason);
3754 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3755 return -EINVAL;
3757 l2cap_conn_del(hcon, bt_err(reason));
3759 return 0;
3762 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3764 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3765 return;
3767 if (encrypt == 0x00) {
3768 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3769 l2cap_sock_clear_timer(sk);
3770 l2cap_sock_set_timer(sk, HZ * 5);
3771 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3772 __l2cap_sock_close(sk, ECONNREFUSED);
3773 } else {
3774 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3775 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption request completed with
 * @status; @encrypt reflects the new encryption state.
 *
 * Walks every channel on the connection: established channels get an
 * encryption re-check; channels waiting on security either proceed
 * (send Connection Request / successful Connection Response) or are
 * failed with a short timer / security-block response.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect is already pending for this channel: this
		 * security event is not for it */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption re-check */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security is in place: send the deferred
				 * Connection Request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: close shortly via timer */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection waiting on security: answer
			 * the pending Connection Request now */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * Start fragments (no ACL_CONT) carry the Basic L2CAP header, from
 * which the total frame length is known; continuation fragments are
 * appended to conn->rx_skb until rx_len reaches zero, then the
 * complete frame is dispatched. Any inconsistency marks the
 * connection unreliable (ECOMM) and drops the fragment.
 * Consumes @skb on every path. Returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Data may arrive before the connect_cfm callback ran */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A start while reassembly is in progress means the
		 * previous frame was truncated: discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject early if the frame will exceed the channel MTU.
		 * NOTE(review): the lookup appears to return with the socket
		 * bh-locked (unlocked below) — confirm against its definition */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
3961 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3963 struct sock *sk;
3964 struct hlist_node *node;
3966 read_lock_bh(&l2cap_sk_list.lock);
3968 sk_for_each(sk, node, &l2cap_sk_list.head) {
3969 struct l2cap_pinfo *pi = l2cap_pi(sk);
3971 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3972 batostr(&bt_sk(sk)->src),
3973 batostr(&bt_sk(sk)->dst),
3974 sk->sk_state, __le16_to_cpu(pi->psm),
3975 pi->scid, pi->dcid,
3976 pi->imtu, pi->omtu, pi->sec_level,
3977 pi->mode);
3980 read_unlock_bh(&l2cap_sk_list.lock);
3982 return 0;
/* debugfs open: bind the single-record seq_file show handler. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the "l2cap" debugfs entry; reads go through the
 * standard seq_file machinery driven by l2cap_debugfs_show(). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: the callbacks the
 * HCI layer invokes for connection events, security events and incoming
 * ACL data on this protocol. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4010 int __init l2cap_init(void)
4012 int err;
4014 err = l2cap_init_sockets();
4015 if (err < 0)
4016 return err;
4018 _busy_wq = create_singlethread_workqueue("l2cap");
4019 if (!_busy_wq) {
4020 err = -ENOMEM;
4021 goto error;
4024 err = hci_register_proto(&l2cap_hci_proto);
4025 if (err < 0) {
4026 BT_ERR("L2CAP protocol registration failed");
4027 bt_sock_unregister(BTPROTO_L2CAP);
4028 goto error;
4031 if (bt_debugfs) {
4032 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4033 bt_debugfs, NULL, &l2cap_debugfs_fops);
4034 if (!l2cap_debugfs)
4035 BT_ERR("Failed to create L2CAP debug file");
4038 return 0;
4040 error:
4041 destroy_workqueue(_busy_wq);
4042 l2cap_cleanup_sockets();
4043 return err;
/*
 * Module exit for the L2CAP core: undo everything l2cap_init() set up.
 * Ordering matters: drain pending work before destroying the workqueue,
 * and unhook from HCI before tearing down the socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Wait for any queued busy work to finish before the workqueue
	 * (and eventually the module text) goes away. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
/* Module parameter (mode 0644: root-writable via sysfs) switching off
 * enhanced retransmission mode; the flag itself is defined elsewhere in
 * this file. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");