wl12xx: set the actual tid instead of the ac
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap_core.c
blobc3cebed205ccfec22759191bb359b103e914f3e5
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 int disable_ertm;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static struct workqueue_struct *_busy_wq;
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 /* ---- L2CAP channels ---- */
77 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
79 struct sock *s;
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
81 if (l2cap_pi(s)->dcid == cid)
82 break;
84 return s;
87 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
89 struct sock *s;
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
91 if (l2cap_pi(s)->scid == cid)
92 break;
94 return s;
97 /* Find channel with given SCID.
98 * Returns locked socket */
99 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
101 struct sock *s;
102 read_lock(&l->lock);
103 s = __l2cap_get_chan_by_scid(l, cid);
104 if (s)
105 bh_lock_sock(s);
106 read_unlock(&l->lock);
107 return s;
110 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
112 struct sock *s;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->ident == ident)
115 break;
117 return s;
120 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
122 struct sock *s;
123 read_lock(&l->lock);
124 s = __l2cap_get_chan_by_ident(l, ident);
125 if (s)
126 bh_lock_sock(s);
127 read_unlock(&l->lock);
128 return s;
131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
133 u16 cid = L2CAP_CID_DYN_START;
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
137 return cid;
140 return 0;
143 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
145 sock_hold(sk);
147 if (l->head)
148 l2cap_pi(l->head)->prev_c = sk;
150 l2cap_pi(sk)->next_c = l->head;
151 l2cap_pi(sk)->prev_c = NULL;
152 l->head = sk;
155 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
157 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
159 write_lock_bh(&l->lock);
160 if (sk == l->head)
161 l->head = next;
163 if (next)
164 l2cap_pi(next)->prev_c = prev;
165 if (prev)
166 l2cap_pi(prev)->next_c = next;
167 write_unlock_bh(&l->lock);
169 __sock_put(sk);
172 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
174 struct l2cap_chan_list *l = &conn->chan_list;
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
179 conn->disc_reason = 0x13;
181 l2cap_pi(sk)->conn = conn;
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 if (conn->hcon->type == LE_LINK) {
185 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
189 } else {
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
194 } else if (sk->sk_type == SOCK_DGRAM) {
195 /* Connectionless socket */
196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
197 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
198 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
199 } else {
200 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
202 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
203 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
206 __l2cap_chan_link(l, sk);
209 /* Delete channel.
210 * Must be called on the locked socket. */
211 void l2cap_chan_del(struct sock *sk, int err)
213 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
214 struct sock *parent = bt_sk(sk)->parent;
216 l2cap_sock_clear_timer(sk);
218 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
220 if (conn) {
221 /* Unlink from channel list */
222 l2cap_chan_unlink(&conn->chan_list, sk);
223 l2cap_pi(sk)->conn = NULL;
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
230 if (err)
231 sk->sk_err = err;
233 if (parent) {
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
236 } else
237 sk->sk_state_change(sk);
239 skb_queue_purge(TX_QUEUE(sk));
241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
242 struct srej_list *l, *tmp;
244 del_timer(&l2cap_pi(sk)->retrans_timer);
245 del_timer(&l2cap_pi(sk)->monitor_timer);
246 del_timer(&l2cap_pi(sk)->ack_timer);
248 skb_queue_purge(SREJ_QUEUE(sk));
249 skb_queue_purge(BUSY_QUEUE(sk));
251 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
252 list_del(&l->list);
253 kfree(l);
258 static inline u8 l2cap_get_auth_type(struct sock *sk)
260 if (sk->sk_type == SOCK_RAW) {
261 switch (l2cap_pi(sk)->sec_level) {
262 case BT_SECURITY_HIGH:
263 return HCI_AT_DEDICATED_BONDING_MITM;
264 case BT_SECURITY_MEDIUM:
265 return HCI_AT_DEDICATED_BONDING;
266 default:
267 return HCI_AT_NO_BONDING;
269 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
270 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
271 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
274 return HCI_AT_NO_BONDING_MITM;
275 else
276 return HCI_AT_NO_BONDING;
277 } else {
278 switch (l2cap_pi(sk)->sec_level) {
279 case BT_SECURITY_HIGH:
280 return HCI_AT_GENERAL_BONDING_MITM;
281 case BT_SECURITY_MEDIUM:
282 return HCI_AT_GENERAL_BONDING;
283 default:
284 return HCI_AT_NO_BONDING;
289 /* Service level security */
290 static inline int l2cap_check_security(struct sock *sk)
292 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
293 __u8 auth_type;
295 auth_type = l2cap_get_auth_type(sk);
297 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
298 auth_type);
301 u8 l2cap_get_ident(struct l2cap_conn *conn)
303 u8 id;
305 /* Get next available identificator.
306 * 1 - 128 are used by kernel.
307 * 129 - 199 are reserved.
308 * 200 - 254 are used by utilities like l2ping, etc.
311 spin_lock_bh(&conn->lock);
313 if (++conn->tx_ident > 128)
314 conn->tx_ident = 1;
316 id = conn->tx_ident;
318 spin_unlock_bh(&conn->lock);
320 return id;
323 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
325 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
326 u8 flags;
328 BT_DBG("code 0x%2.2x", code);
330 if (!skb)
331 return;
333 if (lmp_no_flush_capable(conn->hcon->hdev))
334 flags = ACL_START_NO_FLUSH;
335 else
336 flags = ACL_START;
338 hci_send_acl(conn->hcon, skb, flags);
341 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
343 struct sk_buff *skb;
344 struct l2cap_hdr *lh;
345 struct l2cap_conn *conn = pi->conn;
346 struct sock *sk = (struct sock *)pi;
347 int count, hlen = L2CAP_HDR_SIZE + 2;
348 u8 flags;
350 if (sk->sk_state != BT_CONNECTED)
351 return;
353 if (pi->fcs == L2CAP_FCS_CRC16)
354 hlen += 2;
356 BT_DBG("pi %p, control 0x%2.2x", pi, control);
358 count = min_t(unsigned int, conn->mtu, hlen);
359 control |= L2CAP_CTRL_FRAME_TYPE;
361 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
362 control |= L2CAP_CTRL_FINAL;
363 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
366 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
367 control |= L2CAP_CTRL_POLL;
368 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
371 skb = bt_skb_alloc(count, GFP_ATOMIC);
372 if (!skb)
373 return;
375 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
376 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
377 lh->cid = cpu_to_le16(pi->dcid);
378 put_unaligned_le16(control, skb_put(skb, 2));
380 if (pi->fcs == L2CAP_FCS_CRC16) {
381 u16 fcs = crc16(0, (u8 *)lh, count - 2);
382 put_unaligned_le16(fcs, skb_put(skb, 2));
385 if (lmp_no_flush_capable(conn->hcon->hdev))
386 flags = ACL_START_NO_FLUSH;
387 else
388 flags = ACL_START;
390 hci_send_acl(pi->conn->hcon, skb, flags);
393 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
395 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
396 control |= L2CAP_SUPER_RCV_NOT_READY;
397 pi->conn_state |= L2CAP_CONN_RNR_SENT;
398 } else
399 control |= L2CAP_SUPER_RCV_READY;
401 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
403 l2cap_send_sframe(pi, control);
406 static inline int __l2cap_no_conn_pending(struct sock *sk)
408 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
411 static void l2cap_do_start(struct sock *sk)
413 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
415 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
416 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
417 return;
419 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
420 struct l2cap_conn_req req;
421 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
422 req.psm = l2cap_pi(sk)->psm;
424 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
425 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
427 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
428 L2CAP_CONN_REQ, sizeof(req), &req);
430 } else {
431 struct l2cap_info_req req;
432 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
434 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
435 conn->info_ident = l2cap_get_ident(conn);
437 mod_timer(&conn->info_timer, jiffies +
438 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
440 l2cap_send_cmd(conn, conn->info_ident,
441 L2CAP_INFO_REQ, sizeof(req), &req);
445 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
447 u32 local_feat_mask = l2cap_feat_mask;
448 if (!disable_ertm)
449 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
451 switch (mode) {
452 case L2CAP_MODE_ERTM:
453 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
454 case L2CAP_MODE_STREAMING:
455 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
456 default:
457 return 0x00;
461 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
463 struct l2cap_disconn_req req;
465 if (!conn)
466 return;
468 skb_queue_purge(TX_QUEUE(sk));
470 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
471 del_timer(&l2cap_pi(sk)->retrans_timer);
472 del_timer(&l2cap_pi(sk)->monitor_timer);
473 del_timer(&l2cap_pi(sk)->ack_timer);
476 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
477 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
478 l2cap_send_cmd(conn, l2cap_get_ident(conn),
479 L2CAP_DISCONN_REQ, sizeof(req), &req);
481 sk->sk_state = BT_DISCONN;
482 sk->sk_err = err;
485 /* ---- L2CAP connections ---- */
486 static void l2cap_conn_start(struct l2cap_conn *conn)
488 struct l2cap_chan_list *l = &conn->chan_list;
489 struct sock_del_list del, *tmp1, *tmp2;
490 struct sock *sk;
492 BT_DBG("conn %p", conn);
494 INIT_LIST_HEAD(&del.list);
496 read_lock(&l->lock);
498 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
499 bh_lock_sock(sk);
501 if (sk->sk_type != SOCK_SEQPACKET &&
502 sk->sk_type != SOCK_STREAM) {
503 bh_unlock_sock(sk);
504 continue;
507 if (sk->sk_state == BT_CONNECT) {
508 struct l2cap_conn_req req;
510 if (!l2cap_check_security(sk) ||
511 !__l2cap_no_conn_pending(sk)) {
512 bh_unlock_sock(sk);
513 continue;
516 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
517 conn->feat_mask)
518 && l2cap_pi(sk)->conf_state &
519 L2CAP_CONF_STATE2_DEVICE) {
520 tmp1 = kzalloc(sizeof(struct sock_del_list),
521 GFP_ATOMIC);
522 tmp1->sk = sk;
523 list_add_tail(&tmp1->list, &del.list);
524 bh_unlock_sock(sk);
525 continue;
528 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
529 req.psm = l2cap_pi(sk)->psm;
531 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
532 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
534 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
535 L2CAP_CONN_REQ, sizeof(req), &req);
537 } else if (sk->sk_state == BT_CONNECT2) {
538 struct l2cap_conn_rsp rsp;
539 char buf[128];
540 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
541 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
543 if (l2cap_check_security(sk)) {
544 if (bt_sk(sk)->defer_setup) {
545 struct sock *parent = bt_sk(sk)->parent;
546 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
547 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
548 parent->sk_data_ready(parent, 0);
550 } else {
551 sk->sk_state = BT_CONFIG;
552 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
553 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
555 } else {
556 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
557 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
560 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
561 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
563 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
564 rsp.result != L2CAP_CR_SUCCESS) {
565 bh_unlock_sock(sk);
566 continue;
569 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
570 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
571 l2cap_build_conf_req(sk, buf), buf);
572 l2cap_pi(sk)->num_conf_req++;
575 bh_unlock_sock(sk);
578 read_unlock(&l->lock);
580 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
581 bh_lock_sock(tmp1->sk);
582 __l2cap_sock_close(tmp1->sk, ECONNRESET);
583 bh_unlock_sock(tmp1->sk);
584 list_del(&tmp1->list);
585 kfree(tmp1);
589 /* Find socket with cid and source bdaddr.
590 * Returns closest match, locked.
592 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
594 struct sock *s, *sk = NULL, *sk1 = NULL;
595 struct hlist_node *node;
597 read_lock(&l2cap_sk_list.lock);
599 sk_for_each(sk, node, &l2cap_sk_list.head) {
600 if (state && sk->sk_state != state)
601 continue;
603 if (l2cap_pi(sk)->scid == cid) {
604 /* Exact match. */
605 if (!bacmp(&bt_sk(sk)->src, src))
606 break;
608 /* Closest match */
609 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
610 sk1 = sk;
613 s = node ? sk : sk1;
614 if (s)
615 bh_lock_sock(s);
616 read_unlock(&l2cap_sk_list.lock);
618 return s;
621 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
623 struct l2cap_chan_list *list = &conn->chan_list;
624 struct sock *parent, *uninitialized_var(sk);
626 BT_DBG("");
628 /* Check if we have socket listening on cid */
629 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
630 conn->src);
631 if (!parent)
632 return;
634 /* Check for backlog size */
635 if (sk_acceptq_is_full(parent)) {
636 BT_DBG("backlog full %d", parent->sk_ack_backlog);
637 goto clean;
640 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
641 if (!sk)
642 goto clean;
644 write_lock_bh(&list->lock);
646 hci_conn_hold(conn->hcon);
648 l2cap_sock_init(sk, parent);
649 bacpy(&bt_sk(sk)->src, conn->src);
650 bacpy(&bt_sk(sk)->dst, conn->dst);
652 bt_accept_enqueue(parent, sk);
654 __l2cap_chan_add(conn, sk);
656 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658 sk->sk_state = BT_CONNECTED;
659 parent->sk_data_ready(parent, 0);
661 write_unlock_bh(&list->lock);
663 clean:
664 bh_unlock_sock(parent);
667 static void l2cap_conn_ready(struct l2cap_conn *conn)
669 struct l2cap_chan_list *l = &conn->chan_list;
670 struct sock *sk;
672 BT_DBG("conn %p", conn);
674 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
675 l2cap_le_conn_ready(conn);
677 read_lock(&l->lock);
679 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
680 bh_lock_sock(sk);
682 if (conn->hcon->type == LE_LINK) {
683 l2cap_sock_clear_timer(sk);
684 sk->sk_state = BT_CONNECTED;
685 sk->sk_state_change(sk);
688 if (sk->sk_type != SOCK_SEQPACKET &&
689 sk->sk_type != SOCK_STREAM) {
690 l2cap_sock_clear_timer(sk);
691 sk->sk_state = BT_CONNECTED;
692 sk->sk_state_change(sk);
693 } else if (sk->sk_state == BT_CONNECT)
694 l2cap_do_start(sk);
696 bh_unlock_sock(sk);
699 read_unlock(&l->lock);
702 /* Notify sockets that we cannot guaranty reliability anymore */
703 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
705 struct l2cap_chan_list *l = &conn->chan_list;
706 struct sock *sk;
708 BT_DBG("conn %p", conn);
710 read_lock(&l->lock);
712 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
713 if (l2cap_pi(sk)->force_reliable)
714 sk->sk_err = err;
717 read_unlock(&l->lock);
720 static void l2cap_info_timeout(unsigned long arg)
722 struct l2cap_conn *conn = (void *) arg;
724 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
725 conn->info_ident = 0;
727 l2cap_conn_start(conn);
730 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
732 struct l2cap_conn *conn = hcon->l2cap_data;
734 if (conn || status)
735 return conn;
737 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
738 if (!conn)
739 return NULL;
741 hcon->l2cap_data = conn;
742 conn->hcon = hcon;
744 BT_DBG("hcon %p conn %p", hcon, conn);
746 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
747 conn->mtu = hcon->hdev->le_mtu;
748 else
749 conn->mtu = hcon->hdev->acl_mtu;
751 conn->src = &hcon->hdev->bdaddr;
752 conn->dst = &hcon->dst;
754 conn->feat_mask = 0;
756 spin_lock_init(&conn->lock);
757 rwlock_init(&conn->chan_list.lock);
759 if (hcon->type != LE_LINK)
760 setup_timer(&conn->info_timer, l2cap_info_timeout,
761 (unsigned long) conn);
763 conn->disc_reason = 0x13;
765 return conn;
768 static void l2cap_conn_del(struct hci_conn *hcon, int err)
770 struct l2cap_conn *conn = hcon->l2cap_data;
771 struct sock *sk;
773 if (!conn)
774 return;
776 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
778 kfree_skb(conn->rx_skb);
780 /* Kill channels */
781 while ((sk = conn->chan_list.head)) {
782 bh_lock_sock(sk);
783 l2cap_chan_del(sk, err);
784 bh_unlock_sock(sk);
785 l2cap_sock_kill(sk);
788 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
789 del_timer_sync(&conn->info_timer);
791 hcon->l2cap_data = NULL;
792 kfree(conn);
795 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
797 struct l2cap_chan_list *l = &conn->chan_list;
798 write_lock_bh(&l->lock);
799 __l2cap_chan_add(conn, sk);
800 write_unlock_bh(&l->lock);
803 /* ---- Socket interface ---- */
805 /* Find socket with psm and source bdaddr.
806 * Returns closest match.
808 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
810 struct sock *sk = NULL, *sk1 = NULL;
811 struct hlist_node *node;
813 read_lock(&l2cap_sk_list.lock);
815 sk_for_each(sk, node, &l2cap_sk_list.head) {
816 if (state && sk->sk_state != state)
817 continue;
819 if (l2cap_pi(sk)->psm == psm) {
820 /* Exact match. */
821 if (!bacmp(&bt_sk(sk)->src, src))
822 break;
824 /* Closest match */
825 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
826 sk1 = sk;
830 read_unlock(&l2cap_sk_list.lock);
832 return node ? sk : sk1;
835 int l2cap_do_connect(struct sock *sk)
837 bdaddr_t *src = &bt_sk(sk)->src;
838 bdaddr_t *dst = &bt_sk(sk)->dst;
839 struct l2cap_conn *conn;
840 struct hci_conn *hcon;
841 struct hci_dev *hdev;
842 __u8 auth_type;
843 int err;
845 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
846 l2cap_pi(sk)->psm);
848 hdev = hci_get_route(dst, src);
849 if (!hdev)
850 return -EHOSTUNREACH;
852 hci_dev_lock_bh(hdev);
854 auth_type = l2cap_get_auth_type(sk);
856 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
857 hcon = hci_connect(hdev, LE_LINK, dst,
858 l2cap_pi(sk)->sec_level, auth_type);
859 else
860 hcon = hci_connect(hdev, ACL_LINK, dst,
861 l2cap_pi(sk)->sec_level, auth_type);
863 if (IS_ERR(hcon)) {
864 err = PTR_ERR(hcon);
865 goto done;
868 conn = l2cap_conn_add(hcon, 0);
869 if (!conn) {
870 hci_conn_put(hcon);
871 err = -ENOMEM;
872 goto done;
875 /* Update source addr of the socket */
876 bacpy(src, conn->src);
878 l2cap_chan_add(conn, sk);
880 sk->sk_state = BT_CONNECT;
881 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
883 if (hcon->state == BT_CONNECTED) {
884 if (sk->sk_type != SOCK_SEQPACKET &&
885 sk->sk_type != SOCK_STREAM) {
886 l2cap_sock_clear_timer(sk);
887 if (l2cap_check_security(sk))
888 sk->sk_state = BT_CONNECTED;
889 } else
890 l2cap_do_start(sk);
893 err = 0;
895 done:
896 hci_dev_unlock_bh(hdev);
897 hci_dev_put(hdev);
898 return err;
901 int __l2cap_wait_ack(struct sock *sk)
903 DECLARE_WAITQUEUE(wait, current);
904 int err = 0;
905 int timeo = HZ/5;
907 add_wait_queue(sk_sleep(sk), &wait);
908 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
909 set_current_state(TASK_INTERRUPTIBLE);
911 if (!timeo)
912 timeo = HZ/5;
914 if (signal_pending(current)) {
915 err = sock_intr_errno(timeo);
916 break;
919 release_sock(sk);
920 timeo = schedule_timeout(timeo);
921 lock_sock(sk);
923 err = sock_error(sk);
924 if (err)
925 break;
927 set_current_state(TASK_RUNNING);
928 remove_wait_queue(sk_sleep(sk), &wait);
929 return err;
932 static void l2cap_monitor_timeout(unsigned long arg)
934 struct sock *sk = (void *) arg;
936 BT_DBG("sk %p", sk);
938 bh_lock_sock(sk);
939 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
940 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
941 bh_unlock_sock(sk);
942 return;
945 l2cap_pi(sk)->retry_count++;
946 __mod_monitor_timer();
948 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
949 bh_unlock_sock(sk);
952 static void l2cap_retrans_timeout(unsigned long arg)
954 struct sock *sk = (void *) arg;
956 BT_DBG("sk %p", sk);
958 bh_lock_sock(sk);
959 l2cap_pi(sk)->retry_count = 1;
960 __mod_monitor_timer();
962 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
964 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
965 bh_unlock_sock(sk);
968 static void l2cap_drop_acked_frames(struct sock *sk)
970 struct sk_buff *skb;
972 while ((skb = skb_peek(TX_QUEUE(sk))) &&
973 l2cap_pi(sk)->unacked_frames) {
974 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
975 break;
977 skb = skb_dequeue(TX_QUEUE(sk));
978 kfree_skb(skb);
980 l2cap_pi(sk)->unacked_frames--;
983 if (!l2cap_pi(sk)->unacked_frames)
984 del_timer(&l2cap_pi(sk)->retrans_timer);
987 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
989 struct l2cap_pinfo *pi = l2cap_pi(sk);
990 struct hci_conn *hcon = pi->conn->hcon;
991 u16 flags;
993 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
995 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
996 flags = ACL_START_NO_FLUSH;
997 else
998 flags = ACL_START;
1000 hci_send_acl(hcon, skb, flags);
1003 void l2cap_streaming_send(struct sock *sk)
1005 struct sk_buff *skb;
1006 struct l2cap_pinfo *pi = l2cap_pi(sk);
1007 u16 control, fcs;
1009 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1010 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1011 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1012 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1014 if (pi->fcs == L2CAP_FCS_CRC16) {
1015 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1016 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1019 l2cap_do_send(sk, skb);
1021 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1025 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1027 struct l2cap_pinfo *pi = l2cap_pi(sk);
1028 struct sk_buff *skb, *tx_skb;
1029 u16 control, fcs;
1031 skb = skb_peek(TX_QUEUE(sk));
1032 if (!skb)
1033 return;
1035 do {
1036 if (bt_cb(skb)->tx_seq == tx_seq)
1037 break;
1039 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1040 return;
1042 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1044 if (pi->remote_max_tx &&
1045 bt_cb(skb)->retries == pi->remote_max_tx) {
1046 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1047 return;
1050 tx_skb = skb_clone(skb, GFP_ATOMIC);
1051 bt_cb(skb)->retries++;
1052 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1054 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1055 control |= L2CAP_CTRL_FINAL;
1056 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1059 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1060 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1062 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1064 if (pi->fcs == L2CAP_FCS_CRC16) {
1065 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1066 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1069 l2cap_do_send(sk, tx_skb);
1072 int l2cap_ertm_send(struct sock *sk)
1074 struct sk_buff *skb, *tx_skb;
1075 struct l2cap_pinfo *pi = l2cap_pi(sk);
1076 u16 control, fcs;
1077 int nsent = 0;
1079 if (sk->sk_state != BT_CONNECTED)
1080 return -ENOTCONN;
1082 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1084 if (pi->remote_max_tx &&
1085 bt_cb(skb)->retries == pi->remote_max_tx) {
1086 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1087 break;
1090 tx_skb = skb_clone(skb, GFP_ATOMIC);
1092 bt_cb(skb)->retries++;
1094 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1095 control &= L2CAP_CTRL_SAR;
1097 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1098 control |= L2CAP_CTRL_FINAL;
1099 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1101 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1102 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1103 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1106 if (pi->fcs == L2CAP_FCS_CRC16) {
1107 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1108 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1111 l2cap_do_send(sk, tx_skb);
1113 __mod_retrans_timer();
1115 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1116 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1118 if (bt_cb(skb)->retries == 1)
1119 pi->unacked_frames++;
1121 pi->frames_sent++;
1123 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1124 sk->sk_send_head = NULL;
1125 else
1126 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1128 nsent++;
1131 return nsent;
1134 static int l2cap_retransmit_frames(struct sock *sk)
1136 struct l2cap_pinfo *pi = l2cap_pi(sk);
1137 int ret;
1139 if (!skb_queue_empty(TX_QUEUE(sk)))
1140 sk->sk_send_head = TX_QUEUE(sk)->next;
1142 pi->next_tx_seq = pi->expected_ack_seq;
1143 ret = l2cap_ertm_send(sk);
1144 return ret;
1147 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1149 struct sock *sk = (struct sock *)pi;
1150 u16 control = 0;
1152 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1154 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1155 control |= L2CAP_SUPER_RCV_NOT_READY;
1156 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1157 l2cap_send_sframe(pi, control);
1158 return;
1161 if (l2cap_ertm_send(sk) > 0)
1162 return;
1164 control |= L2CAP_SUPER_RCV_READY;
1165 l2cap_send_sframe(pi, control);
1168 static void l2cap_send_srejtail(struct sock *sk)
1170 struct srej_list *tail;
1171 u16 control;
1173 control = L2CAP_SUPER_SELECT_REJECT;
1174 control |= L2CAP_CTRL_FINAL;
1176 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1177 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1179 l2cap_send_sframe(l2cap_pi(sk), control);
1182 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1184 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1185 struct sk_buff **frag;
1186 int err, sent = 0;
1188 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1189 return -EFAULT;
1191 sent += count;
1192 len -= count;
1194 /* Continuation fragments (no L2CAP header) */
1195 frag = &skb_shinfo(skb)->frag_list;
1196 while (len) {
1197 count = min_t(unsigned int, conn->mtu, len);
1199 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1200 if (!*frag)
1201 return err;
1202 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1203 return -EFAULT;
1205 sent += count;
1206 len -= count;
1208 frag = &(*frag)->next;
1211 return sent;
1214 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1216 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1217 struct sk_buff *skb;
1218 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1219 struct l2cap_hdr *lh;
1221 BT_DBG("sk %p len %d", sk, (int)len);
1223 count = min_t(unsigned int, (conn->mtu - hlen), len);
1224 skb = bt_skb_send_alloc(sk, count + hlen,
1225 msg->msg_flags & MSG_DONTWAIT, &err);
1226 if (!skb)
1227 return ERR_PTR(err);
1229 /* Create L2CAP header */
1230 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1231 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1232 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1233 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1235 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1236 if (unlikely(err < 0)) {
1237 kfree_skb(skb);
1238 return ERR_PTR(err);
1240 return skb;
1243 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1245 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1246 struct sk_buff *skb;
1247 int err, count, hlen = L2CAP_HDR_SIZE;
1248 struct l2cap_hdr *lh;
1250 BT_DBG("sk %p len %d", sk, (int)len);
1252 count = min_t(unsigned int, (conn->mtu - hlen), len);
1253 skb = bt_skb_send_alloc(sk, count + hlen,
1254 msg->msg_flags & MSG_DONTWAIT, &err);
1255 if (!skb)
1256 return ERR_PTR(err);
1258 /* Create L2CAP header */
1259 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1260 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1261 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1263 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1264 if (unlikely(err < 0)) {
1265 kfree_skb(skb);
1266 return ERR_PTR(err);
1268 return skb;
/* Build one ERTM/streaming I-frame PDU.
 *
 * @control: 16-bit control field placed right after the L2CAP header.
 * @sdulen:  when non-zero, an SDU-length field is inserted (first
 *           segment of a segmented SDU).
 *
 * If CRC16 FCS is configured, two bytes are reserved at the tail and
 * zero-filled here; the checksum itself is computed at transmit time.
 * Returns the skb or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Optional SDU length field (2 bytes). */
	if (sdulen)
		hlen += 2;

	/* Optional FCS field (2 bytes). */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve the FCS slot; filled in when the frame is sent. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than the remote MPS into a START/CONTINUE/END
 * sequence of I-frames and append them to the socket's TX queue.
 *
 * Only the START frame carries the total SDU length. On failure while
 * building a later segment, all already-built segments are purged.
 * Returns the number of payload bytes queued, or a negative errno.
 */
int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* START segment: sdulen argument carries the full SDU length. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop every segment built so far. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	/* If nothing was pending, transmission starts from our first frame. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1362 static void l2cap_chan_ready(struct sock *sk)
1364 struct sock *parent = bt_sk(sk)->parent;
1366 BT_DBG("sk %p, parent %p", sk, parent);
1368 l2cap_pi(sk)->conf_state = 0;
1369 l2cap_sock_clear_timer(sk);
1371 if (!parent) {
1372 /* Outgoing channel.
1373 * Wake up socket sleeping on connect.
1375 sk->sk_state = BT_CONNECTED;
1376 sk->sk_state_change(sk);
1377 } else {
1378 /* Incoming channel.
1379 * Wake up socket sleeping on accept.
1381 parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Walk the channel list under the read lock; only snooping
	 * SOCK_RAW sockets are interested. */
	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Each receiver gets its own clone; skip on OOM. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Free the clone if the receive queue rejects it. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
/* ---- L2CAP signalling commands ---- */

/* Build a signaling-channel command skb: L2CAP header + command header
 * + dlen bytes of payload. Payload that does not fit into conn->mtu is
 * carried in headerless continuation fragments chained on frag_list.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a different fixed signaling CID than BR/EDR. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and every fragment already chained to it. */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * 1-, 2- and 4-byte option values are returned by value in *val
 * (16/32-bit values converted from little endian); for any other
 * length *val holds a pointer to the raw option payload instead.
 * Returns the total encoded size of the option.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Unknown size: hand back a pointer to the raw bytes. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * The counterpart of l2cap_get_conf_opt(): 1/2/4-byte values are
 * encoded in place (little endian for 16/32 bit); for any other
 * length, val is treated as a pointer to len raw bytes to copy.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* val is actually a pointer to the payload bytes. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* ERTM ack timer: once it fires, acknowledge the frames received so
 * far. Runs in timer (softirq) context, hence the bh socket lock. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
/* Reset all ERTM sequence state and set up the per-channel timers,
 * queues and work item; called when an ERTM channel becomes connected. */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	/* Frames deferred to the socket backlog need the ERTM-aware
	 * receive path. */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1571 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1573 switch (mode) {
1574 case L2CAP_MODE_STREAMING:
1575 case L2CAP_MODE_ERTM:
1576 if (l2cap_mode_supported(mode, remote_feat_mask))
1577 return mode;
1578 /* fall through */
1579 default:
1580 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into data.
 *
 * On the first request (no config exchange yet), the desired mode may
 * be downgraded via l2cap_select_mode() unless the mode is mandated by
 * the device (L2CAP_CONF_STATE2_DEVICE). Then MTU, RFC and (for
 * ERTM/streaming) FCS options are appended as needed.
 * Returns the number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens before any config traffic. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Device-mandated mode is non-negotiable. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU. */
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Peers without ERTM/streaming support get no RFC option. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* MPS must leave room for L2CAP/control/FCS overhead. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request no FCS if we or the peer opted out of it. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1682 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1684 struct l2cap_pinfo *pi = l2cap_pi(sk);
1685 struct l2cap_conf_rsp *rsp = data;
1686 void *ptr = rsp->data;
1687 void *req = pi->conf_req;
1688 int len = pi->conf_len;
1689 int type, hint, olen;
1690 unsigned long val;
1691 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1692 u16 mtu = L2CAP_DEFAULT_MTU;
1693 u16 result = L2CAP_CONF_SUCCESS;
1695 BT_DBG("sk %p", sk);
1697 while (len >= L2CAP_CONF_OPT_SIZE) {
1698 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1700 hint = type & L2CAP_CONF_HINT;
1701 type &= L2CAP_CONF_MASK;
1703 switch (type) {
1704 case L2CAP_CONF_MTU:
1705 mtu = val;
1706 break;
1708 case L2CAP_CONF_FLUSH_TO:
1709 pi->flush_to = val;
1710 break;
1712 case L2CAP_CONF_QOS:
1713 break;
1715 case L2CAP_CONF_RFC:
1716 if (olen == sizeof(rfc))
1717 memcpy(&rfc, (void *) val, olen);
1718 break;
1720 case L2CAP_CONF_FCS:
1721 if (val == L2CAP_FCS_NONE)
1722 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1724 break;
1726 default:
1727 if (hint)
1728 break;
1730 result = L2CAP_CONF_UNKNOWN;
1731 *((u8 *) ptr++) = type;
1732 break;
1736 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1737 goto done;
1739 switch (pi->mode) {
1740 case L2CAP_MODE_STREAMING:
1741 case L2CAP_MODE_ERTM:
1742 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1743 pi->mode = l2cap_select_mode(rfc.mode,
1744 pi->conn->feat_mask);
1745 break;
1748 if (pi->mode != rfc.mode)
1749 return -ECONNREFUSED;
1751 break;
1754 done:
1755 if (pi->mode != rfc.mode) {
1756 result = L2CAP_CONF_UNACCEPT;
1757 rfc.mode = pi->mode;
1759 if (pi->num_conf_rsp == 1)
1760 return -ECONNREFUSED;
1762 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1763 sizeof(rfc), (unsigned long) &rfc);
1767 if (result == L2CAP_CONF_SUCCESS) {
1768 /* Configure output options and let the other side know
1769 * which ones we don't like. */
1771 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1772 result = L2CAP_CONF_UNACCEPT;
1773 else {
1774 pi->omtu = mtu;
1775 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1779 switch (rfc.mode) {
1780 case L2CAP_MODE_BASIC:
1781 pi->fcs = L2CAP_FCS_NONE;
1782 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1783 break;
1785 case L2CAP_MODE_ERTM:
1786 pi->remote_tx_win = rfc.txwin_size;
1787 pi->remote_max_tx = rfc.max_transmit;
1789 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1790 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1792 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1794 rfc.retrans_timeout =
1795 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1796 rfc.monitor_timeout =
1797 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1799 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1801 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1802 sizeof(rfc), (unsigned long) &rfc);
1804 break;
1806 case L2CAP_MODE_STREAMING:
1807 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1808 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1810 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1812 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1814 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1815 sizeof(rfc), (unsigned long) &rfc);
1817 break;
1819 default:
1820 result = L2CAP_CONF_UNACCEPT;
1822 memset(&rfc, 0, sizeof(rfc));
1823 rfc.mode = pi->mode;
1826 if (result == L2CAP_CONF_SUCCESS)
1827 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1829 rsp->scid = cpu_to_le16(pi->dcid);
1830 rsp->result = cpu_to_le16(result);
1831 rsp->flags = cpu_to_le16(0x0000);
1833 return ptr - data;
1836 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1838 struct l2cap_pinfo *pi = l2cap_pi(sk);
1839 struct l2cap_conf_req *req = data;
1840 void *ptr = req->data;
1841 int type, olen;
1842 unsigned long val;
1843 struct l2cap_conf_rfc rfc;
1845 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1847 while (len >= L2CAP_CONF_OPT_SIZE) {
1848 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1850 switch (type) {
1851 case L2CAP_CONF_MTU:
1852 if (val < L2CAP_DEFAULT_MIN_MTU) {
1853 *result = L2CAP_CONF_UNACCEPT;
1854 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1855 } else
1856 pi->imtu = val;
1857 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1858 break;
1860 case L2CAP_CONF_FLUSH_TO:
1861 pi->flush_to = val;
1862 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1863 2, pi->flush_to);
1864 break;
1866 case L2CAP_CONF_RFC:
1867 if (olen == sizeof(rfc))
1868 memcpy(&rfc, (void *)val, olen);
1870 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1871 rfc.mode != pi->mode)
1872 return -ECONNREFUSED;
1874 pi->fcs = 0;
1876 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1877 sizeof(rfc), (unsigned long) &rfc);
1878 break;
1882 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1883 return -ECONNREFUSED;
1885 pi->mode = rfc.mode;
1887 if (*result == L2CAP_CONF_SUCCESS) {
1888 switch (rfc.mode) {
1889 case L2CAP_MODE_ERTM:
1890 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1891 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1892 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1893 break;
1894 case L2CAP_MODE_STREAMING:
1895 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1899 req->dcid = cpu_to_le16(pi->dcid);
1900 req->flags = cpu_to_le16(0x0000);
1902 return ptr - data;
1905 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1907 struct l2cap_conf_rsp *rsp = data;
1908 void *ptr = rsp->data;
1910 BT_DBG("sk %p", sk);
1912 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1913 rsp->result = cpu_to_le16(result);
1914 rsp->flags = cpu_to_le16(flags);
1916 return ptr - data;
1919 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1921 struct l2cap_pinfo *pi = l2cap_pi(sk);
1922 int type, olen;
1923 unsigned long val;
1924 struct l2cap_conf_rfc rfc;
1926 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1928 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1929 return;
1931 while (len >= L2CAP_CONF_OPT_SIZE) {
1932 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1934 switch (type) {
1935 case L2CAP_CONF_RFC:
1936 if (olen == sizeof(rfc))
1937 memcpy(&rfc, (void *)val, olen);
1938 goto done;
1942 done:
1943 switch (rfc.mode) {
1944 case L2CAP_MODE_ERTM:
1945 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1946 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1947 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1948 break;
1949 case L2CAP_MODE_STREAMING:
1950 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1954 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1956 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1958 if (rej->reason != 0x0000)
1959 return 0;
1961 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1962 cmd->ident == conn->info_ident) {
1963 del_timer(&conn->info_timer);
1965 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1966 conn->info_ident = 0;
1968 l2cap_conn_start(conn);
1971 return 0;
/* Handle an incoming Connection Request.
 *
 * Looks up a listening socket for the PSM, performs security and
 * backlog checks, allocates and links a child socket, then sends a
 * Connection Response (success, pending or an error result). May also
 * kick off the information request exchange and the first config
 * request.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialize the child from the parent and record addresses
	 * and channel identifiers. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides via deferred accept. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still pending. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature mask exchange if it has not run yet. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
/* Handle an incoming Connection Response.
 *
 * The channel is found by scid when present, otherwise by the pending
 * command ident. Note: l2cap_get_chan_by_scid()/_by_ident() return the
 * socket with its bh lock held, released at the end of this function.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connected on the signaling level; start configuration. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			/* Retry teardown shortly via the socket timer. */
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2166 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2168 /* FCS is enabled only in ERTM or streaming mode, if one or both
2169 * sides request it.
2171 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2172 pi->fcs = L2CAP_FCS_NONE;
2173 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2174 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request.
 *
 * Options may arrive split across several requests (continuation flag
 * 0x0001); partial data is accumulated in pi->conf_req. Once complete,
 * the request is parsed, a response is sent, and — when both input and
 * output configuration are done — the channel is moved to connected.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel with its bh lock held. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		/* Reject: invalid CID in request (reason 0x0002). */
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not configured our direction yet; do it now. */
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2268 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2270 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2271 u16 scid, flags, result;
2272 struct sock *sk;
2273 int len = cmd->len - sizeof(*rsp);
2275 scid = __le16_to_cpu(rsp->scid);
2276 flags = __le16_to_cpu(rsp->flags);
2277 result = __le16_to_cpu(rsp->result);
2279 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2280 scid, flags, result);
2282 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2283 if (!sk)
2284 return 0;
2286 switch (result) {
2287 case L2CAP_CONF_SUCCESS:
2288 l2cap_conf_rfc_get(sk, rsp->data, len);
2289 break;
2291 case L2CAP_CONF_UNACCEPT:
2292 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2293 char req[64];
2295 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2296 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2297 goto done;
2300 /* throw out any old stored conf requests */
2301 result = L2CAP_CONF_SUCCESS;
2302 len = l2cap_parse_conf_rsp(sk, rsp->data,
2303 len, req, &result);
2304 if (len < 0) {
2305 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2306 goto done;
2309 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2310 L2CAP_CONF_REQ, len, req);
2311 l2cap_pi(sk)->num_conf_req++;
2312 if (result != L2CAP_CONF_SUCCESS)
2313 goto done;
2314 break;
2317 default:
2318 sk->sk_err = ECONNRESET;
2319 l2cap_sock_set_timer(sk, HZ * 5);
2320 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2321 goto done;
2324 if (flags & 0x01)
2325 goto done;
2327 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2329 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2330 set_default_fcs(l2cap_pi(sk));
2332 sk->sk_state = BT_CONNECTED;
2333 l2cap_pi(sk)->next_tx_seq = 0;
2334 l2cap_pi(sk)->expected_tx_seq = 0;
2335 __skb_queue_head_init(TX_QUEUE(sk));
2336 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2337 l2cap_ertm_init(sk);
2339 l2cap_chan_ready(sk);
2342 done:
2343 bh_unlock_sock(sk);
2344 return 0;
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * socket down and remove the channel (deferred via a short timer when
 * the socket is currently locked by userspace). */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* Returns the channel with its bh lock held. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		/* Retry teardown shortly via the socket timer. */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so remove the channel (deferred via a short timer when
 * the socket is currently locked by userspace). */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the channel with its bh lock held. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		/* Retry teardown shortly via the socket timer. */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries, and report NOTSUPP for anything else. */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled by the
		 * disable_ertm module parameter. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming Information Response.
 *
 * After the feature mask arrives, optionally chain a fixed-channel
 * query; once the exchange is complete (or failed), mark it done and
 * start the pending channels on this connection.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Treat failure as "exchange finished" and move on. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2509 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2510 u16 to_multiplier)
2512 u16 max_latency;
2514 if (min > max || min < 6 || max > 3200)
2515 return -EINVAL;
2517 if (to_multiplier < 10 || to_multiplier > 3200)
2518 return -EINVAL;
2520 if (max >= to_multiplier * 8)
2521 return -EINVAL;
2523 max_latency = (to_multiplier * 8 / max) - 1;
2524 if (latency > 499 || latency > max_latency)
2525 return -EINVAL;
2527 return 0;
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the master of the link. Validates the
 * requested parameters, always answers with accept/reject, and applies
 * accepted parameters via hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may change connection parameters. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the new parameters only after accepting them. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2572 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2573 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2575 int err = 0;
2577 switch (cmd->code) {
2578 case L2CAP_COMMAND_REJ:
2579 l2cap_command_rej(conn, cmd, data);
2580 break;
2582 case L2CAP_CONN_REQ:
2583 err = l2cap_connect_req(conn, cmd, data);
2584 break;
2586 case L2CAP_CONN_RSP:
2587 err = l2cap_connect_rsp(conn, cmd, data);
2588 break;
2590 case L2CAP_CONF_REQ:
2591 err = l2cap_config_req(conn, cmd, cmd_len, data);
2592 break;
2594 case L2CAP_CONF_RSP:
2595 err = l2cap_config_rsp(conn, cmd, data);
2596 break;
2598 case L2CAP_DISCONN_REQ:
2599 err = l2cap_disconnect_req(conn, cmd, data);
2600 break;
2602 case L2CAP_DISCONN_RSP:
2603 err = l2cap_disconnect_rsp(conn, cmd, data);
2604 break;
2606 case L2CAP_ECHO_REQ:
2607 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2608 break;
2610 case L2CAP_ECHO_RSP:
2611 break;
2613 case L2CAP_INFO_REQ:
2614 err = l2cap_information_req(conn, cmd, data);
2615 break;
2617 case L2CAP_INFO_RSP:
2618 err = l2cap_information_rsp(conn, cmd, data);
2619 break;
2621 default:
2622 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2623 err = -EINVAL;
2624 break;
2627 return err;
2630 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2631 struct l2cap_cmd_hdr *cmd, u8 *data)
2633 switch (cmd->code) {
2634 case L2CAP_COMMAND_REJ:
2635 return 0;
2637 case L2CAP_CONN_PARAM_UPDATE_REQ:
2638 return l2cap_conn_param_update_req(conn, cmd, data);
2640 case L2CAP_CONN_PARAM_UPDATE_RSP:
2641 return 0;
2643 default:
2644 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2645 return -EINVAL;
/* Parse and dispatch every signalling command carried in one C-frame on
 * the (BR/EDR or LE) signalling channel.  Consumes the skb. */
2649 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2650 struct sk_buff *skb)
2652 u8 *data = skb->data;
2653 int len = skb->len;
2654 struct l2cap_cmd_hdr cmd;
2655 int err;
/* Give raw sockets a copy of the signalling traffic first. */
2657 l2cap_raw_recv(conn, skb);
/* One C-frame may carry several commands back to back. */
2659 while (len >= L2CAP_CMD_HDR_SIZE) {
2660 u16 cmd_len;
2661 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2662 data += L2CAP_CMD_HDR_SIZE;
2663 len -= L2CAP_CMD_HDR_SIZE;
2665 cmd_len = le16_to_cpu(cmd.len);
2667 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command longer than the remaining payload, or using the reserved
 * ident 0, means the frame is corrupted: stop parsing. */
2669 if (cmd_len > len || !cmd.ident) {
2670 BT_DBG("corrupted command");
2671 break;
2674 if (conn->hcon->type == LE_LINK)
2675 err = l2cap_le_sig_cmd(conn, &cmd, data);
2676 else
2677 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
/* Commands the handlers could not process get a Command Reject. */
2679 if (err) {
2680 struct l2cap_cmd_rej rej;
2682 BT_ERR("Wrong link type (%d)", err);
2684 /* FIXME: Map err to a valid reason */
2685 rej.reason = cpu_to_le16(0);
2686 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2689 data += cmd_len;
2690 len -= cmd_len;
2693 kfree_skb(skb);
2696 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2698 u16 our_fcs, rcv_fcs;
2699 int hdr_size = L2CAP_HDR_SIZE + 2;
2701 if (pi->fcs == L2CAP_FCS_CRC16) {
2702 skb_trim(skb, skb->len - 2);
2703 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2704 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2706 if (our_fcs != rcv_fcs)
2707 return -EBADMSG;
2709 return 0;
/* Answer a poll (P=1) from the peer: either flush pending I-frames, or
 * reply with an RR/RNR S-frame so the remote learns our receive state. */
2712 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
2714 struct l2cap_pinfo *pi = l2cap_pi(sk);
2715 u16 control = 0;
2717 pi->frames_sent = 0;
2719 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* While locally busy we can only advertise Receiver Not Ready. */
2721 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2722 control |= L2CAP_SUPER_RCV_NOT_READY;
2723 l2cap_send_sframe(pi, control);
2724 pi->conn_state |= L2CAP_CONN_RNR_SENT;
2727 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
2728 l2cap_retransmit_frames(sk);
2730 l2cap_ertm_send(sk);
/* If nothing was (re)transmitted, still acknowledge with an RR. */
2732 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2733 pi->frames_sent == 0) {
2734 control |= L2CAP_SUPER_RCV_READY;
2735 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by distance from buffer_seq in the modulo-64 sequence
 * space.  Returns -EINVAL for a duplicate tx_seq, 0 otherwise. */
2739 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
2741 struct sk_buff *next_skb;
2742 struct l2cap_pinfo *pi = l2cap_pi(sk);
2743 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block. */
2745 bt_cb(skb)->tx_seq = tx_seq;
2746 bt_cb(skb)->sar = sar;
2748 next_skb = skb_peek(SREJ_QUEUE(sk));
2749 if (!next_skb) {
2750 __skb_queue_tail(SREJ_QUEUE(sk), skb);
2751 return 0;
/* Offsets are taken relative to buffer_seq so sequence-number
 * wrap-around compares correctly. */
2754 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
2755 if (tx_seq_offset < 0)
2756 tx_seq_offset += 64;
2758 do {
2759 if (bt_cb(next_skb)->tx_seq == tx_seq)
2760 return -EINVAL;
2762 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2763 pi->buffer_seq) % 64;
2764 if (next_tx_seq_offset < 0)
2765 next_tx_seq_offset += 64;
/* First queued frame that sorts after us: insert before it. */
2767 if (next_tx_seq_offset > tx_seq_offset) {
2768 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
2769 return 0;
2772 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
2773 break;
2775 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Sorts after everything currently queued. */
2777 __skb_queue_tail(SREJ_QUEUE(sk), skb);
2779 return 0;
/* Reassemble ERTM I-frames into SDUs according to their SAR bits and hand
 * complete SDUs to the socket receive queue.  Returns 0 normally, or a
 * negative error (e.g. -ENOMEM) that makes the caller enter the local
 * busy state and retry the frame later. */
2782 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2784 struct l2cap_pinfo *pi = l2cap_pi(sk);
2785 struct sk_buff *_skb;
2786 int err;
2788 switch (control & L2CAP_CTRL_SAR) {
2789 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented SDU in the middle of a segmented one is a
 * protocol violation. */
2790 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2791 goto drop;
2793 err = sock_queue_rcv_skb(sk, skb);
2794 if (!err)
2795 return err;
2797 break;
2799 case L2CAP_SDU_START:
2800 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2801 goto drop;
/* The first two octets of a start frame carry the SDU length. */
2803 pi->sdu_len = get_unaligned_le16(skb->data);
2805 if (pi->sdu_len > pi->imtu)
2806 goto disconnect;
2808 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2809 if (!pi->sdu)
2810 return -ENOMEM;
2812 /* pull sdu_len bytes only after alloc, because of Local Busy
2813 * condition we have to be sure that this will be executed
2814 * only once, i.e., when alloc does not fail */
2815 skb_pull(skb, 2);
2817 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2819 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2820 pi->partial_sdu_len = skb->len;
2821 break;
2823 case L2CAP_SDU_CONTINUE:
2824 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2825 goto disconnect;
2827 if (!pi->sdu)
2828 goto disconnect;
2830 pi->partial_sdu_len += skb->len;
2831 if (pi->partial_sdu_len > pi->sdu_len)
2832 goto drop;
2834 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2836 break;
2838 case L2CAP_SDU_END:
2839 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2840 goto disconnect;
2842 if (!pi->sdu)
2843 goto disconnect;
/* On a retry after -ENOMEM the payload was already copied; only
 * account for and copy it the first time through. */
2845 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
2846 pi->partial_sdu_len += skb->len;
2848 if (pi->partial_sdu_len > pi->imtu)
2849 goto drop;
2851 if (pi->partial_sdu_len != pi->sdu_len)
2852 goto drop;
2854 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2857 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2858 if (!_skb) {
/* Remember the copy already happened and ask the caller
 * to retry delivery from the busy queue. */
2859 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2860 return -ENOMEM;
2863 err = sock_queue_rcv_skb(sk, _skb);
2864 if (err < 0) {
2865 kfree_skb(_skb);
2866 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2867 return err;
2870 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2871 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
2873 kfree_skb(pi->sdu);
2874 break;
2877 kfree_skb(skb);
2878 return 0;
2880 drop:
2881 kfree_skb(pi->sdu);
2882 pi->sdu = NULL;
/* NOTE(review): "drop" falls through into "disconnect", so every dropped
 * SDU also tears the channel down — looks deliberate given ERTM's
 * reliability guarantee, but confirm against the spec. */
2884 disconnect:
2885 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
2886 kfree_skb(skb);
2887 return 0;
/* Drain the local-busy backlog back into SDU reassembly.  Returns 0 when
 * the backlog is empty again (the local busy condition is cleared and,
 * if an RNR was sent, the peer is polled to resume), or -EBUSY while the
 * receive path still cannot accept data. */
2890 static int l2cap_try_push_rx_skb(struct sock *sk)
2892 struct l2cap_pinfo *pi = l2cap_pi(sk);
2893 struct sk_buff *skb;
2894 u16 control;
2895 int err;
2897 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
2898 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2899 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still cannot accept data: put the frame back and stay busy. */
2900 if (err < 0) {
2901 skb_queue_head(BUSY_QUEUE(sk), skb);
2902 return -EBUSY;
2905 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2908 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
2909 goto done;
/* We previously told the peer RNR: poll with RR (P=1) so it resumes
 * transmission, and wait for the final (F=1) response. */
2911 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2912 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2913 l2cap_send_sframe(pi, control);
2914 l2cap_pi(sk)->retry_count = 1;
2916 del_timer(&pi->retrans_timer);
2917 __mod_monitor_timer();
2919 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
2921 done:
2922 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2923 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
2925 BT_DBG("sk %p, Exit local busy", sk);
2927 return 0;
/* Deferred work that retries clearing the local-busy condition.  Sleeps
 * in HZ/5 steps (dropping the socket lock while sleeping so the receive
 * path can progress) until the busy backlog drains, a signal/socket error
 * occurs, or the retry budget is exhausted — in which case the channel is
 * disconnected. */
2930 static void l2cap_busy_work(struct work_struct *work)
2932 DECLARE_WAITQUEUE(wait, current);
2933 struct l2cap_pinfo *pi =
2934 container_of(work, struct l2cap_pinfo, busy_work);
2935 struct sock *sk = (struct sock *)pi;
2936 int n_tries = 0, timeo = HZ/5, err;
2937 struct sk_buff *skb;
2939 lock_sock(sk);
2941 add_wait_queue(sk_sleep(sk), &wait);
2942 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
2943 set_current_state(TASK_INTERRUPTIBLE);
/* Too many attempts: give up and reset the channel. */
2945 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
2946 err = -EBUSY;
2947 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
2948 break;
2951 if (!timeo)
2952 timeo = HZ/5;
2954 if (signal_pending(current)) {
2955 err = sock_intr_errno(timeo);
2956 break;
/* Sleep without the socket lock held. */
2959 release_sock(sk);
2960 timeo = schedule_timeout(timeo);
2961 lock_sock(sk);
2963 err = sock_error(sk);
2964 if (err)
2965 break;
/* A successful push means the busy condition is over. */
2967 if (l2cap_try_push_rx_skb(sk) == 0)
2968 break;
2971 set_current_state(TASK_RUNNING);
2972 remove_wait_queue(sk_sleep(sk), &wait);
2974 release_sock(sk);
/* Feed one in-sequence I-frame into reassembly.  On failure the channel
 * enters the local-busy state: the frame is parked on BUSY_QUEUE, an RNR
 * is sent to the peer, and the busy worker is scheduled.  Returns the
 * reassembly result, or -EBUSY while local busy persists. */
2977 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
2979 struct l2cap_pinfo *pi = l2cap_pi(sk);
2980 int sctrl, err;
/* Already busy: append to the backlog and try to drain it. */
2982 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2983 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2984 __skb_queue_tail(BUSY_QUEUE(sk), skb);
2985 return l2cap_try_push_rx_skb(sk);
2990 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
2991 if (err >= 0) {
2992 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2993 return err;
2996 /* Busy Condition */
2997 BT_DBG("sk %p, Enter local busy", sk);
2999 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3000 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3001 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop transmitting until we recover. */
3003 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3004 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3005 l2cap_send_sframe(pi, sctrl);
3007 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3009 del_timer(&pi->ack_timer);
3011 queue_work(_busy_wq, &pi->busy_work);
3013 return err;
3016 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3018 struct l2cap_pinfo *pi = l2cap_pi(sk);
3019 struct sk_buff *_skb;
3020 int err = -EINVAL;
3023 * TODO: We have to notify the userland if some data is lost with the
3024 * Streaming Mode.
3027 switch (control & L2CAP_CTRL_SAR) {
3028 case L2CAP_SDU_UNSEGMENTED:
3029 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3030 kfree_skb(pi->sdu);
3031 break;
3034 err = sock_queue_rcv_skb(sk, skb);
3035 if (!err)
3036 return 0;
3038 break;
3040 case L2CAP_SDU_START:
3041 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3042 kfree_skb(pi->sdu);
3043 break;
3046 pi->sdu_len = get_unaligned_le16(skb->data);
3047 skb_pull(skb, 2);
3049 if (pi->sdu_len > pi->imtu) {
3050 err = -EMSGSIZE;
3051 break;
3054 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3055 if (!pi->sdu) {
3056 err = -ENOMEM;
3057 break;
3060 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3062 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3063 pi->partial_sdu_len = skb->len;
3064 err = 0;
3065 break;
3067 case L2CAP_SDU_CONTINUE:
3068 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3069 break;
3071 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3073 pi->partial_sdu_len += skb->len;
3074 if (pi->partial_sdu_len > pi->sdu_len)
3075 kfree_skb(pi->sdu);
3076 else
3077 err = 0;
3079 break;
3081 case L2CAP_SDU_END:
3082 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3083 break;
3085 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3087 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3088 pi->partial_sdu_len += skb->len;
3090 if (pi->partial_sdu_len > pi->imtu)
3091 goto drop;
3093 if (pi->partial_sdu_len == pi->sdu_len) {
3094 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3095 err = sock_queue_rcv_skb(sk, _skb);
3096 if (err < 0)
3097 kfree_skb(_skb);
3099 err = 0;
3101 drop:
3102 kfree_skb(pi->sdu);
3103 break;
3106 kfree_skb(skb);
3107 return err;
3110 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3112 struct sk_buff *skb;
3113 u16 control;
3115 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3116 if (bt_cb(skb)->tx_seq != tx_seq)
3117 break;
3119 skb = skb_dequeue(SREJ_QUEUE(sk));
3120 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3121 l2cap_ertm_reassembly_sdu(sk, skb, control);
3122 l2cap_pi(sk)->buffer_seq_srej =
3123 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3124 tx_seq = (tx_seq + 1) % 64;
/* Re-send every outstanding SREJ up to the one matching tx_seq.  Re-sent
 * entries are rotated to the tail of SREJ_LIST to preserve order; the
 * entry matching tx_seq itself is satisfied and removed. */
3128 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3130 struct l2cap_pinfo *pi = l2cap_pi(sk);
3131 struct srej_list *l, *tmp;
3132 u16 control;
3134 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* The retransmission we were waiting for has arrived. */
3135 if (l->tx_seq == tx_seq) {
3136 list_del(&l->list);
3137 kfree(l);
3138 return;
3140 control = L2CAP_SUPER_SELECT_REJECT;
3141 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3142 l2cap_send_sframe(pi, control);
/* Keep list order in sync with the retransmission order. */
3143 list_del(&l->list);
3144 list_add_tail(&l->list, SREJ_LIST(sk));
3148 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3150 struct l2cap_pinfo *pi = l2cap_pi(sk);
3151 struct srej_list *new;
3152 u16 control;
3154 while (tx_seq != pi->expected_tx_seq) {
3155 control = L2CAP_SUPER_SELECT_REJECT;
3156 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3157 l2cap_send_sframe(pi, control);
3159 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3160 new->tx_seq = pi->expected_tx_seq;
3161 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3162 list_add_tail(&new->list, SREJ_LIST(sk));
3164 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3167 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3169 struct l2cap_pinfo *pi = l2cap_pi(sk);
3170 u8 tx_seq = __get_txseq(rx_control);
3171 u8 req_seq = __get_reqseq(rx_control);
3172 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3173 int tx_seq_offset, expected_tx_seq_offset;
3174 int num_to_ack = (pi->tx_win/6) + 1;
3175 int err = 0;
3177 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3178 rx_control);
3180 if (L2CAP_CTRL_FINAL & rx_control &&
3181 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3182 del_timer(&pi->monitor_timer);
3183 if (pi->unacked_frames > 0)
3184 __mod_retrans_timer();
3185 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3188 pi->expected_ack_seq = req_seq;
3189 l2cap_drop_acked_frames(sk);
3191 if (tx_seq == pi->expected_tx_seq)
3192 goto expected;
3194 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3195 if (tx_seq_offset < 0)
3196 tx_seq_offset += 64;
3198 /* invalid tx_seq */
3199 if (tx_seq_offset >= pi->tx_win) {
3200 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3201 goto drop;
3204 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3205 goto drop;
3207 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3208 struct srej_list *first;
3210 first = list_first_entry(SREJ_LIST(sk),
3211 struct srej_list, list);
3212 if (tx_seq == first->tx_seq) {
3213 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3214 l2cap_check_srej_gap(sk, tx_seq);
3216 list_del(&first->list);
3217 kfree(first);
3219 if (list_empty(SREJ_LIST(sk))) {
3220 pi->buffer_seq = pi->buffer_seq_srej;
3221 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3222 l2cap_send_ack(pi);
3223 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3225 } else {
3226 struct srej_list *l;
3228 /* duplicated tx_seq */
3229 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3230 goto drop;
3232 list_for_each_entry(l, SREJ_LIST(sk), list) {
3233 if (l->tx_seq == tx_seq) {
3234 l2cap_resend_srejframe(sk, tx_seq);
3235 return 0;
3238 l2cap_send_srejframe(sk, tx_seq);
3240 } else {
3241 expected_tx_seq_offset =
3242 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3243 if (expected_tx_seq_offset < 0)
3244 expected_tx_seq_offset += 64;
3246 /* duplicated tx_seq */
3247 if (tx_seq_offset < expected_tx_seq_offset)
3248 goto drop;
3250 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3252 BT_DBG("sk %p, Enter SREJ", sk);
3254 INIT_LIST_HEAD(SREJ_LIST(sk));
3255 pi->buffer_seq_srej = pi->buffer_seq;
3257 __skb_queue_head_init(SREJ_QUEUE(sk));
3258 __skb_queue_head_init(BUSY_QUEUE(sk));
3259 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3261 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3263 l2cap_send_srejframe(sk, tx_seq);
3265 del_timer(&pi->ack_timer);
3267 return 0;
3269 expected:
3270 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3272 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3273 bt_cb(skb)->tx_seq = tx_seq;
3274 bt_cb(skb)->sar = sar;
3275 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3276 return 0;
3279 err = l2cap_push_rx_skb(sk, skb, rx_control);
3280 if (err < 0)
3281 return 0;
3283 if (rx_control & L2CAP_CTRL_FINAL) {
3284 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3285 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3286 else
3287 l2cap_retransmit_frames(sk);
3290 __mod_ack_timer();
3292 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3293 if (pi->num_acked == num_to_ack - 1)
3294 l2cap_send_ack(pi);
3296 return 0;
3298 drop:
3299 kfree_skb(skb);
3300 return 0;
/* Handle a Receiver Ready S-frame: acknowledge outstanding I-frames and
 * react to the P/F bits (answer a poll, or resume transmission). */
3303 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3305 struct l2cap_pinfo *pi = l2cap_pi(sk);
3307 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3308 rx_control);
3310 pi->expected_ack_seq = __get_reqseq(rx_control);
3311 l2cap_drop_acked_frames(sk);
/* A poll (P=1) demands a final response from us. */
3313 if (rx_control & L2CAP_CTRL_POLL) {
3314 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3315 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3316 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3317 (pi->unacked_frames > 0))
3318 __mod_retrans_timer();
3320 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3321 l2cap_send_srejtail(sk);
3322 } else {
3323 l2cap_send_i_or_rr_or_rnr(sk);
/* An RR with F=1 answers our REJ poll unless it was already served. */
3326 } else if (rx_control & L2CAP_CTRL_FINAL) {
3327 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3329 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3330 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3331 else
3332 l2cap_retransmit_frames(sk);
/* Plain RR: the remote is no longer busy; keep transmitting. */
3334 } else {
3335 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3336 (pi->unacked_frames > 0))
3337 __mod_retrans_timer();
3339 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3340 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3341 l2cap_send_ack(pi);
3342 else
3343 l2cap_ertm_send(sk);
/* Handle a Reject S-frame: the peer requests retransmission starting at
 * req_seq.  With F=1 only retransmit if this REJ was not already acted
 * upon; otherwise retransmit now and, while waiting for our poll's final
 * response, remember that the REJ has been handled. */
3347 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3349 struct l2cap_pinfo *pi = l2cap_pi(sk);
3350 u8 tx_seq = __get_reqseq(rx_control);
3352 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3354 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Everything before req_seq is acknowledged. */
3356 pi->expected_ack_seq = tx_seq;
3357 l2cap_drop_acked_frames(sk);
3359 if (rx_control & L2CAP_CTRL_FINAL) {
3360 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3361 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3362 else
3363 l2cap_retransmit_frames(sk);
3364 } else {
3365 l2cap_retransmit_frames(sk);
3367 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3368 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject S-frame: retransmit the single I-frame the
 * peer reports missing, honouring the P/F bits. */
3371 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3373 struct l2cap_pinfo *pi = l2cap_pi(sk);
3374 u8 tx_seq = __get_reqseq(rx_control);
3376 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3378 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* SREJ with P=1: acknowledge, retransmit the missing frame with F=1
 * pending, and continue normal transmission. */
3380 if (rx_control & L2CAP_CTRL_POLL) {
3381 pi->expected_ack_seq = tx_seq;
3382 l2cap_drop_acked_frames(sk);
3384 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3385 l2cap_retransmit_one_frame(sk, tx_seq);
3387 l2cap_ertm_send(sk);
3389 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3390 pi->srej_save_reqseq = tx_seq;
3391 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* SREJ with F=1: retransmit only if this SREJ was not already served. */
3393 } else if (rx_control & L2CAP_CTRL_FINAL) {
3394 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3395 pi->srej_save_reqseq == tx_seq)
3396 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3397 else
3398 l2cap_retransmit_one_frame(sk, tx_seq);
3399 } else {
3400 l2cap_retransmit_one_frame(sk, tx_seq);
3401 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3402 pi->srej_save_reqseq = tx_seq;
3403 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready S-frame: the peer is busy, so stop the
 * retransmission timer; still answer a poll as the protocol requires. */
3408 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3410 struct l2cap_pinfo *pi = l2cap_pi(sk);
3411 u8 tx_seq = __get_reqseq(rx_control);
3413 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3415 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3416 pi->expected_ack_seq = tx_seq;
3417 l2cap_drop_acked_frames(sk);
3419 if (rx_control & L2CAP_CTRL_POLL)
3420 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* Outside SREJ recovery: halt retransmissions; a poll still gets the
 * mandatory final response. */
3422 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
3423 del_timer(&pi->retrans_timer);
3424 if (rx_control & L2CAP_CTRL_POLL)
3425 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
3426 return;
/* In SREJ recovery: answer a poll with the pending SREJs, otherwise
 * report Receiver Ready. */
3429 if (rx_control & L2CAP_CTRL_POLL)
3430 l2cap_send_srejtail(sk);
3431 else
3432 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
3435 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3437 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3439 if (L2CAP_CTRL_FINAL & rx_control &&
3440 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3441 del_timer(&l2cap_pi(sk)->monitor_timer);
3442 if (l2cap_pi(sk)->unacked_frames > 0)
3443 __mod_retrans_timer();
3444 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3447 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3448 case L2CAP_SUPER_RCV_READY:
3449 l2cap_data_channel_rrframe(sk, rx_control);
3450 break;
3452 case L2CAP_SUPER_REJECT:
3453 l2cap_data_channel_rejframe(sk, rx_control);
3454 break;
3456 case L2CAP_SUPER_SELECT_REJECT:
3457 l2cap_data_channel_srejframe(sk, rx_control);
3458 break;
3460 case L2CAP_SUPER_RCV_NOT_READY:
3461 l2cap_data_channel_rnrframe(sk, rx_control);
3462 break;
3465 kfree_skb(skb);
3466 return 0;
/* Entry point for one received ERTM frame: strip the control field,
 * verify the FCS, sanity-check payload length and req_seq, then route to
 * the I-frame or S-frame handler.  Always consumes the skb; returns 0. */
3469 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3471 struct l2cap_pinfo *pi = l2cap_pi(sk);
3472 u16 control;
3473 u8 req_seq;
3474 int len, next_tx_seq_offset, req_seq_offset;
3476 control = get_unaligned_le16(skb->data);
3477 skb_pull(skb, 2);
3478 len = skb->len;
3481 * We can just drop the corrupted I-frame here.
3482 * Receiver will miss it and start proper recovery
3483 * procedures and ask retransmission.
3485 if (l2cap_check_fcs(pi, skb))
3486 goto drop;
/* len should count SDU payload only: subtract the SAR length field
 * of a start I-frame and the FCS field when present. */
3488 if (__is_sar_start(control) && __is_iframe(control))
3489 len -= 2;
3491 if (pi->fcs == L2CAP_FCS_CRC16)
3492 len -= 2;
/* Payload larger than the negotiated MPS: fatal protocol error. */
3494 if (len > pi->mps) {
3495 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3496 goto drop;
/* req_seq must acknowledge only frames we have actually sent. */
3499 req_seq = __get_reqseq(control);
3500 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3501 if (req_seq_offset < 0)
3502 req_seq_offset += 64;
3504 next_tx_seq_offset =
3505 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3506 if (next_tx_seq_offset < 0)
3507 next_tx_seq_offset += 64;
3509 /* check for invalid req-seq */
3510 if (req_seq_offset > next_tx_seq_offset) {
3511 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3512 goto drop;
3515 if (__is_iframe(control)) {
3516 if (len < 0) {
3517 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3518 goto drop;
3521 l2cap_data_channel_iframe(sk, control, skb);
3522 } else {
/* S-frames never carry a payload. */
3523 if (len != 0) {
3524 BT_ERR("%d", len);
3525 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3526 goto drop;
3529 l2cap_data_channel_sframe(sk, control, skb);
3532 return 0;
3534 drop:
3535 kfree_skb(skb);
3536 return 0;
/* Deliver one frame to the connection-oriented channel identified by cid,
 * dispatching on the channel mode (basic / ERTM / streaming).  Always
 * consumes the skb and returns 0.
 * NOTE(review): the bh_unlock_sock() at "done" implies
 * l2cap_get_chan_by_scid() returns the socket bh-locked — confirm in
 * that helper. */
3539 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3541 struct sock *sk;
3542 struct l2cap_pinfo *pi;
3543 u16 control;
3544 u8 tx_seq;
3545 int len;
3547 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3548 if (!sk) {
3549 BT_DBG("unknown cid 0x%4.4x", cid);
3550 goto drop;
3553 pi = l2cap_pi(sk);
3555 BT_DBG("sk %p, len %d", sk, skb->len);
3557 if (sk->sk_state != BT_CONNECTED)
3558 goto drop;
3560 switch (pi->mode) {
3561 case L2CAP_MODE_BASIC:
3562 /* If socket recv buffers overflows we drop data here
3563 * which is *bad* because L2CAP has to be reliable.
3564 * But we don't have any other choice. L2CAP doesn't
3565 * provide flow control mechanism. */
3567 if (pi->imtu < skb->len)
3568 goto drop;
3570 if (!sock_queue_rcv_skb(sk, skb))
3571 goto done;
3572 break;
3574 case L2CAP_MODE_ERTM:
/* Defer to the socket backlog when user context owns the lock. */
3575 if (!sock_owned_by_user(sk)) {
3576 l2cap_ertm_data_rcv(sk, skb);
3577 } else {
3578 if (sk_add_backlog(sk, skb))
3579 goto drop;
3582 goto done;
3584 case L2CAP_MODE_STREAMING:
3585 control = get_unaligned_le16(skb->data);
3586 skb_pull(skb, 2);
3587 len = skb->len;
3589 if (l2cap_check_fcs(pi, skb))
3590 goto drop;
/* Adjust for the SAR length field and the FCS, as in ERTM. */
3592 if (__is_sar_start(control))
3593 len -= 2;
3595 if (pi->fcs == L2CAP_FCS_CRC16)
3596 len -= 2;
/* Streaming mode never carries S-frames or oversized payloads. */
3598 if (len > pi->mps || len < 0 || __is_sframe(control))
3599 goto drop;
3601 tx_seq = __get_txseq(control);
/* Out-of-sequence frames are simply skipped over (lossy mode). */
3603 if (pi->expected_tx_seq == tx_seq)
3604 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3605 else
3606 pi->expected_tx_seq = (tx_seq + 1) % 64;
3608 l2cap_streaming_reassembly_sdu(sk, skb, control);
3610 goto done;
3612 default:
3613 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3614 break;
3617 drop:
3618 kfree_skb(skb);
3620 done:
3621 if (sk)
3622 bh_unlock_sock(sk);
3624 return 0;
/* Deliver a connectionless (G-frame) payload to the socket bound to psm,
 * enforcing its receive MTU.  Always consumes the skb; returns 0. */
3627 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3629 struct sock *sk;
3631 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3632 if (!sk)
3633 goto drop;
3635 bh_lock_sock(sk);
3637 BT_DBG("sk %p, len %d", sk, skb->len);
/* Only bound or connected sockets may receive connectionless data. */
3639 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3640 goto drop;
3642 if (l2cap_pi(sk)->imtu < skb->len)
3643 goto drop;
3645 if (!sock_queue_rcv_skb(sk, skb))
3646 goto done;
3648 drop:
3649 kfree_skb(skb);
/* sk may be NULL when no socket matched the PSM. */
3651 done:
3652 if (sk)
3653 bh_unlock_sock(sk);
3654 return 0;
3657 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3659 struct l2cap_hdr *lh = (void *) skb->data;
3660 u16 cid, len;
3661 __le16 psm;
3663 skb_pull(skb, L2CAP_HDR_SIZE);
3664 cid = __le16_to_cpu(lh->cid);
3665 len = __le16_to_cpu(lh->len);
3667 if (len != skb->len) {
3668 kfree_skb(skb);
3669 return;
3672 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3674 switch (cid) {
3675 case L2CAP_CID_LE_SIGNALING:
3676 case L2CAP_CID_SIGNALING:
3677 l2cap_sig_channel(conn, skb);
3678 break;
3680 case L2CAP_CID_CONN_LESS:
3681 psm = get_unaligned_le16(skb->data);
3682 skb_pull(skb, 2);
3683 l2cap_conless_channel(conn, psm, skb);
3684 break;
3686 default:
3687 l2cap_data_channel(conn, cid, skb);
3688 break;
3692 /* ---- L2CAP interface with lower layer (HCI) ---- */
3694 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3696 int exact = 0, lm1 = 0, lm2 = 0;
3697 register struct sock *sk;
3698 struct hlist_node *node;
3700 if (type != ACL_LINK)
3701 return -EINVAL;
3703 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3705 /* Find listening sockets and check their link_mode */
3706 read_lock(&l2cap_sk_list.lock);
3707 sk_for_each(sk, node, &l2cap_sk_list.head) {
3708 if (sk->sk_state != BT_LISTEN)
3709 continue;
3711 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3712 lm1 |= HCI_LM_ACCEPT;
3713 if (l2cap_pi(sk)->role_switch)
3714 lm1 |= HCI_LM_MASTER;
3715 exact++;
3716 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3717 lm2 |= HCI_LM_ACCEPT;
3718 if (l2cap_pi(sk)->role_switch)
3719 lm2 |= HCI_LM_MASTER;
3722 read_unlock(&l2cap_sk_list.lock);
3724 return exact ? lm1 : lm2;
3727 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3729 struct l2cap_conn *conn;
3731 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3733 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3734 return -EINVAL;
3736 if (!status) {
3737 conn = l2cap_conn_add(hcon, status);
3738 if (conn)
3739 l2cap_conn_ready(conn);
3740 } else
3741 l2cap_conn_del(hcon, bt_err(status));
3743 return 0;
3746 static int l2cap_disconn_ind(struct hci_conn *hcon)
3748 struct l2cap_conn *conn = hcon->l2cap_data;
3750 BT_DBG("hcon %p", hcon);
3752 if (hcon->type != ACL_LINK || !conn)
3753 return 0x13;
3755 return conn->disc_reason;
3758 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3760 BT_DBG("hcon %p reason %d", hcon, reason);
3762 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3763 return -EINVAL;
3765 l2cap_conn_del(hcon, bt_err(reason));
3767 return 0;
3770 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3772 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3773 return;
3775 if (encrypt == 0x00) {
3776 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3777 l2cap_sock_clear_timer(sk);
3778 l2cap_sock_set_timer(sk, HZ * 5);
3779 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3780 __l2cap_sock_close(sk, ECONNREFUSED);
3781 } else {
3782 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3783 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure finished with the
 * given status.  Walk every channel on the connection and advance its
 * state: established channels re-check encryption, outgoing channels
 * (BT_CONNECT) either send the pending Connect Request or are failed, and
 * incoming channels (BT_CONNECT2) are answered with success or
 * "security block". */
3787 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3789 struct l2cap_chan_list *l;
3790 struct l2cap_conn *conn = hcon->l2cap_data;
3791 struct sock *sk;
3793 if (!conn)
3794 return 0;
3796 l = &conn->chan_list;
3798 BT_DBG("conn %p", conn);
3800 read_lock(&l->lock);
3802 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3803 bh_lock_sock(sk);
/* A connect procedure for this channel is still pending; skip. */
3805 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3806 bh_unlock_sock(sk);
3807 continue;
3810 if (!status && (sk->sk_state == BT_CONNECTED ||
3811 sk->sk_state == BT_CONFIG)) {
3812 l2cap_check_encryption(sk, encrypt);
3813 bh_unlock_sock(sk);
3814 continue;
3817 if (sk->sk_state == BT_CONNECT) {
3818 if (!status) {
3819 struct l2cap_conn_req req;
3820 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3821 req.psm = l2cap_pi(sk)->psm;
3823 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3824 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3826 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3827 L2CAP_CONN_REQ, sizeof(req), &req);
3828 } else {
/* Security failed: arm a short timer so the channel is
 * torn down from timer context. */
3829 l2cap_sock_clear_timer(sk);
3830 l2cap_sock_set_timer(sk, HZ / 10);
3832 } else if (sk->sk_state == BT_CONNECT2) {
3833 struct l2cap_conn_rsp rsp;
3834 __u16 result;
3836 if (!status) {
3837 sk->sk_state = BT_CONFIG;
3838 result = L2CAP_CR_SUCCESS;
3839 } else {
3840 sk->sk_state = BT_DISCONN;
3841 l2cap_sock_set_timer(sk, HZ / 10);
3842 result = L2CAP_CR_SEC_BLOCK;
3845 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3846 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3847 rsp.result = cpu_to_le16(result);
3848 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3849 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3850 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3853 bh_unlock_sock(sk);
3856 read_unlock(&l->lock);
3858 return 0;
/* HCI callback: one ACL data packet arrived.  Reassembles fragmented
 * L2CAP frames: a start fragment (no ACL_CONT flag) must carry the basic
 * L2CAP header and may be followed by continuation fragments until the
 * advertised length is complete, at which point the whole frame goes to
 * l2cap_recv_frame().  Malformed sequences reset reassembly and mark the
 * connection unreliable.  Always returns 0. */
3861 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3863 struct l2cap_conn *conn = hcon->l2cap_data;
3865 if (!conn)
3866 conn = l2cap_conn_add(hcon, 0);
3868 if (!conn)
3869 goto drop;
3871 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3873 if (!(flags & ACL_CONT)) {
3874 struct l2cap_hdr *hdr;
3875 struct sock *sk;
3876 u16 cid;
3877 int len;
/* A new start fragment while one is in progress: discard the old
 * partial frame. */
3879 if (conn->rx_len) {
3880 BT_ERR("Unexpected start frame (len %d)", skb->len);
3881 kfree_skb(conn->rx_skb);
3882 conn->rx_skb = NULL;
3883 conn->rx_len = 0;
3884 l2cap_conn_unreliable(conn, ECOMM);
3887 /* Start fragment always begin with Basic L2CAP header */
3888 if (skb->len < L2CAP_HDR_SIZE) {
3889 BT_ERR("Frame is too short (len %d)", skb->len);
3890 l2cap_conn_unreliable(conn, ECOMM);
3891 goto drop;
3894 hdr = (struct l2cap_hdr *) skb->data;
3895 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3896 cid = __le16_to_cpu(hdr->cid);
3898 if (len == skb->len) {
3899 /* Complete frame received */
3900 l2cap_recv_frame(conn, skb);
3901 return 0;
3904 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3906 if (skb->len > len) {
3907 BT_ERR("Frame is too long (len %d, expected len %d)",
3908 skb->len, len);
3909 l2cap_conn_unreliable(conn, ECOMM);
3910 goto drop;
/* Pre-check the destination channel's MTU so we never buffer a
 * frame the socket could not accept anyway. */
3913 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3915 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
3916 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
3917 len, l2cap_pi(sk)->imtu);
3918 bh_unlock_sock(sk);
3919 l2cap_conn_unreliable(conn, ECOMM);
3920 goto drop;
3923 if (sk)
3924 bh_unlock_sock(sk);
3926 /* Allocate skb for the complete frame (with header) */
3927 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3928 if (!conn->rx_skb)
3929 goto drop;
3931 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3932 skb->len);
3933 conn->rx_len = len - skb->len;
3934 } else {
3935 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without a pending start fragment is invalid. */
3937 if (!conn->rx_len) {
3938 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3939 l2cap_conn_unreliable(conn, ECOMM);
3940 goto drop;
3943 if (skb->len > conn->rx_len) {
3944 BT_ERR("Fragment is too long (len %d, expected %d)",
3945 skb->len, conn->rx_len);
3946 kfree_skb(conn->rx_skb);
3947 conn->rx_skb = NULL;
3948 conn->rx_len = 0;
3949 l2cap_conn_unreliable(conn, ECOMM);
3950 goto drop;
3953 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3954 skb->len);
3955 conn->rx_len -= skb->len;
3957 if (!conn->rx_len) {
3958 /* Complete frame received */
3959 l2cap_recv_frame(conn, conn->rx_skb);
3960 conn->rx_skb = NULL;
3964 drop:
3965 kfree_skb(skb);
3966 return 0;
3969 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3971 struct sock *sk;
3972 struct hlist_node *node;
3974 read_lock_bh(&l2cap_sk_list.lock);
3976 sk_for_each(sk, node, &l2cap_sk_list.head) {
3977 struct l2cap_pinfo *pi = l2cap_pi(sk);
3979 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3980 batostr(&bt_sk(sk)->src),
3981 batostr(&bt_sk(sk)->dst),
3982 sk->sk_state, __le16_to_cpu(pi->psm),
3983 pi->scid, pi->dcid,
3984 pi->imtu, pi->omtu, pi->sec_level,
3985 pi->mode);
3988 read_unlock_bh(&l2cap_sk_list.lock);
3990 return 0;
/* debugfs open callback: hook l2cap_debugfs_show() up via single_open(),
 * forwarding the inode's private data as the seq_file private pointer. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the read-only "l2cap" debugfs entry; read/llseek/
 * release are the stock seq_file single-record helpers. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Dentry of the debugfs "l2cap" file; created in l2cap_init(), removed in
 * l2cap_exit().  NULL if debugfs creation failed or was skipped. */
static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection,
 * disconnection, security and inbound ACL-data callbacks. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4018 int __init l2cap_init(void)
4020 int err;
4022 err = l2cap_init_sockets();
4023 if (err < 0)
4024 return err;
4026 _busy_wq = create_singlethread_workqueue("l2cap");
4027 if (!_busy_wq) {
4028 err = -ENOMEM;
4029 goto error;
4032 err = hci_register_proto(&l2cap_hci_proto);
4033 if (err < 0) {
4034 BT_ERR("L2CAP protocol registration failed");
4035 bt_sock_unregister(BTPROTO_L2CAP);
4036 goto error;
4039 if (bt_debugfs) {
4040 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4041 bt_debugfs, NULL, &l2cap_debugfs_fops);
4042 if (!l2cap_debugfs)
4043 BT_ERR("Failed to create L2CAP debug file");
4046 return 0;
4048 error:
4049 destroy_workqueue(_busy_wq);
4050 l2cap_cleanup_sockets();
4051 return err;
/*
 * Module teardown, in reverse order of l2cap_init(): remove the debugfs
 * file, drain and destroy the busy workqueue before the protocol goes
 * away, unregister from the HCI core, then clean up the socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Finish any queued busy-work before tearing the queue down */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
/* Expose "disable_ertm" as a module parameter (0644: also writable via
 * /sys/module at runtime) to turn off enhanced retransmission mode. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");