Bluetooth: Fix bug with ERTM vars increment
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
bloba9fdfe401f5bfd0f400cd18bf804157dba9b0136
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* Socket timer expiry: choose an error code from the current state,
 * close the channel and drop the timer's reference on the socket. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Connected/configuring channels and secured connect attempts are
	 * reported as refused; anything else is a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill only after the lock is released; kill requires an unlocked sk. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
124 break;
126 return s;
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
131 struct sock *s;
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
134 break;
136 return s;
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 struct sock *s;
144 read_lock(&l->lock);
145 s = __l2cap_get_chan_by_scid(l, cid);
146 if (s)
147 bh_lock_sock(s);
148 read_unlock(&l->lock);
149 return s;
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 struct sock *s;
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
157 break;
159 return s;
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
164 struct sock *s;
165 read_lock(&l->lock);
166 s = __l2cap_get_chan_by_ident(l, ident);
167 if (s)
168 bh_lock_sock(s);
169 read_unlock(&l->lock);
170 return s;
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
179 return cid;
182 return 0;
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
187 sock_hold(sk);
189 if (l->head)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
194 l->head = sk;
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
202 if (sk == l->head)
203 l->head = next;
205 if (next)
206 l2cap_pi(next)->prev_c = prev;
207 if (prev)
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
211 __sock_put(sk);
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
233 } else {
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
242 if (parent)
243 bt_accept_enqueue(parent, sk);
246 /* Delete channel.
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
257 if (conn) {
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
267 if (err)
268 sk->sk_err = err;
270 if (parent) {
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
273 } else
274 sk->sk_state_change(sk);
277 /* Service level security */
278 static inline int l2cap_check_security(struct sock *sk)
280 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
281 __u8 auth_type;
283 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
284 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
285 auth_type = HCI_AT_NO_BONDING_MITM;
286 else
287 auth_type = HCI_AT_NO_BONDING;
289 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
290 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
291 } else {
292 switch (l2cap_pi(sk)->sec_level) {
293 case BT_SECURITY_HIGH:
294 auth_type = HCI_AT_GENERAL_BONDING_MITM;
295 break;
296 case BT_SECURITY_MEDIUM:
297 auth_type = HCI_AT_GENERAL_BONDING;
298 break;
299 default:
300 auth_type = HCI_AT_NO_BONDING;
301 break;
305 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
306 auth_type);
309 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
311 u8 id;
313 /* Get next available identificator.
314 * 1 - 128 are used by kernel.
315 * 129 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
319 spin_lock_bh(&conn->lock);
321 if (++conn->tx_ident > 128)
322 conn->tx_ident = 1;
324 id = conn->tx_ident;
326 spin_unlock_bh(&conn->lock);
328 return id;
331 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
335 BT_DBG("code 0x%2.2x", code);
337 if (!skb)
338 return;
340 hci_send_acl(conn->hcon, skb, 0);
343 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
345 struct sk_buff *skb;
346 struct l2cap_hdr *lh;
347 struct l2cap_conn *conn = pi->conn;
348 int count, hlen = L2CAP_HDR_SIZE + 2;
350 if (pi->fcs == L2CAP_FCS_CRC16)
351 hlen += 2;
353 BT_DBG("pi %p, control 0x%2.2x", pi, control);
355 count = min_t(unsigned int, conn->mtu, hlen);
356 control |= L2CAP_CTRL_FRAME_TYPE;
358 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
359 control |= L2CAP_CTRL_FINAL;
360 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
363 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
364 control |= L2CAP_CTRL_POLL;
365 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
368 skb = bt_skb_alloc(count, GFP_ATOMIC);
369 if (!skb)
370 return;
372 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
373 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
374 lh->cid = cpu_to_le16(pi->dcid);
375 put_unaligned_le16(control, skb_put(skb, 2));
377 if (pi->fcs == L2CAP_FCS_CRC16) {
378 u16 fcs = crc16(0, (u8 *)lh, count - 2);
379 put_unaligned_le16(fcs, skb_put(skb, 2));
382 hci_send_acl(pi->conn->hcon, skb, 0);
385 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
387 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
388 control |= L2CAP_SUPER_RCV_NOT_READY;
389 pi->conn_state |= L2CAP_CONN_RNR_SENT;
390 } else
391 control |= L2CAP_SUPER_RCV_READY;
393 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
395 l2cap_send_sframe(pi, control);
398 static inline int __l2cap_no_conn_pending(struct sock *sk)
400 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
403 static void l2cap_do_start(struct sock *sk)
405 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
407 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
408 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
409 return;
411 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
412 struct l2cap_conn_req req;
413 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
414 req.psm = l2cap_pi(sk)->psm;
416 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
417 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req);
422 } else {
423 struct l2cap_info_req req;
424 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
427 conn->info_ident = l2cap_get_ident(conn);
429 mod_timer(&conn->info_timer, jiffies +
430 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
432 l2cap_send_cmd(conn, conn->info_ident,
433 L2CAP_INFO_REQ, sizeof(req), &req);
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
439 struct l2cap_disconn_req req;
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
447 /* ---- L2CAP connections ---- */
448 static void l2cap_conn_start(struct l2cap_conn *conn)
450 struct l2cap_chan_list *l = &conn->chan_list;
451 struct sock *sk;
453 BT_DBG("conn %p", conn);
455 read_lock(&l->lock);
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
458 bh_lock_sock(sk);
460 if (sk->sk_type != SOCK_SEQPACKET &&
461 sk->sk_type != SOCK_STREAM) {
462 bh_unlock_sock(sk);
463 continue;
466 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk) &&
468 __l2cap_no_conn_pending(sk)) {
469 struct l2cap_conn_req req;
470 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
471 req.psm = l2cap_pi(sk)->psm;
473 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
474 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
476 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
477 L2CAP_CONN_REQ, sizeof(req), &req);
479 } else if (sk->sk_state == BT_CONNECT2) {
480 struct l2cap_conn_rsp rsp;
481 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
482 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
484 if (l2cap_check_security(sk)) {
485 if (bt_sk(sk)->defer_setup) {
486 struct sock *parent = bt_sk(sk)->parent;
487 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
488 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
489 parent->sk_data_ready(parent, 0);
491 } else {
492 sk->sk_state = BT_CONFIG;
493 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
494 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
496 } else {
497 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
498 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
501 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
502 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
505 bh_unlock_sock(sk);
508 read_unlock(&l->lock);
511 static void l2cap_conn_ready(struct l2cap_conn *conn)
513 struct l2cap_chan_list *l = &conn->chan_list;
514 struct sock *sk;
516 BT_DBG("conn %p", conn);
518 read_lock(&l->lock);
520 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
521 bh_lock_sock(sk);
523 if (sk->sk_type != SOCK_SEQPACKET &&
524 sk->sk_type != SOCK_STREAM) {
525 l2cap_sock_clear_timer(sk);
526 sk->sk_state = BT_CONNECTED;
527 sk->sk_state_change(sk);
528 } else if (sk->sk_state == BT_CONNECT)
529 l2cap_do_start(sk);
531 bh_unlock_sock(sk);
534 read_unlock(&l->lock);
537 /* Notify sockets that we cannot guaranty reliability anymore */
538 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
540 struct l2cap_chan_list *l = &conn->chan_list;
541 struct sock *sk;
543 BT_DBG("conn %p", conn);
545 read_lock(&l->lock);
547 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
548 if (l2cap_pi(sk)->force_reliable)
549 sk->sk_err = err;
552 read_unlock(&l->lock);
555 static void l2cap_info_timeout(unsigned long arg)
557 struct l2cap_conn *conn = (void *) arg;
559 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
560 conn->info_ident = 0;
562 l2cap_conn_start(conn);
565 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
567 struct l2cap_conn *conn = hcon->l2cap_data;
569 if (conn || status)
570 return conn;
572 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
573 if (!conn)
574 return NULL;
576 hcon->l2cap_data = conn;
577 conn->hcon = hcon;
579 BT_DBG("hcon %p conn %p", hcon, conn);
581 conn->mtu = hcon->hdev->acl_mtu;
582 conn->src = &hcon->hdev->bdaddr;
583 conn->dst = &hcon->dst;
585 conn->feat_mask = 0;
587 spin_lock_init(&conn->lock);
588 rwlock_init(&conn->chan_list.lock);
590 setup_timer(&conn->info_timer, l2cap_info_timeout,
591 (unsigned long) conn);
593 conn->disc_reason = 0x13;
595 return conn;
598 static void l2cap_conn_del(struct hci_conn *hcon, int err)
600 struct l2cap_conn *conn = hcon->l2cap_data;
601 struct sock *sk;
603 if (!conn)
604 return;
606 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
608 kfree_skb(conn->rx_skb);
610 /* Kill channels */
611 while ((sk = conn->chan_list.head)) {
612 bh_lock_sock(sk);
613 l2cap_chan_del(sk, err);
614 bh_unlock_sock(sk);
615 l2cap_sock_kill(sk);
618 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
619 del_timer_sync(&conn->info_timer);
621 hcon->l2cap_data = NULL;
622 kfree(conn);
625 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
627 struct l2cap_chan_list *l = &conn->chan_list;
628 write_lock_bh(&l->lock);
629 __l2cap_chan_add(conn, sk, parent);
630 write_unlock_bh(&l->lock);
633 /* ---- Socket interface ---- */
634 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
636 struct sock *sk;
637 struct hlist_node *node;
638 sk_for_each(sk, node, &l2cap_sk_list.head)
639 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
640 goto found;
641 sk = NULL;
642 found:
643 return sk;
646 /* Find socket with psm and source bdaddr.
647 * Returns closest match.
649 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
651 struct sock *sk = NULL, *sk1 = NULL;
652 struct hlist_node *node;
654 sk_for_each(sk, node, &l2cap_sk_list.head) {
655 if (state && sk->sk_state != state)
656 continue;
658 if (l2cap_pi(sk)->psm == psm) {
659 /* Exact match. */
660 if (!bacmp(&bt_sk(sk)->src, src))
661 break;
663 /* Closest match */
664 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
665 sk1 = sk;
668 return node ? sk : sk1;
671 /* Find socket with given address (psm, src).
672 * Returns locked socket */
673 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
675 struct sock *s;
676 read_lock(&l2cap_sk_list.lock);
677 s = __l2cap_get_sock_by_psm(state, psm, src);
678 if (s)
679 bh_lock_sock(s);
680 read_unlock(&l2cap_sk_list.lock);
681 return s;
684 static void l2cap_sock_destruct(struct sock *sk)
686 BT_DBG("sk %p", sk);
688 skb_queue_purge(&sk->sk_receive_queue);
689 skb_queue_purge(&sk->sk_write_queue);
692 static void l2cap_sock_cleanup_listen(struct sock *parent)
694 struct sock *sk;
696 BT_DBG("parent %p", parent);
698 /* Close not yet accepted channels */
699 while ((sk = bt_accept_dequeue(parent, NULL)))
700 l2cap_sock_close(sk);
702 parent->sk_state = BT_CLOSED;
703 sock_set_flag(parent, SOCK_ZAPPED);
706 /* Kill socket (only if zapped and orphan)
707 * Must be called on unlocked socket.
709 static void l2cap_sock_kill(struct sock *sk)
711 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
712 return;
714 BT_DBG("sk %p state %d", sk, sk->sk_state);
716 /* Kill poor orphan */
717 bt_sock_unlink(&l2cap_sk_list, sk);
718 sock_set_flag(sk, SOCK_DEAD);
719 sock_put(sk);
722 static void __l2cap_sock_close(struct sock *sk, int reason)
724 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
726 switch (sk->sk_state) {
727 case BT_LISTEN:
728 l2cap_sock_cleanup_listen(sk);
729 break;
731 case BT_CONNECTED:
732 case BT_CONFIG:
733 if (sk->sk_type == SOCK_SEQPACKET ||
734 sk->sk_type == SOCK_STREAM) {
735 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
737 sk->sk_state = BT_DISCONN;
738 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
739 l2cap_send_disconn_req(conn, sk);
740 } else
741 l2cap_chan_del(sk, reason);
742 break;
744 case BT_CONNECT2:
745 if (sk->sk_type == SOCK_SEQPACKET ||
746 sk->sk_type == SOCK_STREAM) {
747 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
748 struct l2cap_conn_rsp rsp;
749 __u16 result;
751 if (bt_sk(sk)->defer_setup)
752 result = L2CAP_CR_SEC_BLOCK;
753 else
754 result = L2CAP_CR_BAD_PSM;
756 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
757 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
758 rsp.result = cpu_to_le16(result);
759 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
760 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
761 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
762 } else
763 l2cap_chan_del(sk, reason);
764 break;
766 case BT_CONNECT:
767 case BT_DISCONN:
768 l2cap_chan_del(sk, reason);
769 break;
771 default:
772 sock_set_flag(sk, SOCK_ZAPPED);
773 break;
777 /* Must be called on unlocked socket. */
778 static void l2cap_sock_close(struct sock *sk)
780 l2cap_sock_clear_timer(sk);
781 lock_sock(sk);
782 __l2cap_sock_close(sk, ECONNRESET);
783 release_sock(sk);
784 l2cap_sock_kill(sk);
787 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
789 struct l2cap_pinfo *pi = l2cap_pi(sk);
791 BT_DBG("sk %p", sk);
793 if (parent) {
794 sk->sk_type = parent->sk_type;
795 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
797 pi->imtu = l2cap_pi(parent)->imtu;
798 pi->omtu = l2cap_pi(parent)->omtu;
799 pi->mode = l2cap_pi(parent)->mode;
800 pi->fcs = l2cap_pi(parent)->fcs;
801 pi->max_tx = l2cap_pi(parent)->max_tx;
802 pi->tx_win = l2cap_pi(parent)->tx_win;
803 pi->sec_level = l2cap_pi(parent)->sec_level;
804 pi->role_switch = l2cap_pi(parent)->role_switch;
805 pi->force_reliable = l2cap_pi(parent)->force_reliable;
806 } else {
807 pi->imtu = L2CAP_DEFAULT_MTU;
808 pi->omtu = 0;
809 if (enable_ertm && sk->sk_type == SOCK_STREAM)
810 pi->mode = L2CAP_MODE_ERTM;
811 else
812 pi->mode = L2CAP_MODE_BASIC;
813 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
814 pi->fcs = L2CAP_FCS_CRC16;
815 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
816 pi->sec_level = BT_SECURITY_LOW;
817 pi->role_switch = 0;
818 pi->force_reliable = 0;
821 /* Default config options */
822 pi->conf_len = 0;
823 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
824 skb_queue_head_init(TX_QUEUE(sk));
825 skb_queue_head_init(SREJ_QUEUE(sk));
826 skb_queue_head_init(BUSY_QUEUE(sk));
827 INIT_LIST_HEAD(SREJ_LIST(sk));
830 static struct proto l2cap_proto = {
831 .name = "L2CAP",
832 .owner = THIS_MODULE,
833 .obj_size = sizeof(struct l2cap_pinfo)
836 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
838 struct sock *sk;
840 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
841 if (!sk)
842 return NULL;
844 sock_init_data(sock, sk);
845 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
847 sk->sk_destruct = l2cap_sock_destruct;
848 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
850 sock_reset_flag(sk, SOCK_ZAPPED);
852 sk->sk_protocol = proto;
853 sk->sk_state = BT_OPEN;
855 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
857 bt_sock_link(&l2cap_sk_list, sk);
858 return sk;
861 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
862 int kern)
864 struct sock *sk;
866 BT_DBG("sock %p", sock);
868 sock->state = SS_UNCONNECTED;
870 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
871 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
872 return -ESOCKTNOSUPPORT;
874 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
875 return -EPERM;
877 sock->ops = &l2cap_sock_ops;
879 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
880 if (!sk)
881 return -ENOMEM;
883 l2cap_sock_init(sk, NULL);
884 return 0;
887 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
889 struct sock *sk = sock->sk;
890 struct sockaddr_l2 la;
891 int len, err = 0;
893 BT_DBG("sk %p", sk);
895 if (!addr || addr->sa_family != AF_BLUETOOTH)
896 return -EINVAL;
898 memset(&la, 0, sizeof(la));
899 len = min_t(unsigned int, sizeof(la), alen);
900 memcpy(&la, addr, len);
902 if (la.l2_cid)
903 return -EINVAL;
905 lock_sock(sk);
907 if (sk->sk_state != BT_OPEN) {
908 err = -EBADFD;
909 goto done;
912 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
913 !capable(CAP_NET_BIND_SERVICE)) {
914 err = -EACCES;
915 goto done;
918 write_lock_bh(&l2cap_sk_list.lock);
920 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
921 err = -EADDRINUSE;
922 } else {
923 /* Save source address */
924 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
925 l2cap_pi(sk)->psm = la.l2_psm;
926 l2cap_pi(sk)->sport = la.l2_psm;
927 sk->sk_state = BT_BOUND;
929 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
930 __le16_to_cpu(la.l2_psm) == 0x0003)
931 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
934 write_unlock_bh(&l2cap_sk_list.lock);
936 done:
937 release_sock(sk);
938 return err;
941 static int l2cap_do_connect(struct sock *sk)
943 bdaddr_t *src = &bt_sk(sk)->src;
944 bdaddr_t *dst = &bt_sk(sk)->dst;
945 struct l2cap_conn *conn;
946 struct hci_conn *hcon;
947 struct hci_dev *hdev;
948 __u8 auth_type;
949 int err;
951 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
952 l2cap_pi(sk)->psm);
954 hdev = hci_get_route(dst, src);
955 if (!hdev)
956 return -EHOSTUNREACH;
958 hci_dev_lock_bh(hdev);
960 err = -ENOMEM;
962 if (sk->sk_type == SOCK_RAW) {
963 switch (l2cap_pi(sk)->sec_level) {
964 case BT_SECURITY_HIGH:
965 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
966 break;
967 case BT_SECURITY_MEDIUM:
968 auth_type = HCI_AT_DEDICATED_BONDING;
969 break;
970 default:
971 auth_type = HCI_AT_NO_BONDING;
972 break;
974 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
975 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
976 auth_type = HCI_AT_NO_BONDING_MITM;
977 else
978 auth_type = HCI_AT_NO_BONDING;
980 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
981 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
982 } else {
983 switch (l2cap_pi(sk)->sec_level) {
984 case BT_SECURITY_HIGH:
985 auth_type = HCI_AT_GENERAL_BONDING_MITM;
986 break;
987 case BT_SECURITY_MEDIUM:
988 auth_type = HCI_AT_GENERAL_BONDING;
989 break;
990 default:
991 auth_type = HCI_AT_NO_BONDING;
992 break;
996 hcon = hci_connect(hdev, ACL_LINK, dst,
997 l2cap_pi(sk)->sec_level, auth_type);
998 if (!hcon)
999 goto done;
1001 conn = l2cap_conn_add(hcon, 0);
1002 if (!conn) {
1003 hci_conn_put(hcon);
1004 goto done;
1007 err = 0;
1009 /* Update source addr of the socket */
1010 bacpy(src, conn->src);
1012 l2cap_chan_add(conn, sk, NULL);
1014 sk->sk_state = BT_CONNECT;
1015 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1017 if (hcon->state == BT_CONNECTED) {
1018 if (sk->sk_type != SOCK_SEQPACKET &&
1019 sk->sk_type != SOCK_STREAM) {
1020 l2cap_sock_clear_timer(sk);
1021 sk->sk_state = BT_CONNECTED;
1022 } else
1023 l2cap_do_start(sk);
1026 done:
1027 hci_dev_unlock_bh(hdev);
1028 hci_dev_put(hdev);
1029 return err;
1032 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1034 struct sock *sk = sock->sk;
1035 struct sockaddr_l2 la;
1036 int len, err = 0;
1038 BT_DBG("sk %p", sk);
1040 if (!addr || alen < sizeof(addr->sa_family) ||
1041 addr->sa_family != AF_BLUETOOTH)
1042 return -EINVAL;
1044 memset(&la, 0, sizeof(la));
1045 len = min_t(unsigned int, sizeof(la), alen);
1046 memcpy(&la, addr, len);
1048 if (la.l2_cid)
1049 return -EINVAL;
1051 lock_sock(sk);
1053 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1054 && !la.l2_psm) {
1055 err = -EINVAL;
1056 goto done;
1059 switch (l2cap_pi(sk)->mode) {
1060 case L2CAP_MODE_BASIC:
1061 break;
1062 case L2CAP_MODE_ERTM:
1063 case L2CAP_MODE_STREAMING:
1064 if (enable_ertm)
1065 break;
1066 /* fall through */
1067 default:
1068 err = -ENOTSUPP;
1069 goto done;
1072 switch (sk->sk_state) {
1073 case BT_CONNECT:
1074 case BT_CONNECT2:
1075 case BT_CONFIG:
1076 /* Already connecting */
1077 goto wait;
1079 case BT_CONNECTED:
1080 /* Already connected */
1081 goto done;
1083 case BT_OPEN:
1084 case BT_BOUND:
1085 /* Can connect */
1086 break;
1088 default:
1089 err = -EBADFD;
1090 goto done;
1093 /* Set destination address and psm */
1094 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1095 l2cap_pi(sk)->psm = la.l2_psm;
1097 err = l2cap_do_connect(sk);
1098 if (err)
1099 goto done;
1101 wait:
1102 err = bt_sock_wait_state(sk, BT_CONNECTED,
1103 sock_sndtimeo(sk, flags & O_NONBLOCK));
1104 done:
1105 release_sock(sk);
1106 return err;
1109 static int l2cap_sock_listen(struct socket *sock, int backlog)
1111 struct sock *sk = sock->sk;
1112 int err = 0;
1114 BT_DBG("sk %p backlog %d", sk, backlog);
1116 lock_sock(sk);
1118 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1119 || sk->sk_state != BT_BOUND) {
1120 err = -EBADFD;
1121 goto done;
1124 switch (l2cap_pi(sk)->mode) {
1125 case L2CAP_MODE_BASIC:
1126 break;
1127 case L2CAP_MODE_ERTM:
1128 case L2CAP_MODE_STREAMING:
1129 if (enable_ertm)
1130 break;
1131 /* fall through */
1132 default:
1133 err = -ENOTSUPP;
1134 goto done;
1137 if (!l2cap_pi(sk)->psm) {
1138 bdaddr_t *src = &bt_sk(sk)->src;
1139 u16 psm;
1141 err = -EINVAL;
1143 write_lock_bh(&l2cap_sk_list.lock);
1145 for (psm = 0x1001; psm < 0x1100; psm += 2)
1146 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1147 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1148 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1149 err = 0;
1150 break;
1153 write_unlock_bh(&l2cap_sk_list.lock);
1155 if (err < 0)
1156 goto done;
1159 sk->sk_max_ack_backlog = backlog;
1160 sk->sk_ack_backlog = 0;
1161 sk->sk_state = BT_LISTEN;
1163 done:
1164 release_sock(sk);
1165 return err;
1168 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1170 DECLARE_WAITQUEUE(wait, current);
1171 struct sock *sk = sock->sk, *nsk;
1172 long timeo;
1173 int err = 0;
1175 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1177 if (sk->sk_state != BT_LISTEN) {
1178 err = -EBADFD;
1179 goto done;
1182 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1184 BT_DBG("sk %p timeo %ld", sk, timeo);
1186 /* Wait for an incoming connection. (wake-one). */
1187 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1188 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1189 set_current_state(TASK_INTERRUPTIBLE);
1190 if (!timeo) {
1191 err = -EAGAIN;
1192 break;
1195 release_sock(sk);
1196 timeo = schedule_timeout(timeo);
1197 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1199 if (sk->sk_state != BT_LISTEN) {
1200 err = -EBADFD;
1201 break;
1204 if (signal_pending(current)) {
1205 err = sock_intr_errno(timeo);
1206 break;
1209 set_current_state(TASK_RUNNING);
1210 remove_wait_queue(sk_sleep(sk), &wait);
1212 if (err)
1213 goto done;
1215 newsock->state = SS_CONNECTED;
1217 BT_DBG("new socket %p", nsk);
1219 done:
1220 release_sock(sk);
1221 return err;
1224 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1226 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1227 struct sock *sk = sock->sk;
1229 BT_DBG("sock %p, sk %p", sock, sk);
1231 addr->sa_family = AF_BLUETOOTH;
1232 *len = sizeof(struct sockaddr_l2);
1234 if (peer) {
1235 la->l2_psm = l2cap_pi(sk)->psm;
1236 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1237 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1238 } else {
1239 la->l2_psm = l2cap_pi(sk)->sport;
1240 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1241 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1244 return 0;
1247 static int __l2cap_wait_ack(struct sock *sk)
1249 DECLARE_WAITQUEUE(wait, current);
1250 int err = 0;
1251 int timeo = HZ/5;
1253 add_wait_queue(sk_sleep(sk), &wait);
1254 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1255 set_current_state(TASK_INTERRUPTIBLE);
1257 if (!timeo)
1258 timeo = HZ/5;
1260 if (signal_pending(current)) {
1261 err = sock_intr_errno(timeo);
1262 break;
1265 release_sock(sk);
1266 timeo = schedule_timeout(timeo);
1267 lock_sock(sk);
1269 err = sock_error(sk);
1270 if (err)
1271 break;
1273 set_current_state(TASK_RUNNING);
1274 remove_wait_queue(sk_sleep(sk), &wait);
1275 return err;
1278 static void l2cap_monitor_timeout(unsigned long arg)
1280 struct sock *sk = (void *) arg;
1282 bh_lock_sock(sk);
1283 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1284 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1285 bh_unlock_sock(sk);
1286 return;
1289 l2cap_pi(sk)->retry_count++;
1290 __mod_monitor_timer();
1292 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1293 bh_unlock_sock(sk);
1296 static void l2cap_retrans_timeout(unsigned long arg)
1298 struct sock *sk = (void *) arg;
1300 bh_lock_sock(sk);
1301 l2cap_pi(sk)->retry_count = 1;
1302 __mod_monitor_timer();
1304 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1306 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1307 bh_unlock_sock(sk);
1310 static void l2cap_drop_acked_frames(struct sock *sk)
1312 struct sk_buff *skb;
1314 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1315 l2cap_pi(sk)->unacked_frames) {
1316 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1317 break;
1319 skb = skb_dequeue(TX_QUEUE(sk));
1320 kfree_skb(skb);
1322 l2cap_pi(sk)->unacked_frames--;
1325 if (!l2cap_pi(sk)->unacked_frames)
1326 del_timer(&l2cap_pi(sk)->retrans_timer);
1329 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1331 struct l2cap_pinfo *pi = l2cap_pi(sk);
1333 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1335 hci_send_acl(pi->conn->hcon, skb, 0);
/*
 * Streaming mode transmit: send every PDU queued on the TX queue.
 * Each frame gets the next TxSeq stamped into its control field and an
 * optional CRC16 FCS; frames are freed immediately after sending since
 * streaming mode has no retransmission.  Always returns 0.
 */
1338 static int l2cap_streaming_send(struct sock *sk)
1340 struct sk_buff *skb, *tx_skb;
1341 struct l2cap_pinfo *pi = l2cap_pi(sk);
1342 u16 control, fcs;
1344 while ((skb = sk->sk_send_head)) {
/* Clone shares the data buffer; the control field is patched in place */
1345 tx_skb = skb_clone(skb, GFP_ATOMIC);
1347 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1348 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1349 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything except its own trailing two bytes */
1351 if (pi->fcs == L2CAP_FCS_CRC16) {
1352 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1353 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1356 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM/streaming control field format */
1358 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1360 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1361 sk->sk_send_head = NULL;
1362 else
1363 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1365 skb = skb_dequeue(TX_QUEUE(sk));
1366 kfree_skb(skb);
1368 return 0;
/*
 * Retransmit the single I-frame with the given TxSeq (SREJ recovery).
 * Walks the TX queue looking for the matching frame, gives up with a
 * disconnect if the frame already hit the peer's max-transmit limit,
 * otherwise re-sends a clone with updated ReqSeq/TxSeq and FCS.
 */
1371 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1374 struct sk_buff *skb, *tx_skb;
1375 u16 control, fcs;
1377 skb = skb_peek(TX_QUEUE(sk));
1378 if (!skb)
1379 return;
1381 do {
1382 if (bt_cb(skb)->tx_seq == tx_seq)
1383 break;
/* Requested sequence number not queued - nothing to retransmit */
1385 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1386 return;
1388 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1390 if (pi->remote_max_tx &&
1391 bt_cb(skb)->retries == pi->remote_max_tx) {
1392 l2cap_send_disconn_req(pi->conn, sk);
1393 return;
1396 tx_skb = skb_clone(skb, GFP_ATOMIC);
1397 bt_cb(skb)->retries++;
1398 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Piggyback the current ReqSeq (ack) along with the resent TxSeq */
1399 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1400 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1401 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1403 if (pi->fcs == L2CAP_FCS_CRC16) {
1404 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1405 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1408 l2cap_do_send(sk, tx_skb);
1411 static int l2cap_ertm_send(struct sock *sk)
1413 struct sk_buff *skb, *tx_skb;
1414 struct l2cap_pinfo *pi = l2cap_pi(sk);
1415 u16 control, fcs;
1416 int nsent = 0;
1418 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1419 return 0;
1421 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1422 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1424 if (pi->remote_max_tx &&
1425 bt_cb(skb)->retries == pi->remote_max_tx) {
1426 l2cap_send_disconn_req(pi->conn, sk);
1427 break;
1430 tx_skb = skb_clone(skb, GFP_ATOMIC);
1432 bt_cb(skb)->retries++;
1434 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1435 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1436 control |= L2CAP_CTRL_FINAL;
1437 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1439 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1440 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1441 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1444 if (pi->fcs == L2CAP_FCS_CRC16) {
1445 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1446 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1449 l2cap_do_send(sk, tx_skb);
1451 __mod_retrans_timer();
1453 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1454 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1456 pi->unacked_frames++;
1457 pi->frames_sent++;
1459 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1460 sk->sk_send_head = NULL;
1461 else
1462 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1464 nsent++;
1467 return nsent;
/*
 * Rewind the send state to the peer's last acknowledged frame
 * (expected_ack_seq) and retransmit everything from there via
 * l2cap_ertm_send().  Takes send_lock itself; returns the number of
 * frames (re)sent.
 */
1470 static int l2cap_retransmit_frames(struct sock *sk)
1472 struct l2cap_pinfo *pi = l2cap_pi(sk);
1473 int ret;
1475 spin_lock_bh(&pi->send_lock);
/* Restart sending from the head of the (still-queued) TX frames */
1477 if (!skb_queue_empty(TX_QUEUE(sk)))
1478 sk->sk_send_head = TX_QUEUE(sk)->next;
1480 pi->next_tx_seq = pi->expected_ack_seq;
1481 ret = l2cap_ertm_send(sk);
1483 spin_unlock_bh(&pi->send_lock);
1485 return ret;
/*
 * Acknowledge received I-frames.  If the local side is busy, send RNR
 * and remember that we did.  Otherwise try to piggyback the ack on
 * outgoing I-frames (l2cap_ertm_send); only when nothing was sent is
 * an explicit RR S-frame emitted.
 */
1488 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1490 struct sock *sk = (struct sock *)pi;
1491 u16 control = 0;
1492 int nframes;
1494 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1496 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1497 control |= L2CAP_SUPER_RCV_NOT_READY;
1498 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1499 l2cap_send_sframe(pi, control);
1500 return;
1503 spin_lock_bh(&pi->send_lock);
1504 nframes = l2cap_ertm_send(sk);
1505 spin_unlock_bh(&pi->send_lock);
/* I-frames carry ReqSeq already - ack was piggybacked, nothing to do */
1507 if (nframes > 0)
1508 return;
1510 control |= L2CAP_SUPER_RCV_READY;
1511 l2cap_send_sframe(pi, control);
/*
 * Send a SREJ S-frame with the Final bit for the last (tail) entry on
 * the SREJ list, i.e. the most recently detected missing frame.
 */
1514 static void l2cap_send_srejtail(struct sock *sk)
1516 struct srej_list *tail;
1517 u16 control;
1519 control = L2CAP_SUPER_SELECT_REJECT;
1520 control |= L2CAP_CTRL_FINAL;
1522 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1523 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1525 l2cap_send_sframe(l2cap_pi(sk), control);
/*
 * Copy user data from the msghdr iovec into skb, spilling anything that
 * does not fit the first buffer into ACL-MTU-sized fragments chained on
 * frag_list (fragments carry no L2CAP header).
 * Returns total bytes copied or a negative errno.
 *
 * NOTE(review): allocation failure below returns -EFAULT rather than
 * the errno reported by bt_skb_send_alloc() via &err - looks like the
 * wrong error code for an OOM/alloc path; confirm err's sign convention
 * before changing.
 */
1528 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1530 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1531 struct sk_buff **frag;
1532 int err, sent = 0;
1534 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1535 return -EFAULT;
1537 sent += count;
1538 len -= count;
1540 /* Continuation fragments (no L2CAP header) */
1541 frag = &skb_shinfo(skb)->frag_list;
1542 while (len) {
1543 count = min_t(unsigned int, conn->mtu, len);
1545 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1546 if (!*frag)
1547 return -EFAULT;
1548 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1549 return -EFAULT;
1551 sent += count;
1552 len -= count;
1554 frag = &(*frag)->next;
1557 return sent;
/*
 * Build a connectionless (SOCK_DGRAM) L2CAP PDU: basic L2CAP header
 * followed by the 2-byte PSM, then the user payload copied from msg.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
1560 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1562 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1563 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field */
1564 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1565 struct l2cap_hdr *lh;
1567 BT_DBG("sk %p len %d", sk, (int)len);
1569 count = min_t(unsigned int, (conn->mtu - hlen), len);
1570 skb = bt_skb_send_alloc(sk, count + hlen,
1571 msg->msg_flags & MSG_DONTWAIT, &err);
1572 if (!skb)
1573 return ERR_PTR(-ENOMEM);
1575 /* Create L2CAP header */
1576 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1577 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1578 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1579 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1581 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1582 if (unlikely(err < 0)) {
1583 kfree_skb(skb);
1584 return ERR_PTR(err);
1586 return skb;
/*
 * Build a basic-mode L2CAP PDU: plain L2CAP header followed by the
 * user payload copied from msg.  Returns the skb or an ERR_PTR.
 */
1589 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1591 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1592 struct sk_buff *skb;
1593 int err, count, hlen = L2CAP_HDR_SIZE;
1594 struct l2cap_hdr *lh;
1596 BT_DBG("sk %p len %d", sk, (int)len);
1598 count = min_t(unsigned int, (conn->mtu - hlen), len);
1599 skb = bt_skb_send_alloc(sk, count + hlen,
1600 msg->msg_flags & MSG_DONTWAIT, &err);
1601 if (!skb)
1602 return ERR_PTR(-ENOMEM);
1604 /* Create L2CAP header */
1605 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1606 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1607 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1609 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1610 if (unlikely(err < 0)) {
1611 kfree_skb(skb);
1612 return ERR_PTR(err);
1614 return skb;
/*
 * Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (SAR start frames, sdulen != 0),
 * payload, and 2 reserved bytes for the FCS when CRC16 is negotiated
 * (the actual FCS value is filled in at transmit time).
 * Returns the skb or an ERR_PTR.
 */
1617 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1619 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1620 struct sk_buff *skb;
1621 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1622 struct l2cap_hdr *lh;
1624 BT_DBG("sk %p len %d", sk, (int)len);
1626 if (!conn)
1627 return ERR_PTR(-ENOTCONN);
1629 if (sdulen)
1630 hlen += 2;
1632 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1633 hlen += 2;
1635 count = min_t(unsigned int, (conn->mtu - hlen), len);
1636 skb = bt_skb_send_alloc(sk, count + hlen,
1637 msg->msg_flags & MSG_DONTWAIT, &err);
1638 if (!skb)
1639 return ERR_PTR(-ENOMEM);
1641 /* Create L2CAP header */
1642 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1643 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1644 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1645 put_unaligned_le16(control, skb_put(skb, 2));
1646 if (sdulen)
1647 put_unaligned_le16(sdulen, skb_put(skb, 2));
1649 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1650 if (unlikely(err < 0)) {
1651 kfree_skb(skb);
1652 return ERR_PTR(err);
/* Reserve room for the FCS; value written when the frame is sent */
1655 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1656 put_unaligned_le16(0, skb_put(skb, 2));
1658 bt_cb(skb)->retries = 0;
1659 return skb;
/*
 * Segment an SDU larger than the remote MPS into a chain of I-frames
 * (SAR start / continue / end) on a private queue, then splice the
 * whole chain atomically onto the socket TX queue.
 * Returns total payload bytes queued, or a negative errno.
 */
1662 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1664 struct l2cap_pinfo *pi = l2cap_pi(sk);
1665 struct sk_buff *skb;
1666 struct sk_buff_head sar_queue;
1667 u16 control;
1668 size_t size = 0;
1670 skb_queue_head_init(&sar_queue);
/* Start frame carries the total SDU length as the sdulen field */
1671 control = L2CAP_SDU_START;
1672 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1673 if (IS_ERR(skb))
1674 return PTR_ERR(skb);
1676 __skb_queue_tail(&sar_queue, skb);
1677 len -= pi->remote_mps;
1678 size += pi->remote_mps;
1680 while (len > 0) {
1681 size_t buflen;
1683 if (len > pi->remote_mps) {
1684 control = L2CAP_SDU_CONTINUE;
1685 buflen = pi->remote_mps;
1686 } else {
1687 control = L2CAP_SDU_END;
1688 buflen = len;
1691 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1692 if (IS_ERR(skb)) {
1693 skb_queue_purge(&sar_queue);
1694 return PTR_ERR(skb);
1697 __skb_queue_tail(&sar_queue, skb);
1698 len -= buflen;
1699 size += buflen;
/* Publish the whole segment chain to the sender under send_lock */
1701 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1702 spin_lock_bh(&pi->send_lock);
1703 if (sk->sk_send_head == NULL)
1704 sk->sk_send_head = sar_queue.next;
1705 spin_unlock_bh(&pi->send_lock);
1707 return size;
/*
 * sendmsg() entry point for L2CAP sockets.  Dispatches on socket type
 * and channel mode: connectionless datagrams are sent immediately,
 * basic mode builds and sends one PDU, ERTM/streaming queue one PDU
 * (or SAR-segment a large SDU) and then kick the respective transmit
 * engine.  Returns bytes accepted or a negative errno.
 */
1710 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1712 struct sock *sk = sock->sk;
1713 struct l2cap_pinfo *pi = l2cap_pi(sk);
1714 struct sk_buff *skb;
1715 u16 control;
1716 int err;
1718 BT_DBG("sock %p, sk %p", sock, sk);
1720 err = sock_error(sk);
1721 if (err)
1722 return err;
1724 if (msg->msg_flags & MSG_OOB)
1725 return -EOPNOTSUPP;
1727 lock_sock(sk);
1729 if (sk->sk_state != BT_CONNECTED) {
1730 err = -ENOTCONN;
1731 goto done;
1734 /* Connectionless channel */
1735 if (sk->sk_type == SOCK_DGRAM) {
1736 skb = l2cap_create_connless_pdu(sk, msg, len);
1737 if (IS_ERR(skb)) {
1738 err = PTR_ERR(skb);
1739 } else {
1740 l2cap_do_send(sk, skb);
1741 err = len;
1743 goto done;
1746 switch (pi->mode) {
1747 case L2CAP_MODE_BASIC:
1748 /* Check outgoing MTU */
1749 if (len > pi->omtu) {
1750 err = -EINVAL;
1751 goto done;
1754 /* Create a basic PDU */
1755 skb = l2cap_create_basic_pdu(sk, msg, len);
1756 if (IS_ERR(skb)) {
1757 err = PTR_ERR(skb);
1758 goto done;
1761 l2cap_do_send(sk, skb);
1762 err = len;
1763 break;
1765 case L2CAP_MODE_ERTM:
1766 case L2CAP_MODE_STREAMING:
1767 /* Entire SDU fits into one PDU */
1768 if (len <= pi->remote_mps) {
1769 control = L2CAP_SDU_UNSEGMENTED;
1770 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1771 if (IS_ERR(skb)) {
1772 err = PTR_ERR(skb);
1773 goto done;
1775 __skb_queue_tail(TX_QUEUE(sk), skb);
/* Only ERTM has a concurrent sender, so only it needs send_lock */
1777 if (pi->mode == L2CAP_MODE_ERTM)
1778 spin_lock_bh(&pi->send_lock);
1780 if (sk->sk_send_head == NULL)
1781 sk->sk_send_head = skb;
1783 if (pi->mode == L2CAP_MODE_ERTM)
1784 spin_unlock_bh(&pi->send_lock);
1785 } else {
1786 /* Segment SDU into multiples PDUs */
1787 err = l2cap_sar_segment_sdu(sk, msg, len);
1788 if (err < 0)
1789 goto done;
1792 if (pi->mode == L2CAP_MODE_STREAMING) {
1793 err = l2cap_streaming_send(sk);
1794 } else {
1795 spin_lock_bh(&pi->send_lock);
1796 err = l2cap_ertm_send(sk);
1797 spin_unlock_bh(&pi->send_lock);
1800 if (err >= 0)
1801 err = len;
1802 break;
1804 default:
1805 BT_DBG("bad state %1.1x", pi->mode);
1806 err = -EINVAL;
1809 done:
1810 release_sock(sk);
1811 return err;
/*
 * recvmsg() entry point.  With deferred setup, the first read on a
 * BT_CONNECT2 socket completes the connection (sends the pending
 * connect response) instead of reading data; otherwise delegate to the
 * generic Bluetooth socket receive path.
 */
1814 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1816 struct sock *sk = sock->sk;
1818 lock_sock(sk);
1820 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1821 struct l2cap_conn_rsp rsp;
1823 sk->sk_state = BT_CONFIG;
1825 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1826 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1827 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1828 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1829 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1830 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1832 release_sock(sk);
1833 return 0;
1836 release_sock(sk);
1838 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS / L2CAP_LM).
 * Options are pre-filled with current values so a short copy from
 * userspace leaves unspecified fields unchanged.  ERTM/streaming modes
 * are only accepted when enable_ertm is set.
 */
1841 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1843 struct sock *sk = sock->sk;
1844 struct l2cap_options opts;
1845 int len, err = 0;
1846 u32 opt;
1848 BT_DBG("sk %p", sk);
1850 lock_sock(sk);
1852 switch (optname) {
1853 case L2CAP_OPTIONS:
/* Seed with current settings; partial user structs keep the rest */
1854 opts.imtu = l2cap_pi(sk)->imtu;
1855 opts.omtu = l2cap_pi(sk)->omtu;
1856 opts.flush_to = l2cap_pi(sk)->flush_to;
1857 opts.mode = l2cap_pi(sk)->mode;
1858 opts.fcs = l2cap_pi(sk)->fcs;
1859 opts.max_tx = l2cap_pi(sk)->max_tx;
1860 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1862 len = min_t(unsigned int, sizeof(opts), optlen);
1863 if (copy_from_user((char *) &opts, optval, len)) {
1864 err = -EFAULT;
1865 break;
1868 l2cap_pi(sk)->mode = opts.mode;
1869 switch (l2cap_pi(sk)->mode) {
1870 case L2CAP_MODE_BASIC:
1871 break;
1872 case L2CAP_MODE_ERTM:
1873 case L2CAP_MODE_STREAMING:
1874 if (enable_ertm)
1875 break;
1876 /* fall through */
1877 default:
1878 err = -EINVAL;
1879 break;
1882 l2cap_pi(sk)->imtu = opts.imtu;
1883 l2cap_pi(sk)->omtu = opts.omtu;
1884 l2cap_pi(sk)->fcs = opts.fcs;
1885 l2cap_pi(sk)->max_tx = opts.max_tx;
1886 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1887 break;
1889 case L2CAP_LM:
1890 if (get_user(opt, (u32 __user *) optval)) {
1891 err = -EFAULT;
1892 break;
/* Map legacy link-mode bits onto the BT_SECURITY levels */
1895 if (opt & L2CAP_LM_AUTH)
1896 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1897 if (opt & L2CAP_LM_ENCRYPT)
1898 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1899 if (opt & L2CAP_LM_SECURE)
1900 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1902 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1903 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1904 break;
1906 default:
1907 err = -ENOPROTOOPT;
1908 break;
1911 release_sock(sk);
1912 return err;
/*
 * setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY (connection security level) and
 * BT_DEFER_SETUP (only valid before the socket is connected).
 */
1915 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1917 struct sock *sk = sock->sk;
1918 struct bt_security sec;
1919 int len, err = 0;
1920 u32 opt;
1922 BT_DBG("sk %p", sk);
1924 if (level == SOL_L2CAP)
1925 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1927 if (level != SOL_BLUETOOTH)
1928 return -ENOPROTOOPT;
1930 lock_sock(sk);
1932 switch (optname) {
1933 case BT_SECURITY:
1934 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1935 && sk->sk_type != SOCK_RAW) {
1936 err = -EINVAL;
1937 break;
1940 sec.level = BT_SECURITY_LOW;
1942 len = min_t(unsigned int, sizeof(sec), optlen);
1943 if (copy_from_user((char *) &sec, optval, len)) {
1944 err = -EFAULT;
1945 break;
1948 if (sec.level < BT_SECURITY_LOW ||
1949 sec.level > BT_SECURITY_HIGH) {
1950 err = -EINVAL;
1951 break;
1954 l2cap_pi(sk)->sec_level = sec.level;
1955 break;
1957 case BT_DEFER_SETUP:
/* Deferred setup may only be toggled before connecting */
1958 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1959 err = -EINVAL;
1960 break;
1963 if (get_user(opt, (u32 __user *) optval)) {
1964 err = -EFAULT;
1965 break;
1968 bt_sk(sk)->defer_setup = opt;
1969 break;
1971 default:
1972 err = -ENOPROTOOPT;
1973 break;
1976 release_sock(sk);
1977 return err;
1980 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1982 struct sock *sk = sock->sk;
1983 struct l2cap_options opts;
1984 struct l2cap_conninfo cinfo;
1985 int len, err = 0;
1986 u32 opt;
1988 BT_DBG("sk %p", sk);
1990 if (get_user(len, optlen))
1991 return -EFAULT;
1993 lock_sock(sk);
1995 switch (optname) {
1996 case L2CAP_OPTIONS:
1997 opts.imtu = l2cap_pi(sk)->imtu;
1998 opts.omtu = l2cap_pi(sk)->omtu;
1999 opts.flush_to = l2cap_pi(sk)->flush_to;
2000 opts.mode = l2cap_pi(sk)->mode;
2001 opts.fcs = l2cap_pi(sk)->fcs;
2002 opts.max_tx = l2cap_pi(sk)->max_tx;
2003 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2005 len = min_t(unsigned int, len, sizeof(opts));
2006 if (copy_to_user(optval, (char *) &opts, len))
2007 err = -EFAULT;
2009 break;
2011 case L2CAP_LM:
2012 switch (l2cap_pi(sk)->sec_level) {
2013 case BT_SECURITY_LOW:
2014 opt = L2CAP_LM_AUTH;
2015 break;
2016 case BT_SECURITY_MEDIUM:
2017 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2018 break;
2019 case BT_SECURITY_HIGH:
2020 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2021 L2CAP_LM_SECURE;
2022 break;
2023 default:
2024 opt = 0;
2025 break;
2028 if (l2cap_pi(sk)->role_switch)
2029 opt |= L2CAP_LM_MASTER;
2031 if (l2cap_pi(sk)->force_reliable)
2032 opt |= L2CAP_LM_RELIABLE;
2034 if (put_user(opt, (u32 __user *) optval))
2035 err = -EFAULT;
2036 break;
2038 case L2CAP_CONNINFO:
2039 if (sk->sk_state != BT_CONNECTED &&
2040 !(sk->sk_state == BT_CONNECT2 &&
2041 bt_sk(sk)->defer_setup)) {
2042 err = -ENOTCONN;
2043 break;
2046 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2047 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2049 len = min_t(unsigned int, len, sizeof(cinfo));
2050 if (copy_to_user(optval, (char *) &cinfo, len))
2051 err = -EFAULT;
2053 break;
2055 default:
2056 err = -ENOPROTOOPT;
2057 break;
2060 release_sock(sk);
2061 return err;
/*
 * getsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH serves BT_SECURITY and BT_DEFER_SETUP.
 */
2064 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2066 struct sock *sk = sock->sk;
2067 struct bt_security sec;
2068 int len, err = 0;
2070 BT_DBG("sk %p", sk);
2072 if (level == SOL_L2CAP)
2073 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2075 if (level != SOL_BLUETOOTH)
2076 return -ENOPROTOOPT;
2078 if (get_user(len, optlen))
2079 return -EFAULT;
2081 lock_sock(sk);
2083 switch (optname) {
2084 case BT_SECURITY:
2085 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2086 && sk->sk_type != SOCK_RAW) {
2087 err = -EINVAL;
2088 break;
2091 sec.level = l2cap_pi(sk)->sec_level;
2093 len = min_t(unsigned int, len, sizeof(sec));
2094 if (copy_to_user(optval, (char *) &sec, len))
2095 err = -EFAULT;
2097 break;
2099 case BT_DEFER_SETUP:
2100 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2101 err = -EINVAL;
2102 break;
2105 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2106 err = -EFAULT;
2108 break;
2110 default:
2111 err = -ENOPROTOOPT;
2112 break;
2115 release_sock(sk);
2116 return err;
/*
 * shutdown() for L2CAP sockets.  For ERTM channels, first wait for all
 * outstanding I-frames to be acknowledged, then close the channel and
 * optionally linger until the socket reaches BT_CLOSED.
 */
2119 static int l2cap_sock_shutdown(struct socket *sock, int how)
2121 struct sock *sk = sock->sk;
2122 int err = 0;
2124 BT_DBG("sock %p, sk %p", sock, sk);
2126 if (!sk)
2127 return 0;
2129 lock_sock(sk);
2130 if (!sk->sk_shutdown) {
/* Drain unacked ERTM frames before tearing the channel down */
2131 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2132 err = __l2cap_wait_ack(sk);
2134 sk->sk_shutdown = SHUTDOWN_MASK;
2135 l2cap_sock_clear_timer(sk);
2136 __l2cap_sock_close(sk, 0);
2138 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2139 err = bt_sock_wait_state(sk, BT_CLOSED,
2140 sk->sk_lingertime);
2142 release_sock(sk);
2143 return err;
/*
 * release() for L2CAP sockets: shut the channel down both ways, detach
 * the socket from its owner and let l2cap_sock_kill() free it.
 */
2146 static int l2cap_sock_release(struct socket *sock)
2148 struct sock *sk = sock->sk;
2149 int err;
2151 BT_DBG("sock %p, sk %p", sock, sk);
2153 if (!sk)
2154 return 0;
2156 err = l2cap_sock_shutdown(sock, 2);
2158 sock_orphan(sk);
2159 l2cap_sock_kill(sk);
2160 return err;
/*
 * Channel configuration finished: move to BT_CONNECTED and wake whoever
 * is waiting - the connecting task for outgoing channels, or the parent
 * (listening) socket for incoming ones.
 */
2163 static void l2cap_chan_ready(struct sock *sk)
2165 struct sock *parent = bt_sk(sk)->parent;
2167 BT_DBG("sk %p, parent %p", sk, parent);
2169 l2cap_pi(sk)->conf_state = 0;
2170 l2cap_sock_clear_timer(sk);
2172 if (!parent) {
2173 /* Outgoing channel.
2174 * Wake up socket sleeping on connect.
2176 sk->sk_state = BT_CONNECTED;
2177 sk->sk_state_change(sk);
2178 } else {
2179 /* Incoming channel.
2180 * Wake up socket sleeping on accept.
2182 parent->sk_data_ready(parent, 0);
2186 /* Copy frame to all raw sockets on that connection */
/*
 * Clone the incoming frame to every SOCK_RAW socket on this connection
 * (packet-sniffer style delivery), skipping the socket that originated
 * it.  Clone failures and full receive queues drop silently.
 */
2187 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2189 struct l2cap_chan_list *l = &conn->chan_list;
2190 struct sk_buff *nskb;
2191 struct sock *sk;
2193 BT_DBG("conn %p", conn);
2195 read_lock(&l->lock);
2196 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2197 if (sk->sk_type != SOCK_RAW)
2198 continue;
2200 /* Don't send frame to the socket it came from */
2201 if (skb->sk == sk)
2202 continue;
2203 nskb = skb_clone(skb, GFP_ATOMIC);
2204 if (!nskb)
2205 continue;
2207 if (sock_queue_rcv_skb(sk, nskb))
2208 kfree_skb(nskb);
2210 read_unlock(&l->lock);
2213 /* ---- L2CAP signalling commands ---- */
/*
 * Build an L2CAP signalling command skb: L2CAP header on the signalling
 * CID, command header (code/ident/len), and the payload - fragmented
 * into frag_list chunks when it exceeds the ACL MTU.
 * Returns the skb or NULL on allocation failure.
 */
2214 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2215 u8 code, u8 ident, u16 dlen, void *data)
2217 struct sk_buff *skb, **frag;
2218 struct l2cap_cmd_hdr *cmd;
2219 struct l2cap_hdr *lh;
2220 int len, count;
2222 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2223 conn, code, ident, dlen);
2225 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2226 count = min_t(unsigned int, conn->mtu, len);
2228 skb = bt_skb_alloc(count, GFP_ATOMIC);
2229 if (!skb)
2230 return NULL;
2232 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2233 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2234 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2236 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2237 cmd->code = code;
2238 cmd->ident = ident;
2239 cmd->len = cpu_to_le16(dlen);
2241 if (dlen) {
/* Payload room left in the first skb after the two headers */
2242 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2243 memcpy(skb_put(skb, count), data, count);
2244 data += count;
2247 len -= skb->len;
2249 /* Continuation fragments (no L2CAP header) */
2250 frag = &skb_shinfo(skb)->frag_list;
2251 while (len) {
2252 count = min_t(unsigned int, conn->mtu, len);
2254 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2255 if (!*frag)
2256 goto fail;
2258 memcpy(skb_put(*frag, count), data, count);
2260 len -= count;
2261 data += count;
2263 frag = &(*frag)->next;
2266 return skb;
2268 fail:
/* kfree_skb() also frees any fragments already chained on frag_list */
2269 kfree_skb(skb);
2270 return NULL;
/*
 * Decode one configuration option at *ptr, advancing *ptr past it.
 * 1/2/4-byte values are returned by value (LE-converted); any other
 * length returns a pointer to the raw option bytes in *val.
 * Returns the total size consumed (header + value).
 */
2273 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2275 struct l2cap_conf_opt *opt = *ptr;
2276 int len;
2278 len = L2CAP_CONF_OPT_SIZE + opt->len;
2279 *ptr += len;
2281 *type = opt->type;
2282 *olen = opt->len;
2284 switch (opt->len) {
2285 case 1:
2286 *val = *((u8 *) opt->val);
2287 break;
2289 case 2:
2290 *val = __le16_to_cpu(*((__le16 *) opt->val));
2291 break;
2293 case 4:
2294 *val = __le32_to_cpu(*((__le32 *) opt->val));
2295 break;
2297 default:
/* Variable-length option (e.g. RFC): hand back a raw pointer */
2298 *val = (unsigned long) opt->val;
2299 break;
2302 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2303 return len;
/*
 * Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  1/2/4-byte values are stored little-endian; any other
 * length treats val as a pointer to raw bytes to copy.
 */
2306 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2308 struct l2cap_conf_opt *opt = *ptr;
2310 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2312 opt->type = type;
2313 opt->len = len;
2315 switch (len) {
2316 case 1:
2317 *((u8 *) opt->val) = val;
2318 break;
2320 case 2:
2321 *((__le16 *) opt->val) = cpu_to_le16(val);
2322 break;
2324 case 4:
2325 *((__le32 *) opt->val) = cpu_to_le32(val);
2326 break;
2328 default:
2329 memcpy(opt->val, (void *) val, len);
2330 break;
2333 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Delayed-ack timer: acknowledge received I-frames that have not yet
 * been acked by outgoing traffic.  Timer (softirq) context.
 */
2336 static void l2cap_ack_timeout(unsigned long arg)
2338 struct sock *sk = (void *) arg;
2340 bh_lock_sock(sk);
2341 l2cap_send_ack(l2cap_pi(sk));
2342 bh_unlock_sock(sk);
/*
 * Reset all ERTM per-channel state for a freshly configured channel:
 * sequence counters, the three protocol timers (retransmission,
 * monitor, delayed ack), the SREJ/busy receive queues, the send lock
 * and the local-busy deferred work.
 */
2345 static inline void l2cap_ertm_init(struct sock *sk)
2347 l2cap_pi(sk)->expected_ack_seq = 0;
2348 l2cap_pi(sk)->unacked_frames = 0;
2349 l2cap_pi(sk)->buffer_seq = 0;
2350 l2cap_pi(sk)->num_acked = 0;
2351 l2cap_pi(sk)->frames_sent = 0;
2353 setup_timer(&l2cap_pi(sk)->retrans_timer,
2354 l2cap_retrans_timeout, (unsigned long) sk);
2355 setup_timer(&l2cap_pi(sk)->monitor_timer,
2356 l2cap_monitor_timeout, (unsigned long) sk);
2357 setup_timer(&l2cap_pi(sk)->ack_timer,
2358 l2cap_ack_timeout, (unsigned long) sk);
2360 __skb_queue_head_init(SREJ_QUEUE(sk));
2361 __skb_queue_head_init(BUSY_QUEUE(sk));
2362 spin_lock_init(&l2cap_pi(sk)->send_lock);
2364 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2367 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2369 u32 local_feat_mask = l2cap_feat_mask;
2370 if (enable_ertm)
2371 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2373 switch (mode) {
2374 case L2CAP_MODE_ERTM:
2375 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2376 case L2CAP_MODE_STREAMING:
2377 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2378 default:
2379 return 0x00;
2383 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2385 switch (mode) {
2386 case L2CAP_MODE_STREAMING:
2387 case L2CAP_MODE_ERTM:
2388 if (l2cap_mode_supported(mode, remote_feat_mask))
2389 return mode;
2390 /* fall through */
2391 default:
2392 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing configuration request into 'data'.  On the first
 * request the channel mode is settled (disconnecting if a mandated
 * ERTM/streaming mode is unsupported); then mode-specific options
 * (MTU, RFC, FCS) are appended.  Returns the request length in bytes.
 */
2396 static int l2cap_build_conf_req(struct sock *sk, void *data)
2398 struct l2cap_pinfo *pi = l2cap_pi(sk);
2399 struct l2cap_conf_req *req = data;
2400 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2401 void *ptr = req->data;
2403 BT_DBG("sk %p", sk);
/* Mode is only (re)negotiated on the very first config exchange */
2405 if (pi->num_conf_req || pi->num_conf_rsp)
2406 goto done;
2408 switch (pi->mode) {
2409 case L2CAP_MODE_STREAMING:
2410 case L2CAP_MODE_ERTM:
/* Mode was mandated by this device (socket option) - no fallback */
2411 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2412 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2413 l2cap_send_disconn_req(pi->conn, sk);
2414 break;
2415 default:
2416 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2417 break;
2420 done:
2421 switch (pi->mode) {
2422 case L2CAP_MODE_BASIC:
2423 if (pi->imtu != L2CAP_DEFAULT_MTU)
2424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2425 break;
2427 case L2CAP_MODE_ERTM:
2428 rfc.mode = L2CAP_MODE_ERTM;
2429 rfc.txwin_size = pi->tx_win;
2430 rfc.max_transmit = pi->max_tx;
/* Timeouts are set by the responder, so request zero here */
2431 rfc.retrans_timeout = 0;
2432 rfc.monitor_timeout = 0;
/* Cap the PDU size so one I-frame plus overhead fits the ACL MTU */
2433 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2434 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2435 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2438 sizeof(rfc), (unsigned long) &rfc);
2440 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2441 break;
2443 if (pi->fcs == L2CAP_FCS_NONE ||
2444 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2445 pi->fcs = L2CAP_FCS_NONE;
2446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2448 break;
2450 case L2CAP_MODE_STREAMING:
2451 rfc.mode = L2CAP_MODE_STREAMING;
2452 rfc.txwin_size = 0;
2453 rfc.max_transmit = 0;
2454 rfc.retrans_timeout = 0;
2455 rfc.monitor_timeout = 0;
2456 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2457 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2458 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2461 sizeof(rfc), (unsigned long) &rfc);
2463 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2464 break;
2466 if (pi->fcs == L2CAP_FCS_NONE ||
2467 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2468 pi->fcs = L2CAP_FCS_NONE;
2469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2471 break;
2474 /* FIXME: Need actual value of the flush timeout */
2475 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2476 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2478 req->dcid = cpu_to_le16(pi->dcid);
2479 req->flags = cpu_to_le16(0);
2481 return ptr - data;
2484 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2486 struct l2cap_pinfo *pi = l2cap_pi(sk);
2487 struct l2cap_conf_rsp *rsp = data;
2488 void *ptr = rsp->data;
2489 void *req = pi->conf_req;
2490 int len = pi->conf_len;
2491 int type, hint, olen;
2492 unsigned long val;
2493 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2494 u16 mtu = L2CAP_DEFAULT_MTU;
2495 u16 result = L2CAP_CONF_SUCCESS;
2497 BT_DBG("sk %p", sk);
2499 while (len >= L2CAP_CONF_OPT_SIZE) {
2500 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2502 hint = type & L2CAP_CONF_HINT;
2503 type &= L2CAP_CONF_MASK;
2505 switch (type) {
2506 case L2CAP_CONF_MTU:
2507 mtu = val;
2508 break;
2510 case L2CAP_CONF_FLUSH_TO:
2511 pi->flush_to = val;
2512 break;
2514 case L2CAP_CONF_QOS:
2515 break;
2517 case L2CAP_CONF_RFC:
2518 if (olen == sizeof(rfc))
2519 memcpy(&rfc, (void *) val, olen);
2520 break;
2522 case L2CAP_CONF_FCS:
2523 if (val == L2CAP_FCS_NONE)
2524 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2526 break;
2528 default:
2529 if (hint)
2530 break;
2532 result = L2CAP_CONF_UNKNOWN;
2533 *((u8 *) ptr++) = type;
2534 break;
2538 if (pi->num_conf_rsp || pi->num_conf_req)
2539 goto done;
2541 switch (pi->mode) {
2542 case L2CAP_MODE_STREAMING:
2543 case L2CAP_MODE_ERTM:
2544 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2545 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2546 return -ECONNREFUSED;
2547 break;
2548 default:
2549 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2550 break;
2553 done:
2554 if (pi->mode != rfc.mode) {
2555 result = L2CAP_CONF_UNACCEPT;
2556 rfc.mode = pi->mode;
2558 if (pi->num_conf_rsp == 1)
2559 return -ECONNREFUSED;
2561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2562 sizeof(rfc), (unsigned long) &rfc);
2566 if (result == L2CAP_CONF_SUCCESS) {
2567 /* Configure output options and let the other side know
2568 * which ones we don't like. */
2570 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2571 result = L2CAP_CONF_UNACCEPT;
2572 else {
2573 pi->omtu = mtu;
2574 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2576 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2578 switch (rfc.mode) {
2579 case L2CAP_MODE_BASIC:
2580 pi->fcs = L2CAP_FCS_NONE;
2581 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2582 break;
2584 case L2CAP_MODE_ERTM:
2585 pi->remote_tx_win = rfc.txwin_size;
2586 pi->remote_max_tx = rfc.max_transmit;
2587 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2588 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2590 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2592 rfc.retrans_timeout =
2593 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2594 rfc.monitor_timeout =
2595 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2597 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2599 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2600 sizeof(rfc), (unsigned long) &rfc);
2602 break;
2604 case L2CAP_MODE_STREAMING:
2605 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2606 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2608 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2610 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2613 sizeof(rfc), (unsigned long) &rfc);
2615 break;
2617 default:
2618 result = L2CAP_CONF_UNACCEPT;
2620 memset(&rfc, 0, sizeof(rfc));
2621 rfc.mode = pi->mode;
2624 if (result == L2CAP_CONF_SUCCESS)
2625 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2627 rsp->scid = cpu_to_le16(pi->dcid);
2628 rsp->result = cpu_to_le16(result);
2629 rsp->flags = cpu_to_le16(0x0000);
2631 return ptr - data;
/*
 * Parse the peer's configuration response and build our follow-up
 * request into 'data', adjusting MTU/flush-to/RFC to what the peer will
 * accept.  On success, latch the negotiated ERTM/streaming parameters.
 * Returns the new request length, or -ECONNREFUSED if the peer tries
 * to change a mode this device mandated.
 */
2634 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2636 struct l2cap_pinfo *pi = l2cap_pi(sk);
2637 struct l2cap_conf_req *req = data;
2638 void *ptr = req->data;
2639 int type, olen;
2640 unsigned long val;
2641 struct l2cap_conf_rfc rfc;
2643 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2645 while (len >= L2CAP_CONF_OPT_SIZE) {
2646 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2648 switch (type) {
2649 case L2CAP_CONF_MTU:
2650 if (val < L2CAP_DEFAULT_MIN_MTU) {
2651 *result = L2CAP_CONF_UNACCEPT;
2652 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2653 } else
2654 pi->omtu = val;
2655 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2656 break;
2658 case L2CAP_CONF_FLUSH_TO:
2659 pi->flush_to = val;
2660 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2661 2, pi->flush_to);
2662 break;
2664 case L2CAP_CONF_RFC:
2665 if (olen == sizeof(rfc))
2666 memcpy(&rfc, (void *)val, olen);
/* A mode mandated locally must not be changed by the peer */
2668 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2669 rfc.mode != pi->mode)
2670 return -ECONNREFUSED;
2672 pi->mode = rfc.mode;
2673 pi->fcs = 0;
2675 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2676 sizeof(rfc), (unsigned long) &rfc);
2677 break;
/* NOTE(review): rfc is read below even when the response carried no
 * RFC option, in which case it is uninitialized - confirm all peers
 * include RFC, or initialize rfc with defaults. */
2681 if (*result == L2CAP_CONF_SUCCESS) {
2682 switch (rfc.mode) {
2683 case L2CAP_MODE_ERTM:
2684 pi->remote_tx_win = rfc.txwin_size;
2685 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2686 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2687 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2688 break;
2689 case L2CAP_MODE_STREAMING:
2690 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2694 req->dcid = cpu_to_le16(pi->dcid);
2695 req->flags = cpu_to_le16(0x0000);
2697 return ptr - data;
2700 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2702 struct l2cap_conf_rsp *rsp = data;
2703 void *ptr = rsp->data;
2705 BT_DBG("sk %p", sk);
2707 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2708 rsp->result = cpu_to_le16(result);
2709 rsp->flags = cpu_to_le16(flags);
2711 return ptr - data;
2714 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2716 struct l2cap_pinfo *pi = l2cap_pi(sk);
2717 int type, olen;
2718 unsigned long val;
2719 struct l2cap_conf_rfc rfc;
2721 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2723 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2724 return;
2726 while (len >= L2CAP_CONF_OPT_SIZE) {
2727 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2729 switch (type) {
2730 case L2CAP_CONF_RFC:
2731 if (olen == sizeof(rfc))
2732 memcpy(&rfc, (void *)val, olen);
2733 goto done;
2737 done:
2738 switch (rfc.mode) {
2739 case L2CAP_MODE_ERTM:
2740 pi->remote_tx_win = rfc.txwin_size;
2741 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2742 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2743 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2744 break;
2745 case L2CAP_MODE_STREAMING:
2746 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming L2CAP Command Reject.  If it rejects our pending
 * Information Request (feature-mask probe), treat the exchange as done
 * and resume bringing up the queued channels.  Always returns 0.
 */
2750 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2752 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; ignore other reject reasons. */
2754 if (rej->reason != 0x0000)
2755 return 0;
2757 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2758 cmd->ident == conn->info_ident) {
2759 del_timer(&conn->info_timer);
2761 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2762 conn->info_ident = 0;
2764 l2cap_conn_start(conn);
2767 return 0;
/*
 * Handle an incoming L2CAP Connection Request: find a listening socket
 * for the PSM, allocate and initialize a child socket, add it to the
 * connection's channel list and send a Connection Response.  If the
 * remote's feature mask is not known yet, answer "pending" and kick off
 * an Information Request.  Always returns 0.
 */
2770 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2772 struct l2cap_chan_list *list = &conn->chan_list;
2773 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2774 struct l2cap_conn_rsp rsp;
2775 struct sock *sk, *parent;
2776 int result, status = L2CAP_CS_NO_INFO;
2778 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2779 __le16 psm = req->psm;
2781 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2783 /* Check if we have socket listening on psm */
/* NOTE(review): on success parent is returned locked; the matching
 * bh_unlock_sock(parent) is at the "response" label — verify against
 * l2cap_get_sock_by_psm. */
2784 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2785 if (!parent) {
2786 result = L2CAP_CR_BAD_PSM;
2787 goto sendresp;
2790 /* Check if the ACL is secure enough (if not SDP) */
2791 if (psm != cpu_to_le16(0x0001) &&
2792 !hci_conn_check_link_mode(conn->hcon)) {
2793 conn->disc_reason = 0x05;
2794 result = L2CAP_CR_SEC_BLOCK;
2795 goto response;
/* Default failure code for the allocation/duplicate checks below. */
2798 result = L2CAP_CR_NO_MEM;
2800 /* Check for backlog size */
2801 if (sk_acceptq_is_full(parent)) {
2802 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2803 goto response;
2806 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2807 if (!sk)
2808 goto response;
2810 write_lock_bh(&list->lock);
2812 /* Check if we already have channel with that dcid */
2813 if (__l2cap_get_chan_by_dcid(list, scid)) {
2814 write_unlock_bh(&list->lock);
2815 sock_set_flag(sk, SOCK_ZAPPED);
2816 l2cap_sock_kill(sk);
2817 goto response;
2820 hci_conn_hold(conn->hcon);
2822 l2cap_sock_init(sk, parent);
2823 bacpy(&bt_sk(sk)->src, conn->src);
2824 bacpy(&bt_sk(sk)->dst, conn->dst);
2825 l2cap_pi(sk)->psm = psm;
2826 l2cap_pi(sk)->dcid = scid;
2828 __l2cap_chan_add(conn, sk, parent);
/* Our locally allocated channel id becomes the peer's destination CID. */
2829 dcid = l2cap_pi(sk)->scid;
2831 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2833 l2cap_pi(sk)->ident = cmd->ident;
2835 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2836 if (l2cap_check_security(sk)) {
2837 if (bt_sk(sk)->defer_setup) {
2838 sk->sk_state = BT_CONNECT2;
2839 result = L2CAP_CR_PEND;
2840 status = L2CAP_CS_AUTHOR_PEND;
/* Wake the listener so userspace can authorize the connection. */
2841 parent->sk_data_ready(parent, 0);
2842 } else {
2843 sk->sk_state = BT_CONFIG;
2844 result = L2CAP_CR_SUCCESS;
2845 status = L2CAP_CS_NO_INFO;
2847 } else {
2848 sk->sk_state = BT_CONNECT2;
2849 result = L2CAP_CR_PEND;
2850 status = L2CAP_CS_AUTHEN_PEND;
2852 } else {
2853 sk->sk_state = BT_CONNECT2;
2854 result = L2CAP_CR_PEND;
2855 status = L2CAP_CS_NO_INFO;
2858 write_unlock_bh(&list->lock);
2860 response:
2861 bh_unlock_sock(parent);
2863 sendresp:
2864 rsp.scid = cpu_to_le16(scid);
2865 rsp.dcid = cpu_to_le16(dcid);
2866 rsp.result = cpu_to_le16(result);
2867 rsp.status = cpu_to_le16(status);
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info means we still need the remote feature mask. */
2870 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2871 struct l2cap_info_req info;
2872 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2874 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2875 conn->info_ident = l2cap_get_ident(conn);
2877 mod_timer(&conn->info_timer, jiffies +
2878 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2880 l2cap_send_cmd(conn, conn->info_ident,
2881 L2CAP_INFO_REQ, sizeof(info), &info);
2884 return 0;
/*
 * Handle an incoming L2CAP Connection Response.  Look up the channel by
 * source CID (or, if the peer did not echo one, by the request ident),
 * then either start configuration (success), mark it pending, or tear
 * the channel down.  Always returns 0.
 */
2887 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2889 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2890 u16 scid, dcid, result, status;
2891 struct sock *sk;
2892 u8 req[128];
2894 scid = __le16_to_cpu(rsp->scid);
2895 dcid = __le16_to_cpu(rsp->dcid);
2896 result = __le16_to_cpu(rsp->result);
2897 status = __le16_to_cpu(rsp->status);
2899 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* NOTE(review): both lookups appear to return the socket locked; the
 * bh_unlock_sock at the end pairs with that — verify. */
2901 if (scid) {
2902 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2903 if (!sk)
2904 return 0;
2905 } else {
2906 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2907 if (!sk)
2908 return 0;
2911 switch (result) {
2912 case L2CAP_CR_SUCCESS:
2913 sk->sk_state = BT_CONFIG;
2914 l2cap_pi(sk)->ident = 0;
2915 l2cap_pi(sk)->dcid = dcid;
2916 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2917 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Kick off configuration and count the request for the retry limit. */
2919 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2920 l2cap_build_conf_req(sk, req), req);
2921 l2cap_pi(sk)->num_conf_req++;
2922 break;
2924 case L2CAP_CR_PEND:
2925 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2926 break;
2928 default:
2929 l2cap_chan_del(sk, ECONNREFUSED);
2930 break;
2933 bh_unlock_sock(sk);
2934 return 0;
/*
 * Handle an incoming L2CAP Configure Request.  Option lists may span
 * several requests (continuation flag 0x0001), so partial option data is
 * accumulated in conf_req/conf_len until the final fragment arrives,
 * then parsed and answered.  When both directions are configured the
 * channel is moved to BT_CONNECTED and ERTM state is initialized.
 * Returns 0, or -ENOENT when no channel matches the destination CID.
 */
2937 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2939 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2940 u16 dcid, flags;
2941 u8 rsp[64];
2942 struct sock *sk;
2943 int len;
2945 dcid = __le16_to_cpu(req->dcid);
2946 flags = __le16_to_cpu(req->flags);
2948 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2950 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2951 if (!sk)
2952 return -ENOENT;
2954 if (sk->sk_state == BT_DISCONN)
2955 goto unlock;
2957 /* Reject if config buffer is too small. */
2958 len = cmd_len - sizeof(*req);
2959 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2960 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2961 l2cap_build_conf_rsp(sk, rsp,
2962 L2CAP_CONF_REJECT, flags), rsp);
2963 goto unlock;
2966 /* Store config. */
2967 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2968 l2cap_pi(sk)->conf_len += len;
2970 if (flags & 0x0001) {
2971 /* Incomplete config. Send empty response. */
2972 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2973 l2cap_build_conf_rsp(sk, rsp,
2974 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2975 goto unlock;
2978 /* Complete config. */
2979 len = l2cap_parse_conf_req(sk, rsp);
2980 if (len < 0) {
2981 l2cap_send_disconn_req(conn, sk);
2982 goto unlock;
2985 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2986 l2cap_pi(sk)->num_conf_rsp++;
2988 /* Reset config buffer. */
2989 l2cap_pi(sk)->conf_len = 0;
2991 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2992 goto unlock;
/* Both sides configured: enable FCS unless both agreed to drop it. */
2994 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2995 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2996 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2997 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2999 sk->sk_state = BT_CONNECTED;
3001 l2cap_pi(sk)->next_tx_seq = 0;
3002 l2cap_pi(sk)->expected_tx_seq = 0;
3003 __skb_queue_head_init(TX_QUEUE(sk));
3004 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3005 l2cap_ertm_init(sk);
3007 l2cap_chan_ready(sk);
3008 goto unlock;
/* We answered their request but never sent ours: send it now. */
3011 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3012 u8 buf[64];
3013 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3014 l2cap_build_conf_req(sk, buf), buf);
3015 l2cap_pi(sk)->num_conf_req++;
3018 unlock:
3019 bh_unlock_sock(sk);
3020 return 0;
/*
 * Handle an incoming L2CAP Configure Response.  On success, commit the
 * negotiated RFC parameters; on "unacceptable parameters", re-negotiate
 * by parsing the peer's counter-proposal and sending a new request (up
 * to L2CAP_CONF_MAX_CONF_RSP attempts); anything else disconnects.  When
 * both directions finish, the channel goes to BT_CONNECTED.  Returns 0.
 */
3023 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3025 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3026 u16 scid, flags, result;
3027 struct sock *sk;
/* NOTE(review): cmd->len is __le16 but is used raw here; looks like it
 * needs le16_to_cpu() to be correct on big-endian hosts — verify. */
3028 int len = cmd->len - sizeof(*rsp);
3030 scid = __le16_to_cpu(rsp->scid);
3031 flags = __le16_to_cpu(rsp->flags);
3032 result = __le16_to_cpu(rsp->result);
3034 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3035 scid, flags, result);
3037 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3038 if (!sk)
3039 return 0;
3041 switch (result) {
3042 case L2CAP_CONF_SUCCESS:
3043 l2cap_conf_rfc_get(sk, rsp->data, len);
3044 break;
3046 case L2CAP_CONF_UNACCEPT:
3047 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3048 char req[64];
/* The counter-proposal must fit in our request buffer. */
3050 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3051 l2cap_send_disconn_req(conn, sk);
3052 goto done;
3055 /* throw out any old stored conf requests */
3056 result = L2CAP_CONF_SUCCESS;
3057 len = l2cap_parse_conf_rsp(sk, rsp->data,
3058 len, req, &result);
3059 if (len < 0) {
3060 l2cap_send_disconn_req(conn, sk);
3061 goto done;
3064 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3065 L2CAP_CONF_REQ, len, req);
3066 l2cap_pi(sk)->num_conf_req++;
3067 if (result != L2CAP_CONF_SUCCESS)
3068 goto done;
3069 break;
3072 default:
3073 sk->sk_state = BT_DISCONN;
3074 sk->sk_err = ECONNRESET;
3075 l2cap_sock_set_timer(sk, HZ * 5);
3076 l2cap_send_disconn_req(conn, sk);
3077 goto done;
/* Continuation flag set: more response fragments will follow. */
3080 if (flags & 0x01)
3081 goto done;
3083 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3085 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3086 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3087 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3088 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3090 sk->sk_state = BT_CONNECTED;
3091 l2cap_pi(sk)->next_tx_seq = 0;
3092 l2cap_pi(sk)->expected_tx_seq = 0;
3093 __skb_queue_head_init(TX_QUEUE(sk));
3094 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3095 l2cap_ertm_init(sk);
3097 l2cap_chan_ready(sk);
3100 done:
3101 bh_unlock_sock(sk);
3102 return 0;
/*
 * Handle an incoming L2CAP Disconnection Request: acknowledge it,
 * flush all transmit/ERTM queues and timers, and remove the channel.
 * Always returns 0.
 */
3105 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3107 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3108 struct l2cap_disconn_rsp rsp;
3109 u16 dcid, scid;
3110 struct sock *sk;
3112 scid = __le16_to_cpu(req->scid);
3113 dcid = __le16_to_cpu(req->dcid);
3115 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local scid. */
3117 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3118 if (!sk)
3119 return 0;
3121 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3122 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3123 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3125 sk->sk_shutdown = SHUTDOWN_MASK;
3127 skb_queue_purge(TX_QUEUE(sk));
/* ERTM keeps extra retransmission state that must be torn down too. */
3129 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3130 skb_queue_purge(SREJ_QUEUE(sk));
3131 skb_queue_purge(BUSY_QUEUE(sk));
3132 del_timer(&l2cap_pi(sk)->retrans_timer);
3133 del_timer(&l2cap_pi(sk)->monitor_timer);
3134 del_timer(&l2cap_pi(sk)->ack_timer);
3137 l2cap_chan_del(sk, ECONNRESET);
3138 bh_unlock_sock(sk);
3140 l2cap_sock_kill(sk);
3141 return 0;
/*
 * Handle an incoming L2CAP Disconnection Response to a request we sent:
 * flush queues and ERTM timers and remove the channel.  Always returns 0.
 */
3144 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3146 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3147 u16 dcid, scid;
3148 struct sock *sk;
3150 scid = __le16_to_cpu(rsp->scid);
3151 dcid = __le16_to_cpu(rsp->dcid);
3153 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3155 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3156 if (!sk)
3157 return 0;
3159 skb_queue_purge(TX_QUEUE(sk));
3161 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3162 skb_queue_purge(SREJ_QUEUE(sk));
3163 skb_queue_purge(BUSY_QUEUE(sk));
3164 del_timer(&l2cap_pi(sk)->retrans_timer);
3165 del_timer(&l2cap_pi(sk)->monitor_timer);
3166 del_timer(&l2cap_pi(sk)->ack_timer);
/* err == 0: this is a clean, locally initiated disconnect. */
3169 l2cap_chan_del(sk, 0);
3170 bh_unlock_sock(sk);
3172 l2cap_sock_kill(sk);
3173 return 0;
/*
 * Handle an incoming L2CAP Information Request.  Answer the feature-mask
 * and fixed-channels queries; reply "not supported" to anything else.
 * Always returns 0.
 */
3176 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3178 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3179 u16 type;
3181 type = __le16_to_cpu(req->type);
3183 BT_DBG("type 0x%4.4x", type);
3185 if (type == L2CAP_IT_FEAT_MASK) {
3186 u8 buf[8];
3187 u32 feat_mask = l2cap_feat_mask;
3188 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3189 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3190 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Only advertise ERTM/streaming/FCS when the module enables them. */
3191 if (enable_ertm)
3192 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3193 | L2CAP_FEAT_FCS;
3194 put_unaligned_le32(feat_mask, rsp->data);
3195 l2cap_send_cmd(conn, cmd->ident,
3196 L2CAP_INFO_RSP, sizeof(buf), buf);
3197 } else if (type == L2CAP_IT_FIXED_CHAN) {
3198 u8 buf[12];
3199 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3200 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3201 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte response header. */
3202 memcpy(buf + 4, l2cap_fixed_chan, 8);
3203 l2cap_send_cmd(conn, cmd->ident,
3204 L2CAP_INFO_RSP, sizeof(buf), buf);
3205 } else {
3206 struct l2cap_info_rsp rsp;
3207 rsp.type = cpu_to_le16(type);
3208 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3209 l2cap_send_cmd(conn, cmd->ident,
3210 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3213 return 0;
/*
 * Handle an incoming L2CAP Information Response.  Store the remote
 * feature mask; if it advertises fixed channels, chain a second request
 * for the fixed-channel map, otherwise mark discovery done and start
 * the queued channels.  Always returns 0.
 */
3216 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3218 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3219 u16 type, result;
3221 type = __le16_to_cpu(rsp->type);
3222 result = __le16_to_cpu(rsp->result);
3224 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3226 del_timer(&conn->info_timer);
3228 if (type == L2CAP_IT_FEAT_MASK) {
3229 conn->feat_mask = get_unaligned_le32(rsp->data);
3231 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3232 struct l2cap_info_req req;
3233 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3235 conn->info_ident = l2cap_get_ident(conn);
3237 l2cap_send_cmd(conn, conn->info_ident,
3238 L2CAP_INFO_REQ, sizeof(req), &req);
3239 } else {
3240 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3241 conn->info_ident = 0;
3243 l2cap_conn_start(conn);
3245 } else if (type == L2CAP_IT_FIXED_CHAN) {
3246 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3247 conn->info_ident = 0;
3249 l2cap_conn_start(conn);
3252 return 0;
/*
 * Demultiplex the L2CAP signalling channel: walk the concatenated
 * commands in @skb, dispatch each to its handler, and send a Command
 * Reject for unknown or failing commands.  Consumes @skb.
 */
3255 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3257 u8 *data = skb->data;
3258 int len = skb->len;
3259 struct l2cap_cmd_hdr cmd;
3260 int err = 0;
/* Give raw (sniffer) sockets a copy of the signalling traffic first. */
3262 l2cap_raw_recv(conn, skb);
3264 while (len >= L2CAP_CMD_HDR_SIZE) {
3265 u16 cmd_len;
3266 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3267 data += L2CAP_CMD_HDR_SIZE;
3268 len -= L2CAP_CMD_HDR_SIZE;
3270 cmd_len = le16_to_cpu(cmd.len);
3272 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* ident 0 is reserved; a length past the buffer means corruption. */
3274 if (cmd_len > len || !cmd.ident) {
3275 BT_DBG("corrupted command");
3276 break;
3279 switch (cmd.code) {
3280 case L2CAP_COMMAND_REJ:
3281 l2cap_command_rej(conn, &cmd, data);
3282 break;
3284 case L2CAP_CONN_REQ:
3285 err = l2cap_connect_req(conn, &cmd, data);
3286 break;
3288 case L2CAP_CONN_RSP:
3289 err = l2cap_connect_rsp(conn, &cmd, data);
3290 break;
3292 case L2CAP_CONF_REQ:
3293 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3294 break;
3296 case L2CAP_CONF_RSP:
3297 err = l2cap_config_rsp(conn, &cmd, data);
3298 break;
3300 case L2CAP_DISCONN_REQ:
3301 err = l2cap_disconnect_req(conn, &cmd, data);
3302 break;
3304 case L2CAP_DISCONN_RSP:
3305 err = l2cap_disconnect_rsp(conn, &cmd, data);
3306 break;
3308 case L2CAP_ECHO_REQ:
/* Echo the payload straight back. */
3309 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3310 break;
3312 case L2CAP_ECHO_RSP:
3313 break;
3315 case L2CAP_INFO_REQ:
3316 err = l2cap_information_req(conn, &cmd, data);
3317 break;
3319 case L2CAP_INFO_RSP:
3320 err = l2cap_information_rsp(conn, &cmd, data);
3321 break;
3323 default:
3324 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3325 err = -EINVAL;
3326 break;
3329 if (err) {
3330 struct l2cap_cmd_rej rej;
3331 BT_DBG("error %d", err);
3333 /* FIXME: Map err to a valid reason */
3334 rej.reason = cpu_to_le16(0);
3335 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3338 data += cmd_len;
3339 len -= cmd_len;
3342 kfree_skb(skb);
3345 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3347 u16 our_fcs, rcv_fcs;
3348 int hdr_size = L2CAP_HDR_SIZE + 2;
3350 if (pi->fcs == L2CAP_FCS_CRC16) {
3351 skb_trim(skb, skb->len - 2);
3352 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3353 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3355 if (our_fcs != rcv_fcs)
3356 return -EINVAL;
3358 return 0;
/*
 * Answer a poll (P-bit) from the peer: send pending I-frames with the
 * F-bit set, or an RNR if we are locally busy, or a plain RR if there
 * was nothing to send at all.
 */
3361 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3363 struct l2cap_pinfo *pi = l2cap_pi(sk);
3364 u16 control = 0;
3366 pi->frames_sent = 0;
3367 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3369 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3372 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3373 l2cap_send_sframe(pi, control);
3374 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* The RNR already carried the F-bit; don't set it again below. */
3375 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3378 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3379 __mod_retrans_timer();
3381 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3383 spin_lock_bh(&pi->send_lock);
3384 l2cap_ertm_send(sk);
3385 spin_unlock_bh(&pi->send_lock);
/* Nothing was transmitted and we are not busy: answer with an RR. */
3387 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3388 pi->frames_sent == 0) {
3389 control |= L2CAP_SUPER_RCV_READY;
3390 l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq.  Returns 0 on insertion, -EINVAL when a frame
 * with the same tx_seq is already queued (duplicate).
 */
3394 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3396 struct sk_buff *next_skb;
3398 bt_cb(skb)->tx_seq = tx_seq;
3399 bt_cb(skb)->sar = sar;
3401 next_skb = skb_peek(SREJ_QUEUE(sk));
3402 if (!next_skb) {
3403 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3404 return 0;
/* Walk the queue to find the first entry with a larger tx_seq. */
3407 do {
3408 if (bt_cb(next_skb)->tx_seq == tx_seq)
3409 return -EINVAL;
3411 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3412 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3413 return 0;
3416 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3417 break;
3419 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest tx_seq seen so far: append at the tail. */
3421 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3423 return 0;
/*
 * Reassemble an ERTM SDU from its SAR fragments and hand the finished
 * SDU to the socket receive queue.  Partial state lives in pi->sdu /
 * pi->sdu_len / pi->partial_sdu_len, guarded by L2CAP_CONN_SAR_SDU.
 * Returns 0 on success/drop, -ENOMEM when allocation fails (the caller
 * retries later via the busy-work path), or the receive-queue error.
 */
3426 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3428 struct l2cap_pinfo *pi = l2cap_pi(sk);
3429 struct sk_buff *_skb;
3430 int err;
3432 switch (control & L2CAP_CTRL_SAR) {
3433 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of a SAR sequence is invalid. */
3434 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3435 goto drop;
3437 err = sock_queue_rcv_skb(sk, skb);
3438 if (!err)
3439 return err;
3441 break;
3443 case L2CAP_SDU_START:
3444 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3445 goto drop;
3447 pi->sdu_len = get_unaligned_le16(skb->data);
3449 if (pi->sdu_len > pi->imtu)
3450 goto disconnect;
3452 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3453 if (!pi->sdu)
3454 return -ENOMEM;
3456 /* pull sdu_len bytes only after alloc, because of Local Busy
3457 * condition we have to be sure that this will be executed
3458 * only once, i.e., when alloc does not fail */
3459 skb_pull(skb, 2);
3461 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3463 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3464 pi->partial_sdu_len = skb->len;
3465 break;
3467 case L2CAP_SDU_CONTINUE:
3468 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3469 goto disconnect;
3471 if (!pi->sdu)
3472 goto disconnect;
3474 pi->partial_sdu_len += skb->len;
3475 if (pi->partial_sdu_len > pi->sdu_len)
3476 goto drop;
3478 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3480 break;
3482 case L2CAP_SDU_END:
3483 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3484 goto disconnect;
3486 if (!pi->sdu)
3487 goto disconnect;
/* On a SAR retry the END fragment was already copied in; skip the
 * length accounting and copy to avoid doing it twice. */
3489 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3490 pi->partial_sdu_len += skb->len;
3492 if (pi->partial_sdu_len > pi->imtu)
3493 goto drop;
3495 if (pi->partial_sdu_len != pi->sdu_len)
3496 goto drop;
3498 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3501 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3502 if (!_skb) {
3503 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3504 return -ENOMEM;
3507 err = sock_queue_rcv_skb(sk, _skb);
3508 if (err < 0) {
3509 kfree_skb(_skb);
3510 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3511 return err;
3514 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3515 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3517 kfree_skb(pi->sdu);
3518 break;
3521 kfree_skb(skb);
3522 return 0;
/* NOTE(review): the drop path falls through into disconnect below, so
 * every dropped SAR violation also tears the channel down — verify this
 * is intended rather than a missing separation of the two paths. */
3524 drop:
3525 kfree_skb(pi->sdu);
3526 pi->sdu = NULL;
3528 disconnect:
3529 l2cap_send_disconn_req(pi->conn, sk);
3530 kfree_skb(skb);
3531 return 0;
/*
 * Workqueue handler that drains the local-busy queue: repeatedly tries
 * to push the buffered I-frames through SDU reassembly, sleeping between
 * attempts, and gives up (disconnecting) after L2CAP_LOCAL_BUSY_TRIES.
 * When the queue is empty it sends an RR with the P-bit to tell the
 * peer the local-busy condition has cleared.
 */
3534 static void l2cap_busy_work(struct work_struct *work)
3536 DECLARE_WAITQUEUE(wait, current);
3537 struct l2cap_pinfo *pi =
3538 container_of(work, struct l2cap_pinfo, busy_work);
3539 struct sock *sk = (struct sock *)pi;
3540 int n_tries = 0, timeo = HZ/5, err;
3541 struct sk_buff *skb;
3542 u16 control;
3544 lock_sock(sk);
3546 add_wait_queue(sk_sleep(sk), &wait);
3547 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3548 set_current_state(TASK_INTERRUPTIBLE);
3550 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3551 err = -EBUSY;
3552 l2cap_send_disconn_req(pi->conn, sk);
3553 goto done;
3556 if (!timeo)
3557 timeo = HZ/5;
3559 if (signal_pending(current)) {
3560 err = sock_intr_errno(timeo);
3561 goto done;
/* Sleep with the socket released so the receive path can run. */
3564 release_sock(sk);
3565 timeo = schedule_timeout(timeo);
3566 lock_sock(sk);
3568 err = sock_error(sk);
3569 if (err)
3570 goto done;
3572 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3573 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3574 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3575 if (err < 0) {
/* Still failing: requeue the frame and sleep again. */
3576 skb_queue_head(BUSY_QUEUE(sk), skb);
3577 break;
3580 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3583 if (!skb)
3584 break;
3587 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3588 goto done;
/* We had announced RNR: poll the peer to resume transmission. */
3590 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3591 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3592 l2cap_send_sframe(pi, control);
3593 l2cap_pi(sk)->retry_count = 1;
3595 del_timer(&pi->retrans_timer);
3596 __mod_monitor_timer();
3598 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3600 done:
3601 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3602 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3604 set_current_state(TASK_RUNNING);
3605 remove_wait_queue(sk_sleep(sk), &wait);
3607 release_sock(sk);
/*
 * Deliver a received I-frame to SDU reassembly, or buffer it and enter
 * the local-busy state (sending RNR and scheduling the busy worker)
 * when delivery fails.  Returns the reassembly result, or -EBUSY when
 * the frame was queued because we are already locally busy.
 */
3610 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3612 struct l2cap_pinfo *pi = l2cap_pi(sk);
3613 int sctrl, err;
3615 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3616 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3617 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3618 return -EBUSY;
3621 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3622 if (err >= 0) {
3623 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3624 return err;
3627 /* Busy Condition */
3628 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3629 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3630 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending until the busy worker drains us. */
3632 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3633 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3634 l2cap_send_sframe(pi, sctrl);
3636 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3638 queue_work(_busy_wq, &pi->busy_work);
3640 return err;
3643 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3645 struct l2cap_pinfo *pi = l2cap_pi(sk);
3646 struct sk_buff *_skb;
3647 int err = -EINVAL;
3650 * TODO: We have to notify the userland if some data is lost with the
3651 * Streaming Mode.
3654 switch (control & L2CAP_CTRL_SAR) {
3655 case L2CAP_SDU_UNSEGMENTED:
3656 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3657 kfree_skb(pi->sdu);
3658 break;
3661 err = sock_queue_rcv_skb(sk, skb);
3662 if (!err)
3663 return 0;
3665 break;
3667 case L2CAP_SDU_START:
3668 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3669 kfree_skb(pi->sdu);
3670 break;
3673 pi->sdu_len = get_unaligned_le16(skb->data);
3674 skb_pull(skb, 2);
3676 if (pi->sdu_len > pi->imtu) {
3677 err = -EMSGSIZE;
3678 break;
3681 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3682 if (!pi->sdu) {
3683 err = -ENOMEM;
3684 break;
3687 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3689 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3690 pi->partial_sdu_len = skb->len;
3691 err = 0;
3692 break;
3694 case L2CAP_SDU_CONTINUE:
3695 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3696 break;
3698 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3700 pi->partial_sdu_len += skb->len;
3701 if (pi->partial_sdu_len > pi->sdu_len)
3702 kfree_skb(pi->sdu);
3703 else
3704 err = 0;
3706 break;
3708 case L2CAP_SDU_END:
3709 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3710 break;
3712 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3714 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3715 pi->partial_sdu_len += skb->len;
3717 if (pi->partial_sdu_len > pi->imtu)
3718 goto drop;
3720 if (pi->partial_sdu_len == pi->sdu_len) {
3721 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3722 err = sock_queue_rcv_skb(sk, _skb);
3723 if (err < 0)
3724 kfree_skb(_skb);
3726 err = 0;
3728 drop:
3729 kfree_skb(pi->sdu);
3730 break;
3733 kfree_skb(skb);
3734 return err;
3737 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3739 struct sk_buff *skb;
3740 u16 control;
3742 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3743 if (bt_cb(skb)->tx_seq != tx_seq)
3744 break;
3746 skb = skb_dequeue(SREJ_QUEUE(sk));
3747 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3748 l2cap_ertm_reassembly_sdu(sk, skb, control);
3749 l2cap_pi(sk)->buffer_seq_srej =
3750 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3751 tx_seq = (tx_seq + 1) % 64;
/*
 * A frame we SREJ'd earlier has finally arrived: drop its entry from the
 * SREJ list and re-send SREJ frames for everything still outstanding,
 * rotating each re-requested entry to the tail of the list.
 */
3755 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3757 struct l2cap_pinfo *pi = l2cap_pi(sk);
3758 struct srej_list *l, *tmp;
3759 u16 control;
3761 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3762 if (l->tx_seq == tx_seq) {
/* Found the satisfied request: remove it and stop. */
3763 list_del(&l->list);
3764 kfree(l);
3765 return;
3767 control = L2CAP_SUPER_SELECT_REJECT;
3768 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3769 l2cap_send_sframe(pi, control);
3770 list_del(&l->list);
3771 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * A gap was detected: send one SREJ S-frame for every missing sequence
 * number between expected_tx_seq and the received tx_seq, recording each
 * in the SREJ list, then step expected_tx_seq past the received frame.
 */
3775 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3777 struct l2cap_pinfo *pi = l2cap_pi(sk);
3778 struct srej_list *new;
3779 u16 control;
3781 while (tx_seq != pi->expected_tx_seq) {
3782 control = L2CAP_SUPER_SELECT_REJECT;
3783 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3784 l2cap_send_sframe(pi, control);
/* NOTE(review): GFP_ATOMIC kzalloc is not checked for NULL here;
 * an allocation failure would oops on new->tx_seq — verify against
 * later upstream fixes that made this return -ENOMEM. */
3786 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3787 new->tx_seq = pi->expected_tx_seq;
3788 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3789 list_add_tail(&new->list, SREJ_LIST(sk));
3791 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3794 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3796 struct l2cap_pinfo *pi = l2cap_pi(sk);
3797 u8 tx_seq = __get_txseq(rx_control);
3798 u8 req_seq = __get_reqseq(rx_control);
3799 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3800 int tx_seq_offset, expected_tx_seq_offset;
3801 int num_to_ack = (pi->tx_win/6) + 1;
3802 int err = 0;
3804 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3806 if (L2CAP_CTRL_FINAL & rx_control &&
3807 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3808 del_timer(&pi->monitor_timer);
3809 if (pi->unacked_frames > 0)
3810 __mod_retrans_timer();
3811 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3814 pi->expected_ack_seq = req_seq;
3815 l2cap_drop_acked_frames(sk);
3817 if (tx_seq == pi->expected_tx_seq)
3818 goto expected;
3820 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3821 if (tx_seq_offset < 0)
3822 tx_seq_offset += 64;
3824 /* invalid tx_seq */
3825 if (tx_seq_offset >= pi->tx_win) {
3826 l2cap_send_disconn_req(pi->conn, sk);
3827 goto drop;
3830 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3831 goto drop;
3833 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3834 struct srej_list *first;
3836 first = list_first_entry(SREJ_LIST(sk),
3837 struct srej_list, list);
3838 if (tx_seq == first->tx_seq) {
3839 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3840 l2cap_check_srej_gap(sk, tx_seq);
3842 list_del(&first->list);
3843 kfree(first);
3845 if (list_empty(SREJ_LIST(sk))) {
3846 pi->buffer_seq = pi->buffer_seq_srej;
3847 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3848 l2cap_send_ack(pi);
3850 } else {
3851 struct srej_list *l;
3853 /* duplicated tx_seq */
3854 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3855 goto drop;
3857 list_for_each_entry(l, SREJ_LIST(sk), list) {
3858 if (l->tx_seq == tx_seq) {
3859 l2cap_resend_srejframe(sk, tx_seq);
3860 return 0;
3863 l2cap_send_srejframe(sk, tx_seq);
3865 } else {
3866 expected_tx_seq_offset =
3867 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3868 if (expected_tx_seq_offset < 0)
3869 expected_tx_seq_offset += 64;
3871 /* duplicated tx_seq */
3872 if (tx_seq_offset < expected_tx_seq_offset)
3873 goto drop;
3875 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3877 INIT_LIST_HEAD(SREJ_LIST(sk));
3878 pi->buffer_seq_srej = pi->buffer_seq;
3880 __skb_queue_head_init(SREJ_QUEUE(sk));
3881 __skb_queue_head_init(BUSY_QUEUE(sk));
3882 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3884 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3886 l2cap_send_srejframe(sk, tx_seq);
3888 return 0;
3890 expected:
3891 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3893 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3894 bt_cb(skb)->tx_seq = tx_seq;
3895 bt_cb(skb)->sar = sar;
3896 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3897 return 0;
3900 if (rx_control & L2CAP_CTRL_FINAL) {
3901 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3902 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3903 else
3904 l2cap_retransmit_frames(sk);
3907 err = l2cap_push_rx_skb(sk, skb, rx_control);
3908 if (err < 0)
3909 return 0;
3911 __mod_ack_timer();
3913 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3914 if (pi->num_acked == num_to_ack - 1)
3915 l2cap_send_ack(pi);
3917 return 0;
3919 drop:
3920 kfree_skb(skb);
3921 return 0;
/*
 * Handle a Receiver Ready (RR) S-frame: acknowledge frames up to the
 * peer's req_seq, answer polls, process final bits, and resume
 * transmission when the peer is no longer busy.
 */
3924 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3928 pi->expected_ack_seq = __get_reqseq(rx_control);
3929 l2cap_drop_acked_frames(sk);
3931 if (rx_control & L2CAP_CTRL_POLL) {
3932 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3933 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3934 (pi->unacked_frames > 0))
3935 __mod_retrans_timer();
3937 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* In SREJ recovery, answer the poll with the tail SREJ. */
3938 l2cap_send_srejtail(sk);
3939 } else {
3940 l2cap_send_i_or_rr_or_rnr(sk);
3943 } else if (rx_control & L2CAP_CTRL_FINAL) {
3944 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* F-bit answers our REJ: only retransmit if it wasn't already done. */
3946 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3947 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3948 else
3949 l2cap_retransmit_frames(sk);
3951 } else {
3952 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3953 (pi->unacked_frames > 0))
3954 __mod_retrans_timer();
3956 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3957 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3958 l2cap_send_ack(pi);
3959 } else {
3960 spin_lock_bh(&pi->send_lock);
3961 l2cap_ertm_send(sk);
3962 spin_unlock_bh(&pi->send_lock);
/* Handle an incoming REJ (Reject) S-frame: the peer asks for a go-back-N
 * retransmission starting at ReqSeq.  Acked frames are dropped first, then
 * the unacked window is retransmitted (unless an earlier retransmission
 * already covered this REJ, signalled by L2CAP_CONN_REJ_ACT).
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit response to our poll: retransmit only if the REJ
		 * exception has not been acted on yet. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* While waiting for an F-bit, remember we already served
		 * this REJ so the eventual F-bit does not retransmit again. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle an incoming SREJ (Selective Reject) S-frame: the peer requests
 * retransmission of the single I-frame with TxSeq == ReqSeq.  The saved
 * srej_save_reqseq/SREJ_ACT pair prevents a duplicate retransmission when
 * the corresponding F-bit response arrives later.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to ReqSeq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);
		l2cap_retransmit_one_frame(sk, tx_seq);

		spin_lock_bh(&pi->send_lock);
		l2cap_ertm_send(sk);
		spin_unlock_bh(&pi->send_lock);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: skip the retransmit if we already answered this
		 * exact SREJ while polling. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle an incoming RNR (Receiver Not Ready) S-frame: the peer signals
 * it is busy, so mark the remote busy, ack up to ReqSeq, and stop our
 * retransmissions until the busy condition clears.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No SREJ outstanding: just stop retransmitting and answer
		 * a poll with an F-bit. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery is in progress: keep asking for the missing frames. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
/* Dispatch an incoming supervisory (S) frame to the RR/REJ/SREJ/RNR
 * handler selected by the 2-bit supervise field.  Always consumes skb.
 * Returns 0 unconditionally.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* An F-bit while we are waiting for one ends the monitor phase:
	 * stop the monitor timer and fall back to the retransmission timer
	 * if frames are still unacked. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* All four values of the 2-bit supervise field are covered. */
	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	/* S-frames carry no payload to queue; free the buffer here. */
	kfree_skb(skb);
	return 0;
}
/* Entry point for data frames on a connection-oriented channel (cid).
 * Validates the frame against the channel mode (Basic / ERTM / Streaming)
 * and hands it to the mode-specific receive path.  Consumes skb on every
 * path.  NOTE(review): l2cap_get_chan_by_scid() presumably returns the
 * socket locked — the done: label unlocks it; confirm against its
 * definition earlier in the file.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq, req_seq;
	int next_tx_seq_offset, req_seq_offset;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Strip the 16-bit control field, then compute the payload
		 * length excluding optional SAR length field and FCS. */
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk);
			goto drop;
		}

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* ReqSeq must lie within [expected_ack_seq, next_tx_seq]
		 * (modulo-64 window arithmetic). */
		req_seq = __get_reqseq(control);
		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
		if (req_seq_offset < 0)
			req_seq_offset += 64;

		next_tx_seq_offset =
			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* check for invalid req-seq */
		if (req_seq_offset > next_tx_seq_offset) {
			l2cap_send_disconn_req(pi->conn, sk);
			goto drop;
		}

		if (__is_iframe(control)) {
			/* Minimum I-frame: control + at least FCS-sized tail. */
			if (len < 4) {
				l2cap_send_disconn_req(pi->conn, sk);
				goto drop;
			}

			l2cap_data_channel_iframe(sk, control, skb);
		} else {
			/* S-frames must carry no payload. */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk);
				goto drop;
			}

			l2cap_data_channel_sframe(sk, control, skb);
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode never uses S-frames; silently drop bad ones. */
		if (len > pi->mps || len < 4 || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Out-of-sequence frames simply resynchronise the counter —
		 * streaming mode has no retransmission. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless-channel (CID 0x0002) frame to the socket bound
 * to the given PSM, or drop it.  Consumes skb on every path.
 * NOTE(review): l2cap_get_sock_by_psm() presumably returns the socket
 * locked — the done: label unlocks it; confirm against its definition.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* Frame larger than the incoming MTU: drop, no flow control here. */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
4244 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4246 struct l2cap_hdr *lh = (void *) skb->data;
4247 u16 cid, len;
4248 __le16 psm;
4250 skb_pull(skb, L2CAP_HDR_SIZE);
4251 cid = __le16_to_cpu(lh->cid);
4252 len = __le16_to_cpu(lh->len);
4254 if (len != skb->len) {
4255 kfree_skb(skb);
4256 return;
4259 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4261 switch (cid) {
4262 case L2CAP_CID_SIGNALING:
4263 l2cap_sig_channel(conn, skb);
4264 break;
4266 case L2CAP_CID_CONN_LESS:
4267 psm = get_unaligned_le16(skb->data);
4268 skb_pull(skb, 2);
4269 l2cap_conless_channel(conn, psm, skb);
4270 break;
4272 default:
4273 l2cap_data_channel(conn, cid, skb);
4274 break;
4278 /* ---- L2CAP interface with lower layer (HCI) ---- */
4280 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4282 int exact = 0, lm1 = 0, lm2 = 0;
4283 register struct sock *sk;
4284 struct hlist_node *node;
4286 if (type != ACL_LINK)
4287 return 0;
4289 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4291 /* Find listening sockets and check their link_mode */
4292 read_lock(&l2cap_sk_list.lock);
4293 sk_for_each(sk, node, &l2cap_sk_list.head) {
4294 if (sk->sk_state != BT_LISTEN)
4295 continue;
4297 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4298 lm1 |= HCI_LM_ACCEPT;
4299 if (l2cap_pi(sk)->role_switch)
4300 lm1 |= HCI_LM_MASTER;
4301 exact++;
4302 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4303 lm2 |= HCI_LM_ACCEPT;
4304 if (l2cap_pi(sk)->role_switch)
4305 lm2 |= HCI_LM_MASTER;
4308 read_unlock(&l2cap_sk_list.lock);
4310 return exact ? lm1 : lm2;
4313 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4315 struct l2cap_conn *conn;
4317 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4319 if (hcon->type != ACL_LINK)
4320 return 0;
4322 if (!status) {
4323 conn = l2cap_conn_add(hcon, status);
4324 if (conn)
4325 l2cap_conn_ready(conn);
4326 } else
4327 l2cap_conn_del(hcon, bt_err(status));
4329 return 0;
4332 static int l2cap_disconn_ind(struct hci_conn *hcon)
4334 struct l2cap_conn *conn = hcon->l2cap_data;
4336 BT_DBG("hcon %p", hcon);
4338 if (hcon->type != ACL_LINK || !conn)
4339 return 0x13;
4341 return conn->disc_reason;
4344 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4346 BT_DBG("hcon %p reason %d", hcon, reason);
4348 if (hcon->type != ACL_LINK)
4349 return 0;
4351 l2cap_conn_del(hcon, bt_err(reason));
4353 return 0;
4356 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4358 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4359 return;
4361 if (encrypt == 0x00) {
4362 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4363 l2cap_sock_clear_timer(sk);
4364 l2cap_sock_set_timer(sk, HZ * 5);
4365 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4366 __l2cap_sock_close(sk, ECONNREFUSED);
4367 } else {
4368 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4369 l2cap_sock_clear_timer(sk);
/* Security (authentication/encryption) procedure completed for a link.
 * Walk every channel on the connection and advance its state machine:
 * established channels re-check encryption, BT_CONNECT channels send the
 * pending Connection Request, BT_CONNECT2 channels answer the remote's
 * Connection Request with success or a security block.  Each socket is
 * individually bh_lock'ed for the duration of its update.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A Connection Request is already in flight for this
		 * channel; nothing more to do here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security OK: send the deferred
				 * Connection Request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule a quick
				 * teardown via the sock timer. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* Receive one ACL fragment from HCI and reassemble complete L2CAP frames.
 * ACL_START fragments carry the L2CAP header, from which the total frame
 * length is learned; continuation fragments are appended to conn->rx_skb
 * until rx_len reaches zero.  A fully reassembled frame is passed to
 * l2cap_recv_frame().  Note the deliberate fall-through into drop: on the
 * reassembly paths — the fragment's data has been copied, so the original
 * fragment skb is always freed here.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Lazily create the connection state on first data. */
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the length field of the L2CAP header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4535 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4537 struct sock *sk;
4538 struct hlist_node *node;
4540 read_lock_bh(&l2cap_sk_list.lock);
4542 sk_for_each(sk, node, &l2cap_sk_list.head) {
4543 struct l2cap_pinfo *pi = l2cap_pi(sk);
4545 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4546 batostr(&bt_sk(sk)->src),
4547 batostr(&bt_sk(sk)->dst),
4548 sk->sk_state, __le16_to_cpu(pi->psm),
4549 pi->scid, pi->dcid,
4550 pi->imtu, pi->omtu, pi->sec_level);
4553 read_unlock_bh(&l2cap_sk_list.lock);
4555 return 0;
/* debugfs open callback: bind the seq_file machinery to the show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the "l2cap" debugfs entry (single-record seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* debugfs dentry for the "l2cap" file; created in l2cap_init(). */
static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/L2CAP sockets: L2CAP-specific
 * handlers plus generic bt_sock_ and sock_no_ helpers. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
/* Protocol family hooks registered with the Bluetooth socket layer. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
/* HCI protocol callbacks: how the HCI core hands link events and ACL
 * data up to L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4609 static int __init l2cap_init(void)
4611 int err;
4613 err = proto_register(&l2cap_proto, 0);
4614 if (err < 0)
4615 return err;
4617 _busy_wq = create_singlethread_workqueue("l2cap");
4618 if (!_busy_wq)
4619 goto error;
4621 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4622 if (err < 0) {
4623 BT_ERR("L2CAP socket registration failed");
4624 goto error;
4627 err = hci_register_proto(&l2cap_hci_proto);
4628 if (err < 0) {
4629 BT_ERR("L2CAP protocol registration failed");
4630 bt_sock_unregister(BTPROTO_L2CAP);
4631 goto error;
4634 if (bt_debugfs) {
4635 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4636 bt_debugfs, NULL, &l2cap_debugfs_fops);
4637 if (!l2cap_debugfs)
4638 BT_ERR("Failed to create L2CAP debug file");
4641 BT_INFO("L2CAP ver %s", VERSION);
4642 BT_INFO("L2CAP socket layer initialized");
4644 return 0;
4646 error:
4647 proto_unregister(&l2cap_proto);
4648 return err;
/* Module exit: tear down in reverse order of l2cap_init() — debugfs,
 * workqueue (flushed first so no work is lost), socket family, HCI
 * protocol hooks, and finally the proto itself. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* ERTM support is opt-in at module load (or via sysfs, mode 0644). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");