Bluetooth: Fix bug in l2cap_ertm_send() behavior
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob 69f098d98141b9a360b1d38099fda59b2ec06980
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core and sockets. */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */

/* sk_timer callback: pick an error reason from the socket state, close the
 * channel and drop the timer's socket reference. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Must run on the unlocked socket; only acts if zapped and orphan. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}

/* (Re)arm sk_timer to fire @timeout jiffies from now. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}

/* Cancel a pending sk_timer. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
124 break;
126 return s;
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
131 struct sock *s;
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
134 break;
136 return s;
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 struct sock *s;
144 read_lock(&l->lock);
145 s = __l2cap_get_chan_by_scid(l, cid);
146 if (s)
147 bh_lock_sock(s);
148 read_unlock(&l->lock);
149 return s;
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 struct sock *s;
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
157 break;
159 return s;
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
164 struct sock *s;
165 read_lock(&l->lock);
166 s = __l2cap_get_chan_by_ident(l, ident);
167 if (s)
168 bh_lock_sock(s);
169 read_unlock(&l->lock);
170 return s;
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
179 return cid;
182 return 0;
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
187 sock_hold(sk);
189 if (l->head)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
194 l->head = sk;
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
202 if (sk == l->head)
203 l->head = next;
205 if (next)
206 l2cap_pi(next)->prev_c = prev;
207 if (prev)
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
211 __sock_put(sk);
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
233 } else {
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
242 if (parent)
243 bt_accept_enqueue(parent, sk);
246 /* Delete channel.
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
257 if (conn) {
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
267 if (err)
268 sk->sk_err = err;
270 if (parent) {
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
273 } else
274 sk->sk_state_change(sk);
277 /* Service level security */
278 static inline int l2cap_check_security(struct sock *sk)
280 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
281 __u8 auth_type;
283 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
284 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
285 auth_type = HCI_AT_NO_BONDING_MITM;
286 else
287 auth_type = HCI_AT_NO_BONDING;
289 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
290 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
291 } else {
292 switch (l2cap_pi(sk)->sec_level) {
293 case BT_SECURITY_HIGH:
294 auth_type = HCI_AT_GENERAL_BONDING_MITM;
295 break;
296 case BT_SECURITY_MEDIUM:
297 auth_type = HCI_AT_GENERAL_BONDING;
298 break;
299 default:
300 auth_type = HCI_AT_NO_BONDING;
301 break;
305 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
306 auth_type);
309 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
311 u8 id;
313 /* Get next available identificator.
314 * 1 - 128 are used by kernel.
315 * 129 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
319 spin_lock_bh(&conn->lock);
321 if (++conn->tx_ident > 128)
322 conn->tx_ident = 1;
324 id = conn->tx_ident;
326 spin_unlock_bh(&conn->lock);
328 return id;
331 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
335 BT_DBG("code 0x%2.2x", code);
337 if (!skb)
338 return;
340 hci_send_acl(conn->hcon, skb, 0);
343 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
345 struct sk_buff *skb;
346 struct l2cap_hdr *lh;
347 struct l2cap_conn *conn = pi->conn;
348 int count, hlen = L2CAP_HDR_SIZE + 2;
350 if (pi->fcs == L2CAP_FCS_CRC16)
351 hlen += 2;
353 BT_DBG("pi %p, control 0x%2.2x", pi, control);
355 count = min_t(unsigned int, conn->mtu, hlen);
356 control |= L2CAP_CTRL_FRAME_TYPE;
358 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
359 control |= L2CAP_CTRL_FINAL;
360 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
363 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
364 control |= L2CAP_CTRL_POLL;
365 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
368 skb = bt_skb_alloc(count, GFP_ATOMIC);
369 if (!skb)
370 return;
372 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
373 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
374 lh->cid = cpu_to_le16(pi->dcid);
375 put_unaligned_le16(control, skb_put(skb, 2));
377 if (pi->fcs == L2CAP_FCS_CRC16) {
378 u16 fcs = crc16(0, (u8 *)lh, count - 2);
379 put_unaligned_le16(fcs, skb_put(skb, 2));
382 hci_send_acl(pi->conn->hcon, skb, 0);
385 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
387 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
388 control |= L2CAP_SUPER_RCV_NOT_READY;
389 pi->conn_state |= L2CAP_CONN_RNR_SENT;
390 } else
391 control |= L2CAP_SUPER_RCV_READY;
393 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
395 l2cap_send_sframe(pi, control);
398 static inline int __l2cap_no_conn_pending(struct sock *sk)
400 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
403 static void l2cap_do_start(struct sock *sk)
405 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
407 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
408 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
409 return;
411 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
412 struct l2cap_conn_req req;
413 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
414 req.psm = l2cap_pi(sk)->psm;
416 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
417 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req);
422 } else {
423 struct l2cap_info_req req;
424 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
427 conn->info_ident = l2cap_get_ident(conn);
429 mod_timer(&conn->info_timer, jiffies +
430 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
432 l2cap_send_cmd(conn, conn->info_ident,
433 L2CAP_INFO_REQ, sizeof(req), &req);
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
439 struct l2cap_disconn_req req;
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
447 /* ---- L2CAP connections ---- */
448 static void l2cap_conn_start(struct l2cap_conn *conn)
450 struct l2cap_chan_list *l = &conn->chan_list;
451 struct sock *sk;
453 BT_DBG("conn %p", conn);
455 read_lock(&l->lock);
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
458 bh_lock_sock(sk);
460 if (sk->sk_type != SOCK_SEQPACKET &&
461 sk->sk_type != SOCK_STREAM) {
462 bh_unlock_sock(sk);
463 continue;
466 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk) &&
468 __l2cap_no_conn_pending(sk)) {
469 struct l2cap_conn_req req;
470 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
471 req.psm = l2cap_pi(sk)->psm;
473 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
474 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
476 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
477 L2CAP_CONN_REQ, sizeof(req), &req);
479 } else if (sk->sk_state == BT_CONNECT2) {
480 struct l2cap_conn_rsp rsp;
481 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
482 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
484 if (l2cap_check_security(sk)) {
485 if (bt_sk(sk)->defer_setup) {
486 struct sock *parent = bt_sk(sk)->parent;
487 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
488 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
489 parent->sk_data_ready(parent, 0);
491 } else {
492 sk->sk_state = BT_CONFIG;
493 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
494 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
496 } else {
497 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
498 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
501 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
502 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
505 bh_unlock_sock(sk);
508 read_unlock(&l->lock);
511 static void l2cap_conn_ready(struct l2cap_conn *conn)
513 struct l2cap_chan_list *l = &conn->chan_list;
514 struct sock *sk;
516 BT_DBG("conn %p", conn);
518 read_lock(&l->lock);
520 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
521 bh_lock_sock(sk);
523 if (sk->sk_type != SOCK_SEQPACKET &&
524 sk->sk_type != SOCK_STREAM) {
525 l2cap_sock_clear_timer(sk);
526 sk->sk_state = BT_CONNECTED;
527 sk->sk_state_change(sk);
528 } else if (sk->sk_state == BT_CONNECT)
529 l2cap_do_start(sk);
531 bh_unlock_sock(sk);
534 read_unlock(&l->lock);
537 /* Notify sockets that we cannot guaranty reliability anymore */
538 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
540 struct l2cap_chan_list *l = &conn->chan_list;
541 struct sock *sk;
543 BT_DBG("conn %p", conn);
545 read_lock(&l->lock);
547 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
548 if (l2cap_pi(sk)->force_reliable)
549 sk->sk_err = err;
552 read_unlock(&l->lock);
555 static void l2cap_info_timeout(unsigned long arg)
557 struct l2cap_conn *conn = (void *) arg;
559 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
560 conn->info_ident = 0;
562 l2cap_conn_start(conn);
565 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
567 struct l2cap_conn *conn = hcon->l2cap_data;
569 if (conn || status)
570 return conn;
572 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
573 if (!conn)
574 return NULL;
576 hcon->l2cap_data = conn;
577 conn->hcon = hcon;
579 BT_DBG("hcon %p conn %p", hcon, conn);
581 conn->mtu = hcon->hdev->acl_mtu;
582 conn->src = &hcon->hdev->bdaddr;
583 conn->dst = &hcon->dst;
585 conn->feat_mask = 0;
587 spin_lock_init(&conn->lock);
588 rwlock_init(&conn->chan_list.lock);
590 setup_timer(&conn->info_timer, l2cap_info_timeout,
591 (unsigned long) conn);
593 conn->disc_reason = 0x13;
595 return conn;
598 static void l2cap_conn_del(struct hci_conn *hcon, int err)
600 struct l2cap_conn *conn = hcon->l2cap_data;
601 struct sock *sk;
603 if (!conn)
604 return;
606 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
608 kfree_skb(conn->rx_skb);
610 /* Kill channels */
611 while ((sk = conn->chan_list.head)) {
612 bh_lock_sock(sk);
613 l2cap_chan_del(sk, err);
614 bh_unlock_sock(sk);
615 l2cap_sock_kill(sk);
618 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
619 del_timer_sync(&conn->info_timer);
621 hcon->l2cap_data = NULL;
622 kfree(conn);
625 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
627 struct l2cap_chan_list *l = &conn->chan_list;
628 write_lock_bh(&l->lock);
629 __l2cap_chan_add(conn, sk, parent);
630 write_unlock_bh(&l->lock);
633 /* ---- Socket interface ---- */
634 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
636 struct sock *sk;
637 struct hlist_node *node;
638 sk_for_each(sk, node, &l2cap_sk_list.head)
639 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
640 goto found;
641 sk = NULL;
642 found:
643 return sk;
646 /* Find socket with psm and source bdaddr.
647 * Returns closest match.
649 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
651 struct sock *sk = NULL, *sk1 = NULL;
652 struct hlist_node *node;
654 sk_for_each(sk, node, &l2cap_sk_list.head) {
655 if (state && sk->sk_state != state)
656 continue;
658 if (l2cap_pi(sk)->psm == psm) {
659 /* Exact match. */
660 if (!bacmp(&bt_sk(sk)->src, src))
661 break;
663 /* Closest match */
664 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
665 sk1 = sk;
668 return node ? sk : sk1;
671 /* Find socket with given address (psm, src).
672 * Returns locked socket */
673 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
675 struct sock *s;
676 read_lock(&l2cap_sk_list.lock);
677 s = __l2cap_get_sock_by_psm(state, psm, src);
678 if (s)
679 bh_lock_sock(s);
680 read_unlock(&l2cap_sk_list.lock);
681 return s;
684 static void l2cap_sock_destruct(struct sock *sk)
686 BT_DBG("sk %p", sk);
688 skb_queue_purge(&sk->sk_receive_queue);
689 skb_queue_purge(&sk->sk_write_queue);
692 static void l2cap_sock_cleanup_listen(struct sock *parent)
694 struct sock *sk;
696 BT_DBG("parent %p", parent);
698 /* Close not yet accepted channels */
699 while ((sk = bt_accept_dequeue(parent, NULL)))
700 l2cap_sock_close(sk);
702 parent->sk_state = BT_CLOSED;
703 sock_set_flag(parent, SOCK_ZAPPED);
706 /* Kill socket (only if zapped and orphan)
707 * Must be called on unlocked socket.
709 static void l2cap_sock_kill(struct sock *sk)
711 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
712 return;
714 BT_DBG("sk %p state %d", sk, sk->sk_state);
716 /* Kill poor orphan */
717 bt_sock_unlink(&l2cap_sk_list, sk);
718 sock_set_flag(sk, SOCK_DEAD);
719 sock_put(sk);
722 static void __l2cap_sock_close(struct sock *sk, int reason)
724 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
726 switch (sk->sk_state) {
727 case BT_LISTEN:
728 l2cap_sock_cleanup_listen(sk);
729 break;
731 case BT_CONNECTED:
732 case BT_CONFIG:
733 if (sk->sk_type == SOCK_SEQPACKET ||
734 sk->sk_type == SOCK_STREAM) {
735 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
737 sk->sk_state = BT_DISCONN;
738 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
739 l2cap_send_disconn_req(conn, sk);
740 } else
741 l2cap_chan_del(sk, reason);
742 break;
744 case BT_CONNECT2:
745 if (sk->sk_type == SOCK_SEQPACKET ||
746 sk->sk_type == SOCK_STREAM) {
747 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
748 struct l2cap_conn_rsp rsp;
749 __u16 result;
751 if (bt_sk(sk)->defer_setup)
752 result = L2CAP_CR_SEC_BLOCK;
753 else
754 result = L2CAP_CR_BAD_PSM;
756 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
757 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
758 rsp.result = cpu_to_le16(result);
759 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
760 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
761 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
762 } else
763 l2cap_chan_del(sk, reason);
764 break;
766 case BT_CONNECT:
767 case BT_DISCONN:
768 l2cap_chan_del(sk, reason);
769 break;
771 default:
772 sock_set_flag(sk, SOCK_ZAPPED);
773 break;
777 /* Must be called on unlocked socket. */
778 static void l2cap_sock_close(struct sock *sk)
780 l2cap_sock_clear_timer(sk);
781 lock_sock(sk);
782 __l2cap_sock_close(sk, ECONNRESET);
783 release_sock(sk);
784 l2cap_sock_kill(sk);
787 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
789 struct l2cap_pinfo *pi = l2cap_pi(sk);
791 BT_DBG("sk %p", sk);
793 if (parent) {
794 sk->sk_type = parent->sk_type;
795 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
797 pi->imtu = l2cap_pi(parent)->imtu;
798 pi->omtu = l2cap_pi(parent)->omtu;
799 pi->mode = l2cap_pi(parent)->mode;
800 pi->fcs = l2cap_pi(parent)->fcs;
801 pi->max_tx = l2cap_pi(parent)->max_tx;
802 pi->tx_win = l2cap_pi(parent)->tx_win;
803 pi->sec_level = l2cap_pi(parent)->sec_level;
804 pi->role_switch = l2cap_pi(parent)->role_switch;
805 pi->force_reliable = l2cap_pi(parent)->force_reliable;
806 } else {
807 pi->imtu = L2CAP_DEFAULT_MTU;
808 pi->omtu = 0;
809 if (enable_ertm && sk->sk_type == SOCK_STREAM)
810 pi->mode = L2CAP_MODE_ERTM;
811 else
812 pi->mode = L2CAP_MODE_BASIC;
813 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
814 pi->fcs = L2CAP_FCS_CRC16;
815 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
816 pi->sec_level = BT_SECURITY_LOW;
817 pi->role_switch = 0;
818 pi->force_reliable = 0;
821 /* Default config options */
822 pi->conf_len = 0;
823 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
824 skb_queue_head_init(TX_QUEUE(sk));
825 skb_queue_head_init(SREJ_QUEUE(sk));
826 skb_queue_head_init(BUSY_QUEUE(sk));
827 INIT_LIST_HEAD(SREJ_LIST(sk));
830 static struct proto l2cap_proto = {
831 .name = "L2CAP",
832 .owner = THIS_MODULE,
833 .obj_size = sizeof(struct l2cap_pinfo)
836 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
838 struct sock *sk;
840 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
841 if (!sk)
842 return NULL;
844 sock_init_data(sock, sk);
845 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
847 sk->sk_destruct = l2cap_sock_destruct;
848 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
850 sock_reset_flag(sk, SOCK_ZAPPED);
852 sk->sk_protocol = proto;
853 sk->sk_state = BT_OPEN;
855 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
857 bt_sock_link(&l2cap_sk_list, sk);
858 return sk;
861 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
862 int kern)
864 struct sock *sk;
866 BT_DBG("sock %p", sock);
868 sock->state = SS_UNCONNECTED;
870 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
871 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
872 return -ESOCKTNOSUPPORT;
874 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
875 return -EPERM;
877 sock->ops = &l2cap_sock_ops;
879 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
880 if (!sk)
881 return -ENOMEM;
883 l2cap_sock_init(sk, NULL);
884 return 0;
887 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
889 struct sock *sk = sock->sk;
890 struct sockaddr_l2 la;
891 int len, err = 0;
893 BT_DBG("sk %p", sk);
895 if (!addr || addr->sa_family != AF_BLUETOOTH)
896 return -EINVAL;
898 memset(&la, 0, sizeof(la));
899 len = min_t(unsigned int, sizeof(la), alen);
900 memcpy(&la, addr, len);
902 if (la.l2_cid)
903 return -EINVAL;
905 lock_sock(sk);
907 if (sk->sk_state != BT_OPEN) {
908 err = -EBADFD;
909 goto done;
912 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
913 !capable(CAP_NET_BIND_SERVICE)) {
914 err = -EACCES;
915 goto done;
918 write_lock_bh(&l2cap_sk_list.lock);
920 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
921 err = -EADDRINUSE;
922 } else {
923 /* Save source address */
924 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
925 l2cap_pi(sk)->psm = la.l2_psm;
926 l2cap_pi(sk)->sport = la.l2_psm;
927 sk->sk_state = BT_BOUND;
929 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
930 __le16_to_cpu(la.l2_psm) == 0x0003)
931 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
934 write_unlock_bh(&l2cap_sk_list.lock);
936 done:
937 release_sock(sk);
938 return err;
941 static int l2cap_do_connect(struct sock *sk)
943 bdaddr_t *src = &bt_sk(sk)->src;
944 bdaddr_t *dst = &bt_sk(sk)->dst;
945 struct l2cap_conn *conn;
946 struct hci_conn *hcon;
947 struct hci_dev *hdev;
948 __u8 auth_type;
949 int err;
951 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
952 l2cap_pi(sk)->psm);
954 hdev = hci_get_route(dst, src);
955 if (!hdev)
956 return -EHOSTUNREACH;
958 hci_dev_lock_bh(hdev);
960 err = -ENOMEM;
962 if (sk->sk_type == SOCK_RAW) {
963 switch (l2cap_pi(sk)->sec_level) {
964 case BT_SECURITY_HIGH:
965 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
966 break;
967 case BT_SECURITY_MEDIUM:
968 auth_type = HCI_AT_DEDICATED_BONDING;
969 break;
970 default:
971 auth_type = HCI_AT_NO_BONDING;
972 break;
974 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
975 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
976 auth_type = HCI_AT_NO_BONDING_MITM;
977 else
978 auth_type = HCI_AT_NO_BONDING;
980 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
981 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
982 } else {
983 switch (l2cap_pi(sk)->sec_level) {
984 case BT_SECURITY_HIGH:
985 auth_type = HCI_AT_GENERAL_BONDING_MITM;
986 break;
987 case BT_SECURITY_MEDIUM:
988 auth_type = HCI_AT_GENERAL_BONDING;
989 break;
990 default:
991 auth_type = HCI_AT_NO_BONDING;
992 break;
996 hcon = hci_connect(hdev, ACL_LINK, dst,
997 l2cap_pi(sk)->sec_level, auth_type);
998 if (!hcon)
999 goto done;
1001 conn = l2cap_conn_add(hcon, 0);
1002 if (!conn) {
1003 hci_conn_put(hcon);
1004 goto done;
1007 err = 0;
1009 /* Update source addr of the socket */
1010 bacpy(src, conn->src);
1012 l2cap_chan_add(conn, sk, NULL);
1014 sk->sk_state = BT_CONNECT;
1015 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1017 if (hcon->state == BT_CONNECTED) {
1018 if (sk->sk_type != SOCK_SEQPACKET &&
1019 sk->sk_type != SOCK_STREAM) {
1020 l2cap_sock_clear_timer(sk);
1021 sk->sk_state = BT_CONNECTED;
1022 } else
1023 l2cap_do_start(sk);
1026 done:
1027 hci_dev_unlock_bh(hdev);
1028 hci_dev_put(hdev);
1029 return err;
1032 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1034 struct sock *sk = sock->sk;
1035 struct sockaddr_l2 la;
1036 int len, err = 0;
1038 BT_DBG("sk %p", sk);
1040 if (!addr || alen < sizeof(addr->sa_family) ||
1041 addr->sa_family != AF_BLUETOOTH)
1042 return -EINVAL;
1044 memset(&la, 0, sizeof(la));
1045 len = min_t(unsigned int, sizeof(la), alen);
1046 memcpy(&la, addr, len);
1048 if (la.l2_cid)
1049 return -EINVAL;
1051 lock_sock(sk);
1053 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1054 && !la.l2_psm) {
1055 err = -EINVAL;
1056 goto done;
1059 switch (l2cap_pi(sk)->mode) {
1060 case L2CAP_MODE_BASIC:
1061 break;
1062 case L2CAP_MODE_ERTM:
1063 case L2CAP_MODE_STREAMING:
1064 if (enable_ertm)
1065 break;
1066 /* fall through */
1067 default:
1068 err = -ENOTSUPP;
1069 goto done;
1072 switch (sk->sk_state) {
1073 case BT_CONNECT:
1074 case BT_CONNECT2:
1075 case BT_CONFIG:
1076 /* Already connecting */
1077 goto wait;
1079 case BT_CONNECTED:
1080 /* Already connected */
1081 goto done;
1083 case BT_OPEN:
1084 case BT_BOUND:
1085 /* Can connect */
1086 break;
1088 default:
1089 err = -EBADFD;
1090 goto done;
1093 /* Set destination address and psm */
1094 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1095 l2cap_pi(sk)->psm = la.l2_psm;
1097 err = l2cap_do_connect(sk);
1098 if (err)
1099 goto done;
1101 wait:
1102 err = bt_sock_wait_state(sk, BT_CONNECTED,
1103 sock_sndtimeo(sk, flags & O_NONBLOCK));
1104 done:
1105 release_sock(sk);
1106 return err;
1109 static int l2cap_sock_listen(struct socket *sock, int backlog)
1111 struct sock *sk = sock->sk;
1112 int err = 0;
1114 BT_DBG("sk %p backlog %d", sk, backlog);
1116 lock_sock(sk);
1118 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1119 || sk->sk_state != BT_BOUND) {
1120 err = -EBADFD;
1121 goto done;
1124 switch (l2cap_pi(sk)->mode) {
1125 case L2CAP_MODE_BASIC:
1126 break;
1127 case L2CAP_MODE_ERTM:
1128 case L2CAP_MODE_STREAMING:
1129 if (enable_ertm)
1130 break;
1131 /* fall through */
1132 default:
1133 err = -ENOTSUPP;
1134 goto done;
1137 if (!l2cap_pi(sk)->psm) {
1138 bdaddr_t *src = &bt_sk(sk)->src;
1139 u16 psm;
1141 err = -EINVAL;
1143 write_lock_bh(&l2cap_sk_list.lock);
1145 for (psm = 0x1001; psm < 0x1100; psm += 2)
1146 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1147 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1148 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1149 err = 0;
1150 break;
1153 write_unlock_bh(&l2cap_sk_list.lock);
1155 if (err < 0)
1156 goto done;
1159 sk->sk_max_ack_backlog = backlog;
1160 sk->sk_ack_backlog = 0;
1161 sk->sk_state = BT_LISTEN;
1163 done:
1164 release_sock(sk);
1165 return err;
1168 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1170 DECLARE_WAITQUEUE(wait, current);
1171 struct sock *sk = sock->sk, *nsk;
1172 long timeo;
1173 int err = 0;
1175 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1177 if (sk->sk_state != BT_LISTEN) {
1178 err = -EBADFD;
1179 goto done;
1182 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1184 BT_DBG("sk %p timeo %ld", sk, timeo);
1186 /* Wait for an incoming connection. (wake-one). */
1187 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1188 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1189 set_current_state(TASK_INTERRUPTIBLE);
1190 if (!timeo) {
1191 err = -EAGAIN;
1192 break;
1195 release_sock(sk);
1196 timeo = schedule_timeout(timeo);
1197 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1199 if (sk->sk_state != BT_LISTEN) {
1200 err = -EBADFD;
1201 break;
1204 if (signal_pending(current)) {
1205 err = sock_intr_errno(timeo);
1206 break;
1209 set_current_state(TASK_RUNNING);
1210 remove_wait_queue(sk_sleep(sk), &wait);
1212 if (err)
1213 goto done;
1215 newsock->state = SS_CONNECTED;
1217 BT_DBG("new socket %p", nsk);
1219 done:
1220 release_sock(sk);
1221 return err;
1224 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1226 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1227 struct sock *sk = sock->sk;
1229 BT_DBG("sock %p, sk %p", sock, sk);
1231 addr->sa_family = AF_BLUETOOTH;
1232 *len = sizeof(struct sockaddr_l2);
1234 if (peer) {
1235 la->l2_psm = l2cap_pi(sk)->psm;
1236 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1237 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1238 } else {
1239 la->l2_psm = l2cap_pi(sk)->sport;
1240 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1241 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1244 return 0;
1247 static int __l2cap_wait_ack(struct sock *sk)
1249 DECLARE_WAITQUEUE(wait, current);
1250 int err = 0;
1251 int timeo = HZ/5;
1253 add_wait_queue(sk_sleep(sk), &wait);
1254 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1255 set_current_state(TASK_INTERRUPTIBLE);
1257 if (!timeo)
1258 timeo = HZ/5;
1260 if (signal_pending(current)) {
1261 err = sock_intr_errno(timeo);
1262 break;
1265 release_sock(sk);
1266 timeo = schedule_timeout(timeo);
1267 lock_sock(sk);
1269 err = sock_error(sk);
1270 if (err)
1271 break;
1273 set_current_state(TASK_RUNNING);
1274 remove_wait_queue(sk_sleep(sk), &wait);
1275 return err;
1278 static void l2cap_monitor_timeout(unsigned long arg)
1280 struct sock *sk = (void *) arg;
1282 bh_lock_sock(sk);
1283 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1284 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1285 bh_unlock_sock(sk);
1286 return;
1289 l2cap_pi(sk)->retry_count++;
1290 __mod_monitor_timer();
1292 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1293 bh_unlock_sock(sk);
1296 static void l2cap_retrans_timeout(unsigned long arg)
1298 struct sock *sk = (void *) arg;
1300 bh_lock_sock(sk);
1301 l2cap_pi(sk)->retry_count = 1;
1302 __mod_monitor_timer();
1304 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1306 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1307 bh_unlock_sock(sk);
1310 static void l2cap_drop_acked_frames(struct sock *sk)
1312 struct sk_buff *skb;
1314 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1315 l2cap_pi(sk)->unacked_frames) {
1316 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1317 break;
1319 skb = skb_dequeue(TX_QUEUE(sk));
1320 kfree_skb(skb);
1322 l2cap_pi(sk)->unacked_frames--;
1325 if (!l2cap_pi(sk)->unacked_frames)
1326 del_timer(&l2cap_pi(sk)->retrans_timer);
1329 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1331 struct l2cap_pinfo *pi = l2cap_pi(sk);
1333 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1335 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming-mode TX: stamp each queued PDU with the next TxSeq (and FCS
 * when enabled), transmit a clone and free the original -- streaming
 * mode keeps no retransmission state.  Always returns 0.
 */
1338 static int l2cap_streaming_send(struct sock *sk)
1340 struct sk_buff *skb, *tx_skb;
1341 struct l2cap_pinfo *pi = l2cap_pi(sk);
1342 u16 control, fcs;
1344 while ((skb = sk->sk_send_head)) {
1345 tx_skb = skb_clone(skb, GFP_ATOMIC);
1347 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1348 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1349 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS occupies the last two bytes of the PDU. */
1351 if (pi->fcs == L2CAP_FCS_CRC16) {
1352 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1353 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1356 l2cap_do_send(sk, tx_skb);
/* TxSeq is a modulo-64 sequence number in the control field. */
1358 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1360 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1361 sk->sk_send_head = NULL;
1362 else
1363 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1365 skb = skb_dequeue(TX_QUEUE(sk));
1366 kfree_skb(skb);
1368 return 0;
/* Retransmit the single queued I-frame whose TxSeq is @tx_seq (SREJ
 * recovery).  Disconnects the channel if that frame has already been
 * sent remote_max_tx times; silently returns if the frame is gone.
 */
1371 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1374 struct sk_buff *skb, *tx_skb;
1375 u16 control, fcs;
1377 skb = skb_peek(TX_QUEUE(sk));
1378 if (!skb)
1379 return;
/* Linear scan of the TX queue for the requested sequence number. */
1381 do {
1382 if (bt_cb(skb)->tx_seq == tx_seq)
1383 break;
1385 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1386 return;
1388 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1390 if (pi->remote_max_tx &&
1391 bt_cb(skb)->retries == pi->remote_max_tx) {
1392 l2cap_send_disconn_req(pi->conn, sk);
1393 return;
/* Transmit a clone; the original stays queued for further retries. */
1396 tx_skb = skb_clone(skb, GFP_ATOMIC);
1397 bt_cb(skb)->retries++;
1398 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1399 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1400 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1401 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1403 if (pi->fcs == L2CAP_FCS_CRC16) {
1404 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1405 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1408 l2cap_do_send(sk, tx_skb);
1411 static int l2cap_ertm_send(struct sock *sk)
1413 struct sk_buff *skb, *tx_skb;
1414 struct l2cap_pinfo *pi = l2cap_pi(sk);
1415 u16 control, fcs;
1416 int nsent = 0;
1419 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1421 if (pi->remote_max_tx &&
1422 bt_cb(skb)->retries == pi->remote_max_tx) {
1423 l2cap_send_disconn_req(pi->conn, sk);
1424 break;
1427 tx_skb = skb_clone(skb, GFP_ATOMIC);
1429 bt_cb(skb)->retries++;
1431 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1432 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1433 control |= L2CAP_CTRL_FINAL;
1434 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1436 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1437 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1438 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1441 if (pi->fcs == L2CAP_FCS_CRC16) {
1442 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1443 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1446 l2cap_do_send(sk, tx_skb);
1448 __mod_retrans_timer();
1450 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1451 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1453 pi->unacked_frames++;
1454 pi->frames_sent++;
1456 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1457 sk->sk_send_head = NULL;
1458 else
1459 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1461 nsent++;
1464 return nsent;
/* Rewind the send pointer to the head of the TX queue and resend every
 * unacked frame starting from expected_ack_seq (REJ recovery).  Returns
 * the number of frames (re)sent by l2cap_ertm_send().
 */
1467 static int l2cap_retransmit_frames(struct sock *sk)
1469 struct l2cap_pinfo *pi = l2cap_pi(sk);
1470 int ret;
1472 spin_lock_bh(&pi->send_lock);
1474 if (!skb_queue_empty(TX_QUEUE(sk)))
1475 sk->sk_send_head = TX_QUEUE(sk)->next;
1477 pi->next_tx_seq = pi->expected_ack_seq;
1478 ret = l2cap_ertm_send(sk);
1480 spin_unlock_bh(&pi->send_lock);
1482 return ret;
/* Acknowledge received I-frames.  If locally busy, send RNR; otherwise
 * try to piggyback the ack on pending I-frames via l2cap_ertm_send(),
 * and only send an explicit RR S-frame when nothing went out.
 */
1485 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1487 struct sock *sk = (struct sock *)pi;
1488 u16 control = 0;
1489 int nframes;
1491 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1493 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1494 control |= L2CAP_SUPER_RCV_NOT_READY;
1495 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1496 l2cap_send_sframe(pi, control);
1497 return;
1500 spin_lock_bh(&pi->send_lock);
1501 nframes = l2cap_ertm_send(sk);
1502 spin_unlock_bh(&pi->send_lock);
/* I-frames carry ReqSeq themselves, so they double as the ack. */
1504 if (nframes > 0)
1505 return;
1507 control |= L2CAP_SUPER_RCV_READY;
1508 l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame (with F bit) for the last entry on the channel's
 * SREJ list -- i.e. the highest-sequence missing frame recorded so far.
 */
1511 static void l2cap_send_srejtail(struct sock *sk)
1513 struct srej_list *tail;
1514 u16 control;
1516 control = L2CAP_SUPER_SELECT_REJECT;
1517 control |= L2CAP_CTRL_FINAL;
1519 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1520 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1522 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy @len bytes of user data from @msg into @skb: @count bytes into
 * the linear area, the rest into frag_list skbs of at most conn->mtu
 * each.  Returns bytes copied, or -EFAULT on allocation/copy failure
 * (the caller frees @skb, which also frees any queued fragments).
 */
1525 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1527 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1528 struct sk_buff **frag;
1529 int err, sent = 0;
1531 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1532 return -EFAULT;
1534 sent += count;
1535 len -= count;
1537 /* Continuation fragments (no L2CAP header) */
1538 frag = &skb_shinfo(skb)->frag_list;
1539 while (len) {
1540 count = min_t(unsigned int, conn->mtu, len);
1542 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1543 if (!*frag)
1544 return -EFAULT;
1545 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1546 return -EFAULT;
1548 sent += count;
1549 len -= count;
1551 frag = &(*frag)->next;
1554 return sent;
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header plus a 2-byte
 * PSM, payload copied from the user iovec (fragmented to conn->mtu).
 * Returns the skb or an ERR_PTR.
 */
1557 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1559 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1560 struct sk_buff *skb;
1561 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1562 struct l2cap_hdr *lh;
1564 BT_DBG("sk %p len %d", sk, (int)len);
1566 count = min_t(unsigned int, (conn->mtu - hlen), len);
1567 skb = bt_skb_send_alloc(sk, count + hlen,
1568 msg->msg_flags & MSG_DONTWAIT, &err);
1569 if (!skb)
1570 return ERR_PTR(-ENOMEM);
1572 /* Create L2CAP header */
1573 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1574 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1575 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1576 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1578 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1579 if (unlikely(err < 0)) {
1580 kfree_skb(skb);
1581 return ERR_PTR(err);
1583 return skb;
/* Build a basic-mode PDU: bare L2CAP header, payload copied from the
 * user iovec (fragmented to conn->mtu).  Returns the skb or an ERR_PTR.
 */
1586 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1588 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1589 struct sk_buff *skb;
1590 int err, count, hlen = L2CAP_HDR_SIZE;
1591 struct l2cap_hdr *lh;
1593 BT_DBG("sk %p len %d", sk, (int)len);
1595 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen,
1597 msg->msg_flags & MSG_DONTWAIT, &err);
1598 if (!skb)
1599 return ERR_PTR(-ENOMEM);
1601 /* Create L2CAP header */
1602 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1603 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1604 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1606 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1607 if (unlikely(err < 0)) {
1608 kfree_skb(skb);
1609 return ERR_PTR(err);
1611 return skb;
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field (@control), optional 2-byte SDU length (@sdulen, for SAR start
 * frames) and an FCS placeholder when CRC16 is negotiated.  The real
 * TxSeq and FCS are filled in at transmit time.  Returns skb or ERR_PTR.
 */
1614 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1616 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1617 struct sk_buff *skb;
1618 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1619 struct l2cap_hdr *lh;
1621 BT_DBG("sk %p len %d", sk, (int)len);
1623 if (!conn)
1624 return ERR_PTR(-ENOTCONN);
/* Extra header space: +2 for SAR SDU length, +2 for the FCS trailer. */
1626 if (sdulen)
1627 hlen += 2;
1629 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1630 hlen += 2;
1632 count = min_t(unsigned int, (conn->mtu - hlen), len);
1633 skb = bt_skb_send_alloc(sk, count + hlen,
1634 msg->msg_flags & MSG_DONTWAIT, &err);
1635 if (!skb)
1636 return ERR_PTR(-ENOMEM);
1638 /* Create L2CAP header */
1639 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1640 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1641 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1642 put_unaligned_le16(control, skb_put(skb, 2));
1643 if (sdulen)
1644 put_unaligned_le16(sdulen, skb_put(skb, 2));
1646 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1647 if (unlikely(err < 0)) {
1648 kfree_skb(skb);
1649 return ERR_PTR(err);
/* Reserve the FCS trailer; overwritten with the real CRC on send. */
1652 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1653 put_unaligned_le16(0, skb_put(skb, 2));
1655 bt_cb(skb)->retries = 0;
1656 return skb;
/* Segment an SDU larger than remote_mps into start/continue/end I-frame
 * PDUs, queue them on the TX queue and point sk_send_head at the first
 * new frame if transmission was idle.  Returns total bytes queued or a
 * negative errno (nothing is queued on failure).
 */
1659 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1661 struct l2cap_pinfo *pi = l2cap_pi(sk);
1662 struct sk_buff *skb;
1663 struct sk_buff_head sar_queue;
1664 u16 control;
1665 size_t size = 0;
1667 skb_queue_head_init(&sar_queue);
/* The start frame carries the total SDU length. */
1668 control = L2CAP_SDU_START;
1669 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1670 if (IS_ERR(skb))
1671 return PTR_ERR(skb);
1673 __skb_queue_tail(&sar_queue, skb);
1674 len -= pi->remote_mps;
1675 size += pi->remote_mps;
1677 while (len > 0) {
1678 size_t buflen;
1680 if (len > pi->remote_mps) {
1681 control = L2CAP_SDU_CONTINUE;
1682 buflen = pi->remote_mps;
1683 } else {
1684 control = L2CAP_SDU_END;
1685 buflen = len;
1688 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1689 if (IS_ERR(skb)) {
1690 skb_queue_purge(&sar_queue);
1691 return PTR_ERR(skb);
1694 __skb_queue_tail(&sar_queue, skb);
1695 len -= buflen;
1696 size += buflen;
1698 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1699 spin_lock_bh(&pi->send_lock);
1700 if (sk->sk_send_head == NULL)
1701 sk->sk_send_head = sar_queue.next;
1702 spin_unlock_bh(&pi->send_lock);
1704 return size;
1707 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1709 struct sock *sk = sock->sk;
1710 struct l2cap_pinfo *pi = l2cap_pi(sk);
1711 struct sk_buff *skb;
1712 u16 control;
1713 int err;
1715 BT_DBG("sock %p, sk %p", sock, sk);
1717 err = sock_error(sk);
1718 if (err)
1719 return err;
1721 if (msg->msg_flags & MSG_OOB)
1722 return -EOPNOTSUPP;
1724 lock_sock(sk);
1726 if (sk->sk_state != BT_CONNECTED) {
1727 err = -ENOTCONN;
1728 goto done;
1731 /* Connectionless channel */
1732 if (sk->sk_type == SOCK_DGRAM) {
1733 skb = l2cap_create_connless_pdu(sk, msg, len);
1734 if (IS_ERR(skb)) {
1735 err = PTR_ERR(skb);
1736 } else {
1737 l2cap_do_send(sk, skb);
1738 err = len;
1740 goto done;
1743 switch (pi->mode) {
1744 case L2CAP_MODE_BASIC:
1745 /* Check outgoing MTU */
1746 if (len > pi->omtu) {
1747 err = -EINVAL;
1748 goto done;
1751 /* Create a basic PDU */
1752 skb = l2cap_create_basic_pdu(sk, msg, len);
1753 if (IS_ERR(skb)) {
1754 err = PTR_ERR(skb);
1755 goto done;
1758 l2cap_do_send(sk, skb);
1759 err = len;
1760 break;
1762 case L2CAP_MODE_ERTM:
1763 case L2CAP_MODE_STREAMING:
1764 /* Entire SDU fits into one PDU */
1765 if (len <= pi->remote_mps) {
1766 control = L2CAP_SDU_UNSEGMENTED;
1767 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1768 if (IS_ERR(skb)) {
1769 err = PTR_ERR(skb);
1770 goto done;
1772 __skb_queue_tail(TX_QUEUE(sk), skb);
1774 if (pi->mode == L2CAP_MODE_ERTM)
1775 spin_lock_bh(&pi->send_lock);
1777 if (sk->sk_send_head == NULL)
1778 sk->sk_send_head = skb;
1780 if (pi->mode == L2CAP_MODE_ERTM)
1781 spin_unlock_bh(&pi->send_lock);
1782 } else {
1783 /* Segment SDU into multiples PDUs */
1784 err = l2cap_sar_segment_sdu(sk, msg, len);
1785 if (err < 0)
1786 goto done;
1789 if (pi->mode == L2CAP_MODE_STREAMING) {
1790 err = l2cap_streaming_send(sk);
1791 } else {
1792 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1793 pi->conn_state && L2CAP_CONN_WAIT_F) {
1794 err = len;
1795 break;
1797 spin_lock_bh(&pi->send_lock);
1798 err = l2cap_ertm_send(sk);
1799 spin_unlock_bh(&pi->send_lock);
1802 if (err >= 0)
1803 err = len;
1804 break;
1806 default:
1807 BT_DBG("bad state %1.1x", pi->mode);
1808 err = -EINVAL;
1811 done:
1812 release_sock(sk);
1813 return err;
/* recvmsg() handler.  On a deferred-setup socket the first read
 * completes connection setup (sends the pending connect response and
 * moves to BT_CONFIG); otherwise data is read via the common Bluetooth
 * receive path.
 */
1816 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1818 struct sock *sk = sock->sk;
1820 lock_sock(sk);
1822 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1823 struct l2cap_conn_rsp rsp;
1825 sk->sk_state = BT_CONFIG;
1827 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1828 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1829 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1830 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1831 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1832 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1834 release_sock(sk);
1835 return 0;
1838 release_sock(sk);
1840 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (MTUs, mode, FCS, window)
 * and L2CAP_LM (link-mode/security flags).  Options are pre-filled with
 * current values so a short copy_from_user leaves the rest unchanged.
 */
1843 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1845 struct sock *sk = sock->sk;
1846 struct l2cap_options opts;
1847 int len, err = 0;
1848 u32 opt;
1850 BT_DBG("sk %p", sk);
1852 lock_sock(sk);
1854 switch (optname) {
1855 case L2CAP_OPTIONS:
1856 opts.imtu = l2cap_pi(sk)->imtu;
1857 opts.omtu = l2cap_pi(sk)->omtu;
1858 opts.flush_to = l2cap_pi(sk)->flush_to;
1859 opts.mode = l2cap_pi(sk)->mode;
1860 opts.fcs = l2cap_pi(sk)->fcs;
1861 opts.max_tx = l2cap_pi(sk)->max_tx;
1862 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1864 len = min_t(unsigned int, sizeof(opts), optlen);
1865 if (copy_from_user((char *) &opts, optval, len)) {
1866 err = -EFAULT;
1867 break;
/* ERTM/streaming may only be selected when the module enables them. */
1870 l2cap_pi(sk)->mode = opts.mode;
1871 switch (l2cap_pi(sk)->mode) {
1872 case L2CAP_MODE_BASIC:
1873 break;
1874 case L2CAP_MODE_ERTM:
1875 case L2CAP_MODE_STREAMING:
1876 if (enable_ertm)
1877 break;
1878 /* fall through */
1879 default:
1880 err = -EINVAL;
1881 break;
1884 l2cap_pi(sk)->imtu = opts.imtu;
1885 l2cap_pi(sk)->omtu = opts.omtu;
1886 l2cap_pi(sk)->fcs = opts.fcs;
1887 l2cap_pi(sk)->max_tx = opts.max_tx;
1888 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1889 break;
1891 case L2CAP_LM:
1892 if (get_user(opt, (u32 __user *) optval)) {
1893 err = -EFAULT;
1894 break;
/* Highest requested flag wins: SECURE > ENCRYPT > AUTH. */
1897 if (opt & L2CAP_LM_AUTH)
1898 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1899 if (opt & L2CAP_LM_ENCRYPT)
1900 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1901 if (opt & L2CAP_LM_SECURE)
1902 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1904 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1905 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1906 break;
1908 default:
1909 err = -ENOPROTOOPT;
1910 break;
1913 release_sock(sk);
1914 return err;
/* setsockopt() dispatcher: SOL_L2CAP goes to the legacy handler, only
 * SOL_BLUETOOTH is handled here (BT_SECURITY, BT_DEFER_SETUP).
 */
1917 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1919 struct sock *sk = sock->sk;
1920 struct bt_security sec;
1921 int len, err = 0;
1922 u32 opt;
1924 BT_DBG("sk %p", sk);
1926 if (level == SOL_L2CAP)
1927 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1929 if (level != SOL_BLUETOOTH)
1930 return -ENOPROTOOPT;
1932 lock_sock(sk);
1934 switch (optname) {
1935 case BT_SECURITY:
1936 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1937 && sk->sk_type != SOCK_RAW) {
1938 err = -EINVAL;
1939 break;
/* Default when the caller passes a short struct. */
1942 sec.level = BT_SECURITY_LOW;
1944 len = min_t(unsigned int, sizeof(sec), optlen);
1945 if (copy_from_user((char *) &sec, optval, len)) {
1946 err = -EFAULT;
1947 break;
1950 if (sec.level < BT_SECURITY_LOW ||
1951 sec.level > BT_SECURITY_HIGH) {
1952 err = -EINVAL;
1953 break;
1956 l2cap_pi(sk)->sec_level = sec.level;
1957 break;
1959 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before the socket is active. */
1960 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1961 err = -EINVAL;
1962 break;
1965 if (get_user(opt, (u32 __user *) optval)) {
1966 err = -EFAULT;
1967 break;
1970 bt_sk(sk)->defer_setup = opt;
1971 break;
1973 default:
1974 err = -ENOPROTOOPT;
1975 break;
1978 release_sock(sk);
1979 return err;
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM (link-mode flags
 * reconstructed from sec_level) and L2CAP_CONNINFO (handle + class).
 */
1982 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1984 struct sock *sk = sock->sk;
1985 struct l2cap_options opts;
1986 struct l2cap_conninfo cinfo;
1987 int len, err = 0;
1988 u32 opt;
1990 BT_DBG("sk %p", sk);
1992 if (get_user(len, optlen))
1993 return -EFAULT;
1995 lock_sock(sk);
1997 switch (optname) {
1998 case L2CAP_OPTIONS:
1999 opts.imtu = l2cap_pi(sk)->imtu;
2000 opts.omtu = l2cap_pi(sk)->omtu;
2001 opts.flush_to = l2cap_pi(sk)->flush_to;
2002 opts.mode = l2cap_pi(sk)->mode;
2003 opts.fcs = l2cap_pi(sk)->fcs;
2004 opts.max_tx = l2cap_pi(sk)->max_tx;
2005 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2007 len = min_t(unsigned int, len, sizeof(opts));
2008 if (copy_to_user(optval, (char *) &opts, len))
2009 err = -EFAULT;
2011 break;
2013 case L2CAP_LM:
/* Map the single sec_level back onto the cumulative legacy flags. */
2014 switch (l2cap_pi(sk)->sec_level) {
2015 case BT_SECURITY_LOW:
2016 opt = L2CAP_LM_AUTH;
2017 break;
2018 case BT_SECURITY_MEDIUM:
2019 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2020 break;
2021 case BT_SECURITY_HIGH:
2022 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2023 L2CAP_LM_SECURE;
2024 break;
2025 default:
2026 opt = 0;
2027 break;
2030 if (l2cap_pi(sk)->role_switch)
2031 opt |= L2CAP_LM_MASTER;
2033 if (l2cap_pi(sk)->force_reliable)
2034 opt |= L2CAP_LM_RELIABLE;
2036 if (put_user(opt, (u32 __user *) optval))
2037 err = -EFAULT;
2038 break;
2040 case L2CAP_CONNINFO:
2041 if (sk->sk_state != BT_CONNECTED &&
2042 !(sk->sk_state == BT_CONNECT2 &&
2043 bt_sk(sk)->defer_setup)) {
2044 err = -ENOTCONN;
2045 break;
2048 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2049 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2051 len = min_t(unsigned int, len, sizeof(cinfo));
2052 if (copy_to_user(optval, (char *) &cinfo, len))
2053 err = -EFAULT;
2055 break;
2057 default:
2058 err = -ENOPROTOOPT;
2059 break;
2062 release_sock(sk);
2063 return err;
/* getsockopt() dispatcher: SOL_L2CAP goes to the legacy handler, only
 * SOL_BLUETOOTH is handled here (BT_SECURITY, BT_DEFER_SETUP).
 */
2066 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2068 struct sock *sk = sock->sk;
2069 struct bt_security sec;
2070 int len, err = 0;
2072 BT_DBG("sk %p", sk);
2074 if (level == SOL_L2CAP)
2075 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2077 if (level != SOL_BLUETOOTH)
2078 return -ENOPROTOOPT;
2080 if (get_user(len, optlen))
2081 return -EFAULT;
2083 lock_sock(sk);
2085 switch (optname) {
2086 case BT_SECURITY:
2087 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2088 && sk->sk_type != SOCK_RAW) {
2089 err = -EINVAL;
2090 break;
2093 sec.level = l2cap_pi(sk)->sec_level;
2095 len = min_t(unsigned int, len, sizeof(sec));
2096 if (copy_to_user(optval, (char *) &sec, len))
2097 err = -EFAULT;
2099 break;
2101 case BT_DEFER_SETUP:
2102 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2103 err = -EINVAL;
2104 break;
2107 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2108 err = -EFAULT;
2110 break;
2112 default:
2113 err = -ENOPROTOOPT;
2114 break;
2117 release_sock(sk);
2118 return err;
/* shutdown() handler.  For ERTM channels, first wait for outstanding
 * I-frames to be acked, then close the channel; honour SO_LINGER by
 * waiting for BT_CLOSED up to sk_lingertime.
 */
2121 static int l2cap_sock_shutdown(struct socket *sock, int how)
2123 struct sock *sk = sock->sk;
2124 int err = 0;
2126 BT_DBG("sock %p, sk %p", sock, sk);
2128 if (!sk)
2129 return 0;
2131 lock_sock(sk);
2132 if (!sk->sk_shutdown) {
2133 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2134 err = __l2cap_wait_ack(sk);
2136 sk->sk_shutdown = SHUTDOWN_MASK;
2137 l2cap_sock_clear_timer(sk);
2138 __l2cap_sock_close(sk, 0);
2140 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2141 err = bt_sock_wait_state(sk, BT_CLOSED,
2142 sk->sk_lingertime);
2144 release_sock(sk);
2145 return err;
2148 static int l2cap_sock_release(struct socket *sock)
2150 struct sock *sk = sock->sk;
2151 int err;
2153 BT_DBG("sock %p, sk %p", sock, sk);
2155 if (!sk)
2156 return 0;
2158 err = l2cap_sock_shutdown(sock, 2);
2160 sock_orphan(sk);
2161 l2cap_sock_kill(sk);
2162 return err;
/* Channel configuration finished: clear config state and wake whoever
 * is waiting -- the connecting task (outgoing) or the listening parent
 * (incoming).
 */
2165 static void l2cap_chan_ready(struct sock *sk)
2167 struct sock *parent = bt_sk(sk)->parent;
2169 BT_DBG("sk %p, parent %p", sk, parent);
2171 l2cap_pi(sk)->conf_state = 0;
2172 l2cap_sock_clear_timer(sk);
2174 if (!parent) {
2175 /* Outgoing channel.
2176 * Wake up socket sleeping on connect.
2178 sk->sk_state = BT_CONNECTED;
2179 sk->sk_state_change(sk);
2180 } else {
2181 /* Incoming channel.
2182 * Wake up socket sleeping on accept.
2184 parent->sk_data_ready(parent, 0);
2188 /* Copy frame to all raw sockets on that connection */
2189 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2191 struct l2cap_chan_list *l = &conn->chan_list;
2192 struct sk_buff *nskb;
2193 struct sock *sk;
2195 BT_DBG("conn %p", conn);
2197 read_lock(&l->lock);
2198 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2199 if (sk->sk_type != SOCK_RAW)
2200 continue;
2202 /* Don't send frame to the socket it came from */
2203 if (skb->sk == sk)
2204 continue;
/* Each raw socket gets its own clone; drop it if its queue is full. */
2205 nskb = skb_clone(skb, GFP_ATOMIC);
2206 if (!nskb)
2207 continue;
2209 if (sock_queue_rcv_skb(sk, nskb))
2210 kfree_skb(nskb);
2212 read_unlock(&l->lock);
2215 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel skb: L2CAP header + command header +
 * @dlen bytes of @data, with anything beyond conn->mtu placed in
 * frag_list continuation skbs.  Returns NULL on allocation failure.
 */
2216 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2217 u8 code, u8 ident, u16 dlen, void *data)
2219 struct sk_buff *skb, **frag;
2220 struct l2cap_cmd_hdr *cmd;
2221 struct l2cap_hdr *lh;
2222 int len, count;
2224 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2225 conn, code, ident, dlen);
2227 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2228 count = min_t(unsigned int, conn->mtu, len);
2230 skb = bt_skb_alloc(count, GFP_ATOMIC);
2231 if (!skb)
2232 return NULL;
2234 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2235 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2236 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2238 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2239 cmd->code = code;
2240 cmd->ident = ident;
2241 cmd->len = cpu_to_le16(dlen);
2243 if (dlen) {
2244 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2245 memcpy(skb_put(skb, count), data, count);
2246 data += count;
2249 len -= skb->len;
2251 /* Continuation fragments (no L2CAP header) */
2252 frag = &skb_shinfo(skb)->frag_list;
2253 while (len) {
2254 count = min_t(unsigned int, conn->mtu, len);
2256 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2257 if (!*frag)
2258 goto fail;
2260 memcpy(skb_put(*frag, count), data, count);
2262 len -= count;
2263 data += count;
2265 frag = &(*frag)->next;
2268 return skb;
2270 fail:
/* kfree_skb() also frees any frag_list skbs attached so far. */
2271 kfree_skb(skb);
2272 return NULL;
/* Decode one configuration option at *@ptr: returns its total encoded
 * length and advances *@ptr past it.  1/2/4-byte values are returned
 * by value in *@val; larger options return a pointer to the raw bytes.
 * NOTE(review): opt->len comes from the peer and is not bounds-checked
 * against the remaining buffer here -- verify callers cap the overall
 * length before looping.
 */
2275 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2277 struct l2cap_conf_opt *opt = *ptr;
2278 int len;
2280 len = L2CAP_CONF_OPT_SIZE + opt->len;
2281 *ptr += len;
2283 *type = opt->type;
2284 *olen = opt->len;
2286 switch (opt->len) {
2287 case 1:
2288 *val = *((u8 *) opt->val);
2289 break;
2291 case 2:
2292 *val = __le16_to_cpu(*((__le16 *) opt->val));
2293 break;
2295 case 4:
2296 *val = __le32_to_cpu(*((__le32 *) opt->val));
2297 break;
2299 default:
2300 *val = (unsigned long) opt->val;
2301 break;
2304 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2305 return len;
/* Append one configuration option at *@ptr and advance it.  For 1/2/4
 * byte options @val holds the value itself (stored little-endian);
 * otherwise @val is a pointer to @len raw bytes to copy.
 */
2308 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2310 struct l2cap_conf_opt *opt = *ptr;
2312 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2314 opt->type = type;
2315 opt->len = len;
2317 switch (len) {
2318 case 1:
2319 *((u8 *) opt->val) = val;
2320 break;
2322 case 2:
2323 *((__le16 *) opt->val) = cpu_to_le16(val);
2324 break;
2326 case 4:
2327 *((__le32 *) opt->val) = cpu_to_le32(val);
2328 break;
2330 default:
2331 memcpy(opt->val, (void *) val, len);
2332 break;
2335 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer expiry: send any pending acknowledgement for received
 * I-frames on this channel.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
/* Initialise per-channel ERTM state: sequence counters, the three ERTM
 * timers (retransmission, monitor, ack), the SREJ/busy queues, the TX
 * spinlock and the local-busy work item.
 */
2347 static inline void l2cap_ertm_init(struct sock *sk)
2349 l2cap_pi(sk)->expected_ack_seq = 0;
2350 l2cap_pi(sk)->unacked_frames = 0;
2351 l2cap_pi(sk)->buffer_seq = 0;
2352 l2cap_pi(sk)->num_acked = 0;
2353 l2cap_pi(sk)->frames_sent = 0;
2355 setup_timer(&l2cap_pi(sk)->retrans_timer,
2356 l2cap_retrans_timeout, (unsigned long) sk);
2357 setup_timer(&l2cap_pi(sk)->monitor_timer,
2358 l2cap_monitor_timeout, (unsigned long) sk);
2359 setup_timer(&l2cap_pi(sk)->ack_timer,
2360 l2cap_ack_timeout, (unsigned long) sk);
2362 __skb_queue_head_init(SREJ_QUEUE(sk));
2363 __skb_queue_head_init(BUSY_QUEUE(sk));
2364 spin_lock_init(&l2cap_pi(sk)->send_lock);
2366 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2369 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2371 u32 local_feat_mask = l2cap_feat_mask;
2372 if (enable_ertm)
2373 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2375 switch (mode) {
2376 case L2CAP_MODE_ERTM:
2377 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2378 case L2CAP_MODE_STREAMING:
2379 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2380 default:
2381 return 0x00;
2385 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2387 switch (mode) {
2388 case L2CAP_MODE_STREAMING:
2389 case L2CAP_MODE_ERTM:
2390 if (l2cap_mode_supported(mode, remote_feat_mask))
2391 return mode;
2392 /* fall through */
2393 default:
2394 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into @data: on the first
 * pass pick the channel mode, then emit MTU (basic) or RFC + optional
 * "no FCS" options (ERTM/streaming).  Returns the request length.
 */
2398 static int l2cap_build_conf_req(struct sock *sk, void *data)
2400 struct l2cap_pinfo *pi = l2cap_pi(sk);
2401 struct l2cap_conf_req *req = data;
2402 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2403 void *ptr = req->data;
2405 BT_DBG("sk %p", sk);
/* Mode selection happens only on the very first config exchange. */
2407 if (pi->num_conf_req || pi->num_conf_rsp)
2408 goto done;
2410 switch (pi->mode) {
2411 case L2CAP_MODE_STREAMING:
2412 case L2CAP_MODE_ERTM:
2413 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2414 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2415 l2cap_send_disconn_req(pi->conn, sk);
2416 break;
2417 default:
2418 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2419 break;
2422 done:
2423 switch (pi->mode) {
2424 case L2CAP_MODE_BASIC:
2425 if (pi->imtu != L2CAP_DEFAULT_MTU)
2426 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2427 break;
2429 case L2CAP_MODE_ERTM:
2430 rfc.mode = L2CAP_MODE_ERTM;
2431 rfc.txwin_size = pi->tx_win;
2432 rfc.max_transmit = pi->max_tx;
/* Timeouts are filled in by the acceptor per the spec. */
2433 rfc.retrans_timeout = 0;
2434 rfc.monitor_timeout = 0;
/* Cap the PDU size so header + payload fits the ACL MTU. */
2435 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2436 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2437 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2440 sizeof(rfc), (unsigned long) &rfc);
2442 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2443 break;
2445 if (pi->fcs == L2CAP_FCS_NONE ||
2446 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2447 pi->fcs = L2CAP_FCS_NONE;
2448 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2450 break;
2452 case L2CAP_MODE_STREAMING:
2453 rfc.mode = L2CAP_MODE_STREAMING;
2454 rfc.txwin_size = 0;
2455 rfc.max_transmit = 0;
2456 rfc.retrans_timeout = 0;
2457 rfc.monitor_timeout = 0;
2458 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2459 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2460 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2462 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2463 sizeof(rfc), (unsigned long) &rfc);
2465 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2466 break;
2468 if (pi->fcs == L2CAP_FCS_NONE ||
2469 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2470 pi->fcs = L2CAP_FCS_NONE;
2471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2473 break;
2476 /* FIXME: Need actual value of the flush timeout */
2477 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2478 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2480 req->dcid = cpu_to_le16(pi->dcid);
2481 req->flags = cpu_to_le16(0);
2483 return ptr - data;
2486 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2488 struct l2cap_pinfo *pi = l2cap_pi(sk);
2489 struct l2cap_conf_rsp *rsp = data;
2490 void *ptr = rsp->data;
2491 void *req = pi->conf_req;
2492 int len = pi->conf_len;
2493 int type, hint, olen;
2494 unsigned long val;
2495 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2496 u16 mtu = L2CAP_DEFAULT_MTU;
2497 u16 result = L2CAP_CONF_SUCCESS;
2499 BT_DBG("sk %p", sk);
2501 while (len >= L2CAP_CONF_OPT_SIZE) {
2502 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2504 hint = type & L2CAP_CONF_HINT;
2505 type &= L2CAP_CONF_MASK;
2507 switch (type) {
2508 case L2CAP_CONF_MTU:
2509 mtu = val;
2510 break;
2512 case L2CAP_CONF_FLUSH_TO:
2513 pi->flush_to = val;
2514 break;
2516 case L2CAP_CONF_QOS:
2517 break;
2519 case L2CAP_CONF_RFC:
2520 if (olen == sizeof(rfc))
2521 memcpy(&rfc, (void *) val, olen);
2522 break;
2524 case L2CAP_CONF_FCS:
2525 if (val == L2CAP_FCS_NONE)
2526 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2528 break;
2530 default:
2531 if (hint)
2532 break;
2534 result = L2CAP_CONF_UNKNOWN;
2535 *((u8 *) ptr++) = type;
2536 break;
2540 if (pi->num_conf_rsp || pi->num_conf_req)
2541 goto done;
2543 switch (pi->mode) {
2544 case L2CAP_MODE_STREAMING:
2545 case L2CAP_MODE_ERTM:
2546 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2547 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2548 return -ECONNREFUSED;
2549 break;
2550 default:
2551 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2552 break;
2555 done:
2556 if (pi->mode != rfc.mode) {
2557 result = L2CAP_CONF_UNACCEPT;
2558 rfc.mode = pi->mode;
2560 if (pi->num_conf_rsp == 1)
2561 return -ECONNREFUSED;
2563 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2564 sizeof(rfc), (unsigned long) &rfc);
2568 if (result == L2CAP_CONF_SUCCESS) {
2569 /* Configure output options and let the other side know
2570 * which ones we don't like. */
2572 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2573 result = L2CAP_CONF_UNACCEPT;
2574 else {
2575 pi->omtu = mtu;
2576 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2578 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2580 switch (rfc.mode) {
2581 case L2CAP_MODE_BASIC:
2582 pi->fcs = L2CAP_FCS_NONE;
2583 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2584 break;
2586 case L2CAP_MODE_ERTM:
2587 pi->remote_tx_win = rfc.txwin_size;
2588 pi->remote_max_tx = rfc.max_transmit;
2589 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2590 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2592 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2594 rfc.retrans_timeout =
2595 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2596 rfc.monitor_timeout =
2597 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2599 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2601 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2602 sizeof(rfc), (unsigned long) &rfc);
2604 break;
2606 case L2CAP_MODE_STREAMING:
2607 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2608 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2610 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2612 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2614 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2615 sizeof(rfc), (unsigned long) &rfc);
2617 break;
2619 default:
2620 result = L2CAP_CONF_UNACCEPT;
2622 memset(&rfc, 0, sizeof(rfc));
2623 rfc.mode = pi->mode;
2626 if (result == L2CAP_CONF_SUCCESS)
2627 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2629 rsp->scid = cpu_to_le16(pi->dcid);
2630 rsp->result = cpu_to_le16(result);
2631 rsp->flags = cpu_to_le16(0x0000);
2633 return ptr - data;
/* Parse the peer's configuration *response* and build a fresh config
 * request reflecting the values the peer wants us to use.
 *
 * @sk:     channel socket
 * @rsp:    option list from the peer's L2CAP_CONF_RSP payload
 * @len:    length of that option list in bytes
 * @data:   output buffer; an l2cap_conf_req is assembled here
 * @result: in/out response code; set to L2CAP_CONF_UNACCEPT if the
 *          peer's MTU is below the allowed minimum
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when
 * the device is locked to a single mode (L2CAP_CONF_STATE2_DEVICE) and
 * the peer insists on a different one.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Clamp to the spec minimum; flag the response as
			 * unacceptable so another negotiation round happens. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->omtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->omtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* Device is pinned to one mode: refuse anything else. */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->mode = rfc.mode;
			pi->fcs = 0;

			/* Echo the (possibly adjusted) RFC option back. */
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	if (*result == L2CAP_CONF_SUCCESS) {
		/* Negotiation settled: latch the ERTM/streaming parameters.
		 * NOTE(review): rfc is only defined here if the peer actually
		 * sent an RFC option in this response — verify against the
		 * state machine that SUCCESS implies an RFC option was seen. */
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2702 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2704 struct l2cap_conf_rsp *rsp = data;
2705 void *ptr = rsp->data;
2707 BT_DBG("sk %p", sk);
2709 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2710 rsp->result = cpu_to_le16(result);
2711 rsp->flags = cpu_to_le16(flags);
2713 return ptr - data;
2716 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2718 struct l2cap_pinfo *pi = l2cap_pi(sk);
2719 int type, olen;
2720 unsigned long val;
2721 struct l2cap_conf_rfc rfc;
2723 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2725 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2726 return;
2728 while (len >= L2CAP_CONF_OPT_SIZE) {
2729 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2731 switch (type) {
2732 case L2CAP_CONF_RFC:
2733 if (olen == sizeof(rfc))
2734 memcpy(&rfc, (void *)val, olen);
2735 goto done;
2739 done:
2740 switch (rfc.mode) {
2741 case L2CAP_MODE_ERTM:
2742 pi->remote_tx_win = rfc.txwin_size;
2743 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2744 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2745 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2746 break;
2747 case L2CAP_MODE_STREAMING:
2748 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2752 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2754 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2756 if (rej->reason != 0x0000)
2757 return 0;
2759 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2760 cmd->ident == conn->info_ident) {
2761 del_timer(&conn->info_timer);
2763 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2764 conn->info_ident = 0;
2766 l2cap_conn_start(conn);
2769 return 0;
/* Handle an incoming L2CAP_CONN_REQ: find a listening socket for the
 * PSM, run security checks, allocate and register a child channel, and
 * always answer with an L2CAP_CONN_RSP (success, pending, or an error
 * code). Returns 0 in all cases.
 *
 * Locking: l2cap_get_sock_by_psm() returns @parent locked; it is
 * released at the "response" label. The channel list lock covers the
 * dcid-duplicate check through child-state setup.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* authentication failure */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept/reject explicitly. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still running. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange not finished yet. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature-mask information exchange. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
/* Handle an L2CAP_CONN_RSP for a connection we initiated. On success,
 * record the peer's channel id and immediately send our configure
 * request; on a pending result just mark the channel; on any error,
 * tear the channel down. Returns 0 in all cases.
 *
 * Locking: l2cap_get_chan_by_scid()/_by_ident() return @sk locked;
 * released before returning.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		/* Pending responses may carry scid 0: match by ident. */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming L2CAP_CONF_REQ. Options may arrive fragmented
 * across several requests (continuation flag 0x0001): fragments are
 * accumulated in conf_req/conf_len until the final one, then parsed and
 * answered. When both directions are configured, the channel goes to
 * BT_CONNECTED. Returns 0, or -ENOENT if the dcid matches no channel.
 *
 * Locking: l2cap_get_chan_by_scid() returns @sk locked; released at
 * "unlock".
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both sides configured: enable FCS unless both asked to
		 * drop it, then bring the channel up. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not sent our own config request yet: do so now. */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3025 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3027 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3028 u16 scid, flags, result;
3029 struct sock *sk;
3030 int len = cmd->len - sizeof(*rsp);
3032 scid = __le16_to_cpu(rsp->scid);
3033 flags = __le16_to_cpu(rsp->flags);
3034 result = __le16_to_cpu(rsp->result);
3036 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3037 scid, flags, result);
3039 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3040 if (!sk)
3041 return 0;
3043 switch (result) {
3044 case L2CAP_CONF_SUCCESS:
3045 l2cap_conf_rfc_get(sk, rsp->data, len);
3046 break;
3048 case L2CAP_CONF_UNACCEPT:
3049 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3050 char req[64];
3052 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3053 l2cap_send_disconn_req(conn, sk);
3054 goto done;
3057 /* throw out any old stored conf requests */
3058 result = L2CAP_CONF_SUCCESS;
3059 len = l2cap_parse_conf_rsp(sk, rsp->data,
3060 len, req, &result);
3061 if (len < 0) {
3062 l2cap_send_disconn_req(conn, sk);
3063 goto done;
3066 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3067 L2CAP_CONF_REQ, len, req);
3068 l2cap_pi(sk)->num_conf_req++;
3069 if (result != L2CAP_CONF_SUCCESS)
3070 goto done;
3071 break;
3074 default:
3075 sk->sk_state = BT_DISCONN;
3076 sk->sk_err = ECONNRESET;
3077 l2cap_sock_set_timer(sk, HZ * 5);
3078 l2cap_send_disconn_req(conn, sk);
3079 goto done;
3082 if (flags & 0x01)
3083 goto done;
3085 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3087 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3088 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3089 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3090 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3092 sk->sk_state = BT_CONNECTED;
3093 l2cap_pi(sk)->next_tx_seq = 0;
3094 l2cap_pi(sk)->expected_tx_seq = 0;
3095 __skb_queue_head_init(TX_QUEUE(sk));
3096 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3097 l2cap_ertm_init(sk);
3099 l2cap_chan_ready(sk);
3102 done:
3103 bh_unlock_sock(sk);
3104 return 0;
/* Handle an incoming L2CAP_DISCONN_REQ: acknowledge with a
 * L2CAP_DISCONN_RSP, flush all pending traffic and ERTM state, and
 * delete the channel (ECONNRESET delivered to the owner). Returns 0.
 *
 * Locking: l2cap_get_chan_by_scid() returns @sk locked; released before
 * l2cap_sock_kill().
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop retransmission state and stop all ERTM timers. */
		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle the peer's L2CAP_DISCONN_RSP to our disconnect request: flush
 * pending traffic and ERTM state, then delete the channel (no error,
 * this was a local, orderly disconnect). Returns 0.
 *
 * Locking: l2cap_get_chan_by_scid() returns @sk locked; released before
 * l2cap_sock_kill().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop retransmission state and stop all ERTM timers. */
		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP_INFO_REQ. Answers feature-mask and
 * fixed-channel queries; anything else gets NOTSUPP. Returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (enable_ertm)
			/* Advertise ERTM/streaming/FCS only when enabled. */
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming L2CAP_INFO_RSP. After the feature mask arrives,
 * optionally chase the fixed-channel list; once discovery is complete,
 * mark it done and start any channels waiting on it. Returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query them next. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Process an skb received on the L2CAP signalling channel. The payload
 * may carry several commands back-to-back; each is dispatched to its
 * handler, and any handler error is answered with L2CAP_COMMAND_REJ.
 * Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated command or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the request payload verbatim. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
/* Verify (and strip) the 2-byte trailing FCS of a received frame when
 * CRC16 is in use. The CRC covers the L2CAP header plus the control
 * field (hdr_size) and the payload. Returns 0 on match or when FCS is
 * disabled, -EINVAL on mismatch.
 *
 * Note: skb_trim() only shortens skb->len; the FCS bytes are still
 * readable at skb->data + skb->len immediately afterwards.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EINVAL;
	}
	return 0;
}
/* Answer a poll (P-bit) from the peer with the F-bit set: send an RNR
 * if we are locally busy, otherwise try to flush pending I-frames, and
 * fall back to an RR if nothing was sent.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Receiver busy: report RNR with the final bit. */
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
		__mod_retrans_timer();

	spin_lock_bh(&pi->send_lock);
	l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		/* No I-frame carried the F-bit: acknowledge with an RR. */
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq. Returns 0 on insertion, -EINVAL if a frame with the
 * same tx_seq is already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		/* First queued frame with a larger tx_seq: insert before it. */
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
/* ERTM receive-side SAR reassembly. Handles the four SAR cases of an
 * I-frame: unsegmented SDUs go straight to the socket; START/CONTINUE/
 * END accumulate into pi->sdu, which is delivered (as a clone) on END.
 *
 * On transient failure (clone or queue failure at END) SAR_RETRY is set
 * and a negative errno is returned so the caller can retry the frame
 * later without re-copying its payload. Protocol violations fall
 * through drop:/disconnect: — note drop intentionally continues into
 * disconnect. Consumes @skb except on the retryable error paths.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;	/* unsegmented inside an open SDU */

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;	/* queue full: fall out, caller handles busy */

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;	/* nested START */

		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;	/* segments exceed declared length */

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry the payload was already copied: skip checks. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* fall through: SAR violations tear the channel down */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk);
	kfree_skb(skb);
	return 0;
}
/* Workqueue handler that drains the local-busy queue. Retries queued
 * frames (with a short sleep between rounds, up to
 * L2CAP_LOCAL_BUSY_TRIES) until the busy queue empties or the channel
 * gives up and disconnects. On success, if we had advertised RNR, poll
 * the peer with an RR + P-bit and arm the monitor timer. Always clears
 * the LOCAL_BUSY/RNR_SENT state on exit.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	u16 control;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk);
			goto done;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto done;
		}

		/* Sleep with the socket released so receive can progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			goto done;

		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
			if (err < 0) {
				/* Still busy: requeue and sleep again. */
				skb_queue_head(BUSY_QUEUE(sk), skb);
				break;
			}

			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		}

		if (!skb)
			break;	/* queue fully drained */
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it to resume transmission. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
/* Push a received in-sequence I-frame up the stack. If the channel is
 * already locally busy, just queue the frame. If reassembly reports a
 * failure, enter the local-busy state: queue the frame, send RNR, and
 * schedule the busy worker to retry. Returns the reassembly result, or
 * -EBUSY when already busy.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return -EBUSY;
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3643 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3645 struct l2cap_pinfo *pi = l2cap_pi(sk);
3646 struct sk_buff *_skb;
3647 int err = -EINVAL;
3650 * TODO: We have to notify the userland if some data is lost with the
3651 * Streaming Mode.
3654 switch (control & L2CAP_CTRL_SAR) {
3655 case L2CAP_SDU_UNSEGMENTED:
3656 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3657 kfree_skb(pi->sdu);
3658 break;
3661 err = sock_queue_rcv_skb(sk, skb);
3662 if (!err)
3663 return 0;
3665 break;
3667 case L2CAP_SDU_START:
3668 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3669 kfree_skb(pi->sdu);
3670 break;
3673 pi->sdu_len = get_unaligned_le16(skb->data);
3674 skb_pull(skb, 2);
3676 if (pi->sdu_len > pi->imtu) {
3677 err = -EMSGSIZE;
3678 break;
3681 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3682 if (!pi->sdu) {
3683 err = -ENOMEM;
3684 break;
3687 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3689 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3690 pi->partial_sdu_len = skb->len;
3691 err = 0;
3692 break;
3694 case L2CAP_SDU_CONTINUE:
3695 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3696 break;
3698 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3700 pi->partial_sdu_len += skb->len;
3701 if (pi->partial_sdu_len > pi->sdu_len)
3702 kfree_skb(pi->sdu);
3703 else
3704 err = 0;
3706 break;
3708 case L2CAP_SDU_END:
3709 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3710 break;
3712 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3714 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3715 pi->partial_sdu_len += skb->len;
3717 if (pi->partial_sdu_len > pi->imtu)
3718 goto drop;
3720 if (pi->partial_sdu_len == pi->sdu_len) {
3721 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3722 err = sock_queue_rcv_skb(sk, _skb);
3723 if (err < 0)
3724 kfree_skb(_skb);
3726 err = 0;
3728 drop:
3729 kfree_skb(pi->sdu);
3730 break;
3733 kfree_skb(skb);
3734 return err;
/* After a selectively-rejected frame arrives, deliver the consecutive
 * run of buffered out-of-order frames starting at @tx_seq from the SREJ
 * queue, advancing buffer_seq_srej for each one delivered.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Queue is tx_seq-sorted: stop at the first gap. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
/* The peer answered one of our SREJs with a duplicate: walk the pending
 * SREJ list, re-sending an SREJ S-frame for every entry before @tx_seq
 * (rotating each to the tail), and remove the entry for @tx_seq itself.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* This sequence has now been satisfied. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
/* A frame beyond the expected sequence arrived: send one SREJ S-frame
 * for every missing sequence number between expected_tx_seq and
 * @tx_seq, recording each in the SREJ list, then step expected_tx_seq
 * past @tx_seq.
 */
static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *new;
	u16 control;

	while (tx_seq != pi->expected_tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);

		/* NOTE(review): kzalloc(GFP_ATOMIC) is not checked — a failed
		 * allocation oopses on new->tx_seq below. A proper fix needs
		 * an error return propagated to the callers. */
		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		new->tx_seq = pi->expected_tx_seq;
		pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		list_add_tail(&new->list, SREJ_LIST(sk));
	}
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
}
3794 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3796 struct l2cap_pinfo *pi = l2cap_pi(sk);
3797 u8 tx_seq = __get_txseq(rx_control);
3798 u8 req_seq = __get_reqseq(rx_control);
3799 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3800 int tx_seq_offset, expected_tx_seq_offset;
3801 int num_to_ack = (pi->tx_win/6) + 1;
3802 int err = 0;
3804 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3806 if (L2CAP_CTRL_FINAL & rx_control &&
3807 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3808 del_timer(&pi->monitor_timer);
3809 if (pi->unacked_frames > 0)
3810 __mod_retrans_timer();
3811 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3814 pi->expected_ack_seq = req_seq;
3815 l2cap_drop_acked_frames(sk);
3817 if (tx_seq == pi->expected_tx_seq)
3818 goto expected;
3820 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3821 if (tx_seq_offset < 0)
3822 tx_seq_offset += 64;
3824 /* invalid tx_seq */
3825 if (tx_seq_offset >= pi->tx_win) {
3826 l2cap_send_disconn_req(pi->conn, sk);
3827 goto drop;
3830 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3831 goto drop;
3833 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3834 struct srej_list *first;
3836 first = list_first_entry(SREJ_LIST(sk),
3837 struct srej_list, list);
3838 if (tx_seq == first->tx_seq) {
3839 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3840 l2cap_check_srej_gap(sk, tx_seq);
3842 list_del(&first->list);
3843 kfree(first);
3845 if (list_empty(SREJ_LIST(sk))) {
3846 pi->buffer_seq = pi->buffer_seq_srej;
3847 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3848 l2cap_send_ack(pi);
3850 } else {
3851 struct srej_list *l;
3853 /* duplicated tx_seq */
3854 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3855 goto drop;
3857 list_for_each_entry(l, SREJ_LIST(sk), list) {
3858 if (l->tx_seq == tx_seq) {
3859 l2cap_resend_srejframe(sk, tx_seq);
3860 return 0;
3863 l2cap_send_srejframe(sk, tx_seq);
3865 } else {
3866 expected_tx_seq_offset =
3867 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3868 if (expected_tx_seq_offset < 0)
3869 expected_tx_seq_offset += 64;
3871 /* duplicated tx_seq */
3872 if (tx_seq_offset < expected_tx_seq_offset)
3873 goto drop;
3875 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3877 INIT_LIST_HEAD(SREJ_LIST(sk));
3878 pi->buffer_seq_srej = pi->buffer_seq;
3880 __skb_queue_head_init(SREJ_QUEUE(sk));
3881 __skb_queue_head_init(BUSY_QUEUE(sk));
3882 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3884 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3886 l2cap_send_srejframe(sk, tx_seq);
3888 return 0;
3890 expected:
3891 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3893 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3894 bt_cb(skb)->tx_seq = tx_seq;
3895 bt_cb(skb)->sar = sar;
3896 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3897 return 0;
3900 if (rx_control & L2CAP_CTRL_FINAL) {
3901 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3902 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3903 else
3904 l2cap_retransmit_frames(sk);
3907 err = l2cap_push_rx_skb(sk, skb, rx_control);
3908 if (err < 0)
3909 return 0;
3911 __mod_ack_timer();
3913 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3914 if (pi->num_acked == num_to_ack - 1)
3915 l2cap_send_ack(pi);
3917 return 0;
3919 drop:
3920 kfree_skb(skb);
3921 return 0;
/* Handle a Receiver-Ready S-frame: acknowledge frames up to reqseq,
 * then react depending on poll/final bits — answer a poll, process a
 * final bit (ending a WAIT_F exchange), or simply resume transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* Re-assert outstanding SREJs with the F-bit. */
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			spin_lock_bh(&pi->send_lock);
			l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}
	}
}
/* Handle a received REJ S-frame: the peer asks for retransmission of
 * all I-frames starting at ReqSeq. */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames before ReqSeq are implicitly acknowledged. */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit set: skip the retransmit if this REJ was already
		 * serviced (REJ_ACT), and just clear the flag. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* While a P-bit answer is pending (WAIT_F), remember this
		 * REJ has been acted on so the F-bit path above won't
		 * retransmit the same frames twice. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a received SREJ S-frame: the peer selectively asks for
 * retransmission of the single I-frame with TxSeq == ReqSeq. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* P-bit: ack everything before the requested frame, resend
		 * it, then continue normal transmission. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);
		l2cap_retransmit_one_frame(sk, tx_seq);

		spin_lock_bh(&pi->send_lock);
		l2cap_ertm_send(sk);
		spin_unlock_bh(&pi->send_lock);

		/* Remember which SREJ we answered so the matching F-bit
		 * response can be recognized later. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: retransmit only if this SREJ was not the one we
		 * already serviced under WAIT_F. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a received RNR (Receiver Not Ready) S-frame: the peer signals
 * it cannot accept more I-frames for now. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	/* ReqSeq still acknowledges previously sent frames. */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No SREJ recovery in progress: stop retransmitting and,
		 * if polled, answer with an F-bit. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
/* Dispatch a received supervisory (S-) frame to the RR/REJ/SREJ/RNR
 * handler and free the skb (S-frames carry no payload to queue). */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* An F-bit while we are waiting for one (WAIT_F) ends the
	 * monitor-timer phase; fall back to the retransmission timer if
	 * frames are still unacknowledged. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
/* Demultiplex an inbound data frame to the channel identified by CID
 * and process it according to the channel mode (basic, ERTM,
 * streaming). Consumes the skb on all paths.
 *
 * NOTE(review): l2cap_get_chan_by_scid appears to return the socket
 * locked — the single bh_unlock_sock at 'done' relies on that; confirm
 * before restructuring.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq, req_seq;
	int next_tx_seq_offset, req_seq_offset;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* Subtract the SAR length field and the FCS trailer to get
		 * the payload size.
		 * NOTE(review): len is u16 — a frame shorter than these
		 * headers underflows to a huge value; it is then caught by
		 * the mps check below, but via disconnect rather than a
		 * plain drop. Worth confirming against later upstream
		 * fixes. */
		if (__is_sar_start(control) && __is_iframe(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk);
			goto drop;
		}

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Validate ReqSeq: it must acknowledge frames inside the
		 * window [expected_ack_seq, next_tx_seq] (mod-64). */
		req_seq = __get_reqseq(control);
		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
		if (req_seq_offset < 0)
			req_seq_offset += 64;

		next_tx_seq_offset =
			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* check for invalid req-seq */
		if (req_seq_offset > next_tx_seq_offset) {
			l2cap_send_disconn_req(pi->conn, sk);
			goto drop;
		}

		if (__is_iframe(control)) {
			if (len < 4) {
				l2cap_send_disconn_req(pi->conn, sk);
				goto drop;
			}

			l2cap_data_channel_iframe(sk, control, skb);
		} else {
			/* S-frames must have no payload. */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk);
				goto drop;
			}

			l2cap_data_channel_sframe(sk, control, skb);
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode: oversized, undersized, or supervisory
		 * frames are simply dropped — no recovery. */
		if (len > pi->mps || len < 4 || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Lost frames are skipped, not recovered. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless-channel payload to the socket bound to the
 * given PSM, or drop it. Consumes the skb on all paths.
 *
 * NOTE(review): sk appears to be returned locked by
 * l2cap_get_sock_by_psm — the bh_unlock_sock below relies on that;
 * confirm before restructuring. */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* Respect the socket's incoming MTU. */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Route one complete L2CAP frame by CID: signaling, connectionless, or
 * a data channel. Consumes the skb. */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 2-byte PSM prefix. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
4278 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an ACL connection request arrived. Scan the listening
 * L2CAP sockets and return the link-mode flags (accept/master) —
 * exact-address matches take precedence over BDADDR_ANY listeners. */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Bound to this adapter's own address. */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener. */
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
4313 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4315 struct l2cap_conn *conn;
4317 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4319 if (hcon->type != ACL_LINK)
4320 return 0;
4322 if (!status) {
4323 conn = l2cap_conn_add(hcon, status);
4324 if (conn)
4325 l2cap_conn_ready(conn);
4326 } else
4327 l2cap_conn_del(hcon, bt_err(status));
4329 return 0;
4332 static int l2cap_disconn_ind(struct hci_conn *hcon)
4334 struct l2cap_conn *conn = hcon->l2cap_data;
4336 BT_DBG("hcon %p", hcon);
4338 if (hcon->type != ACL_LINK || !conn)
4339 return 0x13;
4341 return conn->disc_reason;
4344 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4346 BT_DBG("hcon %p reason %d", hcon, reason);
4348 if (hcon->type != ACL_LINK)
4349 return 0;
4351 l2cap_conn_del(hcon, bt_err(reason));
4353 return 0;
4356 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4358 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4359 return;
4361 if (encrypt == 0x00) {
4362 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4363 l2cap_sock_clear_timer(sk);
4364 l2cap_sock_set_timer(sk, HZ * 5);
4365 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4366 __l2cap_sock_close(sk, ECONNREFUSED);
4367 } else {
4368 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4369 l2cap_sock_clear_timer(sk);
/* HCI callback: the security (authentication/encryption) procedure for
 * an ACL link finished with the given status. Walk every channel on
 * the connection and advance its state machine accordingly. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel;
		 * nothing more to do here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established/configuring channels only need to react to
		 * the encryption change. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel waiting for security: now send
			 * the actual L2CAP Connection Request, or schedule
			 * a quick teardown on failure. */
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel held pending security: answer the
			 * peer's Connection Request with success or a
			 * security block. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: one ACL packet arrived. Reassemble fragmented L2CAP
 * frames: an ACL_START fragment carries the L2CAP header (and thus the
 * total length); continuation fragments are appended to conn->rx_skb
 * until rx_len reaches zero, then the complete frame is dispatched.
 * Consumes skb on all paths (complete single-fragment frames are
 * handed to l2cap_recv_frame, everything else falls through to drop
 * after copying). */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start while reassembly is pending means we lost
		 * fragments: discard the partial frame and carry on with
		 * the new one. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 2-byte length field of the header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Fragment data (if any) has already been copied into rx_skb;
	 * the original skb is always released here. */
	kfree_skb(skb);
	return 0;
}
/* seq_file show routine: dump one line per L2CAP socket
 * (src dst state psm scid dcid imtu omtu sec_level). */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
/* debugfs open hook: bind the show routine through single_open(). */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs entry, created in l2cap_init(). */
static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH / L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = l2cap_sock_release,
	.bind = l2cap_sock_bind,
	.connect = l2cap_sock_connect,
	.listen = l2cap_sock_listen,
	.accept = l2cap_sock_accept,
	.getname = l2cap_sock_getname,
	.sendmsg = l2cap_sock_sendmsg,
	.recvmsg = l2cap_sock_recvmsg,
	.poll = bt_sock_poll,
	.ioctl = bt_sock_ioctl,
	.mmap = sock_no_mmap,
	.socketpair = sock_no_socketpair,
	.shutdown = l2cap_sock_shutdown,
	.setsockopt = l2cap_sock_setsockopt,
	.getsockopt = l2cap_sock_getsockopt
};
/* Registered with the Bluetooth socket layer to create L2CAP sockets. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = l2cap_sock_create,
};
/* HCI protocol hooks: how the HCI core delivers link events and ACL
 * data into L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4609 static int __init l2cap_init(void)
4611 int err;
4613 err = proto_register(&l2cap_proto, 0);
4614 if (err < 0)
4615 return err;
4617 _busy_wq = create_singlethread_workqueue("l2cap");
4618 if (!_busy_wq)
4619 goto error;
4621 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4622 if (err < 0) {
4623 BT_ERR("L2CAP socket registration failed");
4624 goto error;
4627 err = hci_register_proto(&l2cap_hci_proto);
4628 if (err < 0) {
4629 BT_ERR("L2CAP protocol registration failed");
4630 bt_sock_unregister(BTPROTO_L2CAP);
4631 goto error;
4634 if (bt_debugfs) {
4635 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4636 bt_debugfs, NULL, &l2cap_debugfs_fops);
4637 if (!l2cap_debugfs)
4638 BT_ERR("Failed to create L2CAP debug file");
4641 BT_INFO("L2CAP ver %s", VERSION);
4642 BT_INFO("L2CAP socket layer initialized");
4644 return 0;
4646 error:
4647 proto_unregister(&l2cap_proto);
4648 return err;
/* Module exit: tear down in rough reverse order of l2cap_init().
 *
 * NOTE(review): the workqueue is destroyed before the HCI protocol is
 * unregistered, so the rx path could in principle still queue busy
 * work at that point — confirm against later upstream ordering. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
/* Exported no-op: referencing this symbol from another module pulls in
 * l2cap.ko via the module loader. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

/* ERTM support is opt-in via this module parameter. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");