ASoC: Ensure we delay long enough for WM8994 FLL to lock
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blobcf3c4073a8a655d6e9d8936cb61d30254224962f
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
60 #else
61 static int enable_ertm = 0;
62 #endif
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
71 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, protected by its own rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};
77 static void l2cap_busy_work(struct work_struct *work);
79 static void __l2cap_sock_close(struct sock *sk, int reason);
80 static void l2cap_sock_close(struct sock *sk);
81 static void l2cap_sock_kill(struct sock *sk);
83 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
84 u8 code, u8 ident, u16 dlen, void *data);
86 /* ---- L2CAP timers ---- */
/* ---- L2CAP timers ---- */
/* Socket state timer expiry: tear the channel down with a state-dependent
 * error code.  Runs in timer (softirq) context, hence bh_lock_sock(). */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill must be done on the unlocked socket */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
/* (Re)arm the socket state timer to fire `timeout` jiffies from now. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
/* Cancel the socket state timer (drops the timer's socket reference). */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
124 /* ---- L2CAP channels ---- */
125 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
127 struct sock *s;
128 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
129 if (l2cap_pi(s)->dcid == cid)
130 break;
132 return s;
135 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
137 struct sock *s;
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->scid == cid)
140 break;
142 return s;
145 /* Find channel with given SCID.
146 * Returns locked socket */
147 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
149 struct sock *s;
150 read_lock(&l->lock);
151 s = __l2cap_get_chan_by_scid(l, cid);
152 if (s)
153 bh_lock_sock(s);
154 read_unlock(&l->lock);
155 return s;
158 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 struct sock *s;
161 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
162 if (l2cap_pi(s)->ident == ident)
163 break;
165 return s;
168 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
170 struct sock *s;
171 read_lock(&l->lock);
172 s = __l2cap_get_chan_by_ident(l, ident);
173 if (s)
174 bh_lock_sock(s);
175 read_unlock(&l->lock);
176 return s;
179 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
181 u16 cid = L2CAP_CID_DYN_START;
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(l, cid))
185 return cid;
188 return 0;
191 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
193 sock_hold(sk);
195 if (l->head)
196 l2cap_pi(l->head)->prev_c = sk;
198 l2cap_pi(sk)->next_c = l->head;
199 l2cap_pi(sk)->prev_c = NULL;
200 l->head = sk;
203 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
205 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
207 write_lock_bh(&l->lock);
208 if (sk == l->head)
209 l->head = next;
211 if (next)
212 l2cap_pi(next)->prev_c = prev;
213 if (prev)
214 l2cap_pi(prev)->next_c = next;
215 write_unlock_bh(&l->lock);
217 __sock_put(sk);
/* Attach channel sk to connection conn.  parent is the listening socket
 * for incoming channels (NULL for outgoing ones).  CIDs are assigned
 * according to the socket type.  Caller holds the chan_list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason: 0x13 = remote user terminated connection */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Incoming channel: detach from the accept queue and wake
		 * the listener so it notices the change. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
/* Service level security */
/* Map the socket's requested security level (and the special SDP PSM
 * 0x0001, which never bonds) to an HCI authentication type and ask the
 * HCI layer to enforce it on the underlying ACL link. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		/* SDP traffic gets its own (lowest) security level */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
315 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
317 u8 id;
319 /* Get next available identificator.
320 * 1 - 128 are used by kernel.
321 * 129 - 199 are reserved.
322 * 200 - 254 are used by utilities like l2ping, etc.
325 spin_lock_bh(&conn->lock);
327 if (++conn->tx_ident > 128)
328 conn->tx_ident = 1;
330 id = conn->tx_ident;
332 spin_unlock_bh(&conn->lock);
334 return id;
337 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
339 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
341 BT_DBG("code 0x%2.2x", code);
343 if (!skb)
344 return;
346 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) with the given
 * control field.  Consumes any pending F-bit/P-bit request flags from
 * conn_state and appends a CRC-16 FCS when the channel uses one. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* basic header + control */

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;	/* mark as S-frame */

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
391 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
393 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
394 control |= L2CAP_SUPER_RCV_NOT_READY;
395 pi->conn_state |= L2CAP_CONN_RNR_SENT;
396 } else
397 control |= L2CAP_SUPER_RCV_READY;
399 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
401 l2cap_send_sframe(pi, control);
404 static inline int __l2cap_no_conn_pending(struct sock *sk)
406 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: send a connect request if the peer's
 * feature mask is already known, otherwise start feature discovery first
 * (the connect request is then issued when the info response arrives). */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;		/* discovery still in flight */

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Give up on discovery if no response within the timeout */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
443 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
445 struct l2cap_disconn_req req;
447 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 l2cap_send_cmd(conn, l2cap_get_ident(conn),
450 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its setup state
 * machine: issue connect requests for BT_CONNECT channels and connect
 * responses for BT_CONNECT2 (incoming, awaiting security) channels. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* only connection-oriented channels do setup */
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk) &&
					__l2cap_no_conn_pending(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					/* userspace must accept first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);
				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* authentication still pending */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
/* The underlying ACL link came up: mark raw/connectionless channels
 * connected immediately and start L2CAP-level setup for the
 * connection-oriented ones. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
543 /* Notify sockets that we cannot guaranty reliability anymore */
544 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
546 struct l2cap_chan_list *l = &conn->chan_list;
547 struct sock *sk;
549 BT_DBG("conn %p", conn);
551 read_lock(&l->lock);
553 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
554 if (l2cap_pi(sk)->force_reliable)
555 sk->sk_err = err;
558 read_unlock(&l->lock);
561 static void l2cap_info_timeout(unsigned long arg)
563 struct l2cap_conn *conn = (void *) arg;
565 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
566 conn->info_ident = 0;
568 l2cap_conn_start(conn);
/* Create (or return the already-attached) L2CAP connection object for an
 * ACL link.  Returns NULL on allocation failure, or the existing object
 * untouched when one is present or status is non-zero. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason: 0x13 = remote user terminated connection */
	conn->disc_reason = 0x13;

	return conn;
}
/* Tear down an L2CAP connection: free any partial reassembly buffer,
 * kill every channel, cancel the info timer and free the object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* kfree_skb() is a no-op on NULL */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
631 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
633 struct l2cap_chan_list *l = &conn->chan_list;
634 write_lock_bh(&l->lock);
635 __l2cap_chan_add(conn, sk, parent);
636 write_unlock_bh(&l->lock);
639 /* ---- Socket interface ---- */
640 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
642 struct sock *sk;
643 struct hlist_node *node;
644 sk_for_each(sk, node, &l2cap_sk_list.head)
645 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
646 goto found;
647 sk = NULL;
648 found:
649 return sk;
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise a
 * socket bound to BDADDR_ANY serves as fallback.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	/* node is non-NULL only when the loop broke on an exact match */
	return node ? sk : sk1;
}
677 /* Find socket with given address (psm, src).
678 * Returns locked socket */
679 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
681 struct sock *s;
682 read_lock(&l2cap_sk_list.lock);
683 s = __l2cap_get_sock_by_psm(state, psm, src);
684 if (s)
685 bh_lock_sock(s);
686 read_unlock(&l2cap_sk_list.lock);
687 return s;
690 static void l2cap_sock_destruct(struct sock *sk)
692 BT_DBG("sk %p", sk);
694 skb_queue_purge(&sk->sk_receive_queue);
695 skb_queue_purge(&sk->sk_write_queue);
698 static void l2cap_sock_cleanup_listen(struct sock *parent)
700 struct sock *sk;
702 BT_DBG("parent %p", parent);
704 /* Close not yet accepted channels */
705 while ((sk = bt_accept_dequeue(parent, NULL)))
706 l2cap_sock_close(sk);
708 parent->sk_state = BT_CLOSED;
709 sock_set_flag(parent, SOCK_ZAPPED);
712 /* Kill socket (only if zapped and orphan)
713 * Must be called on unlocked socket.
715 static void l2cap_sock_kill(struct sock *sk)
717 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
718 return;
720 BT_DBG("sk %p state %d", sk, sk->sk_state);
722 /* Kill poor orphan */
723 bt_sock_unlink(&l2cap_sk_list, sk);
724 sock_set_flag(sk, SOCK_DEAD);
725 sock_put(sk);
/* State-dependent close.  Must be called on the locked socket; the
 * caller is responsible for killing the (possibly zapped) socket
 * afterwards on the unlocked socket. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Orderly shutdown: wait for the disconnect
			 * response, bounded by the send timeout. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the half-open incoming connection */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
783 /* Must be called on unlocked socket. */
784 static void l2cap_sock_close(struct sock *sk)
786 l2cap_sock_clear_timer(sk);
787 lock_sock(sk);
788 __l2cap_sock_close(sk, ECONNRESET);
789 release_sock(sk);
790 l2cap_sock_kill(sk);
/* Initialize a fresh L2CAP socket: inherit settings from parent (the
 * listening socket) when present, otherwise apply module defaults. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* SOCK_STREAM defaults to ERTM when the module allows it */
		if (enable_ertm && sk->sk_type == SOCK_STREAM)
			pi->mode = L2CAP_MODE_ERTM;
		else
			pi->mode = L2CAP_MODE_BASIC;
		pi->max_tx = max_transmit;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = tx_window;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
/* Protocol descriptor: sizes sk_alloc()'d sockets to hold l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
/* Allocate and minimally initialize an L2CAP socket: sets destructor,
 * default send timeout and state timer, then links it into the global
 * socket list.  Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
867 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
868 int kern)
870 struct sock *sk;
872 BT_DBG("sock %p", sock);
874 sock->state = SS_UNCONNECTED;
876 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
877 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
878 return -ESOCKTNOSUPPORT;
880 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
881 return -EPERM;
883 sock->ops = &l2cap_sock_ops;
885 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
886 if (!sk)
887 return -ENOMEM;
889 l2cap_sock_init(sk, NULL);
890 return 0;
/* bind(2): record the source address and PSM.  Reserved PSMs (< 0x1001)
 * require CAP_NET_BIND_SERVICE; duplicate (psm, bdaddr) bindings are
 * rejected with -EADDRINUSE. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs stay zero-padded */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;		/* binding to a CID not supported */

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) need no extra security */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
/* Resolve a route to the destination, create or reuse the ACL link with
 * the authentication type implied by the socket's security settings,
 * attach the channel to the connection and — if the link is already up —
 * start channel setup.  Returns 0 on success or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		/* Raw sockets pair with dedicated bonding */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP never bonds */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram channels are usable immediately */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
/* connect(2): validate the destination and channel mode, start the
 * connection, then wait (honouring O_NONBLOCK and the send timeout) for
 * the socket to reach BT_CONNECTED. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;		/* connecting to a CID not supported */

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
/* listen(2): require a bound, connection-oriented socket in a supported
 * mode; if no PSM is bound yet, auto-assign an odd PSM from the dynamic
 * range 0x1001-0x10ff. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;		/* stays set if the range is full */

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid dynamic PSMs are odd, hence the += 2 stride */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
/* accept(2): block (honouring rcvtimeo / O_NONBLOCK) until an
 * established child channel can be dequeued from the listening socket. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so the softirq side can
		 * enqueue new connections. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1230 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1232 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1233 struct sock *sk = sock->sk;
1235 BT_DBG("sock %p, sk %p", sock, sk);
1237 addr->sa_family = AF_BLUETOOTH;
1238 *len = sizeof(struct sockaddr_l2);
1240 if (peer) {
1241 la->l2_psm = l2cap_pi(sk)->psm;
1242 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1243 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1244 } else {
1245 la->l2_psm = l2cap_pi(sk)->sport;
1246 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1247 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1250 return 0;
/* Sleep until every transmitted I-frame has been acknowledged, the
 * connection goes away, a socket error is raised, or a signal arrives.
 * Called with the socket locked; the lock is released around each
 * sleep interval (HZ/5 polling). */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo)
			timeo = HZ/5;	/* re-arm the poll interval */

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1284 static void l2cap_monitor_timeout(unsigned long arg)
1286 struct sock *sk = (void *) arg;
1288 bh_lock_sock(sk);
1289 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1290 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1291 bh_unlock_sock(sk);
1292 return;
1295 l2cap_pi(sk)->retry_count++;
1296 __mod_monitor_timer();
1298 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1299 bh_unlock_sock(sk);
1302 static void l2cap_retrans_timeout(unsigned long arg)
1304 struct sock *sk = (void *) arg;
1306 bh_lock_sock(sk);
1307 l2cap_pi(sk)->retry_count = 1;
1308 __mod_monitor_timer();
1310 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1312 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1313 bh_unlock_sock(sk);
/* Free queued I-frames the peer has acknowledged — everything ahead of
 * expected_ack_seq — and stop the retransmission timer once the unacked
 * window is empty. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* Stop at the first frame not yet acknowledged */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
/* Hand one fully built L2CAP PDU down to the HCI layer for transmission
 * on this channel's ACL link.  Consumes the skb.
 */
static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	hci_send_acl(pi->conn->hcon, skb, 0);
}
/* Transmit every queued PDU in streaming mode: stamp TxSeq into the
 * control field of a clone, append the FCS if negotiated, send the
 * clone and free the original (streaming mode never retransmits, so
 * nothing is kept on the queue).  Always returns 0.
 */
static int l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = sk->sk_send_head)) {
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except its own two bytes. */
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(sk, tx_skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;	/* modulo-64 sequence space */

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);
	}
	return 0;
}
/* Retransmit the single queued I-frame whose TxSeq equals tx_seq
 * (answering an SREJ).  Disconnects if the frame already reached the
 * remote_max_tx retry limit.  Silently returns if the frame is not on
 * the transmit queue.
 */
static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(TX_QUEUE(sk));
	if (!skb)
		return;

	/* Walk the queue looking for the requested sequence number. */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			return;

	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));

	if (pi->remote_max_tx &&
			bt_cb(skb)->retries == pi->remote_max_tx) {
		l2cap_send_disconn_req(pi->conn, sk);
		return;
	}

	/* Send a clone so the original stays queued for further
	 * retransmissions until it is acked. */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (pi->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(sk, tx_skb);
}
1417 static int l2cap_ertm_send(struct sock *sk)
1419 struct sk_buff *skb, *tx_skb;
1420 struct l2cap_pinfo *pi = l2cap_pi(sk);
1421 u16 control, fcs;
1422 int nsent = 0;
1424 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1425 return 0;
1427 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1428 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1430 if (pi->remote_max_tx &&
1431 bt_cb(skb)->retries == pi->remote_max_tx) {
1432 l2cap_send_disconn_req(pi->conn, sk);
1433 break;
1436 tx_skb = skb_clone(skb, GFP_ATOMIC);
1438 bt_cb(skb)->retries++;
1440 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1441 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1442 control |= L2CAP_CTRL_FINAL;
1443 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1445 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1446 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1447 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1450 if (pi->fcs == L2CAP_FCS_CRC16) {
1451 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1452 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1455 l2cap_do_send(sk, tx_skb);
1457 __mod_retrans_timer();
1459 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1460 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1462 pi->unacked_frames++;
1463 pi->frames_sent++;
1465 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1466 sk->sk_send_head = NULL;
1467 else
1468 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1470 nsent++;
1473 return nsent;
/* Retransmit every unacked frame (answering a REJ): rewind the send
 * pointer and next_tx_seq to the last acknowledged position, then push
 * the whole window out again via l2cap_ertm_send().  Returns the number
 * of frames sent.
 */
static int l2cap_retransmit_frames(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int ret;

	spin_lock_bh(&pi->send_lock);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = TX_QUEUE(sk)->next;

	/* Restart numbering from the last frame the peer acked. */
	pi->next_tx_seq = pi->expected_ack_seq;
	ret = l2cap_ertm_send(sk);

	spin_unlock_bh(&pi->send_lock);

	return ret;
}
/* Acknowledge received I-frames.  If we are locally busy, send RNR.
 * Otherwise try to piggy-back the ack on pending I-frames; only when
 * nothing was sent do we emit an explicit RR S-frame.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;	/* pinfo is embedded in the sock */
	u16 control = 0;
	int nframes;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	spin_lock_bh(&pi->send_lock);
	nframes = l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	/* I-frames went out and carried the ReqSeq; no S-frame needed. */
	if (nframes > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
/* Send an SREJ S-frame (with the F bit set) for the last entry on the
 * SREJ list, i.e. the newest missing frame we are asking the remote to
 * retransmit.
 */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
/* Copy a user iovec of total length @len into @skb: the first @count
 * bytes go into skb's linear area (headers already written by the
 * caller), the remainder is split into frag_list skbs of at most
 * conn->mtu each.  Returns the number of bytes copied or -EFAULT; on
 * failure the caller frees @skb, which also frees any fragments.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return -EFAULT;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header + 2-byte PSM
 * followed by the user payload.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Linear part is capped by the ACL MTU; the rest goes into
	 * continuation fragments inside l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 16-bit SDU length (only on SDU-start frames, i.e.
 * when @sdulen != 0) and an FCS placeholder when CRC16 is negotiated
 * (the real FCS is filled in at transmit time).  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SDU length field on the start frame */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve space for the FCS; computed when the frame is sent. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), CONTINUE frames and a final END
 * frame, building them on a private queue first so a mid-stream
 * allocation failure leaves TX_QUEUE(sk) untouched.  On success the
 * whole segment list is spliced onto the transmit queue.  Returns the
 * number of payload bytes queued or a negative errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	spin_lock_bh(&pi->send_lock);
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;
	spin_unlock_bh(&pi->send_lock);

	return size;
}
/* sendmsg() entry point for L2CAP sockets.  Dispatches on socket type
 * and channel mode: connectionless (SOCK_DGRAM) and basic-mode frames
 * are sent immediately; ERTM/streaming SDUs are (optionally segmented
 * and) queued, then pushed out by the mode-specific sender.  Returns
 * the number of bytes accepted or a negative errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EINVAL;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			/* send_lock serializes sk_send_head against the
			 * ERTM timer/ack paths; streaming has no such
			 * concurrent senders. */
			if (pi->mode == L2CAP_MODE_ERTM)
				spin_lock_bh(&pi->send_lock);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

			if (pi->mode == L2CAP_MODE_ERTM)
				spin_unlock_bh(&pi->send_lock);
		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			err = l2cap_streaming_send(sk);
		} else {
			spin_lock_bh(&pi->send_lock);
			err = l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}

		/* Queued successfully: report the full SDU as accepted. */
		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EINVAL;
	}

done:
	release_sock(sk);
	return err;
}
/* recvmsg() entry point.  For a deferred-setup socket the first read
 * completes the connection (sends the pending CONN_RSP) and returns 0;
 * otherwise data reception is delegated to the generic BT helper.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Preload current values so a short copy_from_user leaves
		 * the untouched fields unchanged. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* ERTM/streaming only valid when the module enables them. */
		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map legacy link-mode bits onto security levels; the
		 * highest requested level wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* setsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;	/* default if user buffer is short */

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the socket is connected. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1986 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1988 struct sock *sk = sock->sk;
1989 struct l2cap_options opts;
1990 struct l2cap_conninfo cinfo;
1991 int len, err = 0;
1992 u32 opt;
1994 BT_DBG("sk %p", sk);
1996 if (get_user(len, optlen))
1997 return -EFAULT;
1999 lock_sock(sk);
2001 switch (optname) {
2002 case L2CAP_OPTIONS:
2003 opts.imtu = l2cap_pi(sk)->imtu;
2004 opts.omtu = l2cap_pi(sk)->omtu;
2005 opts.flush_to = l2cap_pi(sk)->flush_to;
2006 opts.mode = l2cap_pi(sk)->mode;
2007 opts.fcs = l2cap_pi(sk)->fcs;
2008 opts.max_tx = l2cap_pi(sk)->max_tx;
2009 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2011 len = min_t(unsigned int, len, sizeof(opts));
2012 if (copy_to_user(optval, (char *) &opts, len))
2013 err = -EFAULT;
2015 break;
2017 case L2CAP_LM:
2018 switch (l2cap_pi(sk)->sec_level) {
2019 case BT_SECURITY_LOW:
2020 opt = L2CAP_LM_AUTH;
2021 break;
2022 case BT_SECURITY_MEDIUM:
2023 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2024 break;
2025 case BT_SECURITY_HIGH:
2026 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2027 L2CAP_LM_SECURE;
2028 break;
2029 default:
2030 opt = 0;
2031 break;
2034 if (l2cap_pi(sk)->role_switch)
2035 opt |= L2CAP_LM_MASTER;
2037 if (l2cap_pi(sk)->force_reliable)
2038 opt |= L2CAP_LM_RELIABLE;
2040 if (put_user(opt, (u32 __user *) optval))
2041 err = -EFAULT;
2042 break;
2044 case L2CAP_CONNINFO:
2045 if (sk->sk_state != BT_CONNECTED &&
2046 !(sk->sk_state == BT_CONNECT2 &&
2047 bt_sk(sk)->defer_setup)) {
2048 err = -ENOTCONN;
2049 break;
2052 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2053 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2055 len = min_t(unsigned int, len, sizeof(cinfo));
2056 if (copy_to_user(optval, (char *) &cinfo, len))
2057 err = -EFAULT;
2059 break;
2061 default:
2062 err = -ENOPROTOOPT;
2063 break;
2066 release_sock(sk);
2067 return err;
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* shutdown() entry point.  For ERTM channels we first wait for all
 * outstanding frames to be acked, then close the channel; with
 * SO_LINGER set we additionally wait for BT_CLOSED.  Returns 0 or a
 * negative errno.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
2152 static int l2cap_sock_release(struct socket *sock)
2154 struct sock *sk = sock->sk;
2155 int err;
2157 BT_DBG("sock %p, sk %p", sock, sk);
2159 if (!sk)
2160 return 0;
2162 err = l2cap_sock_shutdown(sock, 2);
2164 sock_orphan(sk);
2165 l2cap_sock_kill(sk);
2166 return err;
/* Configuration finished: mark the channel usable and wake whoever is
 * waiting on it (a connect()er for outgoing channels, the listening
 * parent's accept() for incoming ones).
 */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Clone per receiver; skip a socket on allocation failure
		 * rather than failing the whole delivery. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Receive-queue overflow: drop the clone for this socket. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2219 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header + command header
 * followed by @dlen bytes of @data; payload beyond the ACL MTU is
 * carried in frag_list continuation skbs.  Returns the skb or NULL on
 * allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head also frees any fragments already chained. */
	kfree_skb(skb);
	return NULL;
}
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * 1/2/4-byte option values are returned by value (byte-swapped to CPU
 * order); larger options return a pointer to the raw data in *val.
 * Returns the total encoded length of the option consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Variable-length option: hand back a pointer to the data. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte values are stored little-endian; any other length treats
 * @val as a pointer to @len bytes of raw option data.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		/* val is a pointer to len bytes of raw option data. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Ack timer expiry: send a (possibly piggy-backed) acknowledgement for
 * frames received so far.  Runs in timer (softirq) context.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	bh_lock_sock(sk);
	l2cap_send_ack(pi);
	bh_unlock_sock(sk);
}
/* Reset all ERTM per-channel state and set up the retransmission,
 * monitor and ack timers plus the SREJ/busy queues.  Called once when
 * the channel enters ERTM operation.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));
	spin_lock_init(&l2cap_pi(sk)->send_lock);

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
}
2373 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2375 u32 local_feat_mask = l2cap_feat_mask;
2376 if (enable_ertm)
2377 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2379 switch (mode) {
2380 case L2CAP_MODE_ERTM:
2381 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2382 case L2CAP_MODE_STREAMING:
2383 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2384 default:
2385 return 0x00;
2389 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2391 switch (mode) {
2392 case L2CAP_MODE_STREAMING:
2393 case L2CAP_MODE_ERTM:
2394 if (l2cap_mode_supported(mode, remote_feat_mask))
2395 return mode;
2396 /* fall through */
2397 default:
2398 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into @data: on the first
 * request pick the channel mode (falling back per remote features),
 * then emit MTU/RFC/FCS options according to the chosen mode.  Returns
 * the number of bytes written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only on the very first config exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was set explicitly via sockopt: it is mandatory. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only the MTU option, and only when non-default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		/* Timeouts are set by the acceptor, not requested by us. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a full I-frame (+10 bytes of L2CAP/
		 * control/FCS overhead) fits in the ACL MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Window/retry/timeout fields are unused in streaming mode. */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2490 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2492 struct l2cap_pinfo *pi = l2cap_pi(sk);
2493 struct l2cap_conf_rsp *rsp = data;
2494 void *ptr = rsp->data;
2495 void *req = pi->conf_req;
2496 int len = pi->conf_len;
2497 int type, hint, olen;
2498 unsigned long val;
2499 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2500 u16 mtu = L2CAP_DEFAULT_MTU;
2501 u16 result = L2CAP_CONF_SUCCESS;
2503 BT_DBG("sk %p", sk);
2505 while (len >= L2CAP_CONF_OPT_SIZE) {
2506 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2508 hint = type & L2CAP_CONF_HINT;
2509 type &= L2CAP_CONF_MASK;
2511 switch (type) {
2512 case L2CAP_CONF_MTU:
2513 mtu = val;
2514 break;
2516 case L2CAP_CONF_FLUSH_TO:
2517 pi->flush_to = val;
2518 break;
2520 case L2CAP_CONF_QOS:
2521 break;
2523 case L2CAP_CONF_RFC:
2524 if (olen == sizeof(rfc))
2525 memcpy(&rfc, (void *) val, olen);
2526 break;
2528 case L2CAP_CONF_FCS:
2529 if (val == L2CAP_FCS_NONE)
2530 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2532 break;
2534 default:
2535 if (hint)
2536 break;
2538 result = L2CAP_CONF_UNKNOWN;
2539 *((u8 *) ptr++) = type;
2540 break;
2544 if (pi->num_conf_rsp || pi->num_conf_req)
2545 goto done;
2547 switch (pi->mode) {
2548 case L2CAP_MODE_STREAMING:
2549 case L2CAP_MODE_ERTM:
2550 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2551 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2552 return -ECONNREFUSED;
2553 break;
2554 default:
2555 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2556 break;
2559 done:
2560 if (pi->mode != rfc.mode) {
2561 result = L2CAP_CONF_UNACCEPT;
2562 rfc.mode = pi->mode;
2564 if (pi->num_conf_rsp == 1)
2565 return -ECONNREFUSED;
2567 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2568 sizeof(rfc), (unsigned long) &rfc);
2572 if (result == L2CAP_CONF_SUCCESS) {
2573 /* Configure output options and let the other side know
2574 * which ones we don't like. */
2576 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2577 result = L2CAP_CONF_UNACCEPT;
2578 else {
2579 pi->omtu = mtu;
2580 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2582 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2584 switch (rfc.mode) {
2585 case L2CAP_MODE_BASIC:
2586 pi->fcs = L2CAP_FCS_NONE;
2587 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2588 break;
2590 case L2CAP_MODE_ERTM:
2591 pi->remote_tx_win = rfc.txwin_size;
2592 pi->remote_max_tx = rfc.max_transmit;
2593 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2594 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2596 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2598 rfc.retrans_timeout =
2599 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2600 rfc.monitor_timeout =
2601 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2603 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2605 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2606 sizeof(rfc), (unsigned long) &rfc);
2608 break;
2610 case L2CAP_MODE_STREAMING:
2611 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2612 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2614 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2616 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2618 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2619 sizeof(rfc), (unsigned long) &rfc);
2621 break;
2623 default:
2624 result = L2CAP_CONF_UNACCEPT;
2626 memset(&rfc, 0, sizeof(rfc));
2627 rfc.mode = pi->mode;
2630 if (result == L2CAP_CONF_SUCCESS)
2631 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2633 rsp->scid = cpu_to_le16(pi->dcid);
2634 rsp->result = cpu_to_le16(result);
2635 rsp->flags = cpu_to_le16(0x0000);
2637 return ptr - data;
/*
 * Parse a peer's Configure Response that rejected some of our options
 * and build a fresh Configure Request (into @data) with the values the
 * peer proposed.  @len is the length of the option list in @rsp;
 * *@result carries the running negotiation result and may be set to
 * L2CAP_CONF_UNACCEPT.  Returns the length of the request built, or
 * -ECONNREFUSED if the peer tries to change a locally-pinned mode.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Never accept an outgoing MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->omtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->omtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A locally-pinned mode (STATE2_DEVICE) may not be
			 * renegotiated by the remote side. */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->mode = rfc.mode;
			pi->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	/* NOTE(review): if no RFC option was present in the response, rfc
	 * below is read uninitialized -- confirm callers guarantee one. */
	if (*result == L2CAP_CONF_SUCCESS) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2706 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2708 struct l2cap_conf_rsp *rsp = data;
2709 void *ptr = rsp->data;
2711 BT_DBG("sk %p", sk);
2713 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2714 rsp->result = cpu_to_le16(result);
2715 rsp->flags = cpu_to_le16(flags);
2717 return ptr - data;
/*
 * Extract the RFC option from a successful Configure Response and cache
 * the peer-granted ERTM/streaming parameters on the channel.  No-op for
 * basic mode channels.
 */
static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);

	if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
		return;

	/* Scan options until the RFC option is found */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

done:
	/* NOTE(review): rfc is read uninitialized when the response carried
	 * no RFC option -- verify this cannot happen for ERTM/streaming. */
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		pi->remote_tx_win = rfc.txwin_size;
		pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		pi->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		pi->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
2756 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2758 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2760 if (rej->reason != 0x0000)
2761 return 0;
2763 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2764 cmd->ident == conn->info_ident) {
2765 del_timer(&conn->info_timer);
2767 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2768 conn->info_ident = 0;
2770 l2cap_conn_start(conn);
2773 return 0;
/*
 * Handle an incoming Connection Request: find a listening socket for
 * the PSM, apply security/backlog checks, allocate and register a child
 * channel, and send a Connection Response (success, pending or error).
 * May also kick off the feature-mask Information exchange.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm.  NOTE(review): the
	 * unlock at 'response:' implies the lookup returns the parent
	 * locked -- confirm in l2cap_get_sock_by_psm. */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* auth failure on disconnect */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	/* Keep the ACL alive while the channel exists */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;	/* peer's source cid is our dest cid */

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept before we answer */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange still outstanding: answer "pending" */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* First pending-no-info answer triggers the feature-mask request */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
/*
 * Handle an incoming Connection Response for one of our outgoing
 * connection attempts.  On success move to BT_CONFIG and start option
 * negotiation; on pending just flag it; otherwise tear the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];	/* scratch space for the outgoing conf request */

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Before the peer assigns us a cid we can only match by ident.
	 * NOTE(review): bh_unlock_sock() below implies both lookups return
	 * the socket locked -- confirm in the helpers. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
/*
 * Handle an incoming Configure Request.  Fragmented requests (the
 * continuation flag set) are accumulated in pi->conf_req; once complete
 * the options are parsed, a response sent, and -- if both directions
 * are done -- the channel is moved to BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Too late to (re)configure a dying channel */
	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* FCS stays off only if the peer asked for none and we
		 * agreed; otherwise fall back to CRC16 */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Peer configured first: send our own request now */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3029 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3031 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3032 u16 scid, flags, result;
3033 struct sock *sk;
3034 int len = cmd->len - sizeof(*rsp);
3036 scid = __le16_to_cpu(rsp->scid);
3037 flags = __le16_to_cpu(rsp->flags);
3038 result = __le16_to_cpu(rsp->result);
3040 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3041 scid, flags, result);
3043 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3044 if (!sk)
3045 return 0;
3047 switch (result) {
3048 case L2CAP_CONF_SUCCESS:
3049 l2cap_conf_rfc_get(sk, rsp->data, len);
3050 break;
3052 case L2CAP_CONF_UNACCEPT:
3053 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3054 char req[64];
3056 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3057 l2cap_send_disconn_req(conn, sk);
3058 goto done;
3061 /* throw out any old stored conf requests */
3062 result = L2CAP_CONF_SUCCESS;
3063 len = l2cap_parse_conf_rsp(sk, rsp->data,
3064 len, req, &result);
3065 if (len < 0) {
3066 l2cap_send_disconn_req(conn, sk);
3067 goto done;
3070 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3071 L2CAP_CONF_REQ, len, req);
3072 l2cap_pi(sk)->num_conf_req++;
3073 if (result != L2CAP_CONF_SUCCESS)
3074 goto done;
3075 break;
3078 default:
3079 sk->sk_state = BT_DISCONN;
3080 sk->sk_err = ECONNRESET;
3081 l2cap_sock_set_timer(sk, HZ * 5);
3082 l2cap_send_disconn_req(conn, sk);
3083 goto done;
3086 if (flags & 0x01)
3087 goto done;
3089 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3091 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3092 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3093 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3094 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3096 sk->sk_state = BT_CONNECTED;
3097 l2cap_pi(sk)->next_tx_seq = 0;
3098 l2cap_pi(sk)->expected_tx_seq = 0;
3099 __skb_queue_head_init(TX_QUEUE(sk));
3100 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3101 l2cap_ertm_init(sk);
3103 l2cap_chan_ready(sk);
3106 done:
3107 bh_unlock_sock(sk);
3108 return 0;
/*
 * Handle an incoming Disconnect Request: acknowledge it, flush all
 * pending data and ERTM timers, and destroy the channel with
 * ECONNRESET reported to the owner.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Stop all retransmission machinery before teardown */
		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/*
 * Handle an incoming Disconnect Response to our own disconnect request:
 * flush queues/timers and finish tearing down the channel (no error
 * reported -- the disconnect was local and intentional).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Stop all retransmission machinery before teardown */
		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3182 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3184 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3185 u16 type;
3187 type = __le16_to_cpu(req->type);
3189 BT_DBG("type 0x%4.4x", type);
3191 if (type == L2CAP_IT_FEAT_MASK) {
3192 u8 buf[8];
3193 u32 feat_mask = l2cap_feat_mask;
3194 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3195 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3196 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3197 if (enable_ertm)
3198 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3199 | L2CAP_FEAT_FCS;
3200 put_unaligned_le32(feat_mask, rsp->data);
3201 l2cap_send_cmd(conn, cmd->ident,
3202 L2CAP_INFO_RSP, sizeof(buf), buf);
3203 } else if (type == L2CAP_IT_FIXED_CHAN) {
3204 u8 buf[12];
3205 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3206 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3207 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3208 memcpy(buf + 4, l2cap_fixed_chan, 8);
3209 l2cap_send_cmd(conn, cmd->ident,
3210 L2CAP_INFO_RSP, sizeof(buf), buf);
3211 } else {
3212 struct l2cap_info_rsp rsp;
3213 rsp.type = cpu_to_le16(type);
3214 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3215 l2cap_send_cmd(conn, cmd->ident,
3216 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3219 return 0;
/*
 * Handle an incoming Information Response.  After the feature mask
 * arrives we optionally chain a fixed-channels query; once the
 * exchange is complete, pending channels are allowed to proceed.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask which ones */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/*
 * Demultiplex the L2CAP signalling channel: walk every command in the
 * skb, dispatch to the per-command handler, and answer anything that
 * errors or is unknown with a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw (promiscuous) sockets a copy first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or a zero ident is malformed: stop */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the payload unchanged */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3351 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3353 u16 our_fcs, rcv_fcs;
3354 int hdr_size = L2CAP_HDR_SIZE + 2;
3356 if (pi->fcs == L2CAP_FCS_CRC16) {
3357 skb_trim(skb, skb->len - 2);
3358 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3359 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3361 if (our_fcs != rcv_fcs)
3362 return -EINVAL;
3364 return 0;
/*
 * Answer a peer poll: send RNR if we are locally busy, otherwise flush
 * pending I-frames, and fall back to an RR if nothing was transmitted
 * (the poll must be answered with the F-bit set on something).
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Busy: answer the poll with RNR+F immediately */
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
		__mod_retrans_timer();

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	spin_lock_bh(&pi->send_lock);
	l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	/* Nothing went out and we are not busy: answer with RR */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
/*
 * Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq.  Returns 0 on insert, -EINVAL if a frame with the
 * same tx_seq is already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	/* Stash sequencing info in the skb control block for reassembly */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Walk until the first entry with a larger tx_seq */
	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
/*
 * Reassemble an ERTM SDU from SAR-tagged I-frames and deliver it to the
 * socket receive queue.  On transient failure (ENOMEM / full receive
 * queue at SDU_END) SAR_RETRY is set so the caller can retry the same
 * final frame without re-copying the payload.  Protocol violations fall
 * through to a disconnect.  Consumes @skb except on retryable errors.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame in the middle of a segmented SDU */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry the payload was already appended last time */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			/* Keep the assembled SDU; retry delivery later */
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* fall through: a dropped SDU is a protocol error -> disconnect */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk);
	kfree_skb(skb);
	return 0;
}
/*
 * Deferred work run while the channel is in local-busy state: keep
 * retrying delivery of frames parked on BUSY_QUEUE until it drains,
 * the retry budget is exhausted (then disconnect), or a signal/socket
 * error aborts.  On success, leave busy state and poll the peer with
 * RR+P so transmission resumes.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	u16 control;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk);
			goto done;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto done;
		}

		/* Sleep between attempts without holding the socket lock */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			goto done;

		/* Try to flush as much of the busy queue as possible */
		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
			if (err < 0) {
				/* Still busy: put it back and retry later */
				skb_queue_head(BUSY_QUEUE(sk), skb);
				break;
			}

			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		}

		if (!skb)
			break;	/* queue drained */
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it to resume the flow */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
/*
 * Deliver an in-sequence I-frame to reassembly.  If we are (or become)
 * locally busy, park the frame on BUSY_QUEUE, tell the peer RNR, and
 * schedule the busy worker to retry.  Returns the reassembly result or
 * a negative error while busy.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: just queue for the worker */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return -EBUSY;
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending until we recover */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3649 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3651 struct l2cap_pinfo *pi = l2cap_pi(sk);
3652 struct sk_buff *_skb;
3653 int err = -EINVAL;
3656 * TODO: We have to notify the userland if some data is lost with the
3657 * Streaming Mode.
3660 switch (control & L2CAP_CTRL_SAR) {
3661 case L2CAP_SDU_UNSEGMENTED:
3662 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3663 kfree_skb(pi->sdu);
3664 break;
3667 err = sock_queue_rcv_skb(sk, skb);
3668 if (!err)
3669 return 0;
3671 break;
3673 case L2CAP_SDU_START:
3674 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3675 kfree_skb(pi->sdu);
3676 break;
3679 pi->sdu_len = get_unaligned_le16(skb->data);
3680 skb_pull(skb, 2);
3682 if (pi->sdu_len > pi->imtu) {
3683 err = -EMSGSIZE;
3684 break;
3687 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3688 if (!pi->sdu) {
3689 err = -ENOMEM;
3690 break;
3693 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3695 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3696 pi->partial_sdu_len = skb->len;
3697 err = 0;
3698 break;
3700 case L2CAP_SDU_CONTINUE:
3701 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3702 break;
3704 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3706 pi->partial_sdu_len += skb->len;
3707 if (pi->partial_sdu_len > pi->sdu_len)
3708 kfree_skb(pi->sdu);
3709 else
3710 err = 0;
3712 break;
3714 case L2CAP_SDU_END:
3715 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3716 break;
3718 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3720 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3721 pi->partial_sdu_len += skb->len;
3723 if (pi->partial_sdu_len > pi->imtu)
3724 goto drop;
3726 if (pi->partial_sdu_len == pi->sdu_len) {
3727 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3728 err = sock_queue_rcv_skb(sk, _skb);
3729 if (err < 0)
3730 kfree_skb(_skb);
3732 err = 0;
3734 drop:
3735 kfree_skb(pi->sdu);
3736 break;
3739 kfree_skb(skb);
3740 return err;
3743 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3745 struct sk_buff *skb;
3746 u16 control;
3748 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3749 if (bt_cb(skb)->tx_seq != tx_seq)
3750 break;
3752 skb = skb_dequeue(SREJ_QUEUE(sk));
3753 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3754 l2cap_ertm_reassembly_sdu(sk, skb, control);
3755 l2cap_pi(sk)->buffer_seq_srej =
3756 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3757 tx_seq++;
3761 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3763 struct l2cap_pinfo *pi = l2cap_pi(sk);
3764 struct srej_list *l, *tmp;
3765 u16 control;
3767 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3768 if (l->tx_seq == tx_seq) {
3769 list_del(&l->list);
3770 kfree(l);
3771 return;
3773 control = L2CAP_SUPER_SELECT_REJECT;
3774 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3775 l2cap_send_sframe(pi, control);
3776 list_del(&l->list);
3777 list_add_tail(&l->list, SREJ_LIST(sk));
3781 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3783 struct l2cap_pinfo *pi = l2cap_pi(sk);
3784 struct srej_list *new;
3785 u16 control;
3787 while (tx_seq != pi->expected_tx_seq) {
3788 control = L2CAP_SUPER_SELECT_REJECT;
3789 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3790 l2cap_send_sframe(pi, control);
3792 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3793 new->tx_seq = pi->expected_tx_seq++;
3794 list_add_tail(&new->list, SREJ_LIST(sk));
3796 pi->expected_tx_seq++;
3799 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3801 struct l2cap_pinfo *pi = l2cap_pi(sk);
3802 u8 tx_seq = __get_txseq(rx_control);
3803 u8 req_seq = __get_reqseq(rx_control);
3804 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3805 u8 tx_seq_offset, expected_tx_seq_offset;
3806 int num_to_ack = (pi->tx_win/6) + 1;
3807 int err = 0;
3809 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3811 if (L2CAP_CTRL_FINAL & rx_control &&
3812 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3813 del_timer(&pi->monitor_timer);
3814 if (pi->unacked_frames > 0)
3815 __mod_retrans_timer();
3816 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3819 pi->expected_ack_seq = req_seq;
3820 l2cap_drop_acked_frames(sk);
3822 if (tx_seq == pi->expected_tx_seq)
3823 goto expected;
3825 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3826 if (tx_seq_offset < 0)
3827 tx_seq_offset += 64;
3829 /* invalid tx_seq */
3830 if (tx_seq_offset >= pi->tx_win) {
3831 l2cap_send_disconn_req(pi->conn, sk);
3832 goto drop;
3835 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3836 goto drop;
3838 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3839 struct srej_list *first;
3841 first = list_first_entry(SREJ_LIST(sk),
3842 struct srej_list, list);
3843 if (tx_seq == first->tx_seq) {
3844 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3845 l2cap_check_srej_gap(sk, tx_seq);
3847 list_del(&first->list);
3848 kfree(first);
3850 if (list_empty(SREJ_LIST(sk))) {
3851 pi->buffer_seq = pi->buffer_seq_srej;
3852 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3853 l2cap_send_ack(pi);
3855 } else {
3856 struct srej_list *l;
3858 /* duplicated tx_seq */
3859 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3860 goto drop;
3862 list_for_each_entry(l, SREJ_LIST(sk), list) {
3863 if (l->tx_seq == tx_seq) {
3864 l2cap_resend_srejframe(sk, tx_seq);
3865 return 0;
3868 l2cap_send_srejframe(sk, tx_seq);
3870 } else {
3871 expected_tx_seq_offset =
3872 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3873 if (expected_tx_seq_offset < 0)
3874 expected_tx_seq_offset += 64;
3876 /* duplicated tx_seq */
3877 if (tx_seq_offset < expected_tx_seq_offset)
3878 goto drop;
3880 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3882 INIT_LIST_HEAD(SREJ_LIST(sk));
3883 pi->buffer_seq_srej = pi->buffer_seq;
3885 __skb_queue_head_init(SREJ_QUEUE(sk));
3886 __skb_queue_head_init(BUSY_QUEUE(sk));
3887 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3889 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3891 l2cap_send_srejframe(sk, tx_seq);
3893 return 0;
3895 expected:
3896 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3898 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3899 bt_cb(skb)->tx_seq = tx_seq;
3900 bt_cb(skb)->sar = sar;
3901 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3902 return 0;
3905 if (rx_control & L2CAP_CTRL_FINAL) {
3906 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3907 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3908 else
3909 l2cap_retransmit_frames(sk);
3912 err = l2cap_push_rx_skb(sk, skb, rx_control);
3913 if (err < 0)
3914 return 0;
3916 __mod_ack_timer();
3918 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3919 if (pi->num_acked == num_to_ack - 1)
3920 l2cap_send_ack(pi);
3922 return 0;
3924 drop:
3925 kfree_skb(skb);
3926 return 0;
/*
 * Handle a Receiver Ready S-frame: acknowledge frames up to req_seq,
 * then answer a poll, react to a final bit, or simply resume sending.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* During SREJ recovery: answer with SREJ+F */
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Final answers a pending REJ exactly once */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			spin_lock_bh(&pi->send_lock);
			l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}
	}
}
3972 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3974 struct l2cap_pinfo *pi = l2cap_pi(sk);
3975 u8 tx_seq = __get_reqseq(rx_control);
3977 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3979 pi->expected_ack_seq = tx_seq;
3980 l2cap_drop_acked_frames(sk);
3982 if (rx_control & L2CAP_CTRL_FINAL) {
3983 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3984 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3985 else
3986 l2cap_retransmit_frames(sk);
3987 } else {
3988 l2cap_retransmit_frames(sk);
3990 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3991 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3994 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3996 struct l2cap_pinfo *pi = l2cap_pi(sk);
3997 u8 tx_seq = __get_reqseq(rx_control);
3999 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4001 if (rx_control & L2CAP_CTRL_POLL) {
4002 pi->expected_ack_seq = tx_seq;
4003 l2cap_drop_acked_frames(sk);
4004 l2cap_retransmit_one_frame(sk, tx_seq);
4006 spin_lock_bh(&pi->send_lock);
4007 l2cap_ertm_send(sk);
4008 spin_unlock_bh(&pi->send_lock);
4010 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4011 pi->srej_save_reqseq = tx_seq;
4012 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4014 } else if (rx_control & L2CAP_CTRL_FINAL) {
4015 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4016 pi->srej_save_reqseq == tx_seq)
4017 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4018 else
4019 l2cap_retransmit_one_frame(sk, tx_seq);
4020 } else {
4021 l2cap_retransmit_one_frame(sk, tx_seq);
4022 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4023 pi->srej_save_reqseq = tx_seq;
4024 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4029 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4031 struct l2cap_pinfo *pi = l2cap_pi(sk);
4032 u8 tx_seq = __get_reqseq(rx_control);
4034 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4035 pi->expected_ack_seq = tx_seq;
4036 l2cap_drop_acked_frames(sk);
4038 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4039 del_timer(&pi->retrans_timer);
4040 if (rx_control & L2CAP_CTRL_POLL)
4041 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4042 return;
4045 if (rx_control & L2CAP_CTRL_POLL)
4046 l2cap_send_srejtail(sk);
4047 else
4048 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4051 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4053 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4055 if (L2CAP_CTRL_FINAL & rx_control &&
4056 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4057 del_timer(&l2cap_pi(sk)->monitor_timer);
4058 if (l2cap_pi(sk)->unacked_frames > 0)
4059 __mod_retrans_timer();
4060 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4063 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4064 case L2CAP_SUPER_RCV_READY:
4065 l2cap_data_channel_rrframe(sk, rx_control);
4066 break;
4068 case L2CAP_SUPER_REJECT:
4069 l2cap_data_channel_rejframe(sk, rx_control);
4070 break;
4072 case L2CAP_SUPER_SELECT_REJECT:
4073 l2cap_data_channel_srejframe(sk, rx_control);
4074 break;
4076 case L2CAP_SUPER_RCV_NOT_READY:
4077 l2cap_data_channel_rnrframe(sk, rx_control);
4078 break;
4081 kfree_skb(skb);
4082 return 0;
4085 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4087 struct sock *sk;
4088 struct l2cap_pinfo *pi;
4089 u16 control, len;
4090 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
4092 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4093 if (!sk) {
4094 BT_DBG("unknown cid 0x%4.4x", cid);
4095 goto drop;
4098 pi = l2cap_pi(sk);
4100 BT_DBG("sk %p, len %d", sk, skb->len);
4102 if (sk->sk_state != BT_CONNECTED)
4103 goto drop;
4105 switch (pi->mode) {
4106 case L2CAP_MODE_BASIC:
4107 /* If socket recv buffers overflows we drop data here
4108 * which is *bad* because L2CAP has to be reliable.
4109 * But we don't have any other choice. L2CAP doesn't
4110 * provide flow control mechanism. */
4112 if (pi->imtu < skb->len)
4113 goto drop;
4115 if (!sock_queue_rcv_skb(sk, skb))
4116 goto done;
4117 break;
4119 case L2CAP_MODE_ERTM:
4120 control = get_unaligned_le16(skb->data);
4121 skb_pull(skb, 2);
4122 len = skb->len;
4124 if (__is_sar_start(control))
4125 len -= 2;
4127 if (pi->fcs == L2CAP_FCS_CRC16)
4128 len -= 2;
4131 * We can just drop the corrupted I-frame here.
4132 * Receiver will miss it and start proper recovery
4133 * procedures and ask retransmission.
4135 if (len > pi->mps) {
4136 l2cap_send_disconn_req(pi->conn, sk);
4137 goto drop;
4140 if (l2cap_check_fcs(pi, skb))
4141 goto drop;
4143 req_seq = __get_reqseq(control);
4144 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4145 if (req_seq_offset < 0)
4146 req_seq_offset += 64;
4148 next_tx_seq_offset =
4149 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4150 if (next_tx_seq_offset < 0)
4151 next_tx_seq_offset += 64;
4153 /* check for invalid req-seq */
4154 if (req_seq_offset > next_tx_seq_offset) {
4155 l2cap_send_disconn_req(pi->conn, sk);
4156 goto drop;
4159 if (__is_iframe(control)) {
4160 if (len < 4) {
4161 l2cap_send_disconn_req(pi->conn, sk);
4162 goto drop;
4165 l2cap_data_channel_iframe(sk, control, skb);
4166 } else {
4167 if (len != 0) {
4168 l2cap_send_disconn_req(pi->conn, sk);
4169 goto drop;
4172 l2cap_data_channel_sframe(sk, control, skb);
4175 goto done;
4177 case L2CAP_MODE_STREAMING:
4178 control = get_unaligned_le16(skb->data);
4179 skb_pull(skb, 2);
4180 len = skb->len;
4182 if (__is_sar_start(control))
4183 len -= 2;
4185 if (pi->fcs == L2CAP_FCS_CRC16)
4186 len -= 2;
4188 if (len > pi->mps || len < 4 || __is_sframe(control))
4189 goto drop;
4191 if (l2cap_check_fcs(pi, skb))
4192 goto drop;
4194 tx_seq = __get_txseq(control);
4196 if (pi->expected_tx_seq == tx_seq)
4197 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4198 else
4199 pi->expected_tx_seq = (tx_seq + 1) % 64;
4201 l2cap_streaming_reassembly_sdu(sk, skb, control);
4203 goto done;
4205 default:
4206 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4207 break;
4210 drop:
4211 kfree_skb(skb);
4213 done:
4214 if (sk)
4215 bh_unlock_sock(sk);
4217 return 0;
4220 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4222 struct sock *sk;
4224 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4225 if (!sk)
4226 goto drop;
4228 BT_DBG("sk %p, len %d", sk, skb->len);
4230 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4231 goto drop;
4233 if (l2cap_pi(sk)->imtu < skb->len)
4234 goto drop;
4236 if (!sock_queue_rcv_skb(sk, skb))
4237 goto done;
4239 drop:
4240 kfree_skb(skb);
4242 done:
4243 if (sk)
4244 bh_unlock_sock(sk);
4245 return 0;
4248 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4250 struct l2cap_hdr *lh = (void *) skb->data;
4251 u16 cid, len;
4252 __le16 psm;
4254 skb_pull(skb, L2CAP_HDR_SIZE);
4255 cid = __le16_to_cpu(lh->cid);
4256 len = __le16_to_cpu(lh->len);
4258 if (len != skb->len) {
4259 kfree_skb(skb);
4260 return;
4263 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4265 switch (cid) {
4266 case L2CAP_CID_SIGNALING:
4267 l2cap_sig_channel(conn, skb);
4268 break;
4270 case L2CAP_CID_CONN_LESS:
4271 psm = get_unaligned_le16(skb->data);
4272 skb_pull(skb, 2);
4273 l2cap_conless_channel(conn, psm, skb);
4274 break;
4276 default:
4277 l2cap_data_channel(conn, cid, skb);
4278 break;
4282 /* ---- L2CAP interface with lower layer (HCI) ---- */
4284 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4286 int exact = 0, lm1 = 0, lm2 = 0;
4287 register struct sock *sk;
4288 struct hlist_node *node;
4290 if (type != ACL_LINK)
4291 return 0;
4293 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4295 /* Find listening sockets and check their link_mode */
4296 read_lock(&l2cap_sk_list.lock);
4297 sk_for_each(sk, node, &l2cap_sk_list.head) {
4298 if (sk->sk_state != BT_LISTEN)
4299 continue;
4301 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4302 lm1 |= HCI_LM_ACCEPT;
4303 if (l2cap_pi(sk)->role_switch)
4304 lm1 |= HCI_LM_MASTER;
4305 exact++;
4306 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4307 lm2 |= HCI_LM_ACCEPT;
4308 if (l2cap_pi(sk)->role_switch)
4309 lm2 |= HCI_LM_MASTER;
4312 read_unlock(&l2cap_sk_list.lock);
4314 return exact ? lm1 : lm2;
4317 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4319 struct l2cap_conn *conn;
4321 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4323 if (hcon->type != ACL_LINK)
4324 return 0;
4326 if (!status) {
4327 conn = l2cap_conn_add(hcon, status);
4328 if (conn)
4329 l2cap_conn_ready(conn);
4330 } else
4331 l2cap_conn_del(hcon, bt_err(status));
4333 return 0;
4336 static int l2cap_disconn_ind(struct hci_conn *hcon)
4338 struct l2cap_conn *conn = hcon->l2cap_data;
4340 BT_DBG("hcon %p", hcon);
4342 if (hcon->type != ACL_LINK || !conn)
4343 return 0x13;
4345 return conn->disc_reason;
4348 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4350 BT_DBG("hcon %p reason %d", hcon, reason);
4352 if (hcon->type != ACL_LINK)
4353 return 0;
4355 l2cap_conn_del(hcon, bt_err(reason));
4357 return 0;
4360 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4362 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4363 return;
4365 if (encrypt == 0x00) {
4366 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4367 l2cap_sock_clear_timer(sk);
4368 l2cap_sock_set_timer(sk, HZ * 5);
4369 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4370 __l2cap_sock_close(sk, ECONNREFUSED);
4371 } else {
4372 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4373 l2cap_sock_clear_timer(sk);
4377 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4379 struct l2cap_chan_list *l;
4380 struct l2cap_conn *conn = hcon->l2cap_data;
4381 struct sock *sk;
4383 if (!conn)
4384 return 0;
4386 l = &conn->chan_list;
4388 BT_DBG("conn %p", conn);
4390 read_lock(&l->lock);
4392 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4393 bh_lock_sock(sk);
4395 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4396 bh_unlock_sock(sk);
4397 continue;
4400 if (!status && (sk->sk_state == BT_CONNECTED ||
4401 sk->sk_state == BT_CONFIG)) {
4402 l2cap_check_encryption(sk, encrypt);
4403 bh_unlock_sock(sk);
4404 continue;
4407 if (sk->sk_state == BT_CONNECT) {
4408 if (!status) {
4409 struct l2cap_conn_req req;
4410 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4411 req.psm = l2cap_pi(sk)->psm;
4413 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4414 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4416 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4417 L2CAP_CONN_REQ, sizeof(req), &req);
4418 } else {
4419 l2cap_sock_clear_timer(sk);
4420 l2cap_sock_set_timer(sk, HZ / 10);
4422 } else if (sk->sk_state == BT_CONNECT2) {
4423 struct l2cap_conn_rsp rsp;
4424 __u16 result;
4426 if (!status) {
4427 sk->sk_state = BT_CONFIG;
4428 result = L2CAP_CR_SUCCESS;
4429 } else {
4430 sk->sk_state = BT_DISCONN;
4431 l2cap_sock_set_timer(sk, HZ / 10);
4432 result = L2CAP_CR_SEC_BLOCK;
4435 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4436 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4437 rsp.result = cpu_to_le16(result);
4438 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4439 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4440 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4443 bh_unlock_sock(sk);
4446 read_unlock(&l->lock);
4448 return 0;
4451 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4453 struct l2cap_conn *conn = hcon->l2cap_data;
4455 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4456 goto drop;
4458 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4460 if (flags & ACL_START) {
4461 struct l2cap_hdr *hdr;
4462 int len;
4464 if (conn->rx_len) {
4465 BT_ERR("Unexpected start frame (len %d)", skb->len);
4466 kfree_skb(conn->rx_skb);
4467 conn->rx_skb = NULL;
4468 conn->rx_len = 0;
4469 l2cap_conn_unreliable(conn, ECOMM);
4472 if (skb->len < 2) {
4473 BT_ERR("Frame is too short (len %d)", skb->len);
4474 l2cap_conn_unreliable(conn, ECOMM);
4475 goto drop;
4478 hdr = (struct l2cap_hdr *) skb->data;
4479 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4481 if (len == skb->len) {
4482 /* Complete frame received */
4483 l2cap_recv_frame(conn, skb);
4484 return 0;
4487 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4489 if (skb->len > len) {
4490 BT_ERR("Frame is too long (len %d, expected len %d)",
4491 skb->len, len);
4492 l2cap_conn_unreliable(conn, ECOMM);
4493 goto drop;
4496 /* Allocate skb for the complete frame (with header) */
4497 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4498 if (!conn->rx_skb)
4499 goto drop;
4501 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4502 skb->len);
4503 conn->rx_len = len - skb->len;
4504 } else {
4505 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4507 if (!conn->rx_len) {
4508 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4509 l2cap_conn_unreliable(conn, ECOMM);
4510 goto drop;
4513 if (skb->len > conn->rx_len) {
4514 BT_ERR("Fragment is too long (len %d, expected %d)",
4515 skb->len, conn->rx_len);
4516 kfree_skb(conn->rx_skb);
4517 conn->rx_skb = NULL;
4518 conn->rx_len = 0;
4519 l2cap_conn_unreliable(conn, ECOMM);
4520 goto drop;
4523 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4524 skb->len);
4525 conn->rx_len -= skb->len;
4527 if (!conn->rx_len) {
4528 /* Complete frame received */
4529 l2cap_recv_frame(conn, conn->rx_skb);
4530 conn->rx_skb = NULL;
4534 drop:
4535 kfree_skb(skb);
4536 return 0;
4539 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4541 struct sock *sk;
4542 struct hlist_node *node;
4544 read_lock_bh(&l2cap_sk_list.lock);
4546 sk_for_each(sk, node, &l2cap_sk_list.head) {
4547 struct l2cap_pinfo *pi = l2cap_pi(sk);
4549 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4550 batostr(&bt_sk(sk)->src),
4551 batostr(&bt_sk(sk)->dst),
4552 sk->sk_state, __le16_to_cpu(pi->psm),
4553 pi->scid, pi->dcid,
4554 pi->imtu, pi->omtu, pi->sec_level);
4557 read_unlock_bh(&l2cap_sk_list.lock);
4559 return 0;
4562 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4564 return single_open(file, l2cap_debugfs_show, inode->i_private);
4567 static const struct file_operations l2cap_debugfs_fops = {
4568 .open = l2cap_debugfs_open,
4569 .read = seq_read,
4570 .llseek = seq_lseek,
4571 .release = single_release,
4574 static struct dentry *l2cap_debugfs;
4576 static const struct proto_ops l2cap_sock_ops = {
4577 .family = PF_BLUETOOTH,
4578 .owner = THIS_MODULE,
4579 .release = l2cap_sock_release,
4580 .bind = l2cap_sock_bind,
4581 .connect = l2cap_sock_connect,
4582 .listen = l2cap_sock_listen,
4583 .accept = l2cap_sock_accept,
4584 .getname = l2cap_sock_getname,
4585 .sendmsg = l2cap_sock_sendmsg,
4586 .recvmsg = l2cap_sock_recvmsg,
4587 .poll = bt_sock_poll,
4588 .ioctl = bt_sock_ioctl,
4589 .mmap = sock_no_mmap,
4590 .socketpair = sock_no_socketpair,
4591 .shutdown = l2cap_sock_shutdown,
4592 .setsockopt = l2cap_sock_setsockopt,
4593 .getsockopt = l2cap_sock_getsockopt
4596 static const struct net_proto_family l2cap_sock_family_ops = {
4597 .family = PF_BLUETOOTH,
4598 .owner = THIS_MODULE,
4599 .create = l2cap_sock_create,
4602 static struct hci_proto l2cap_hci_proto = {
4603 .name = "L2CAP",
4604 .id = HCI_PROTO_L2CAP,
4605 .connect_ind = l2cap_connect_ind,
4606 .connect_cfm = l2cap_connect_cfm,
4607 .disconn_ind = l2cap_disconn_ind,
4608 .disconn_cfm = l2cap_disconn_cfm,
4609 .security_cfm = l2cap_security_cfm,
4610 .recv_acldata = l2cap_recv_acldata
4613 static int __init l2cap_init(void)
4615 int err;
4617 err = proto_register(&l2cap_proto, 0);
4618 if (err < 0)
4619 return err;
4621 _busy_wq = create_singlethread_workqueue("l2cap");
4622 if (!_busy_wq)
4623 goto error;
4625 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4626 if (err < 0) {
4627 BT_ERR("L2CAP socket registration failed");
4628 goto error;
4631 err = hci_register_proto(&l2cap_hci_proto);
4632 if (err < 0) {
4633 BT_ERR("L2CAP protocol registration failed");
4634 bt_sock_unregister(BTPROTO_L2CAP);
4635 goto error;
4638 if (bt_debugfs) {
4639 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4640 bt_debugfs, NULL, &l2cap_debugfs_fops);
4641 if (!l2cap_debugfs)
4642 BT_ERR("Failed to create L2CAP debug file");
4645 BT_INFO("L2CAP ver %s", VERSION);
4646 BT_INFO("L2CAP socket layer initialized");
4648 return 0;
4650 error:
4651 proto_unregister(&l2cap_proto);
4652 return err;
4655 static void __exit l2cap_exit(void)
4657 debugfs_remove(l2cap_debugfs);
4659 flush_workqueue(_busy_wq);
4660 destroy_workqueue(_busy_wq);
4662 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4663 BT_ERR("L2CAP socket unregistration failed");
4665 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4666 BT_ERR("L2CAP protocol unregistration failed");
4668 proto_unregister(&l2cap_proto);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4679 module_init(l2cap_init);
4680 module_exit(l2cap_exit);
4682 module_param(enable_ertm, bool, 0644);
4683 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4685 module_param(max_transmit, uint, 0644);
4686 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4688 module_param(tx_window, uint, 0644);
4689 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4691 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4692 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4693 MODULE_VERSION(VERSION);
4694 MODULE_LICENSE("GPL");
4695 MODULE_ALIAS("bt-proto-0");