Input: evdev - never leave the client buffer empty after write
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob1b682a5aa0616911c2861b5d836c015230922faf
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
60 #else
61 static int enable_ertm = 0;
62 #endif
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
71 static struct workqueue_struct *_busy_wq;
73 static struct bt_sock_list l2cap_sk_list = {
74 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
77 static void l2cap_busy_work(struct work_struct *work);
79 static void __l2cap_sock_close(struct sock *sk, int reason);
80 static void l2cap_sock_close(struct sock *sk);
81 static void l2cap_sock_kill(struct sock *sk);
83 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
84 u8 code, u8 ident, u16 dlen, void *data);
86 /* ---- L2CAP timers ---- */
/* Socket timer callback (softirq context, hence bh_lock_sock()).
 * Picks an errno from the socket state, closes the channel, then lets
 * l2cap_sock_kill() free the socket if it is zapped and orphaned, and
 * drops the reference the timer held.
 * NOTE(review): extraction appears to have dropped brace-only lines;
 * code kept byte-identical to the source as seen. */
87 static void l2cap_sock_timeout(unsigned long arg)
89 struct sock *sk = (struct sock *) arg;
90 int reason;
92 BT_DBG("sock %p state %d", sk, sk->sk_state);
94 bh_lock_sock(sk);
/* Timeout while connected/configuring, or while connecting with a
 * security level above SDP, is surfaced as ECONNREFUSED; everything
 * else is a plain ETIMEDOUT. */
96 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
97 reason = ECONNREFUSED;
98 else if (sk->sk_state == BT_CONNECT &&
99 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
100 reason = ECONNREFUSED;
101 else
102 reason = ETIMEDOUT;
104 __l2cap_sock_close(sk, reason);
106 bh_unlock_sock(sk);
/* Kill only takes effect on a zapped, orphaned socket. */
108 l2cap_sock_kill(sk);
109 sock_put(sk);
/* Arm (or re-arm) sk_timer to fire 'timeout' jiffies from now. */
112 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
114 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
115 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
118 static void l2cap_sock_clear_timer(struct sock *sk)
120 BT_DBG("sock %p state %d", sk, sk->sk_state);
121 sk_stop_timer(sk, &sk->sk_timer);
124 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for a channel whose
 * destination CID matches. Returns NULL when not found.
 * NOTE(review): presumably the caller must hold l->lock — the locked
 * wrappers below take it; verify for any new caller. */
125 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
127 struct sock *s;
128 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
129 if (l2cap_pi(s)->dcid == cid)
130 break;
132 return s;
/* Linear scan of the channel list by source CID; NULL when absent.
 * Lock-free variant — callers are expected to hold l->lock. */
135 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
137 struct sock *s;
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->scid == cid)
140 break;
142 return s;
145 /* Find channel with given SCID.
146 * Returns locked socket */
/* Look up a channel by source CID under the list's read lock.
 * On success the returned socket is bh-locked; the caller must
 * bh_unlock_sock() it when done. */
147 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
149 struct sock *s;
150 read_lock(&l->lock);
151 s = __l2cap_get_chan_by_scid(l, cid);
152 if (s)
153 bh_lock_sock(s);
154 read_unlock(&l->lock);
155 return s;
/* Linear scan of the channel list by signalling command identifier;
 * NULL when absent. Callers are expected to hold l->lock. */
158 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 struct sock *s;
161 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
162 if (l2cap_pi(s)->ident == ident)
163 break;
165 return s;
/* Look up a channel by command identifier under the list's read lock.
 * On success the returned socket is bh-locked. */
168 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
170 struct sock *s;
171 read_lock(&l->lock);
172 s = __l2cap_get_chan_by_ident(l, ident);
173 if (s)
174 bh_lock_sock(s);
175 read_unlock(&l->lock);
176 return s;
/* Allocate the first unused source CID in the dynamic range.
 * Returns 0 when the whole dynamic range is in use.
 * NOTE(review): presumably called with the channel-list lock held
 * (see __l2cap_chan_add path) — verify before adding callers. */
179 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
181 u16 cid = L2CAP_CID_DYN_START;
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(l, cid))
185 return cid;
188 return 0;
/* Push sk onto the head of the doubly linked channel list, taking a
 * reference on the socket for the list's pointer. Caller must hold
 * the list write lock. */
191 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
193 sock_hold(sk);
195 if (l->head)
196 l2cap_pi(l->head)->prev_c = sk;
198 l2cap_pi(sk)->next_c = l->head;
199 l2cap_pi(sk)->prev_c = NULL;
200 l->head = sk;
/* Remove sk from the channel list under the write lock and drop the
 * reference the list held (__sock_put pairs with sock_hold in
 * __l2cap_chan_link). */
203 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
205 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
207 write_lock_bh(&l->lock);
208 if (sk == l->head)
209 l->head = next;
211 if (next)
212 l2cap_pi(next)->prev_c = prev;
213 if (prev)
214 l2cap_pi(prev)->next_c = next;
215 write_unlock_bh(&l->lock);
217 __sock_put(sk);
/* Attach a socket to an L2CAP connection: assign CIDs by socket type,
 * link it into the connection's channel list, and (for incoming
 * channels) queue it on the listening parent's accept queue.
 * Caller must hold the channel list write lock (see l2cap_chan_add). */
220 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
222 struct l2cap_chan_list *l = &conn->chan_list;
224 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
225 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: "remote user terminated connection" default disconnect reason */
227 conn->disc_reason = 0x13;
229 l2cap_pi(sk)->conn = conn;
231 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
232 /* Alloc CID for connection-oriented socket */
233 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
234 } else if (sk->sk_type == SOCK_DGRAM) {
235 /* Connectionless socket */
236 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
238 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
239 } else {
240 /* Raw socket can send/recv signalling messages only */
241 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
243 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
246 __l2cap_chan_link(l, sk);
248 if (parent)
249 bt_accept_enqueue(parent, sk);
252 /* Delete channel.
253 * Must be called on the locked socket. */
/* Delete channel.
 * Must be called on the locked socket. Detaches the channel from its
 * connection (dropping the hci_conn reference), marks the socket
 * closed/zapped, records err on the socket, and notifies either the
 * listening parent or the socket's own state-change waiters. */
254 static void l2cap_chan_del(struct sock *sk, int err)
256 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
257 struct sock *parent = bt_sk(sk)->parent;
259 l2cap_sock_clear_timer(sk);
261 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
263 if (conn) {
264 /* Unlink from channel list */
265 l2cap_chan_unlink(&conn->chan_list, sk);
266 l2cap_pi(sk)->conn = NULL;
267 hci_conn_put(conn->hcon);
270 sk->sk_state = BT_CLOSED;
271 sock_set_flag(sk, SOCK_ZAPPED);
273 if (err)
274 sk->sk_err = err;
276 if (parent) {
277 bt_accept_unlink(sk);
278 parent->sk_data_ready(parent, 0);
279 } else
280 sk->sk_state_change(sk);
283 /* Service level security */
/* Service level security */
/* Map the socket's security level to an HCI authentication type and
 * ask the HCI layer to enforce it on the underlying ACL link.
 * PSM 0x0001 (SDP) is special-cased: it never requires bonding, and a
 * LOW security level is downgraded to the dedicated SDP level. */
284 static inline int l2cap_check_security(struct sock *sk)
286 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
287 __u8 auth_type;
289 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
290 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
291 auth_type = HCI_AT_NO_BONDING_MITM;
292 else
293 auth_type = HCI_AT_NO_BONDING;
295 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
296 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
297 } else {
298 switch (l2cap_pi(sk)->sec_level) {
299 case BT_SECURITY_HIGH:
300 auth_type = HCI_AT_GENERAL_BONDING_MITM;
301 break;
302 case BT_SECURITY_MEDIUM:
303 auth_type = HCI_AT_GENERAL_BONDING;
304 break;
305 default:
306 auth_type = HCI_AT_NO_BONDING;
307 break;
311 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
312 auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range. Serialized with
 * conn->lock so concurrent senders get distinct idents. */
315 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
317 u8 id;
319 /* Get next available identificator.
320 * 1 - 128 are used by kernel.
321 * 129 - 199 are reserved.
322 * 200 - 254 are used by utilities like l2ping, etc.
325 spin_lock_bh(&conn->lock);
327 if (++conn->tx_ident > 128)
328 conn->tx_ident = 1;
330 id = conn->tx_ident;
332 spin_unlock_bh(&conn->lock);
334 return id;
/* Build an L2CAP signalling command skb and push it out over the ACL
 * link. Allocation failure is silently dropped — signalling commands
 * are retried/timed out at a higher level. */
337 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
339 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
341 BT_DBG("code 0x%2.2x", code);
343 if (!skb)
344 return;
346 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) carrying the
 * given control field. Appends a CRC16 FCS when the channel uses one,
 * and folds any pending Final/Poll bit into the control field,
 * clearing the corresponding conn_state flags. */
349 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
351 struct sk_buff *skb;
352 struct l2cap_hdr *lh;
353 struct l2cap_conn *conn = pi->conn;
/* header + 2 bytes of control field */
354 int count, hlen = L2CAP_HDR_SIZE + 2;
356 if (pi->fcs == L2CAP_FCS_CRC16)
357 hlen += 2;
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
364 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
369 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
/* Best-effort: on allocation failure the S-frame is simply not sent */
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
375 if (!skb)
376 return;
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the 2-byte FCS field itself */
383 if (pi->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver-Ready S-frame, or Receiver-Not-Ready when the local
 * side is busy (remembering that RNR was sent), acknowledging frames
 * up to buffer_seq via the ReqSeq field. */
391 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
393 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
394 control |= L2CAP_SUPER_RCV_NOT_READY;
395 pi->conn_state |= L2CAP_CONN_RNR_SENT;
396 } else
397 control |= L2CAP_SUPER_RCV_READY;
399 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
401 l2cap_send_sframe(pi, control);
/* Kick off channel establishment. If the remote feature mask is
 * already known (or being fetched), send a Connection Request once
 * security allows; otherwise first issue an Information Request for
 * the feature mask and arm the info timer. */
404 static void l2cap_do_start(struct sock *sk)
406 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
408 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for its completion */
409 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
410 return;
412 if (l2cap_check_security(sk)) {
413 struct l2cap_conn_req req;
414 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
415 req.psm = l2cap_pi(sk)->psm;
417 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req);
422 } else {
423 struct l2cap_info_req req;
424 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
427 conn->info_ident = l2cap_get_ident(conn);
429 mod_timer(&conn->info_timer, jiffies +
430 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
432 l2cap_send_cmd(conn, conn->info_ident,
433 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP Disconnection Request for this channel's CID pair. */
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
439 struct l2cap_disconn_req req;
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
447 /* ---- L2CAP connections ---- */
/* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on this link and advance its
 * state machine: send Connection Requests for channels in BT_CONNECT
 * (once security permits) and Connection Responses for channels in
 * BT_CONNECT2 (pending/success/authentication-pending depending on
 * security and defer_setup). Called once link setup / the feature-mask
 * exchange has finished. */
448 static void l2cap_conn_start(struct l2cap_conn *conn)
450 struct l2cap_chan_list *l = &conn->chan_list;
451 struct sock *sk;
453 BT_DBG("conn %p", conn);
455 read_lock(&l->lock);
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
458 bh_lock_sock(sk);
/* Raw/dgram sockets have no connect handshake to drive */
460 if (sk->sk_type != SOCK_SEQPACKET &&
461 sk->sk_type != SOCK_STREAM) {
462 bh_unlock_sock(sk);
463 continue;
466 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk)) {
468 struct l2cap_conn_req req;
469 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
470 req.psm = l2cap_pi(sk)->psm;
472 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
474 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
475 L2CAP_CONN_REQ, sizeof(req), &req);
477 } else if (sk->sk_state == BT_CONNECT2) {
478 struct l2cap_conn_rsp rsp;
479 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
480 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
482 if (l2cap_check_security(sk)) {
/* defer_setup: hold the response, wake the listener to accept() */
483 if (bt_sk(sk)->defer_setup) {
484 struct sock *parent = bt_sk(sk)->parent;
485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
487 parent->sk_data_ready(parent, 0);
489 } else {
490 sk->sk_state = BT_CONFIG;
491 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
492 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
494 } else {
495 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
496 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
499 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
500 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
503 bh_unlock_sock(sk);
506 read_unlock(&l->lock);
/* ACL link is up: mark raw/dgram channels connected immediately and
 * start the L2CAP handshake for connection-oriented channels that are
 * waiting in BT_CONNECT. */
509 static void l2cap_conn_ready(struct l2cap_conn *conn)
511 struct l2cap_chan_list *l = &conn->chan_list;
512 struct sock *sk;
514 BT_DBG("conn %p", conn);
516 read_lock(&l->lock);
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
519 bh_lock_sock(sk);
521 if (sk->sk_type != SOCK_SEQPACKET &&
522 sk->sk_type != SOCK_STREAM) {
523 l2cap_sock_clear_timer(sk);
524 sk->sk_state = BT_CONNECTED;
525 sk->sk_state_change(sk);
526 } else if (sk->sk_state == BT_CONNECT)
527 l2cap_do_start(sk);
529 bh_unlock_sock(sk);
532 read_unlock(&l->lock);
535 /* Notify sockets that we cannot guaranty reliability anymore */
/* Notify sockets that we cannot guaranty reliability anymore */
/* Raise err on every channel that asked for force_reliable delivery. */
536 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
538 struct l2cap_chan_list *l = &conn->chan_list;
539 struct sock *sk;
541 BT_DBG("conn %p", conn);
543 read_lock(&l->lock);
545 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
546 if (l2cap_pi(sk)->force_reliable)
547 sk->sk_err = err;
550 read_unlock(&l->lock);
/* Information Request timed out: treat the feature-mask exchange as
 * done (with no answer) and resume channel establishment. */
553 static void l2cap_info_timeout(unsigned long arg)
555 struct l2cap_conn *conn = (void *) arg;
557 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
558 conn->info_ident = 0;
560 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link. Returns the existing conn unchanged if one is present or if
 * the link came up with an error status; NULL on allocation failure. */
563 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
565 struct l2cap_conn *conn = hcon->l2cap_data;
567 if (conn || status)
568 return conn;
570 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
571 if (!conn)
572 return NULL;
574 hcon->l2cap_data = conn;
575 conn->hcon = hcon;
577 BT_DBG("hcon %p conn %p", hcon, conn);
579 conn->mtu = hcon->hdev->acl_mtu;
580 conn->src = &hcon->hdev->bdaddr;
581 conn->dst = &hcon->dst;
583 conn->feat_mask = 0;
585 spin_lock_init(&conn->lock);
586 rwlock_init(&conn->chan_list.lock);
588 setup_timer(&conn->info_timer, l2cap_info_timeout,
589 (unsigned long) conn);
/* 0x13: "remote user terminated connection" default disconnect reason */
591 conn->disc_reason = 0x13;
593 return conn;
/* Tear down an L2CAP connection: free any partial reassembly skb,
 * delete and kill every attached channel with err, cancel the info
 * timer if it was armed, and free the connection object. */
596 static void l2cap_conn_del(struct hci_conn *hcon, int err)
598 struct l2cap_conn *conn = hcon->l2cap_data;
599 struct sock *sk;
601 if (!conn)
602 return;
604 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
606 kfree_skb(conn->rx_skb);
608 /* Kill channels */
609 while ((sk = conn->chan_list.head)) {
610 bh_lock_sock(sk);
611 l2cap_chan_del(sk, err);
612 bh_unlock_sock(sk);
613 l2cap_sock_kill(sk);
616 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
617 del_timer_sync(&conn->info_timer);
619 hcon->l2cap_data = NULL;
620 kfree(conn);
/* Locked wrapper around __l2cap_chan_add(): attach sk to conn under
 * the channel list write lock. */
623 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
625 struct l2cap_chan_list *l = &conn->chan_list;
626 write_lock_bh(&l->lock);
627 __l2cap_chan_add(conn, sk, parent);
628 write_unlock_bh(&l->lock);
631 /* ---- Socket interface ---- */
/* ---- Socket interface ---- */
/* Find a socket bound to exactly this (source PSM, source bdaddr)
 * pair; NULL when none. Callers hold l2cap_sk_list.lock. */
632 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
634 struct sock *sk;
635 struct hlist_node *node;
636 sk_for_each(sk, node, &l2cap_sk_list.head)
637 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
638 goto found;
639 sk = NULL;
640 found:
641 return sk;
644 /* Find socket with psm and source bdaddr.
645 * Returns closest match.
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins; failing
 * that, a socket bound to BDADDR_ANY with the same PSM. An optional
 * state filter restricts candidates (0 = any state). */
647 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
649 struct sock *sk = NULL, *sk1 = NULL;
650 struct hlist_node *node;
652 sk_for_each(sk, node, &l2cap_sk_list.head) {
653 if (state && sk->sk_state != state)
654 continue;
656 if (l2cap_pi(sk)->psm == psm) {
657 /* Exact match. */
658 if (!bacmp(&bt_sk(sk)->src, src))
659 break;
661 /* Closest match */
662 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
663 sk1 = sk;
/* node != NULL means the loop broke early on an exact match */
666 return node ? sk : sk1;
669 /* Find socket with given address (psm, src).
670 * Returns locked socket */
/* Find socket with given address (psm, src).
 * Returns locked socket: looked up under the global socket list read
 * lock and bh-locked before the lock is released. */
671 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
673 struct sock *s;
674 read_lock(&l2cap_sk_list.lock);
675 s = __l2cap_get_sock_by_psm(state, psm, src);
676 if (s)
677 bh_lock_sock(s);
678 read_unlock(&l2cap_sk_list.lock);
679 return s;
/* sk_destruct callback: drop any skbs still sitting on the receive
 * and write queues when the socket is finally freed. */
682 static void l2cap_sock_destruct(struct sock *sk)
684 BT_DBG("sk %p", sk);
686 skb_queue_purge(&sk->sk_receive_queue);
687 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the listener itself closed and zapped. */
690 static void l2cap_sock_cleanup_listen(struct sock *parent)
692 struct sock *sk;
694 BT_DBG("parent %p", parent);
696 /* Close not yet accepted channels */
697 while ((sk = bt_accept_dequeue(parent, NULL)))
698 l2cap_sock_close(sk);
700 parent->sk_state = BT_CLOSED;
701 sock_set_flag(parent, SOCK_ZAPPED);
704 /* Kill socket (only if zapped and orphan)
705 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket. Unlinks from the global list,
 * marks it dead, and drops the list's reference. */
707 static void l2cap_sock_kill(struct sock *sk)
709 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
710 return;
712 BT_DBG("sk %p state %d", sk, sk->sk_state);
714 /* Kill poor orphan */
715 bt_sock_unlink(&l2cap_sk_list, sk);
716 sock_set_flag(sk, SOCK_DEAD);
717 sock_put(sk);
/* State-dependent close of an L2CAP socket (caller holds the socket
 * lock). Listening sockets close their pending children; established
 * connection-oriented channels send a Disconnection Request and wait
 * in BT_DISCONN; half-open incoming channels (BT_CONNECT2) reject the
 * connection before being deleted; everything else is deleted or just
 * zapped directly. */
720 static void __l2cap_sock_close(struct sock *sk, int reason)
722 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
724 switch (sk->sk_state) {
725 case BT_LISTEN:
726 l2cap_sock_cleanup_listen(sk);
727 break;
729 case BT_CONNECTED:
730 case BT_CONFIG:
731 if (sk->sk_type == SOCK_SEQPACKET ||
732 sk->sk_type == SOCK_STREAM) {
733 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Graceful disconnect: the timer bounds how long we wait for the
 * Disconnection Response */
735 sk->sk_state = BT_DISCONN;
736 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
737 l2cap_send_disconn_req(conn, sk);
738 } else
739 l2cap_chan_del(sk, reason);
740 break;
742 case BT_CONNECT2:
743 if (sk->sk_type == SOCK_SEQPACKET ||
744 sk->sk_type == SOCK_STREAM) {
745 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
746 struct l2cap_conn_rsp rsp;
747 __u16 result;
/* Closing a deferred-setup channel reads as a security block to the
 * remote side; otherwise report a bad PSM */
749 if (bt_sk(sk)->defer_setup)
750 result = L2CAP_CR_SEC_BLOCK;
751 else
752 result = L2CAP_CR_BAD_PSM;
754 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
755 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
756 rsp.result = cpu_to_le16(result);
757 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
758 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
759 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
760 } else
761 l2cap_chan_del(sk, reason);
762 break;
764 case BT_CONNECT:
765 case BT_DISCONN:
766 l2cap_chan_del(sk, reason);
767 break;
769 default:
770 sock_set_flag(sk, SOCK_ZAPPED);
771 break;
775 /* Must be called on unlocked socket. */
/* Must be called on unlocked socket. Stops the socket timer, performs
 * the state-dependent close with ECONNRESET, then kills the socket if
 * it ended up zapped and orphaned. */
776 static void l2cap_sock_close(struct sock *sk)
778 l2cap_sock_clear_timer(sk);
779 lock_sock(sk);
780 __l2cap_sock_close(sk, ECONNRESET);
781 release_sock(sk);
782 l2cap_sock_kill(sk);
/* Initialize a new L2CAP socket. Children of a listening socket
 * inherit its type and channel options; stand-alone sockets get the
 * module defaults (ERTM for SOCK_STREAM when enable_ertm is set,
 * basic mode otherwise). Also sets up the ERTM queues/lists. */
785 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
787 struct l2cap_pinfo *pi = l2cap_pi(sk);
789 BT_DBG("sk %p", sk);
791 if (parent) {
792 sk->sk_type = parent->sk_type;
793 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
795 pi->imtu = l2cap_pi(parent)->imtu;
796 pi->omtu = l2cap_pi(parent)->omtu;
797 pi->mode = l2cap_pi(parent)->mode;
798 pi->fcs = l2cap_pi(parent)->fcs;
799 pi->max_tx = l2cap_pi(parent)->max_tx;
800 pi->tx_win = l2cap_pi(parent)->tx_win;
801 pi->sec_level = l2cap_pi(parent)->sec_level;
802 pi->role_switch = l2cap_pi(parent)->role_switch;
803 pi->force_reliable = l2cap_pi(parent)->force_reliable;
804 } else {
805 pi->imtu = L2CAP_DEFAULT_MTU;
806 pi->omtu = 0;
807 if (enable_ertm && sk->sk_type == SOCK_STREAM)
808 pi->mode = L2CAP_MODE_ERTM;
809 else
810 pi->mode = L2CAP_MODE_BASIC;
811 pi->max_tx = max_transmit;
812 pi->fcs = L2CAP_FCS_CRC16;
813 pi->tx_win = tx_window;
814 pi->sec_level = BT_SECURITY_LOW;
815 pi->role_switch = 0;
816 pi->force_reliable = 0;
819 /* Default config options */
820 pi->conf_len = 0;
821 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
822 skb_queue_head_init(TX_QUEUE(sk));
823 skb_queue_head_init(SREJ_QUEUE(sk));
824 skb_queue_head_init(BUSY_QUEUE(sk));
825 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: obj_size makes sk_alloc() allocate the larger
 * l2cap_pinfo instead of a bare struct sock. */
828 static struct proto l2cap_proto = {
829 .name = "L2CAP",
830 .owner = THIS_MODULE,
831 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP struct sock: destructor,
 * connect timeout, state timer, and linkage into the global L2CAP
 * socket list. Returns NULL on allocation failure. */
834 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
836 struct sock *sk;
838 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
839 if (!sk)
840 return NULL;
842 sock_init_data(sock, sk);
843 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
845 sk->sk_destruct = l2cap_sock_destruct;
846 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
848 sock_reset_flag(sk, SOCK_ZAPPED);
850 sk->sk_protocol = proto;
851 sk->sk_state = BT_OPEN;
853 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
855 bt_sock_link(&l2cap_sk_list, sk);
856 return sk;
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP. Validates the
 * socket type (raw sockets additionally need CAP_NET_RAW unless
 * created in-kernel), then allocates and initializes the sock. */
859 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
860 int kern)
862 struct sock *sk;
864 BT_DBG("sock %p", sock);
866 sock->state = SS_UNCONNECTED;
868 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
869 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
870 return -ESOCKTNOSUPPORT;
872 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
873 return -EPERM;
875 sock->ops = &l2cap_sock_ops;
877 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
878 if (!sk)
879 return -ENOMEM;
881 l2cap_sock_init(sk, NULL);
882 return 0;
/* bind(2) backend. Copies in the (possibly short) sockaddr_l2,
 * rejects CID binds and duplicate PSM+address binds, enforces
 * CAP_NET_BIND_SERVICE for reserved PSMs (< 0x1001), and records the
 * source address/PSM. PSMs 0x0001 (SDP) and 0x0003 (RFCOMM) start at
 * the SDP security level. */
885 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
887 struct sock *sk = sock->sk;
888 struct sockaddr_l2 la;
889 int len, err = 0;
891 BT_DBG("sk %p", sk);
893 if (!addr || addr->sa_family != AF_BLUETOOTH)
894 return -EINVAL;
/* Tolerate short sockaddrs from userspace: zero-fill then copy */
896 memset(&la, 0, sizeof(la));
897 len = min_t(unsigned int, sizeof(la), alen);
898 memcpy(&la, addr, len);
900 if (la.l2_cid)
901 return -EINVAL;
903 lock_sock(sk);
905 if (sk->sk_state != BT_OPEN) {
906 err = -EBADFD;
907 goto done;
910 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
911 !capable(CAP_NET_BIND_SERVICE)) {
912 err = -EACCES;
913 goto done;
916 write_lock_bh(&l2cap_sk_list.lock);
918 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
919 err = -EADDRINUSE;
920 } else {
921 /* Save source address */
922 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
923 l2cap_pi(sk)->psm = la.l2_psm;
924 l2cap_pi(sk)->sport = la.l2_psm;
925 sk->sk_state = BT_BOUND;
927 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
928 __le16_to_cpu(la.l2_psm) == 0x0003)
929 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
932 write_unlock_bh(&l2cap_sk_list.lock);
934 done:
935 release_sock(sk);
936 return err;
/* Establish (or join) the ACL link toward the destination and attach
 * this channel to it. Chooses the HCI authentication type from the
 * socket type / PSM / security level, creates the hci_conn and
 * l2cap_conn, links the channel, and either completes immediately
 * (link already up) or leaves the socket in BT_CONNECT with the
 * connect timer armed. Returns 0 or a negative errno. */
939 static int l2cap_do_connect(struct sock *sk)
941 bdaddr_t *src = &bt_sk(sk)->src;
942 bdaddr_t *dst = &bt_sk(sk)->dst;
943 struct l2cap_conn *conn;
944 struct hci_conn *hcon;
945 struct hci_dev *hdev;
946 __u8 auth_type;
947 int err;
949 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
950 l2cap_pi(sk)->psm);
952 hdev = hci_get_route(dst, src);
953 if (!hdev)
954 return -EHOSTUNREACH;
956 hci_dev_lock_bh(hdev);
958 err = -ENOMEM;
/* Raw sockets use dedicated bonding; PSM 0x0001 (SDP) needs none;
 * everything else maps its security level to general bonding. */
960 if (sk->sk_type == SOCK_RAW) {
961 switch (l2cap_pi(sk)->sec_level) {
962 case BT_SECURITY_HIGH:
963 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
964 break;
965 case BT_SECURITY_MEDIUM:
966 auth_type = HCI_AT_DEDICATED_BONDING;
967 break;
968 default:
969 auth_type = HCI_AT_NO_BONDING;
970 break;
972 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
973 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
974 auth_type = HCI_AT_NO_BONDING_MITM;
975 else
976 auth_type = HCI_AT_NO_BONDING;
978 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
979 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
980 } else {
981 switch (l2cap_pi(sk)->sec_level) {
982 case BT_SECURITY_HIGH:
983 auth_type = HCI_AT_GENERAL_BONDING_MITM;
984 break;
985 case BT_SECURITY_MEDIUM:
986 auth_type = HCI_AT_GENERAL_BONDING;
987 break;
988 default:
989 auth_type = HCI_AT_NO_BONDING;
990 break;
994 hcon = hci_connect(hdev, ACL_LINK, dst,
995 l2cap_pi(sk)->sec_level, auth_type);
996 if (!hcon)
997 goto done;
999 conn = l2cap_conn_add(hcon, 0);
1000 if (!conn) {
1001 hci_conn_put(hcon);
1002 goto done;
1005 err = 0;
1007 /* Update source addr of the socket */
1008 bacpy(src, conn->src);
1010 l2cap_chan_add(conn, sk, NULL);
1012 sk->sk_state = BT_CONNECT;
1013 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1015 if (hcon->state == BT_CONNECTED) {
/* Raw/dgram channels are up as soon as the ACL link is; seq/stream
 * channels still need the L2CAP handshake */
1016 if (sk->sk_type != SOCK_SEQPACKET &&
1017 sk->sk_type != SOCK_STREAM) {
1018 l2cap_sock_clear_timer(sk);
1019 sk->sk_state = BT_CONNECTED;
1020 } else
1021 l2cap_do_start(sk);
1024 done:
1025 hci_dev_unlock_bh(hdev);
1026 hci_dev_put(hdev);
1027 return err;
/* connect(2) backend. Validates the address (PSM required for
 * connection-oriented types, CID connects rejected) and the channel
 * mode (ERTM/streaming only when enable_ertm), sets the destination,
 * starts the connect via l2cap_do_connect(), and waits for
 * BT_CONNECTED subject to the socket's send timeout / O_NONBLOCK. */
1030 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1032 struct sock *sk = sock->sk;
1033 struct sockaddr_l2 la;
1034 int len, err = 0;
1036 BT_DBG("sk %p", sk);
1038 if (!addr || alen < sizeof(addr->sa_family) ||
1039 addr->sa_family != AF_BLUETOOTH)
1040 return -EINVAL;
1042 memset(&la, 0, sizeof(la));
1043 len = min_t(unsigned int, sizeof(la), alen);
1044 memcpy(&la, addr, len);
1046 if (la.l2_cid)
1047 return -EINVAL;
1049 lock_sock(sk);
1051 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1052 && !la.l2_psm) {
1053 err = -EINVAL;
1054 goto done;
1057 switch (l2cap_pi(sk)->mode) {
1058 case L2CAP_MODE_BASIC:
1059 break;
1060 case L2CAP_MODE_ERTM:
1061 case L2CAP_MODE_STREAMING:
1062 if (enable_ertm)
1063 break;
1064 /* fall through */
1065 default:
1066 err = -ENOTSUPP;
1067 goto done;
1070 switch (sk->sk_state) {
1071 case BT_CONNECT:
1072 case BT_CONNECT2:
1073 case BT_CONFIG:
1074 /* Already connecting */
1075 goto wait;
1077 case BT_CONNECTED:
1078 /* Already connected */
1079 goto done;
1081 case BT_OPEN:
1082 case BT_BOUND:
1083 /* Can connect */
1084 break;
1086 default:
1087 err = -EBADFD;
1088 goto done;
1091 /* Set destination address and psm */
1092 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1093 l2cap_pi(sk)->psm = la.l2_psm;
1095 err = l2cap_do_connect(sk);
1096 if (err)
1097 goto done;
1099 wait:
1100 err = bt_sock_wait_state(sk, BT_CONNECTED,
1101 sock_sndtimeo(sk, flags & O_NONBLOCK));
1102 done:
1103 release_sock(sk);
1104 return err;
/* listen(2) backend. Only bound SOCK_SEQPACKET/SOCK_STREAM sockets in
 * a supported channel mode may listen. A socket bound without a PSM
 * gets one auto-assigned from the odd values in 0x1001..0x10ff
 * (EINVAL if all are taken), then the backlog is set and the socket
 * enters BT_LISTEN. */
1107 static int l2cap_sock_listen(struct socket *sock, int backlog)
1109 struct sock *sk = sock->sk;
1110 int err = 0;
1112 BT_DBG("sk %p backlog %d", sk, backlog);
1114 lock_sock(sk);
1116 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1117 || sk->sk_state != BT_BOUND) {
1118 err = -EBADFD;
1119 goto done;
1122 switch (l2cap_pi(sk)->mode) {
1123 case L2CAP_MODE_BASIC:
1124 break;
1125 case L2CAP_MODE_ERTM:
1126 case L2CAP_MODE_STREAMING:
1127 if (enable_ertm)
1128 break;
1129 /* fall through */
1130 default:
1131 err = -ENOTSUPP;
1132 goto done;
1135 if (!l2cap_pi(sk)->psm) {
1136 bdaddr_t *src = &bt_sk(sk)->src;
1137 u16 psm;
1139 err = -EINVAL;
1141 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd; step by 2 */
1143 for (psm = 0x1001; psm < 0x1100; psm += 2)
1144 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1145 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1146 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1147 err = 0;
1148 break;
1151 write_unlock_bh(&l2cap_sk_list.lock);
1153 if (err < 0)
1154 goto done;
1157 sk->sk_max_ack_backlog = backlog;
1158 sk->sk_ack_backlog = 0;
1159 sk->sk_state = BT_LISTEN;
1161 done:
1162 release_sock(sk);
1163 return err;
/* accept(2) backend: block (wake-one, interruptible) until a child
 * socket is available on the accept queue, honoring the receive
 * timeout / O_NONBLOCK, and re-checking that the listener is still in
 * BT_LISTEN after each sleep. */
1166 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1168 DECLARE_WAITQUEUE(wait, current);
1169 struct sock *sk = sock->sk, *nsk;
1170 long timeo;
1171 int err = 0;
1173 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1175 if (sk->sk_state != BT_LISTEN) {
1176 err = -EBADFD;
1177 goto done;
1180 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1182 BT_DBG("sk %p timeo %ld", sk, timeo);
1184 /* Wait for an incoming connection. (wake-one). */
1185 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1186 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1187 set_current_state(TASK_INTERRUPTIBLE);
1188 if (!timeo) {
1189 err = -EAGAIN;
1190 break;
/* Drop the socket lock while sleeping so the softirq path can queue
 * new connections */
1193 release_sock(sk);
1194 timeo = schedule_timeout(timeo);
1195 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1197 if (sk->sk_state != BT_LISTEN) {
1198 err = -EBADFD;
1199 break;
1202 if (signal_pending(current)) {
1203 err = sock_intr_errno(timeo);
1204 break;
1207 set_current_state(TASK_RUNNING);
1208 remove_wait_queue(sk_sleep(sk), &wait);
1210 if (err)
1211 goto done;
1213 newsock->state = SS_CONNECTED;
1215 BT_DBG("new socket %p", nsk);
1217 done:
1218 release_sock(sk);
1219 return err;
/* getsockname(2)/getpeername(2) backend: fill in a sockaddr_l2 with
 * either the peer's (psm, dst bdaddr, dcid) or the local
 * (sport, src bdaddr, scid). */
1222 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1224 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1225 struct sock *sk = sock->sk;
1227 BT_DBG("sock %p, sk %p", sock, sk);
1229 addr->sa_family = AF_BLUETOOTH;
1230 *len = sizeof(struct sockaddr_l2);
1232 if (peer) {
1233 la->l2_psm = l2cap_pi(sk)->psm;
1234 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1235 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1236 } else {
1237 la->l2_psm = l2cap_pi(sk)->sport;
1238 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1239 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1242 return 0;
/* Block (interruptibly, polling every HZ/5) until all transmitted
 * ERTM I-frames have been acknowledged or the channel detaches from
 * its connection. Returns 0, a socket error, or a signal errno.
 * Called with the socket lock held; drops it across each sleep. */
1245 static int __l2cap_wait_ack(struct sock *sk)
1247 DECLARE_WAITQUEUE(wait, current);
1248 int err = 0;
1249 int timeo = HZ/5;
1251 add_wait_queue(sk_sleep(sk), &wait);
1252 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1253 set_current_state(TASK_INTERRUPTIBLE);
/* Re-arm the poll interval if the previous sleep consumed it */
1255 if (!timeo)
1256 timeo = HZ/5;
1258 if (signal_pending(current)) {
1259 err = sock_intr_errno(timeo);
1260 break;
1263 release_sock(sk);
1264 timeo = schedule_timeout(timeo);
1265 lock_sock(sk);
1267 err = sock_error(sk);
1268 if (err)
1269 break;
1271 set_current_state(TASK_RUNNING);
1272 remove_wait_queue(sk_sleep(sk), &wait);
1273 return err;
/* ERTM monitor timer: if the peer has not responded after
 * remote_max_tx polls, disconnect; otherwise poll again (RR/RNR with
 * the P bit) and re-arm the monitor timer. Timer (softirq) context. */
1276 static void l2cap_monitor_timeout(unsigned long arg)
1278 struct sock *sk = (void *) arg;
1280 bh_lock_sock(sk);
1281 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1282 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1283 bh_unlock_sock(sk);
1284 return;
1287 l2cap_pi(sk)->retry_count++;
1288 __mod_monitor_timer();
1290 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1291 bh_unlock_sock(sk);
/* ERTM retransmission timer: start the poll sequence — reset the
 * retry count, arm the monitor timer, flag that we are waiting for an
 * F-bit response, and send an RR/RNR poll. Timer (softirq) context. */
1294 static void l2cap_retrans_timeout(unsigned long arg)
1296 struct sock *sk = (void *) arg;
1298 bh_lock_sock(sk);
1299 l2cap_pi(sk)->retry_count = 1;
1300 __mod_monitor_timer();
1302 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1304 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1305 bh_unlock_sock(sk);
/* Free transmitted I-frames from the head of the TX queue up to (but
 * not including) the expected_ack_seq, decrementing unacked_frames.
 * Once nothing is outstanding the retransmission timer is stopped. */
1308 static void l2cap_drop_acked_frames(struct sock *sk)
1310 struct sk_buff *skb;
1312 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1313 l2cap_pi(sk)->unacked_frames) {
1314 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1315 break;
1317 skb = skb_dequeue(TX_QUEUE(sk));
1318 kfree_skb(skb);
1320 l2cap_pi(sk)->unacked_frames--;
1323 if (!l2cap_pi(sk)->unacked_frames)
1324 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built L2CAP frame to the HCI layer for transmission
 * on this channel's ACL link. */
1327 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1329 struct l2cap_pinfo *pi = l2cap_pi(sk);
1331 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1333 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: for each queued frame, clone it, stamp the
 * next TxSeq into the control field (modulo-64 sequence space),
 * recompute the FCS if enabled, send the clone, and free the
 * original. No retransmission state is kept in streaming mode.
 * NOTE(review): the skb_clone() result is used unchecked — presumably
 * tolerated here; confirm against the upstream history. */
1336 static int l2cap_streaming_send(struct sock *sk)
1338 struct sk_buff *skb, *tx_skb;
1339 struct l2cap_pinfo *pi = l2cap_pi(sk);
1340 u16 control, fcs;
1342 while ((skb = sk->sk_send_head)) {
1343 tx_skb = skb_clone(skb, GFP_ATOMIC);
1345 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1346 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1347 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1349 if (pi->fcs == L2CAP_FCS_CRC16) {
1350 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1351 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1354 l2cap_do_send(sk, tx_skb);
1356 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1358 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1359 sk->sk_send_head = NULL;
1360 else
1361 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1363 skb = skb_dequeue(TX_QUEUE(sk));
1364 kfree_skb(skb);
1366 return 0;
1369 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1371 struct l2cap_pinfo *pi = l2cap_pi(sk);
1372 struct sk_buff *skb, *tx_skb;
1373 u16 control, fcs;
1375 skb = skb_peek(TX_QUEUE(sk));
1376 if (!skb)
1377 return;
1379 do {
1380 if (bt_cb(skb)->tx_seq == tx_seq)
1381 break;
1383 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1384 return;
1386 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1388 if (pi->remote_max_tx &&
1389 bt_cb(skb)->retries == pi->remote_max_tx) {
1390 l2cap_send_disconn_req(pi->conn, sk);
1391 return;
1394 tx_skb = skb_clone(skb, GFP_ATOMIC);
1395 bt_cb(skb)->retries++;
1396 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1397 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1398 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1399 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1401 if (pi->fcs == L2CAP_FCS_CRC16) {
1402 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1403 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1406 l2cap_do_send(sk, tx_skb);
1409 static int l2cap_ertm_send(struct sock *sk)
1411 struct sk_buff *skb, *tx_skb;
1412 struct l2cap_pinfo *pi = l2cap_pi(sk);
1413 u16 control, fcs;
1414 int nsent = 0;
1416 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1417 return 0;
1419 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1420 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1422 if (pi->remote_max_tx &&
1423 bt_cb(skb)->retries == pi->remote_max_tx) {
1424 l2cap_send_disconn_req(pi->conn, sk);
1425 break;
1428 tx_skb = skb_clone(skb, GFP_ATOMIC);
1430 bt_cb(skb)->retries++;
1432 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1433 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1434 control |= L2CAP_CTRL_FINAL;
1435 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1437 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1438 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1439 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1442 if (pi->fcs == L2CAP_FCS_CRC16) {
1443 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1444 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1447 l2cap_do_send(sk, tx_skb);
1449 __mod_retrans_timer();
1451 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1452 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1454 pi->unacked_frames++;
1455 pi->frames_sent++;
1457 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1458 sk->sk_send_head = NULL;
1459 else
1460 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1462 nsent++;
1465 return nsent;
1468 static int l2cap_retransmit_frames(struct sock *sk)
1470 struct l2cap_pinfo *pi = l2cap_pi(sk);
1471 int ret;
1473 spin_lock_bh(&pi->send_lock);
1475 if (!skb_queue_empty(TX_QUEUE(sk)))
1476 sk->sk_send_head = TX_QUEUE(sk)->next;
1478 pi->next_tx_seq = pi->expected_ack_seq;
1479 ret = l2cap_ertm_send(sk);
1481 spin_unlock_bh(&pi->send_lock);
1483 return ret;
1486 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1488 struct sock *sk = (struct sock *)pi;
1489 u16 control = 0;
1490 int nframes;
1492 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1494 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1495 control |= L2CAP_SUPER_RCV_NOT_READY;
1496 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1497 l2cap_send_sframe(pi, control);
1498 return;
1501 spin_lock_bh(&pi->send_lock);
1502 nframes = l2cap_ertm_send(sk);
1503 spin_unlock_bh(&pi->send_lock);
1505 if (nframes > 0)
1506 return;
1508 control |= L2CAP_SUPER_RCV_READY;
1509 l2cap_send_sframe(pi, control);
1512 static void l2cap_send_srejtail(struct sock *sk)
1514 struct srej_list *tail;
1515 u16 control;
1517 control = L2CAP_SUPER_SELECT_REJECT;
1518 control |= L2CAP_CTRL_FINAL;
1520 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1521 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1523 l2cap_send_sframe(l2cap_pi(sk), control);
1526 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1528 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1529 struct sk_buff **frag;
1530 int err, sent = 0;
1532 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1533 return -EFAULT;
1535 sent += count;
1536 len -= count;
1538 /* Continuation fragments (no L2CAP header) */
1539 frag = &skb_shinfo(skb)->frag_list;
1540 while (len) {
1541 count = min_t(unsigned int, conn->mtu, len);
1543 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1544 if (!*frag)
1545 return -EFAULT;
1546 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1547 return -EFAULT;
1549 sent += count;
1550 len -= count;
1552 frag = &(*frag)->next;
1555 return sent;
1558 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1560 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1561 struct sk_buff *skb;
1562 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1563 struct l2cap_hdr *lh;
1565 BT_DBG("sk %p len %d", sk, (int)len);
1567 count = min_t(unsigned int, (conn->mtu - hlen), len);
1568 skb = bt_skb_send_alloc(sk, count + hlen,
1569 msg->msg_flags & MSG_DONTWAIT, &err);
1570 if (!skb)
1571 return ERR_PTR(-ENOMEM);
1573 /* Create L2CAP header */
1574 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1575 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1576 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1577 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1579 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1580 if (unlikely(err < 0)) {
1581 kfree_skb(skb);
1582 return ERR_PTR(err);
1584 return skb;
1587 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1589 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1590 struct sk_buff *skb;
1591 int err, count, hlen = L2CAP_HDR_SIZE;
1592 struct l2cap_hdr *lh;
1594 BT_DBG("sk %p len %d", sk, (int)len);
1596 count = min_t(unsigned int, (conn->mtu - hlen), len);
1597 skb = bt_skb_send_alloc(sk, count + hlen,
1598 msg->msg_flags & MSG_DONTWAIT, &err);
1599 if (!skb)
1600 return ERR_PTR(-ENOMEM);
1602 /* Create L2CAP header */
1603 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1604 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1605 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1607 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1608 if (unlikely(err < 0)) {
1609 kfree_skb(skb);
1610 return ERR_PTR(err);
1612 return skb;
1615 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1617 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1618 struct sk_buff *skb;
1619 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1620 struct l2cap_hdr *lh;
1622 BT_DBG("sk %p len %d", sk, (int)len);
1624 if (!conn)
1625 return ERR_PTR(-ENOTCONN);
1627 if (sdulen)
1628 hlen += 2;
1630 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1631 hlen += 2;
1633 count = min_t(unsigned int, (conn->mtu - hlen), len);
1634 skb = bt_skb_send_alloc(sk, count + hlen,
1635 msg->msg_flags & MSG_DONTWAIT, &err);
1636 if (!skb)
1637 return ERR_PTR(-ENOMEM);
1639 /* Create L2CAP header */
1640 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1641 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1642 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1643 put_unaligned_le16(control, skb_put(skb, 2));
1644 if (sdulen)
1645 put_unaligned_le16(sdulen, skb_put(skb, 2));
1647 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1648 if (unlikely(err < 0)) {
1649 kfree_skb(skb);
1650 return ERR_PTR(err);
1653 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1654 put_unaligned_le16(0, skb_put(skb, 2));
1656 bt_cb(skb)->retries = 0;
1657 return skb;
/* Segment an SDU larger than the remote MPS into a SAR sequence of
 * i-frames (START, CONTINUE*, END), queue them on the transmit queue
 * and return the number of payload bytes consumed (or a negative
 * errno from PDU creation).
 * NOTE(review): the segments are spliced onto TX_QUEUE *before*
 * send_lock is taken below — confirm no concurrent sender can run
 * between the splice and the sk_send_head update.
 */
1660 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1662 struct l2cap_pinfo *pi = l2cap_pi(sk);
1663 struct sk_buff *skb;
1664 struct sk_buff_head sar_queue;
1665 u16 control;
1666 size_t size = 0;
1668 skb_queue_head_init(&sar_queue);
/* First segment carries the total SDU length (sdulen = len). */
1669 control = L2CAP_SDU_START;
1670 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1671 if (IS_ERR(skb))
1672 return PTR_ERR(skb);
1674 __skb_queue_tail(&sar_queue, skb);
1675 len -= pi->remote_mps;
1676 size += pi->remote_mps;
1678 while (len > 0) {
1679 size_t buflen;
/* Middle segments are MPS-sized; the last carries the remainder. */
1681 if (len > pi->remote_mps) {
1682 control = L2CAP_SDU_CONTINUE;
1683 buflen = pi->remote_mps;
1684 } else {
1685 control = L2CAP_SDU_END;
1686 buflen = len;
1689 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop every segment built so far — all or nothing. */
1690 if (IS_ERR(skb)) {
1691 skb_queue_purge(&sar_queue);
1692 return PTR_ERR(skb);
1695 __skb_queue_tail(&sar_queue, skb);
1696 len -= buflen;
1697 size += buflen;
/* Hand the complete SDU to the transmit queue in one go. */
1699 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1700 spin_lock_bh(&pi->send_lock);
1701 if (sk->sk_send_head == NULL)
1702 sk->sk_send_head = sar_queue.next;
1703 spin_unlock_bh(&pi->send_lock);
1705 return size;
/* sendmsg() entry point for L2CAP sockets.  Dispatches on socket type
 * and channel mode: connectionless (SOCK_DGRAM) frames go straight to
 * HCI; basic mode builds one PDU; ERTM/streaming queue the SDU (with
 * SAR segmentation when it exceeds the remote MPS) and kick the
 * mode-specific transmit engine.  Returns bytes accepted or -errno.
 */
1708 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1710 struct sock *sk = sock->sk;
1711 struct l2cap_pinfo *pi = l2cap_pi(sk);
1712 struct sk_buff *skb;
1713 u16 control;
1714 int err;
1716 BT_DBG("sock %p, sk %p", sock, sk);
/* Fail fast on a pending socket error before taking the lock. */
1718 err = sock_error(sk);
1719 if (err)
1720 return err;
1722 if (msg->msg_flags & MSG_OOB)
1723 return -EOPNOTSUPP;
1725 lock_sock(sk);
1727 if (sk->sk_state != BT_CONNECTED) {
1728 err = -ENOTCONN;
1729 goto done;
1732 /* Connectionless channel */
1733 if (sk->sk_type == SOCK_DGRAM) {
1734 skb = l2cap_create_connless_pdu(sk, msg, len);
1735 if (IS_ERR(skb)) {
1736 err = PTR_ERR(skb);
1737 } else {
1738 l2cap_do_send(sk, skb);
1739 err = len;
1741 goto done;
1744 switch (pi->mode) {
1745 case L2CAP_MODE_BASIC:
1746 /* Check outgoing MTU */
1747 if (len > pi->omtu) {
1748 err = -EINVAL;
1749 goto done;
1752 /* Create a basic PDU */
1753 skb = l2cap_create_basic_pdu(sk, msg, len);
1754 if (IS_ERR(skb)) {
1755 err = PTR_ERR(skb);
1756 goto done;
1759 l2cap_do_send(sk, skb);
1760 err = len;
1761 break;
1763 case L2CAP_MODE_ERTM:
1764 case L2CAP_MODE_STREAMING:
1765 /* Entire SDU fits into one PDU */
1766 if (len <= pi->remote_mps) {
1767 control = L2CAP_SDU_UNSEGMENTED;
1768 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1769 if (IS_ERR(skb)) {
1770 err = PTR_ERR(skb);
1771 goto done;
1773 __skb_queue_tail(TX_QUEUE(sk), skb);
/* send_lock serializes sk_send_head against the ERTM engine;
 * streaming mode has no concurrent sender, so no lock there. */
1775 if (pi->mode == L2CAP_MODE_ERTM)
1776 spin_lock_bh(&pi->send_lock);
1778 if (sk->sk_send_head == NULL)
1779 sk->sk_send_head = skb;
1781 if (pi->mode == L2CAP_MODE_ERTM)
1782 spin_unlock_bh(&pi->send_lock);
1783 } else {
1784 /* Segment SDU into multiples PDUs */
1785 err = l2cap_sar_segment_sdu(sk, msg, len);
1786 if (err < 0)
1787 goto done;
1790 if (pi->mode == L2CAP_MODE_STREAMING) {
1791 err = l2cap_streaming_send(sk);
1792 } else {
1793 spin_lock_bh(&pi->send_lock);
1794 err = l2cap_ertm_send(sk);
1795 spin_unlock_bh(&pi->send_lock);
/* Queued-and-sent successfully: report the full SDU length. */
1798 if (err >= 0)
1799 err = len;
1800 break;
1802 default:
1803 BT_DBG("bad state %1.1x", pi->mode);
1804 err = -EINVAL;
1807 done:
1808 release_sock(sk);
1809 return err;
1812 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1814 struct sock *sk = sock->sk;
1816 lock_sock(sk);
1818 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1819 struct l2cap_conn_rsp rsp;
1821 sk->sk_state = BT_CONFIG;
1823 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1824 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1825 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1826 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1827 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1828 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1830 release_sock(sk);
1831 return 0;
1834 release_sock(sk);
1836 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1839 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1841 struct sock *sk = sock->sk;
1842 struct l2cap_options opts;
1843 int len, err = 0;
1844 u32 opt;
1846 BT_DBG("sk %p", sk);
1848 lock_sock(sk);
1850 switch (optname) {
1851 case L2CAP_OPTIONS:
1852 opts.imtu = l2cap_pi(sk)->imtu;
1853 opts.omtu = l2cap_pi(sk)->omtu;
1854 opts.flush_to = l2cap_pi(sk)->flush_to;
1855 opts.mode = l2cap_pi(sk)->mode;
1856 opts.fcs = l2cap_pi(sk)->fcs;
1857 opts.max_tx = l2cap_pi(sk)->max_tx;
1858 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1860 len = min_t(unsigned int, sizeof(opts), optlen);
1861 if (copy_from_user((char *) &opts, optval, len)) {
1862 err = -EFAULT;
1863 break;
1866 l2cap_pi(sk)->mode = opts.mode;
1867 switch (l2cap_pi(sk)->mode) {
1868 case L2CAP_MODE_BASIC:
1869 break;
1870 case L2CAP_MODE_ERTM:
1871 case L2CAP_MODE_STREAMING:
1872 if (enable_ertm)
1873 break;
1874 /* fall through */
1875 default:
1876 err = -EINVAL;
1877 break;
1880 l2cap_pi(sk)->imtu = opts.imtu;
1881 l2cap_pi(sk)->omtu = opts.omtu;
1882 l2cap_pi(sk)->fcs = opts.fcs;
1883 l2cap_pi(sk)->max_tx = opts.max_tx;
1884 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1885 break;
1887 case L2CAP_LM:
1888 if (get_user(opt, (u32 __user *) optval)) {
1889 err = -EFAULT;
1890 break;
1893 if (opt & L2CAP_LM_AUTH)
1894 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1895 if (opt & L2CAP_LM_ENCRYPT)
1896 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1897 if (opt & L2CAP_LM_SECURE)
1898 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1900 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1901 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1902 break;
1904 default:
1905 err = -ENOPROTOOPT;
1906 break;
1909 release_sock(sk);
1910 return err;
/* setsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
1913 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1915 struct sock *sk = sock->sk;
1916 struct bt_security sec;
1917 int len, err = 0;
1918 u32 opt;
1920 BT_DBG("sk %p", sk);
1922 if (level == SOL_L2CAP)
1923 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1925 if (level != SOL_BLUETOOTH)
1926 return -ENOPROTOOPT;
1928 lock_sock(sk);
1930 switch (optname) {
1931 case BT_SECURITY:
1932 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1933 && sk->sk_type != SOCK_RAW) {
1934 err = -EINVAL;
1935 break;
/* Default for a short user buffer that omits the level field. */
1938 sec.level = BT_SECURITY_LOW;
1940 len = min_t(unsigned int, sizeof(sec), optlen);
1941 if (copy_from_user((char *) &sec, optval, len)) {
1942 err = -EFAULT;
1943 break;
1946 if (sec.level < BT_SECURITY_LOW ||
1947 sec.level > BT_SECURITY_HIGH) {
1948 err = -EINVAL;
1949 break;
1952 l2cap_pi(sk)->sec_level = sec.level;
1953 break;
1955 case BT_DEFER_SETUP:
/* Deferred setup only makes sense before/while listening. */
1956 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1957 err = -EINVAL;
1958 break;
1961 if (get_user(opt, (u32 __user *) optval)) {
1962 err = -EFAULT;
1963 break;
1966 bt_sk(sk)->defer_setup = opt;
1967 break;
1969 default:
1970 err = -ENOPROTOOPT;
1971 break;
1974 release_sock(sk);
1975 return err;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).  Copies at most the user-supplied length.
 */
1978 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1980 struct sock *sk = sock->sk;
1981 struct l2cap_options opts;
1982 struct l2cap_conninfo cinfo;
1983 int len, err = 0;
1984 u32 opt;
1986 BT_DBG("sk %p", sk);
1988 if (get_user(len, optlen))
1989 return -EFAULT;
1991 lock_sock(sk);
1993 switch (optname) {
1994 case L2CAP_OPTIONS:
1995 opts.imtu = l2cap_pi(sk)->imtu;
1996 opts.omtu = l2cap_pi(sk)->omtu;
1997 opts.flush_to = l2cap_pi(sk)->flush_to;
1998 opts.mode = l2cap_pi(sk)->mode;
1999 opts.fcs = l2cap_pi(sk)->fcs;
2000 opts.max_tx = l2cap_pi(sk)->max_tx;
2001 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2003 len = min_t(unsigned int, len, sizeof(opts));
2004 if (copy_to_user(optval, (char *) &opts, len))
2005 err = -EFAULT;
2007 break;
/* Map the internal security level back to legacy LM flags. */
2009 case L2CAP_LM:
2010 switch (l2cap_pi(sk)->sec_level) {
2011 case BT_SECURITY_LOW:
2012 opt = L2CAP_LM_AUTH;
2013 break;
2014 case BT_SECURITY_MEDIUM:
2015 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2016 break;
2017 case BT_SECURITY_HIGH:
2018 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2019 L2CAP_LM_SECURE;
2020 break;
2021 default:
2022 opt = 0;
2023 break;
2026 if (l2cap_pi(sk)->role_switch)
2027 opt |= L2CAP_LM_MASTER;
2029 if (l2cap_pi(sk)->force_reliable)
2030 opt |= L2CAP_LM_RELIABLE;
2032 if (put_user(opt, (u32 __user *) optval))
2033 err = -EFAULT;
2034 break;
/* Connection info is valid once connected, or during deferred setup. */
2036 case L2CAP_CONNINFO:
2037 if (sk->sk_state != BT_CONNECTED &&
2038 !(sk->sk_state == BT_CONNECT2 &&
2039 bt_sk(sk)->defer_setup)) {
2040 err = -ENOTCONN;
2041 break;
2044 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2045 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2047 len = min_t(unsigned int, len, sizeof(cinfo));
2048 if (copy_to_user(optval, (char *) &cinfo, len))
2049 err = -EFAULT;
2051 break;
2053 default:
2054 err = -ENOPROTOOPT;
2055 break;
2058 release_sock(sk);
2059 return err;
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
2062 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2064 struct sock *sk = sock->sk;
2065 struct bt_security sec;
2066 int len, err = 0;
2068 BT_DBG("sk %p", sk);
2070 if (level == SOL_L2CAP)
2071 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2073 if (level != SOL_BLUETOOTH)
2074 return -ENOPROTOOPT;
2076 if (get_user(len, optlen))
2077 return -EFAULT;
2079 lock_sock(sk);
2081 switch (optname) {
2082 case BT_SECURITY:
2083 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2084 && sk->sk_type != SOCK_RAW) {
2085 err = -EINVAL;
2086 break;
2089 sec.level = l2cap_pi(sk)->sec_level;
2091 len = min_t(unsigned int, len, sizeof(sec));
2092 if (copy_to_user(optval, (char *) &sec, len))
2093 err = -EFAULT;
2095 break;
2097 case BT_DEFER_SETUP:
2098 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2099 err = -EINVAL;
2100 break;
2103 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2104 err = -EFAULT;
2106 break;
2108 default:
2109 err = -ENOPROTOOPT;
2110 break;
2113 release_sock(sk);
2114 return err;
2117 static int l2cap_sock_shutdown(struct socket *sock, int how)
2119 struct sock *sk = sock->sk;
2120 int err = 0;
2122 BT_DBG("sock %p, sk %p", sock, sk);
2124 if (!sk)
2125 return 0;
2127 lock_sock(sk);
2128 if (!sk->sk_shutdown) {
2129 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2130 err = __l2cap_wait_ack(sk);
2132 sk->sk_shutdown = SHUTDOWN_MASK;
2133 l2cap_sock_clear_timer(sk);
2134 __l2cap_sock_close(sk, 0);
2136 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2137 err = bt_sock_wait_state(sk, BT_CLOSED,
2138 sk->sk_lingertime);
2140 release_sock(sk);
2141 return err;
2144 static int l2cap_sock_release(struct socket *sock)
2146 struct sock *sk = sock->sk;
2147 int err;
2149 BT_DBG("sock %p, sk %p", sock, sk);
2151 if (!sk)
2152 return 0;
2154 err = l2cap_sock_shutdown(sock, 2);
2156 sock_orphan(sk);
2157 l2cap_sock_kill(sk);
2158 return err;
2161 static void l2cap_chan_ready(struct sock *sk)
2163 struct sock *parent = bt_sk(sk)->parent;
2165 BT_DBG("sk %p, parent %p", sk, parent);
2167 l2cap_pi(sk)->conf_state = 0;
2168 l2cap_sock_clear_timer(sk);
2170 if (!parent) {
2171 /* Outgoing channel.
2172 * Wake up socket sleeping on connect.
2174 sk->sk_state = BT_CONNECTED;
2175 sk->sk_state_change(sk);
2176 } else {
2177 /* Incoming channel.
2178 * Wake up socket sleeping on accept.
2180 parent->sk_data_ready(parent, 0);
2184 /* Copy frame to all raw sockets on that connection */
2185 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2187 struct l2cap_chan_list *l = &conn->chan_list;
2188 struct sk_buff *nskb;
2189 struct sock *sk;
2191 BT_DBG("conn %p", conn);
2193 read_lock(&l->lock);
2194 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2195 if (sk->sk_type != SOCK_RAW)
2196 continue;
2198 /* Don't send frame to the socket it came from */
2199 if (skb->sk == sk)
2200 continue;
2201 nskb = skb_clone(skb, GFP_ATOMIC);
2202 if (!nskb)
2203 continue;
2205 if (sock_queue_rcv_skb(sk, nskb))
2206 kfree_skb(nskb);
2208 read_unlock(&l->lock);
2211 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header + command
 * header + dlen bytes of payload.  Payload beyond the first MTU-sized
 * chunk is chained as continuation fragments on frag_list.
 * Returns the skb or NULL on allocation failure.
 */
2212 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2213 u8 code, u8 ident, u16 dlen, void *data)
2215 struct sk_buff *skb, **frag;
2216 struct l2cap_cmd_hdr *cmd;
2217 struct l2cap_hdr *lh;
2218 int len, count;
2220 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2221 conn, code, ident, dlen);
2223 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2224 count = min_t(unsigned int, conn->mtu, len);
2226 skb = bt_skb_alloc(count, GFP_ATOMIC);
2227 if (!skb)
2228 return NULL;
2230 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2231 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2232 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2234 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2235 cmd->code = code;
2236 cmd->ident = ident;
2237 cmd->len = cpu_to_le16(dlen);
/* First chunk of the payload lives in the head skb. */
2239 if (dlen) {
2240 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2241 memcpy(skb_put(skb, count), data, count);
2242 data += count;
2245 len -= skb->len;
2247 /* Continuation fragments (no L2CAP header) */
2248 frag = &skb_shinfo(skb)->frag_list;
2249 while (len) {
2250 count = min_t(unsigned int, conn->mtu, len);
2252 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2253 if (!*frag)
2254 goto fail;
2256 memcpy(skb_put(*frag, count), data, count);
2258 len -= count;
2259 data += count;
2261 frag = &(*frag)->next;
2264 return skb;
/* Frees the head skb and, with it, any fragments chained so far. */
2266 fail:
2267 kfree_skb(skb);
2268 return NULL;
2271 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2273 struct l2cap_conf_opt *opt = *ptr;
2274 int len;
2276 len = L2CAP_CONF_OPT_SIZE + opt->len;
2277 *ptr += len;
2279 *type = opt->type;
2280 *olen = opt->len;
2282 switch (opt->len) {
2283 case 1:
2284 *val = *((u8 *) opt->val);
2285 break;
2287 case 2:
2288 *val = __le16_to_cpu(*((__le16 *) opt->val));
2289 break;
2291 case 4:
2292 *val = __le32_to_cpu(*((__le32 *) opt->val));
2293 break;
2295 default:
2296 *val = (unsigned long) opt->val;
2297 break;
2300 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2301 return len;
2304 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2306 struct l2cap_conf_opt *opt = *ptr;
2308 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2310 opt->type = type;
2311 opt->len = len;
2313 switch (len) {
2314 case 1:
2315 *((u8 *) opt->val) = val;
2316 break;
2318 case 2:
2319 *((__le16 *) opt->val) = cpu_to_le16(val);
2320 break;
2322 case 4:
2323 *((__le32 *) opt->val) = cpu_to_le32(val);
2324 break;
2326 default:
2327 memcpy(opt->val, (void *) val, len);
2328 break;
2331 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer expiry: send any pending acknowledgement for this channel. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2343 static inline void l2cap_ertm_init(struct sock *sk)
2345 l2cap_pi(sk)->expected_ack_seq = 0;
2346 l2cap_pi(sk)->unacked_frames = 0;
2347 l2cap_pi(sk)->buffer_seq = 0;
2348 l2cap_pi(sk)->num_acked = 0;
2349 l2cap_pi(sk)->frames_sent = 0;
2351 setup_timer(&l2cap_pi(sk)->retrans_timer,
2352 l2cap_retrans_timeout, (unsigned long) sk);
2353 setup_timer(&l2cap_pi(sk)->monitor_timer,
2354 l2cap_monitor_timeout, (unsigned long) sk);
2355 setup_timer(&l2cap_pi(sk)->ack_timer,
2356 l2cap_ack_timeout, (unsigned long) sk);
2358 __skb_queue_head_init(SREJ_QUEUE(sk));
2359 __skb_queue_head_init(BUSY_QUEUE(sk));
2360 spin_lock_init(&l2cap_pi(sk)->send_lock);
2362 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2365 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2367 u32 local_feat_mask = l2cap_feat_mask;
2368 if (enable_ertm)
2369 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2371 switch (mode) {
2372 case L2CAP_MODE_ERTM:
2373 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2374 case L2CAP_MODE_STREAMING:
2375 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2376 default:
2377 return 0x00;
2381 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2383 switch (mode) {
2384 case L2CAP_MODE_STREAMING:
2385 case L2CAP_MODE_ERTM:
2386 if (l2cap_mode_supported(mode, remote_feat_mask))
2387 return mode;
2388 /* fall through */
2389 default:
2390 return L2CAP_MODE_BASIC;
/* Build the body of an outgoing configuration request for this channel
 * into `data` and return its length.  On the first request the mode is
 * negotiated down (or the channel disconnected) based on the remote
 * feature mask; later requests keep the already-chosen mode.
 */
2394 static int l2cap_build_conf_req(struct sock *sk, void *data)
2396 struct l2cap_pinfo *pi = l2cap_pi(sk);
2397 struct l2cap_conf_req *req = data;
2398 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2399 void *ptr = req->data;
2401 BT_DBG("sk %p", sk);
/* Mode selection only happens on the very first config exchange. */
2403 if (pi->num_conf_req || pi->num_conf_rsp)
2404 goto done;
2406 switch (pi->mode) {
2407 case L2CAP_MODE_STREAMING:
2408 case L2CAP_MODE_ERTM:
2409 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2410 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2411 l2cap_send_disconn_req(pi->conn, sk);
2412 break;
2413 default:
2414 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2415 break;
2418 done:
2419 switch (pi->mode) {
2420 case L2CAP_MODE_BASIC:
/* Only advertise the MTU when it differs from the default. */
2421 if (pi->imtu != L2CAP_DEFAULT_MTU)
2422 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2423 break;
2425 case L2CAP_MODE_ERTM:
2426 rfc.mode = L2CAP_MODE_ERTM;
2427 rfc.txwin_size = pi->tx_win;
2428 rfc.max_transmit = pi->max_tx;
2429 rfc.retrans_timeout = 0;
2430 rfc.monitor_timeout = 0;
/* Clamp the PDU size so frame + overhead fits the ACL MTU. */
2431 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2432 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2433 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2436 sizeof(rfc), (unsigned long) &rfc);
2438 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2439 break;
2441 if (pi->fcs == L2CAP_FCS_NONE ||
2442 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2443 pi->fcs = L2CAP_FCS_NONE;
2444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2446 break;
2448 case L2CAP_MODE_STREAMING:
2449 rfc.mode = L2CAP_MODE_STREAMING;
2450 rfc.txwin_size = 0;
2451 rfc.max_transmit = 0;
2452 rfc.retrans_timeout = 0;
2453 rfc.monitor_timeout = 0;
2454 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2455 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2456 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2459 sizeof(rfc), (unsigned long) &rfc);
2461 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2462 break;
2464 if (pi->fcs == L2CAP_FCS_NONE ||
2465 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2466 pi->fcs = L2CAP_FCS_NONE;
2467 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2469 break;
2472 /* FIXME: Need actual value of the flush timeout */
2473 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2474 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2476 req->dcid = cpu_to_le16(pi->dcid);
2477 req->flags = cpu_to_le16(0);
2479 return ptr - data;
2482 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2484 struct l2cap_pinfo *pi = l2cap_pi(sk);
2485 struct l2cap_conf_rsp *rsp = data;
2486 void *ptr = rsp->data;
2487 void *req = pi->conf_req;
2488 int len = pi->conf_len;
2489 int type, hint, olen;
2490 unsigned long val;
2491 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2492 u16 mtu = L2CAP_DEFAULT_MTU;
2493 u16 result = L2CAP_CONF_SUCCESS;
2495 BT_DBG("sk %p", sk);
2497 while (len >= L2CAP_CONF_OPT_SIZE) {
2498 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2500 hint = type & L2CAP_CONF_HINT;
2501 type &= L2CAP_CONF_MASK;
2503 switch (type) {
2504 case L2CAP_CONF_MTU:
2505 mtu = val;
2506 break;
2508 case L2CAP_CONF_FLUSH_TO:
2509 pi->flush_to = val;
2510 break;
2512 case L2CAP_CONF_QOS:
2513 break;
2515 case L2CAP_CONF_RFC:
2516 if (olen == sizeof(rfc))
2517 memcpy(&rfc, (void *) val, olen);
2518 break;
2520 case L2CAP_CONF_FCS:
2521 if (val == L2CAP_FCS_NONE)
2522 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2524 break;
2526 default:
2527 if (hint)
2528 break;
2530 result = L2CAP_CONF_UNKNOWN;
2531 *((u8 *) ptr++) = type;
2532 break;
2536 if (pi->num_conf_rsp || pi->num_conf_req)
2537 goto done;
2539 switch (pi->mode) {
2540 case L2CAP_MODE_STREAMING:
2541 case L2CAP_MODE_ERTM:
2542 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2543 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2544 return -ECONNREFUSED;
2545 break;
2546 default:
2547 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2548 break;
2551 done:
2552 if (pi->mode != rfc.mode) {
2553 result = L2CAP_CONF_UNACCEPT;
2554 rfc.mode = pi->mode;
2556 if (pi->num_conf_rsp == 1)
2557 return -ECONNREFUSED;
2559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2560 sizeof(rfc), (unsigned long) &rfc);
2564 if (result == L2CAP_CONF_SUCCESS) {
2565 /* Configure output options and let the other side know
2566 * which ones we don't like. */
2568 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2569 result = L2CAP_CONF_UNACCEPT;
2570 else {
2571 pi->omtu = mtu;
2572 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2574 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2576 switch (rfc.mode) {
2577 case L2CAP_MODE_BASIC:
2578 pi->fcs = L2CAP_FCS_NONE;
2579 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2580 break;
2582 case L2CAP_MODE_ERTM:
2583 pi->remote_tx_win = rfc.txwin_size;
2584 pi->remote_max_tx = rfc.max_transmit;
2585 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2586 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2588 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2590 rfc.retrans_timeout =
2591 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2592 rfc.monitor_timeout =
2593 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2595 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2597 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2598 sizeof(rfc), (unsigned long) &rfc);
2600 break;
2602 case L2CAP_MODE_STREAMING:
2603 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2604 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2606 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2608 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2610 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2611 sizeof(rfc), (unsigned long) &rfc);
2613 break;
2615 default:
2616 result = L2CAP_CONF_UNACCEPT;
2618 memset(&rfc, 0, sizeof(rfc));
2619 rfc.mode = pi->mode;
2622 if (result == L2CAP_CONF_SUCCESS)
2623 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2625 rsp->scid = cpu_to_le16(pi->dcid);
2626 rsp->result = cpu_to_le16(result);
2627 rsp->flags = cpu_to_le16(0x0000);
2629 return ptr - data;
2632 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2634 struct l2cap_pinfo *pi = l2cap_pi(sk);
2635 struct l2cap_conf_req *req = data;
2636 void *ptr = req->data;
2637 int type, olen;
2638 unsigned long val;
2639 struct l2cap_conf_rfc rfc;
2641 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2643 while (len >= L2CAP_CONF_OPT_SIZE) {
2644 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2646 switch (type) {
2647 case L2CAP_CONF_MTU:
2648 if (val < L2CAP_DEFAULT_MIN_MTU) {
2649 *result = L2CAP_CONF_UNACCEPT;
2650 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2651 } else
2652 pi->omtu = val;
2653 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2654 break;
2656 case L2CAP_CONF_FLUSH_TO:
2657 pi->flush_to = val;
2658 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2659 2, pi->flush_to);
2660 break;
2662 case L2CAP_CONF_RFC:
2663 if (olen == sizeof(rfc))
2664 memcpy(&rfc, (void *)val, olen);
2666 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2667 rfc.mode != pi->mode)
2668 return -ECONNREFUSED;
2670 pi->mode = rfc.mode;
2671 pi->fcs = 0;
2673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2674 sizeof(rfc), (unsigned long) &rfc);
2675 break;
2679 if (*result == L2CAP_CONF_SUCCESS) {
2680 switch (rfc.mode) {
2681 case L2CAP_MODE_ERTM:
2682 pi->remote_tx_win = rfc.txwin_size;
2683 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2684 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2685 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2686 break;
2687 case L2CAP_MODE_STREAMING:
2688 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2692 req->dcid = cpu_to_le16(pi->dcid);
2693 req->flags = cpu_to_le16(0x0000);
2695 return ptr - data;
2698 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2700 struct l2cap_conf_rsp *rsp = data;
2701 void *ptr = rsp->data;
2703 BT_DBG("sk %p", sk);
2705 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2706 rsp->result = cpu_to_le16(result);
2707 rsp->flags = cpu_to_le16(flags);
2709 return ptr - data;
2712 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2714 struct l2cap_pinfo *pi = l2cap_pi(sk);
2715 int type, olen;
2716 unsigned long val;
2717 struct l2cap_conf_rfc rfc;
2719 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2721 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2722 return;
2724 while (len >= L2CAP_CONF_OPT_SIZE) {
2725 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2727 switch (type) {
2728 case L2CAP_CONF_RFC:
2729 if (olen == sizeof(rfc))
2730 memcpy(&rfc, (void *)val, olen);
2731 goto done;
2735 done:
2736 switch (rfc.mode) {
2737 case L2CAP_MODE_ERTM:
2738 pi->remote_tx_win = rfc.txwin_size;
2739 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2740 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2741 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2742 break;
2743 case L2CAP_MODE_STREAMING:
2744 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2748 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2750 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2752 if (rej->reason != 0x0000)
2753 return 0;
2755 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2756 cmd->ident == conn->info_ident) {
2757 del_timer(&conn->info_timer);
2759 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2760 conn->info_ident = 0;
2762 l2cap_conn_start(conn);
2765 return 0;
/* Handle an incoming L2CAP Connection Request: look up a listening socket
 * for the requested PSM, verify link security, allocate and register a
 * child channel, and answer with a Connection Response.  The response may
 * be "pending" while security or the information exchange completes.
 * Always returns 0 (the request is fully handled here).
 *
 * NOTE(review): the parent socket is unlocked at 'response:'; presumably
 * l2cap_get_sock_by_psm() returns it locked — confirm against its
 * definition earlier in this file. */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP, PSM 0x0001) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	/* Every failure from here on is reported as "no resources" */
	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	/* our source CID, reported back to the peer as its dcid */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* userspace must accept; wake the listener */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* security procedure still in progress */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* feature-mask exchange not done yet; answer pending */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* If we replied "pending / no info", start the feature-mask
	 * Information Request exchange now. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
/* Handle an incoming Connection Response for a channel we initiated.
 * On success, move to BT_CONFIG and send our first Configure Request;
 * on "pending" just record the state; any other result tears the
 * channel down.  Always returns 0.
 *
 * NOTE(review): the channel lookups presumably return the socket locked
 * (it is bh_unlock_sock()'d at the end) — confirm against their
 * definitions earlier in this file. */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A zero scid means the peer could not allocate a channel yet;
	 * fall back to matching by command identifier. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* refused (bad PSM, security block, no resources, ...) */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming Configure Request.  Requests may arrive fragmented
 * (continuation flag 0x0001); fragments are accumulated in conf_req until
 * the final one arrives, then parsed and answered.  When both directions
 * of configuration are done the channel goes to BT_CONNECTED.
 * Returns 0, or -ENOENT if no channel matches @dcid. */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Channel is already being torn down; ignore the request. */
	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Enable CRC16 FCS unless both sides agreed to disable it */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer but have not sent our own request yet */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3022 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3024 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3025 u16 scid, flags, result;
3026 struct sock *sk;
3027 int len = cmd->len - sizeof(*rsp);
3029 scid = __le16_to_cpu(rsp->scid);
3030 flags = __le16_to_cpu(rsp->flags);
3031 result = __le16_to_cpu(rsp->result);
3033 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3034 scid, flags, result);
3036 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3037 if (!sk)
3038 return 0;
3040 switch (result) {
3041 case L2CAP_CONF_SUCCESS:
3042 l2cap_conf_rfc_get(sk, rsp->data, len);
3043 break;
3045 case L2CAP_CONF_UNACCEPT:
3046 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3047 char req[64];
3049 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3050 l2cap_send_disconn_req(conn, sk);
3051 goto done;
3054 /* throw out any old stored conf requests */
3055 result = L2CAP_CONF_SUCCESS;
3056 len = l2cap_parse_conf_rsp(sk, rsp->data,
3057 len, req, &result);
3058 if (len < 0) {
3059 l2cap_send_disconn_req(conn, sk);
3060 goto done;
3063 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3064 L2CAP_CONF_REQ, len, req);
3065 l2cap_pi(sk)->num_conf_req++;
3066 if (result != L2CAP_CONF_SUCCESS)
3067 goto done;
3068 break;
3071 default:
3072 sk->sk_state = BT_DISCONN;
3073 sk->sk_err = ECONNRESET;
3074 l2cap_sock_set_timer(sk, HZ * 5);
3075 l2cap_send_disconn_req(conn, sk);
3076 goto done;
3079 if (flags & 0x01)
3080 goto done;
3082 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3084 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3085 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3086 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3087 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3089 sk->sk_state = BT_CONNECTED;
3090 l2cap_pi(sk)->next_tx_seq = 0;
3091 l2cap_pi(sk)->expected_tx_seq = 0;
3092 __skb_queue_head_init(TX_QUEUE(sk));
3093 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3094 l2cap_ertm_init(sk);
3096 l2cap_chan_ready(sk);
3099 done:
3100 bh_unlock_sock(sk);
3101 return 0;
/* Handle an incoming Disconnection Request: acknowledge it, flush all
 * pending transmit/retransmit state, stop ERTM timers, and destroy the
 * channel.  Always returns 0. */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* the peer's dcid is our scid */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Disconnection Response to a request we sent:
 * flush queues, stop ERTM timers, and destroy the channel without an
 * error (err 0 = orderly close).  Always returns 0. */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Information Request.  We answer feature-mask and
 * fixed-channel queries; anything else gets "not supported".
 * Always returns 0. */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* only advertise ERTM/Streaming/FCS when enabled */
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* 8-byte fixed channel map follows the 4-byte header */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming Information Response.  After learning the feature
 * mask we optionally chase the fixed-channel map; once the exchange is
 * complete, pending channel setups are started.  Always returns 0. */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* peer supports fixed channels: query the map too */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Dispatch every signalling command contained in @skb (one C-frame can
 * carry several commands).  Unknown or failing commands are answered
 * with a Command Reject.  Consumes @skb. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* give raw sockets a copy before we consume the commands */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* a command must fit in the remaining payload and carry
		 * a non-zero identifier */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* echo back the request payload unchanged */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3344 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3346 u16 our_fcs, rcv_fcs;
3347 int hdr_size = L2CAP_HDR_SIZE + 2;
3349 if (pi->fcs == L2CAP_FCS_CRC16) {
3350 skb_trim(skb, skb->len - 2);
3351 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3352 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3354 if (our_fcs != rcv_fcs)
3355 return -EINVAL;
3357 return 0;
/* Answer a poll (P bit) from the peer: send RNR if we are locally busy,
 * otherwise try to send pending I-frames with the F bit; if nothing was
 * sent and we are not busy, fall back to an RR S-frame. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* locally busy: answer the poll with RNR + F bit */
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
		__mod_retrans_timer();

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* try to answer the poll with pending I-frames */
	spin_lock_bh(&pi->send_lock);
	l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		/* nothing went out: acknowledge the poll with RR */
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq.  Returns 0 on insertion, -EINVAL if a frame
 * with the same tx_seq is already queued (duplicate). */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		/* found the first queued frame with a larger tx_seq:
		 * insert in front of it to keep the queue sorted */
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* largest tx_seq so far: append */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
/* Reassemble ERTM SAR fragments into a full SDU and hand completed SDUs
 * to the socket receive queue.  @control carries the SAR bits telling us
 * whether @skb is an unsegmented SDU or the start/continue/end of a
 * segmented one.  Consumes @skb on all paths except the -ENOMEM / queue
 * failure returns, where the caller re-queues it (see l2cap_push_rx_skb).
 *
 * NOTE(review): the 'drop' path falls through into 'disconnect', so a
 * dropped partial SDU also tears the channel down — this looks
 * intentional for ERTM (a protocol violation), but confirm.
 * NOTE(review): after the END case queues the SDU, pi->sdu is freed but
 * not set to NULL; the SAR_SDU flag being cleared appears to prevent any
 * stale use — verify. */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* an unsegmented SDU in the middle of reassembly is invalid */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* first two payload bytes carry the total SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* on a SAR retry the payload was already copied; only
		 * account and copy on the first attempt */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			/* total must match the length announced in START */
			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk);
	kfree_skb(skb);
	return 0;
}
/* Workqueue handler for the local-busy condition: periodically retry
 * draining BUSY_QUEUE through SDU reassembly until it empties (then
 * send RR+P to leave the busy state) or we give up after
 * L2CAP_LOCAL_BUSY_TRIES attempts (then disconnect).  Clears the
 * LOCAL_BUSY/RNR_SENT flags on exit. */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	u16 control;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk);
			goto done;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto done;
		}

		/* sleep with the socket released so the receive path can
		 * make progress */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			goto done;

		/* drain as many queued frames as reassembly will accept */
		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
			if (err < 0) {
				/* still busy: put it back and retry later */
				skb_queue_head(BUSY_QUEUE(sk), skb);
				break;
			}

			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		}

		/* queue fully drained */
		if (!skb)
			break;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* we told the peer we were busy; poll it with RR+P to resume */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
/* Push a received in-sequence I-frame into SDU reassembly.  If we are
 * (or just became) locally busy, the frame is parked on BUSY_QUEUE, an
 * RNR is sent to the peer, and the busy worker is scheduled.
 * Returns the reassembly result, or -EBUSY when already busy. */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return -EBUSY;
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition: reassembly failed (e.g. no memory) */
	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3642 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3644 struct l2cap_pinfo *pi = l2cap_pi(sk);
3645 struct sk_buff *_skb;
3646 int err = -EINVAL;
3649 * TODO: We have to notify the userland if some data is lost with the
3650 * Streaming Mode.
3653 switch (control & L2CAP_CTRL_SAR) {
3654 case L2CAP_SDU_UNSEGMENTED:
3655 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3656 kfree_skb(pi->sdu);
3657 break;
3660 err = sock_queue_rcv_skb(sk, skb);
3661 if (!err)
3662 return 0;
3664 break;
3666 case L2CAP_SDU_START:
3667 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3668 kfree_skb(pi->sdu);
3669 break;
3672 pi->sdu_len = get_unaligned_le16(skb->data);
3673 skb_pull(skb, 2);
3675 if (pi->sdu_len > pi->imtu) {
3676 err = -EMSGSIZE;
3677 break;
3680 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3681 if (!pi->sdu) {
3682 err = -ENOMEM;
3683 break;
3686 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3688 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3689 pi->partial_sdu_len = skb->len;
3690 err = 0;
3691 break;
3693 case L2CAP_SDU_CONTINUE:
3694 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3695 break;
3697 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3699 pi->partial_sdu_len += skb->len;
3700 if (pi->partial_sdu_len > pi->sdu_len)
3701 kfree_skb(pi->sdu);
3702 else
3703 err = 0;
3705 break;
3707 case L2CAP_SDU_END:
3708 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3709 break;
3711 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3713 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3714 pi->partial_sdu_len += skb->len;
3716 if (pi->partial_sdu_len > pi->imtu)
3717 goto drop;
3719 if (pi->partial_sdu_len == pi->sdu_len) {
3720 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3721 err = sock_queue_rcv_skb(sk, _skb);
3722 if (err < 0)
3723 kfree_skb(_skb);
3725 err = 0;
3727 drop:
3728 kfree_skb(pi->sdu);
3729 break;
3732 kfree_skb(skb);
3733 return err;
/* After a selectively-rejected frame arrives, replay the run of
 * consecutive frames now available at the head of SREJ_QUEUE into SDU
 * reassembly, starting from @tx_seq, stopping at the first gap. */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* queue is sorted by tx_seq; stop at the first gap */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq++;
	}
}
/* The peer retransmitted @tx_seq but we still miss earlier frames:
 * drop @tx_seq from the pending-SREJ list and re-send an SREJ for every
 * list entry ahead of it, rotating each re-requested entry to the tail. */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* the awaited frame arrived: forget this entry */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3774 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3776 struct l2cap_pinfo *pi = l2cap_pi(sk);
3777 struct srej_list *new;
3778 u16 control;
3780 while (tx_seq != pi->expected_tx_seq) {
3781 control = L2CAP_SUPER_SELECT_REJECT;
3782 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3783 l2cap_send_sframe(pi, control);
3785 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3786 new->tx_seq = pi->expected_tx_seq++;
3787 list_add_tail(&new->list, SREJ_LIST(sk));
3789 pi->expected_tx_seq++;
3792 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3794 struct l2cap_pinfo *pi = l2cap_pi(sk);
3795 u8 tx_seq = __get_txseq(rx_control);
3796 u8 req_seq = __get_reqseq(rx_control);
3797 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3798 u8 tx_seq_offset, expected_tx_seq_offset;
3799 int num_to_ack = (pi->tx_win/6) + 1;
3800 int err = 0;
3802 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3804 if (L2CAP_CTRL_FINAL & rx_control &&
3805 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3806 del_timer(&pi->monitor_timer);
3807 if (pi->unacked_frames > 0)
3808 __mod_retrans_timer();
3809 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3812 pi->expected_ack_seq = req_seq;
3813 l2cap_drop_acked_frames(sk);
3815 if (tx_seq == pi->expected_tx_seq)
3816 goto expected;
3818 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3819 if (tx_seq_offset < 0)
3820 tx_seq_offset += 64;
3822 /* invalid tx_seq */
3823 if (tx_seq_offset >= pi->tx_win) {
3824 l2cap_send_disconn_req(pi->conn, sk);
3825 goto drop;
3828 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3829 goto drop;
3831 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3832 struct srej_list *first;
3834 first = list_first_entry(SREJ_LIST(sk),
3835 struct srej_list, list);
3836 if (tx_seq == first->tx_seq) {
3837 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3838 l2cap_check_srej_gap(sk, tx_seq);
3840 list_del(&first->list);
3841 kfree(first);
3843 if (list_empty(SREJ_LIST(sk))) {
3844 pi->buffer_seq = pi->buffer_seq_srej;
3845 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3846 l2cap_send_ack(pi);
3848 } else {
3849 struct srej_list *l;
3851 /* duplicated tx_seq */
3852 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3853 goto drop;
3855 list_for_each_entry(l, SREJ_LIST(sk), list) {
3856 if (l->tx_seq == tx_seq) {
3857 l2cap_resend_srejframe(sk, tx_seq);
3858 return 0;
3861 l2cap_send_srejframe(sk, tx_seq);
3863 } else {
3864 expected_tx_seq_offset =
3865 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3866 if (expected_tx_seq_offset < 0)
3867 expected_tx_seq_offset += 64;
3869 /* duplicated tx_seq */
3870 if (tx_seq_offset < expected_tx_seq_offset)
3871 goto drop;
3873 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3875 INIT_LIST_HEAD(SREJ_LIST(sk));
3876 pi->buffer_seq_srej = pi->buffer_seq;
3878 __skb_queue_head_init(SREJ_QUEUE(sk));
3879 __skb_queue_head_init(BUSY_QUEUE(sk));
3880 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3882 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3884 l2cap_send_srejframe(sk, tx_seq);
3886 return 0;
3888 expected:
3889 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3891 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3892 bt_cb(skb)->tx_seq = tx_seq;
3893 bt_cb(skb)->sar = sar;
3894 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3895 return 0;
3898 if (rx_control & L2CAP_CTRL_FINAL) {
3899 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3900 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3901 else
3902 l2cap_retransmit_frames(sk);
3905 err = l2cap_push_rx_skb(sk, skb, rx_control);
3906 if (err < 0)
3907 return 0;
3909 __mod_ack_timer();
3911 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3912 if (pi->num_acked == num_to_ack - 1)
3913 l2cap_send_ack(pi);
3915 return 0;
3917 drop:
3918 kfree_skb(skb);
3919 return 0;
/* Process a received RR (Receiver Ready) S-frame: acknowledge frames up
 * to ReqSeq, answer polls, honour the F bit, and resume transmission if
 * the peer is no longer busy. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* in SREJ recovery: answer the poll with an SREJ */
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* F bit: retransmit unless a REJ already triggered it */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			/* peer is ready again: push pending I-frames */
			spin_lock_bh(&pi->send_lock);
			l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}
	}
}
/* Handle a received REJ supervisory frame: the peer rejects everything
 * from ReqSeq onward, so acked frames are dropped and the rest are
 * retransmitted (unless this REJ carries the Final bit closing an
 * exchange that is already accounted for). */
3965 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3967 struct l2cap_pinfo *pi = l2cap_pi(sk);
3968 u8 tx_seq = __get_reqseq(rx_control);
3970 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Frames below ReqSeq are implicitly acknowledged. */
3972 pi->expected_ack_seq = tx_seq;
3973 l2cap_drop_acked_frames(sk);
3975 if (rx_control & L2CAP_CTRL_FINAL) {
/* F=1 answering our poll: retransmit only if no REJ exchange was
 * already active. */
3976 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3977 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3978 else
3979 l2cap_retransmit_frames(sk);
3980 } else {
3981 l2cap_retransmit_frames(sk);
/* If we are still waiting for a Final bit, remember that this REJ
 * exchange is now active. */
3983 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3984 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a received SREJ supervisory frame: the peer selectively
 * requests retransmission of the single I-frame numbered ReqSeq.
 * Behavior depends on the P/F bits of the frame. */
3987 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3989 struct l2cap_pinfo *pi = l2cap_pi(sk);
3990 u8 tx_seq = __get_reqseq(rx_control);
3992 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3994 if (rx_control & L2CAP_CTRL_POLL) {
/* P=1: ack up to ReqSeq, resend the requested frame, then resume
 * normal transmission under send_lock. */
3995 pi->expected_ack_seq = tx_seq;
3996 l2cap_drop_acked_frames(sk);
3997 l2cap_retransmit_one_frame(sk, tx_seq);
3999 spin_lock_bh(&pi->send_lock);
4000 l2cap_ertm_send(sk);
4001 spin_unlock_bh(&pi->send_lock);
/* Remember which sequence this poll asked for so a later Final can
 * be matched against it. */
4003 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4004 pi->srej_save_reqseq = tx_seq;
4005 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4007 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F=1: if this Final matches the saved poll request the exchange is
 * complete; otherwise the frame still needs retransmission. */
4008 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4009 pi->srej_save_reqseq == tx_seq)
4010 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4011 else
4012 l2cap_retransmit_one_frame(sk, tx_seq);
4013 } else {
4014 l2cap_retransmit_one_frame(sk, tx_seq);
4015 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4016 pi->srej_save_reqseq = tx_seq;
4017 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a received RNR (Receiver Not Ready) supervisory frame: the
 * peer is busy, so mark it as such, stop retransmitting, and answer a
 * poll if one is set. */
4022 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4024 struct l2cap_pinfo *pi = l2cap_pi(sk);
4025 u8 tx_seq = __get_reqseq(rx_control);
4027 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
/* RNR still acknowledges frames below ReqSeq. */
4028 pi->expected_ack_seq = tx_seq;
4029 l2cap_drop_acked_frames(sk);
4031 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* No SREJ recovery in progress: stop retransmissions while the peer
 * is busy, and answer a poll with a Final RR/RNR. */
4032 del_timer(&pi->retrans_timer);
4033 if (rx_control & L2CAP_CTRL_POLL)
4034 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4035 return;
/* SREJ recovery in progress: a poll is answered with the SREJ tail,
 * otherwise send a plain RR. */
4038 if (rx_control & L2CAP_CTRL_POLL)
4039 l2cap_send_srejtail(sk);
4040 else
4041 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received ERTM supervisory frame (RR/REJ/SREJ/RNR) to its
 * handler.  Always consumes @skb.  Returns 0. */
4044 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4046 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* Any S-frame carrying F=1 while we wait for a Final answers our poll:
 * stop the monitor timer and re-arm retransmission if frames are still
 * unacked. */
4048 if (L2CAP_CTRL_FINAL & rx_control &&
4049 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4050 del_timer(&l2cap_pi(sk)->monitor_timer);
4051 if (l2cap_pi(sk)->unacked_frames > 0)
4052 __mod_retrans_timer();
4053 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
/* All four supervisory function codes are covered; there is no other
 * value the 2-bit field can take, so no default case is needed. */
4056 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4057 case L2CAP_SUPER_RCV_READY:
4058 l2cap_data_channel_rrframe(sk, rx_control);
4059 break;
4061 case L2CAP_SUPER_REJECT:
4062 l2cap_data_channel_rejframe(sk, rx_control);
4063 break;
4065 case L2CAP_SUPER_SELECT_REJECT:
4066 l2cap_data_channel_srejframe(sk, rx_control);
4067 break;
4069 case L2CAP_SUPER_RCV_NOT_READY:
4070 l2cap_data_channel_rnrframe(sk, rx_control);
4071 break;
/* S-frames carry no payload to queue; free the skb unconditionally. */
4074 kfree_skb(skb);
4075 return 0;
4078 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4080 struct sock *sk;
4081 struct l2cap_pinfo *pi;
4082 u16 control, len;
4083 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
4085 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4086 if (!sk) {
4087 BT_DBG("unknown cid 0x%4.4x", cid);
4088 goto drop;
4091 pi = l2cap_pi(sk);
4093 BT_DBG("sk %p, len %d", sk, skb->len);
4095 if (sk->sk_state != BT_CONNECTED)
4096 goto drop;
4098 switch (pi->mode) {
4099 case L2CAP_MODE_BASIC:
4100 /* If socket recv buffers overflows we drop data here
4101 * which is *bad* because L2CAP has to be reliable.
4102 * But we don't have any other choice. L2CAP doesn't
4103 * provide flow control mechanism. */
4105 if (pi->imtu < skb->len)
4106 goto drop;
4108 if (!sock_queue_rcv_skb(sk, skb))
4109 goto done;
4110 break;
4112 case L2CAP_MODE_ERTM:
4113 control = get_unaligned_le16(skb->data);
4114 skb_pull(skb, 2);
4115 len = skb->len;
4117 if (__is_sar_start(control))
4118 len -= 2;
4120 if (pi->fcs == L2CAP_FCS_CRC16)
4121 len -= 2;
4124 * We can just drop the corrupted I-frame here.
4125 * Receiver will miss it and start proper recovery
4126 * procedures and ask retransmission.
4128 if (len > pi->mps) {
4129 l2cap_send_disconn_req(pi->conn, sk);
4130 goto drop;
4133 if (l2cap_check_fcs(pi, skb))
4134 goto drop;
4136 req_seq = __get_reqseq(control);
4137 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4138 if (req_seq_offset < 0)
4139 req_seq_offset += 64;
4141 next_tx_seq_offset =
4142 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4143 if (next_tx_seq_offset < 0)
4144 next_tx_seq_offset += 64;
4146 /* check for invalid req-seq */
4147 if (req_seq_offset > next_tx_seq_offset) {
4148 l2cap_send_disconn_req(pi->conn, sk);
4149 goto drop;
4152 if (__is_iframe(control)) {
4153 if (len < 4) {
4154 l2cap_send_disconn_req(pi->conn, sk);
4155 goto drop;
4158 l2cap_data_channel_iframe(sk, control, skb);
4159 } else {
4160 if (len != 0) {
4161 l2cap_send_disconn_req(pi->conn, sk);
4162 goto drop;
4165 l2cap_data_channel_sframe(sk, control, skb);
4168 goto done;
4170 case L2CAP_MODE_STREAMING:
4171 control = get_unaligned_le16(skb->data);
4172 skb_pull(skb, 2);
4173 len = skb->len;
4175 if (__is_sar_start(control))
4176 len -= 2;
4178 if (pi->fcs == L2CAP_FCS_CRC16)
4179 len -= 2;
4181 if (len > pi->mps || len < 4 || __is_sframe(control))
4182 goto drop;
4184 if (l2cap_check_fcs(pi, skb))
4185 goto drop;
4187 tx_seq = __get_txseq(control);
4189 if (pi->expected_tx_seq == tx_seq)
4190 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4191 else
4192 pi->expected_tx_seq = (tx_seq + 1) % 64;
4194 l2cap_streaming_reassembly_sdu(sk, skb, control);
4196 goto done;
4198 default:
4199 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4200 break;
4203 drop:
4204 kfree_skb(skb);
4206 done:
4207 if (sk)
4208 bh_unlock_sock(sk);
4210 return 0;
/* Deliver a connectionless (CID 0x0002) frame to the listening/bound
 * socket for @psm on this adapter.  Always consumes @skb and returns 0;
 * the frame is silently dropped if no suitable socket exists, the
 * socket state is wrong, or the payload exceeds the socket's MTU. */
4213 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4215 struct sock *sk;
/* Lookup bh-locks the socket on success; released at done. */
4217 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4218 if (!sk)
4219 goto drop;
4221 BT_DBG("sk %p, len %d", sk, skb->len);
4223 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4224 goto drop;
4226 if (l2cap_pi(sk)->imtu < skb->len)
4227 goto drop;
/* sock_queue_rcv_skb() returns 0 on success, taking ownership. */
4229 if (!sock_queue_rcv_skb(sk, skb))
4230 goto done;
4232 drop:
4233 kfree_skb(skb);
4235 done:
4236 if (sk)
4237 bh_unlock_sock(sk);
4238 return 0;
/* Parse the basic L2CAP header of a fully reassembled frame and route
 * it by CID: signaling, connectionless, or a data channel.  Consumes
 * @skb on all paths (directly or via the called handler). */
4241 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4243 struct l2cap_hdr *lh = (void *) skb->data;
4244 u16 cid, len;
4245 __le16 psm;
/* lh keeps pointing at the header bytes after the pull; the skb data
 * is not freed by skb_pull(). */
4247 skb_pull(skb, L2CAP_HDR_SIZE);
4248 cid = __le16_to_cpu(lh->cid);
4249 len = __le16_to_cpu(lh->len);
/* The header's length field must match the remaining payload exactly. */
4251 if (len != skb->len) {
4252 kfree_skb(skb);
4253 return;
4256 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4258 switch (cid) {
4259 case L2CAP_CID_SIGNALING:
4260 l2cap_sig_channel(conn, skb);
4261 break;
4263 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM before the payload. */
4264 psm = get_unaligned_le16(skb->data);
4265 skb_pull(skb, 2);
4266 l2cap_conless_channel(conn, psm, skb);
4267 break;
4269 default:
4270 l2cap_data_channel(conn, cid, skb);
4271 break;
4275 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: incoming ACL connection request from @bdaddr.  Scan the
 * listening L2CAP sockets and build the link-mode mask (accept and,
 * optionally, master role).  Sockets bound to the exact adapter address
 * take precedence over wildcard (BDADDR_ANY) ones. */
4277 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4279 int exact = 0, lm1 = 0, lm2 = 0;
/* NOTE(review): `register` is archaic and ignored by modern compilers. */
4280 register struct sock *sk;
4281 struct hlist_node *node;
4283 if (type != ACL_LINK)
4284 return 0;
4286 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4288 /* Find listening sockets and check their link_mode */
4289 read_lock(&l2cap_sk_list.lock);
4290 sk_for_each(sk, node, &l2cap_sk_list.head) {
4291 if (sk->sk_state != BT_LISTEN)
4292 continue;
/* lm1 accumulates modes from exact-address matches, lm2 from
 * wildcard-bound sockets. */
4294 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4295 lm1 |= HCI_LM_ACCEPT;
4296 if (l2cap_pi(sk)->role_switch)
4297 lm1 |= HCI_LM_MASTER;
4298 exact++;
4299 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4300 lm2 |= HCI_LM_ACCEPT;
4301 if (l2cap_pi(sk)->role_switch)
4302 lm2 |= HCI_LM_MASTER;
4305 read_unlock(&l2cap_sk_list.lock);
4307 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed with @status.  On
 * success create/attach the L2CAP connection object and mark it ready;
 * on failure tear down any L2CAP state for this link. */
4310 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4312 struct l2cap_conn *conn;
4314 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4316 if (hcon->type != ACL_LINK)
4317 return 0;
4319 if (!status) {
4320 conn = l2cap_conn_add(hcon, status);
4321 if (conn)
4322 l2cap_conn_ready(conn);
4323 } else
/* bt_err() maps the HCI status code to a negative errno. */
4324 l2cap_conn_del(hcon, bt_err(status));
4326 return 0;
/* HCI callback: the controller asks which HCI reason code to use when
 * disconnecting this link.  Returns the reason recorded on the L2CAP
 * connection, or 0x13 (Remote User Terminated Connection) when no
 * L2CAP state exists or the link is not ACL. */
4329 static int l2cap_disconn_ind(struct hci_conn *hcon)
4331 struct l2cap_conn *conn = hcon->l2cap_data;
4333 BT_DBG("hcon %p", hcon);
4335 if (hcon->type != ACL_LINK || !conn)
4336 return 0x13;
4338 return conn->disc_reason;
/* HCI callback: the ACL link was disconnected with HCI @reason.  Tear
 * down the L2CAP connection, translating the reason to an errno. */
4341 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4343 BT_DBG("hcon %p reason %d", hcon, reason);
4345 if (hcon->type != ACL_LINK)
4346 return 0;
4348 l2cap_conn_del(hcon, bt_err(reason));
4350 return 0;
/* React to a change of link encryption for one channel socket.  Only
 * SEQPACKET/STREAM (connection-oriented) sockets care.  Losing
 * encryption on a MEDIUM-security channel starts a 5 s grace timer;
 * on a HIGH-security channel the socket is closed immediately.
 * Gaining encryption cancels any pending grace timer. */
4353 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4355 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4356 return;
4358 if (encrypt == 0x00) {
4359 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4360 l2cap_sock_clear_timer(sk);
4361 l2cap_sock_set_timer(sk, HZ * 5);
4362 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4363 __l2cap_sock_close(sk, ECONNREFUSED);
4364 } else {
4365 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4366 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure on this link
 * finished with @status (0 = success) and encryption state @encrypt.
 * Walk every channel on the connection and advance its state machine:
 * established channels re-check encryption, outgoing channels send the
 * deferred Connect Request, and incoming channels answer with a
 * Connect Response (success or security block). */
4370 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4372 struct l2cap_chan_list *l;
4373 struct l2cap_conn *conn = hcon->l2cap_data;
4374 struct sock *sk;
4376 if (!conn)
4377 return 0;
4379 l = &conn->chan_list;
4381 BT_DBG("conn %p", conn);
4383 read_lock(&l->lock);
4385 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4386 bh_lock_sock(sk);
/* Skip channels whose connect is still pending at the HCI level. */
4388 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4389 bh_unlock_sock(sk);
4390 continue;
/* Already connected/configuring: only the encryption state matters. */
4393 if (!status && (sk->sk_state == BT_CONNECTED ||
4394 sk->sk_state == BT_CONFIG)) {
4395 l2cap_check_encryption(sk, encrypt);
4396 bh_unlock_sock(sk);
4397 continue;
/* Outgoing channel waiting on security: send the Connect Request
 * now, or arm a short timer to tear it down on failure. */
4400 if (sk->sk_state == BT_CONNECT) {
4401 if (!status) {
4402 struct l2cap_conn_req req;
4403 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4404 req.psm = l2cap_pi(sk)->psm;
4406 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4408 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4409 L2CAP_CONN_REQ, sizeof(req), &req);
4410 } else {
4411 l2cap_sock_clear_timer(sk);
4412 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel waiting on security: answer the peer's request. */
4414 } else if (sk->sk_state == BT_CONNECT2) {
4415 struct l2cap_conn_rsp rsp;
4416 __u16 result;
4418 if (!status) {
4419 sk->sk_state = BT_CONFIG;
4420 result = L2CAP_CR_SUCCESS;
4421 } else {
4422 sk->sk_state = BT_DISCONN;
4423 l2cap_sock_set_timer(sk, HZ / 10);
4424 result = L2CAP_CR_SEC_BLOCK;
4427 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4428 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4429 rsp.result = cpu_to_le16(result);
4430 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4431 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4432 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4435 bh_unlock_sock(sk);
4438 read_unlock(&l->lock);
4440 return 0;
/* HCI callback: one ACL data packet arrived.  L2CAP frames may span
 * several ACL packets, so this reassembles: an ACL_START packet either
 * carries a complete frame (dispatched immediately) or opens a
 * reassembly buffer sized from the L2CAP header; continuation packets
 * are appended until rx_len reaches zero.  Malformed sequences mark the
 * connection unreliable and drop the fragment.  Always returns 0. */
4443 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4445 struct l2cap_conn *conn = hcon->l2cap_data;
4447 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4448 goto drop;
4450 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4452 if (flags & ACL_START) {
4453 struct l2cap_hdr *hdr;
4454 int len;
/* A start packet while reassembly is pending means we lost the end
 * of the previous frame: discard the partial buffer. */
4456 if (conn->rx_len) {
4457 BT_ERR("Unexpected start frame (len %d)", skb->len);
4458 kfree_skb(conn->rx_skb);
4459 conn->rx_skb = NULL;
4460 conn->rx_len = 0;
4461 l2cap_conn_unreliable(conn, ECOMM);
/* Need at least the 2-byte length field to size the frame. */
4464 if (skb->len < 2) {
4465 BT_ERR("Frame is too short (len %d)", skb->len);
4466 l2cap_conn_unreliable(conn, ECOMM);
4467 goto drop;
4470 hdr = (struct l2cap_hdr *) skb->data;
4471 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4473 if (len == skb->len) {
4474 /* Complete frame received */
/* l2cap_recv_frame() takes ownership of skb. */
4475 l2cap_recv_frame(conn, skb);
4476 return 0;
4479 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4481 if (skb->len > len) {
4482 BT_ERR("Frame is too long (len %d, expected len %d)",
4483 skb->len, len);
4484 l2cap_conn_unreliable(conn, ECOMM);
4485 goto drop;
4488 /* Allocate skb for the complete frame (with header) */
4489 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4490 if (!conn->rx_skb)
4491 goto drop;
4493 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4494 skb->len);
/* rx_len tracks how many bytes are still expected. */
4495 conn->rx_len = len - skb->len;
4496 } else {
4497 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4499 if (!conn->rx_len) {
4500 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4501 l2cap_conn_unreliable(conn, ECOMM);
4502 goto drop;
4505 if (skb->len > conn->rx_len) {
4506 BT_ERR("Fragment is too long (len %d, expected %d)",
4507 skb->len, conn->rx_len);
4508 kfree_skb(conn->rx_skb);
4509 conn->rx_skb = NULL;
4510 conn->rx_len = 0;
4511 l2cap_conn_unreliable(conn, ECOMM);
4512 goto drop;
4515 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4516 skb->len);
4517 conn->rx_len -= skb->len;
4519 if (!conn->rx_len) {
4520 /* Complete frame received */
4521 l2cap_recv_frame(conn, conn->rx_skb);
4522 conn->rx_skb = NULL;
/* The original ACL fragment is always freed; its payload was copied
 * into rx_skb above when needed. */
4526 drop:
4527 kfree_skb(skb);
4528 return 0;
/* debugfs seq_file callback: print one line per L2CAP socket with
 * addresses, state, PSM, channel IDs, MTUs and security level. */
4531 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4533 struct sock *sk;
4534 struct hlist_node *node;
4536 read_lock_bh(&l2cap_sk_list.lock);
4538 sk_for_each(sk, node, &l2cap_sk_list.head) {
4539 struct l2cap_pinfo *pi = l2cap_pi(sk);
4541 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4542 batostr(&bt_sk(sk)->src),
4543 batostr(&bt_sk(sk)->dst),
4544 sk->sk_state, __le16_to_cpu(pi->psm),
4545 pi->scid, pi->dcid,
4546 pi->imtu, pi->omtu, pi->sec_level);
4549 read_unlock_bh(&l2cap_sk_list.lock);
4551 return 0;
/* debugfs open: bind the seq_file show routine to the file. */
4554 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4556 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry;
 * standard single-record seq_file plumbing. */
4559 static const struct file_operations l2cap_debugfs_fops = {
4560 .open = l2cap_debugfs_open,
4561 .read = seq_read,
4562 .llseek = seq_lseek,
4563 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4566 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4568 static const struct proto_ops l2cap_sock_ops = {
4569 .family = PF_BLUETOOTH,
4570 .owner = THIS_MODULE,
4571 .release = l2cap_sock_release,
4572 .bind = l2cap_sock_bind,
4573 .connect = l2cap_sock_connect,
4574 .listen = l2cap_sock_listen,
4575 .accept = l2cap_sock_accept,
4576 .getname = l2cap_sock_getname,
4577 .sendmsg = l2cap_sock_sendmsg,
4578 .recvmsg = l2cap_sock_recvmsg,
4579 .poll = bt_sock_poll,
4580 .ioctl = bt_sock_ioctl,
4581 .mmap = sock_no_mmap,
4582 .socketpair = sock_no_socketpair,
4583 .shutdown = l2cap_sock_shutdown,
4584 .setsockopt = l2cap_sock_setsockopt,
4585 .getsockopt = l2cap_sock_getsockopt
/* Family registration hook: creates new L2CAP sockets. */
4588 static const struct net_proto_family l2cap_sock_family_ops = {
4589 .family = PF_BLUETOOTH,
4590 .owner = THIS_MODULE,
4591 .create = l2cap_sock_create,
/* HCI protocol descriptor: routes ACL events and data to L2CAP. */
4594 static struct hci_proto l2cap_hci_proto = {
4595 .name = "L2CAP",
4596 .id = HCI_PROTO_L2CAP,
4597 .connect_ind = l2cap_connect_ind,
4598 .connect_cfm = l2cap_connect_cfm,
4599 .disconn_ind = l2cap_disconn_ind,
4600 .disconn_cfm = l2cap_disconn_cfm,
4601 .security_cfm = l2cap_security_cfm,
4602 .recv_acldata = l2cap_recv_acldata
4605 static int __init l2cap_init(void)
4607 int err;
4609 err = proto_register(&l2cap_proto, 0);
4610 if (err < 0)
4611 return err;
4613 _busy_wq = create_singlethread_workqueue("l2cap");
4614 if (!_busy_wq)
4615 goto error;
4617 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4618 if (err < 0) {
4619 BT_ERR("L2CAP socket registration failed");
4620 goto error;
4623 err = hci_register_proto(&l2cap_hci_proto);
4624 if (err < 0) {
4625 BT_ERR("L2CAP protocol registration failed");
4626 bt_sock_unregister(BTPROTO_L2CAP);
4627 goto error;
4630 if (bt_debugfs) {
4631 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4632 bt_debugfs, NULL, &l2cap_debugfs_fops);
4633 if (!l2cap_debugfs)
4634 BT_ERR("Failed to create L2CAP debug file");
4637 BT_INFO("L2CAP ver %s", VERSION);
4638 BT_INFO("L2CAP socket layer initialized");
4640 return 0;
4642 error:
4643 proto_unregister(&l2cap_proto);
4644 return err;
/* Module exit: undo l2cap_init() in reverse order — remove the debugfs
 * file, drain and destroy the workqueue, unregister the socket family,
 * the HCI protocol handler and finally the proto itself. */
4647 static void __exit l2cap_exit(void)
4649 debugfs_remove(l2cap_debugfs);
/* Flush before destroy so queued busy-work completes first. */
4651 flush_workqueue(_busy_wq);
4652 destroy_workqueue(_busy_wq);
4654 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4655 BT_ERR("L2CAP socket unregistration failed");
4657 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4658 BT_ERR("L2CAP protocol unregistration failed");
4660 proto_unregister(&l2cap_proto);
/* Intentionally empty, exported only so that depending modules pull in
 * this module at load time. */
4663 void l2cap_load(void)
4665 /* Dummy function to trigger automatic L2CAP module loading by
4666 * other modules that use L2CAP sockets but don't use any other
4667 * symbols from it. */
4669 EXPORT_SYMBOL(l2cap_load);
4671 module_init(l2cap_init);
4672 module_exit(l2cap_exit);
/* Runtime-tunable module parameters (0644: root-writable via sysfs). */
4674 module_param(enable_ertm, bool, 0644);
4675 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4677 module_param(max_transmit, uint, 0644);
4678 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4680 module_param(tx_window, uint, 0644);
4681 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4683 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4684 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4685 MODULE_VERSION(VERSION);
4686 MODULE_LICENSE("GPL");
/* Allows request_module("bt-proto-0") to load L2CAP on demand. */
4687 MODULE_ALIAS("bt-proto-0");