GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / net / bluetooth / l2cap.c
blob5b3c13a58a36a5de761f01f97b89e59b81a9abe2
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
60 static int disable_ertm = 0;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
67 static struct workqueue_struct *_busy_wq;
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
/* Socket timer expiry (timer/BH context).  Choose an errno from how far
 * the connection got, then close and reap the socket. */
static void l2cap_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *) arg;
        int reason;

        BT_DBG("sock %p state %d", sk, sk->sk_state);

        bh_lock_sock(sk);

        if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
                reason = ECONNREFUSED;
        else if (sk->sk_state == BT_CONNECT &&
                        l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
                reason = ECONNREFUSED;
        else
                reason = ETIMEDOUT;

        __l2cap_sock_close(sk, reason);

        bh_unlock_sock(sk);

        /* Kill must run on the unlocked socket; drop the timer's ref. */
        l2cap_sock_kill(sk);
        sock_put(sk);
}
111 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
117 static void l2cap_sock_clear_timer(struct sock *sk)
119 BT_DBG("sock %p state %d", sk, sk->sk_state);
120 sk_stop_timer(sk, &sk->sk_timer);
123 /* ---- L2CAP channels ---- */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
129 break;
131 return s;
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 struct sock *s;
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
139 break;
141 return s;
144 /* Find channel with given SCID.
145 * Returns locked socket */
146 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
148 struct sock *s;
149 read_lock(&l->lock);
150 s = __l2cap_get_chan_by_scid(l, cid);
151 if (s)
152 bh_lock_sock(s);
153 read_unlock(&l->lock);
154 return s;
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 struct sock *s;
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
162 break;
164 return s;
167 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
169 struct sock *s;
170 read_lock(&l->lock);
171 s = __l2cap_get_chan_by_ident(l, ident);
172 if (s)
173 bh_lock_sock(s);
174 read_unlock(&l->lock);
175 return s;
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
180 u16 cid = L2CAP_CID_DYN_START;
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
184 return cid;
187 return 0;
190 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
192 sock_hold(sk);
194 if (l->head)
195 l2cap_pi(l->head)->prev_c = sk;
197 l2cap_pi(sk)->next_c = l->head;
198 l2cap_pi(sk)->prev_c = NULL;
199 l->head = sk;
202 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
204 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
206 write_lock_bh(&l->lock);
207 if (sk == l->head)
208 l->head = next;
210 if (next)
211 l2cap_pi(next)->prev_c = prev;
212 if (prev)
213 l2cap_pi(prev)->next_c = next;
214 write_unlock_bh(&l->lock);
216 __sock_put(sk);
219 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
221 struct l2cap_chan_list *l = &conn->chan_list;
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
224 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
226 conn->disc_reason = 0x13;
228 l2cap_pi(sk)->conn = conn;
230 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
233 } else if (sk->sk_type == SOCK_DGRAM) {
234 /* Connectionless socket */
235 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 } else {
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
245 __l2cap_chan_link(l, sk);
247 if (parent)
248 bt_accept_enqueue(parent, sk);
251 /* Delete channel.
252 * Must be called on the locked socket. */
253 static void l2cap_chan_del(struct sock *sk, int err)
255 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
256 struct sock *parent = bt_sk(sk)->parent;
258 l2cap_sock_clear_timer(sk);
260 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
262 if (conn) {
263 /* Unlink from channel list */
264 l2cap_chan_unlink(&conn->chan_list, sk);
265 l2cap_pi(sk)->conn = NULL;
266 hci_conn_put(conn->hcon);
269 sk->sk_state = BT_CLOSED;
270 sock_set_flag(sk, SOCK_ZAPPED);
272 if (err)
273 sk->sk_err = err;
275 if (parent) {
276 bt_accept_unlink(sk);
277 parent->sk_data_ready(parent, 0);
278 } else
279 sk->sk_state_change(sk);
281 skb_queue_purge(TX_QUEUE(sk));
283 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 struct srej_list *l, *tmp;
286 del_timer(&l2cap_pi(sk)->retrans_timer);
287 del_timer(&l2cap_pi(sk)->monitor_timer);
288 del_timer(&l2cap_pi(sk)->ack_timer);
290 skb_queue_purge(SREJ_QUEUE(sk));
291 skb_queue_purge(BUSY_QUEUE(sk));
293 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
294 list_del(&l->list);
295 kfree(l);
300 /* Service level security */
301 static inline int l2cap_check_security(struct sock *sk)
303 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
304 __u8 auth_type;
306 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
308 auth_type = HCI_AT_NO_BONDING_MITM;
309 else
310 auth_type = HCI_AT_NO_BONDING;
312 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
313 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
314 } else {
315 switch (l2cap_pi(sk)->sec_level) {
316 case BT_SECURITY_HIGH:
317 auth_type = HCI_AT_GENERAL_BONDING_MITM;
318 break;
319 case BT_SECURITY_MEDIUM:
320 auth_type = HCI_AT_GENERAL_BONDING;
321 break;
322 default:
323 auth_type = HCI_AT_NO_BONDING;
324 break;
328 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
329 auth_type);
332 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
334 u8 id;
336 /* Get next available identificator.
337 * 1 - 128 are used by kernel.
338 * 129 - 199 are reserved.
339 * 200 - 254 are used by utilities like l2ping, etc.
342 spin_lock_bh(&conn->lock);
344 if (++conn->tx_ident > 128)
345 conn->tx_ident = 1;
347 id = conn->tx_ident;
349 spin_unlock_bh(&conn->lock);
351 return id;
354 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
358 BT_DBG("code 0x%2.2x", code);
360 if (!skb)
361 return;
363 hci_send_acl(conn->hcon, skb, 0);
366 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
368 struct sk_buff *skb;
369 struct l2cap_hdr *lh;
370 struct l2cap_conn *conn = pi->conn;
371 struct sock *sk = (struct sock *)pi;
372 int count, hlen = L2CAP_HDR_SIZE + 2;
374 if (sk->sk_state != BT_CONNECTED)
375 return;
377 if (pi->fcs == L2CAP_FCS_CRC16)
378 hlen += 2;
380 BT_DBG("pi %p, control 0x%2.2x", pi, control);
382 count = min_t(unsigned int, conn->mtu, hlen);
383 control |= L2CAP_CTRL_FRAME_TYPE;
385 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
386 control |= L2CAP_CTRL_FINAL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
390 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
391 control |= L2CAP_CTRL_POLL;
392 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
395 skb = bt_skb_alloc(count, GFP_ATOMIC);
396 if (!skb)
397 return;
399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
400 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
401 lh->cid = cpu_to_le16(pi->dcid);
402 put_unaligned_le16(control, skb_put(skb, 2));
404 if (pi->fcs == L2CAP_FCS_CRC16) {
405 u16 fcs = crc16(0, (u8 *)lh, count - 2);
406 put_unaligned_le16(fcs, skb_put(skb, 2));
409 hci_send_acl(pi->conn->hcon, skb, 0);
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
414 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
415 control |= L2CAP_SUPER_RCV_NOT_READY;
416 pi->conn_state |= L2CAP_CONN_RNR_SENT;
417 } else
418 control |= L2CAP_SUPER_RCV_READY;
420 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
422 l2cap_send_sframe(pi, control);
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
430 static void l2cap_do_start(struct sock *sk)
432 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
434 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
435 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
436 return;
438 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
439 struct l2cap_conn_req req;
440 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
441 req.psm = l2cap_pi(sk)->psm;
443 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
446 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
447 L2CAP_CONN_REQ, sizeof(req), &req);
449 } else {
450 struct l2cap_info_req req;
451 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
453 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
454 conn->info_ident = l2cap_get_ident(conn);
456 mod_timer(&conn->info_timer, jiffies +
457 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
459 l2cap_send_cmd(conn, conn->info_ident,
460 L2CAP_INFO_REQ, sizeof(req), &req);
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
480 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
482 struct l2cap_disconn_req req;
484 if (!conn)
485 return;
487 skb_queue_purge(TX_QUEUE(sk));
489 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 del_timer(&l2cap_pi(sk)->retrans_timer);
491 del_timer(&l2cap_pi(sk)->monitor_timer);
492 del_timer(&l2cap_pi(sk)->ack_timer);
495 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
497 l2cap_send_cmd(conn, l2cap_get_ident(conn),
498 L2CAP_DISCONN_REQ, sizeof(req), &req);
500 sk->sk_state = BT_DISCONN;
501 sk->sk_err = err;
504 /* ---- L2CAP connections ---- */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
509 struct sock *sk;
511 BT_DBG("conn %p", conn);
513 INIT_LIST_HEAD(&del.list);
515 read_lock(&l->lock);
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
518 bh_lock_sock(sk);
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
522 bh_unlock_sock(sk);
523 continue;
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
558 char buf[128];
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
562 if (l2cap_check_security(sk)) {
563 if (bt_sk(sk)->defer_setup) {
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
569 } else {
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
574 } else {
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
594 bh_unlock_sock(sk);
597 read_unlock(&l->lock);
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
608 static void l2cap_conn_ready(struct l2cap_conn *conn)
610 struct l2cap_chan_list *l = &conn->chan_list;
611 struct sock *sk;
613 BT_DBG("conn %p", conn);
615 read_lock(&l->lock);
617 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
618 bh_lock_sock(sk);
620 if (sk->sk_type != SOCK_SEQPACKET &&
621 sk->sk_type != SOCK_STREAM) {
622 l2cap_sock_clear_timer(sk);
623 sk->sk_state = BT_CONNECTED;
624 sk->sk_state_change(sk);
625 } else if (sk->sk_state == BT_CONNECT)
626 l2cap_do_start(sk);
628 bh_unlock_sock(sk);
631 read_unlock(&l->lock);
634 /* Notify sockets that we cannot guaranty reliability anymore */
635 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
637 struct l2cap_chan_list *l = &conn->chan_list;
638 struct sock *sk;
640 BT_DBG("conn %p", conn);
642 read_lock(&l->lock);
644 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
645 if (l2cap_pi(sk)->force_reliable)
646 sk->sk_err = err;
649 read_unlock(&l->lock);
652 static void l2cap_info_timeout(unsigned long arg)
654 struct l2cap_conn *conn = (void *) arg;
656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
657 conn->info_ident = 0;
659 l2cap_conn_start(conn);
662 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
664 struct l2cap_conn *conn = hcon->l2cap_data;
666 if (conn || status)
667 return conn;
669 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
670 if (!conn)
671 return NULL;
673 hcon->l2cap_data = conn;
674 conn->hcon = hcon;
676 BT_DBG("hcon %p conn %p", hcon, conn);
678 conn->mtu = hcon->hdev->acl_mtu;
679 conn->src = &hcon->hdev->bdaddr;
680 conn->dst = &hcon->dst;
682 conn->feat_mask = 0;
684 spin_lock_init(&conn->lock);
685 rwlock_init(&conn->chan_list.lock);
687 setup_timer(&conn->info_timer, l2cap_info_timeout,
688 (unsigned long) conn);
690 conn->disc_reason = 0x13;
692 return conn;
695 static void l2cap_conn_del(struct hci_conn *hcon, int err)
697 struct l2cap_conn *conn = hcon->l2cap_data;
698 struct sock *sk;
700 if (!conn)
701 return;
703 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
705 kfree_skb(conn->rx_skb);
707 /* Kill channels */
708 while ((sk = conn->chan_list.head)) {
709 bh_lock_sock(sk);
710 l2cap_chan_del(sk, err);
711 bh_unlock_sock(sk);
712 l2cap_sock_kill(sk);
715 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
716 del_timer_sync(&conn->info_timer);
718 hcon->l2cap_data = NULL;
719 kfree(conn);
722 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
724 struct l2cap_chan_list *l = &conn->chan_list;
725 write_lock_bh(&l->lock);
726 __l2cap_chan_add(conn, sk, parent);
727 write_unlock_bh(&l->lock);
730 /* ---- Socket interface ---- */
731 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
733 struct sock *sk;
734 struct hlist_node *node;
735 sk_for_each(sk, node, &l2cap_sk_list.head)
736 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
737 goto found;
738 sk = NULL;
739 found:
740 return sk;
743 /* Find socket with psm and source bdaddr.
744 * Returns closest match.
746 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
748 struct sock *sk = NULL, *sk1 = NULL;
749 struct hlist_node *node;
751 sk_for_each(sk, node, &l2cap_sk_list.head) {
752 if (state && sk->sk_state != state)
753 continue;
755 if (l2cap_pi(sk)->psm == psm) {
756 /* Exact match. */
757 if (!bacmp(&bt_sk(sk)->src, src))
758 break;
760 /* Closest match */
761 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
762 sk1 = sk;
765 return node ? sk : sk1;
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
770 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
772 struct sock *s;
773 read_lock(&l2cap_sk_list.lock);
774 s = __l2cap_get_sock_by_psm(state, psm, src);
775 if (s)
776 bh_lock_sock(s);
777 read_unlock(&l2cap_sk_list.lock);
778 return s;
781 static void l2cap_sock_destruct(struct sock *sk)
783 BT_DBG("sk %p", sk);
785 skb_queue_purge(&sk->sk_receive_queue);
786 skb_queue_purge(&sk->sk_write_queue);
789 static void l2cap_sock_cleanup_listen(struct sock *parent)
791 struct sock *sk;
793 BT_DBG("parent %p", parent);
795 /* Close not yet accepted channels */
796 while ((sk = bt_accept_dequeue(parent, NULL)))
797 l2cap_sock_close(sk);
799 parent->sk_state = BT_CLOSED;
800 sock_set_flag(parent, SOCK_ZAPPED);
803 /* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
806 static void l2cap_sock_kill(struct sock *sk)
808 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
809 return;
811 BT_DBG("sk %p state %d", sk, sk->sk_state);
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list, sk);
815 sock_set_flag(sk, SOCK_DEAD);
816 sock_put(sk);
819 static void __l2cap_sock_close(struct sock *sk, int reason)
821 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
823 switch (sk->sk_state) {
824 case BT_LISTEN:
825 l2cap_sock_cleanup_listen(sk);
826 break;
828 case BT_CONNECTED:
829 case BT_CONFIG:
830 if (sk->sk_type == SOCK_SEQPACKET ||
831 sk->sk_type == SOCK_STREAM) {
832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
835 l2cap_send_disconn_req(conn, sk, reason);
836 } else
837 l2cap_chan_del(sk, reason);
838 break;
840 case BT_CONNECT2:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844 struct l2cap_conn_rsp rsp;
845 __u16 result;
847 if (bt_sk(sk)->defer_setup)
848 result = L2CAP_CR_SEC_BLOCK;
849 else
850 result = L2CAP_CR_BAD_PSM;
852 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
853 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
854 rsp.result = cpu_to_le16(result);
855 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
856 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
857 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
858 } else
859 l2cap_chan_del(sk, reason);
860 break;
862 case BT_CONNECT:
863 case BT_DISCONN:
864 l2cap_chan_del(sk, reason);
865 break;
867 default:
868 sock_set_flag(sk, SOCK_ZAPPED);
869 break;
873 /* Must be called on unlocked socket. */
874 static void l2cap_sock_close(struct sock *sk)
876 l2cap_sock_clear_timer(sk);
877 lock_sock(sk);
878 __l2cap_sock_close(sk, ECONNRESET);
879 release_sock(sk);
880 l2cap_sock_kill(sk);
883 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
885 struct l2cap_pinfo *pi = l2cap_pi(sk);
887 BT_DBG("sk %p", sk);
889 if (parent) {
890 sk->sk_type = parent->sk_type;
891 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
893 pi->imtu = l2cap_pi(parent)->imtu;
894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
896 pi->mode = l2cap_pi(parent)->mode;
897 pi->fcs = l2cap_pi(parent)->fcs;
898 pi->max_tx = l2cap_pi(parent)->max_tx;
899 pi->tx_win = l2cap_pi(parent)->tx_win;
900 pi->sec_level = l2cap_pi(parent)->sec_level;
901 pi->role_switch = l2cap_pi(parent)->role_switch;
902 pi->force_reliable = l2cap_pi(parent)->force_reliable;
903 } else {
904 pi->imtu = L2CAP_DEFAULT_MTU;
905 pi->omtu = 0;
906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 pi->mode = L2CAP_MODE_ERTM;
908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 } else {
910 pi->mode = L2CAP_MODE_BASIC;
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
913 pi->fcs = L2CAP_FCS_CRC16;
914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
915 pi->sec_level = BT_SECURITY_LOW;
916 pi->role_switch = 0;
917 pi->force_reliable = 0;
920 /* Default config options */
921 pi->conf_len = 0;
922 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
923 skb_queue_head_init(TX_QUEUE(sk));
924 skb_queue_head_init(SREJ_QUEUE(sk));
925 skb_queue_head_init(BUSY_QUEUE(sk));
926 INIT_LIST_HEAD(SREJ_LIST(sk));
929 static struct proto l2cap_proto = {
930 .name = "L2CAP",
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct l2cap_pinfo)
935 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
937 struct sock *sk;
939 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
940 if (!sk)
941 return NULL;
943 sock_init_data(sock, sk);
944 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
946 sk->sk_destruct = l2cap_sock_destruct;
947 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
949 sock_reset_flag(sk, SOCK_ZAPPED);
951 sk->sk_protocol = proto;
952 sk->sk_state = BT_OPEN;
954 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
956 bt_sock_link(&l2cap_sk_list, sk);
957 return sk;
960 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
961 int kern)
963 struct sock *sk;
965 BT_DBG("sock %p", sock);
967 sock->state = SS_UNCONNECTED;
969 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
970 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
971 return -ESOCKTNOSUPPORT;
973 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
974 return -EPERM;
976 sock->ops = &l2cap_sock_ops;
978 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
979 if (!sk)
980 return -ENOMEM;
982 l2cap_sock_init(sk, NULL);
983 return 0;
986 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
988 struct sock *sk = sock->sk;
989 struct sockaddr_l2 la;
990 int len, err = 0;
992 BT_DBG("sk %p", sk);
994 if (!addr || addr->sa_family != AF_BLUETOOTH)
995 return -EINVAL;
997 memset(&la, 0, sizeof(la));
998 len = min_t(unsigned int, sizeof(la), alen);
999 memcpy(&la, addr, len);
1001 if (la.l2_cid)
1002 return -EINVAL;
1004 lock_sock(sk);
1006 if (sk->sk_state != BT_OPEN) {
1007 err = -EBADFD;
1008 goto done;
1011 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
1012 !capable(CAP_NET_BIND_SERVICE)) {
1013 err = -EACCES;
1014 goto done;
1017 write_lock_bh(&l2cap_sk_list.lock);
1019 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1020 err = -EADDRINUSE;
1021 } else {
1022 /* Save source address */
1023 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1024 l2cap_pi(sk)->psm = la.l2_psm;
1025 l2cap_pi(sk)->sport = la.l2_psm;
1026 sk->sk_state = BT_BOUND;
1028 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1029 __le16_to_cpu(la.l2_psm) == 0x0003)
1030 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1033 write_unlock_bh(&l2cap_sk_list.lock);
1035 done:
1036 release_sock(sk);
1037 return err;
1040 static int l2cap_do_connect(struct sock *sk)
1042 bdaddr_t *src = &bt_sk(sk)->src;
1043 bdaddr_t *dst = &bt_sk(sk)->dst;
1044 struct l2cap_conn *conn;
1045 struct hci_conn *hcon;
1046 struct hci_dev *hdev;
1047 __u8 auth_type;
1048 int err;
1050 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1051 l2cap_pi(sk)->psm);
1053 hdev = hci_get_route(dst, src);
1054 if (!hdev)
1055 return -EHOSTUNREACH;
1057 hci_dev_lock_bh(hdev);
1059 err = -ENOMEM;
1061 if (sk->sk_type == SOCK_RAW) {
1062 switch (l2cap_pi(sk)->sec_level) {
1063 case BT_SECURITY_HIGH:
1064 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1065 break;
1066 case BT_SECURITY_MEDIUM:
1067 auth_type = HCI_AT_DEDICATED_BONDING;
1068 break;
1069 default:
1070 auth_type = HCI_AT_NO_BONDING;
1071 break;
1073 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1074 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1075 auth_type = HCI_AT_NO_BONDING_MITM;
1076 else
1077 auth_type = HCI_AT_NO_BONDING;
1079 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1080 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1081 } else {
1082 switch (l2cap_pi(sk)->sec_level) {
1083 case BT_SECURITY_HIGH:
1084 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1085 break;
1086 case BT_SECURITY_MEDIUM:
1087 auth_type = HCI_AT_GENERAL_BONDING;
1088 break;
1089 default:
1090 auth_type = HCI_AT_NO_BONDING;
1091 break;
1095 hcon = hci_connect(hdev, ACL_LINK, dst,
1096 l2cap_pi(sk)->sec_level, auth_type);
1097 if (!hcon)
1098 goto done;
1100 conn = l2cap_conn_add(hcon, 0);
1101 if (!conn) {
1102 hci_conn_put(hcon);
1103 goto done;
1106 err = 0;
1108 /* Update source addr of the socket */
1109 bacpy(src, conn->src);
1111 l2cap_chan_add(conn, sk, NULL);
1113 sk->sk_state = BT_CONNECT;
1114 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1116 if (hcon->state == BT_CONNECTED) {
1117 if (sk->sk_type != SOCK_SEQPACKET &&
1118 sk->sk_type != SOCK_STREAM) {
1119 l2cap_sock_clear_timer(sk);
1120 sk->sk_state = BT_CONNECTED;
1121 } else
1122 l2cap_do_start(sk);
1125 done:
1126 hci_dev_unlock_bh(hdev);
1127 hci_dev_put(hdev);
1128 return err;
1131 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1133 struct sock *sk = sock->sk;
1134 struct sockaddr_l2 la;
1135 int len, err = 0;
1137 BT_DBG("sk %p", sk);
1139 if (!addr || alen < sizeof(addr->sa_family) ||
1140 addr->sa_family != AF_BLUETOOTH)
1141 return -EINVAL;
1143 memset(&la, 0, sizeof(la));
1144 len = min_t(unsigned int, sizeof(la), alen);
1145 memcpy(&la, addr, len);
1147 if (la.l2_cid)
1148 return -EINVAL;
1150 lock_sock(sk);
1152 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1153 && !la.l2_psm) {
1154 err = -EINVAL;
1155 goto done;
1158 switch (l2cap_pi(sk)->mode) {
1159 case L2CAP_MODE_BASIC:
1160 break;
1161 case L2CAP_MODE_ERTM:
1162 case L2CAP_MODE_STREAMING:
1163 if (!disable_ertm)
1164 break;
1165 /* fall through */
1166 default:
1167 err = -ENOTSUPP;
1168 goto done;
1171 switch (sk->sk_state) {
1172 case BT_CONNECT:
1173 case BT_CONNECT2:
1174 case BT_CONFIG:
1175 /* Already connecting */
1176 goto wait;
1178 case BT_CONNECTED:
1179 /* Already connected */
1180 err = -EISCONN;
1181 goto done;
1183 case BT_OPEN:
1184 case BT_BOUND:
1185 /* Can connect */
1186 break;
1188 default:
1189 err = -EBADFD;
1190 goto done;
1193 /* Set destination address and psm */
1194 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1195 l2cap_pi(sk)->psm = la.l2_psm;
1197 err = l2cap_do_connect(sk);
1198 if (err)
1199 goto done;
1201 wait:
1202 err = bt_sock_wait_state(sk, BT_CONNECTED,
1203 sock_sndtimeo(sk, flags & O_NONBLOCK));
1204 done:
1205 release_sock(sk);
1206 return err;
1209 static int l2cap_sock_listen(struct socket *sock, int backlog)
1211 struct sock *sk = sock->sk;
1212 int err = 0;
1214 BT_DBG("sk %p backlog %d", sk, backlog);
1216 lock_sock(sk);
1218 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1219 || sk->sk_state != BT_BOUND) {
1220 err = -EBADFD;
1221 goto done;
1224 switch (l2cap_pi(sk)->mode) {
1225 case L2CAP_MODE_BASIC:
1226 break;
1227 case L2CAP_MODE_ERTM:
1228 case L2CAP_MODE_STREAMING:
1229 if (!disable_ertm)
1230 break;
1231 /* fall through */
1232 default:
1233 err = -ENOTSUPP;
1234 goto done;
1237 if (!l2cap_pi(sk)->psm) {
1238 bdaddr_t *src = &bt_sk(sk)->src;
1239 u16 psm;
1241 err = -EINVAL;
1243 write_lock_bh(&l2cap_sk_list.lock);
1245 for (psm = 0x1001; psm < 0x1100; psm += 2)
1246 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1247 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1248 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1249 err = 0;
1250 break;
1253 write_unlock_bh(&l2cap_sk_list.lock);
1255 if (err < 0)
1256 goto done;
1259 sk->sk_max_ack_backlog = backlog;
1260 sk->sk_ack_backlog = 0;
1261 sk->sk_state = BT_LISTEN;
1263 done:
1264 release_sock(sk);
1265 return err;
1268 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1270 DECLARE_WAITQUEUE(wait, current);
1271 struct sock *sk = sock->sk, *nsk;
1272 long timeo;
1273 int err = 0;
1275 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1277 if (sk->sk_state != BT_LISTEN) {
1278 err = -EBADFD;
1279 goto done;
1282 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1284 BT_DBG("sk %p timeo %ld", sk, timeo);
1286 /* Wait for an incoming connection. (wake-one). */
1287 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1288 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1289 set_current_state(TASK_INTERRUPTIBLE);
1290 if (!timeo) {
1291 err = -EAGAIN;
1292 break;
1295 release_sock(sk);
1296 timeo = schedule_timeout(timeo);
1297 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1299 if (sk->sk_state != BT_LISTEN) {
1300 err = -EBADFD;
1301 break;
1304 if (signal_pending(current)) {
1305 err = sock_intr_errno(timeo);
1306 break;
1309 set_current_state(TASK_RUNNING);
1310 remove_wait_queue(sk_sleep(sk), &wait);
1312 if (err)
1313 goto done;
1315 newsock->state = SS_CONNECTED;
1317 BT_DBG("new socket %p", nsk);
1319 done:
1320 release_sock(sk);
1321 return err;
1324 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1327 struct sock *sk = sock->sk;
1329 BT_DBG("sock %p, sk %p", sock, sk);
1331 addr->sa_family = AF_BLUETOOTH;
1332 *len = sizeof(struct sockaddr_l2);
1334 if (peer) {
1335 la->l2_psm = l2cap_pi(sk)->psm;
1336 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1337 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1338 } else {
1339 la->l2_psm = l2cap_pi(sk)->sport;
1340 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1341 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1344 return 0;
1347 static int __l2cap_wait_ack(struct sock *sk)
1349 DECLARE_WAITQUEUE(wait, current);
1350 int err = 0;
1351 int timeo = HZ/5;
1353 add_wait_queue(sk_sleep(sk), &wait);
1354 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1355 set_current_state(TASK_INTERRUPTIBLE);
1357 if (!timeo)
1358 timeo = HZ/5;
1360 if (signal_pending(current)) {
1361 err = sock_intr_errno(timeo);
1362 break;
1365 release_sock(sk);
1366 timeo = schedule_timeout(timeo);
1367 lock_sock(sk);
1369 err = sock_error(sk);
1370 if (err)
1371 break;
1373 set_current_state(TASK_RUNNING);
1374 remove_wait_queue(sk_sleep(sk), &wait);
1375 return err;
1378 static void l2cap_monitor_timeout(unsigned long arg)
1380 struct sock *sk = (void *) arg;
1382 BT_DBG("sk %p", sk);
1384 bh_lock_sock(sk);
1385 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1386 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1387 bh_unlock_sock(sk);
1388 return;
1391 l2cap_pi(sk)->retry_count++;
1392 __mod_monitor_timer();
1394 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1395 bh_unlock_sock(sk);
1398 static void l2cap_retrans_timeout(unsigned long arg)
1400 struct sock *sk = (void *) arg;
1402 BT_DBG("sk %p", sk);
1404 bh_lock_sock(sk);
1405 l2cap_pi(sk)->retry_count = 1;
1406 __mod_monitor_timer();
1408 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1410 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1411 bh_unlock_sock(sk);
1414 static void l2cap_drop_acked_frames(struct sock *sk)
1416 struct sk_buff *skb;
1418 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1419 l2cap_pi(sk)->unacked_frames) {
1420 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1421 break;
1423 skb = skb_dequeue(TX_QUEUE(sk));
1424 kfree_skb(skb);
1426 l2cap_pi(sk)->unacked_frames--;
1429 if (!l2cap_pi(sk)->unacked_frames)
1430 del_timer(&l2cap_pi(sk)->retrans_timer);
1433 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1435 struct l2cap_pinfo *pi = l2cap_pi(sk);
1437 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1439 hci_send_acl(pi->conn->hcon, skb, 0);
1442 static void l2cap_streaming_send(struct sock *sk)
1444 struct sk_buff *skb;
1445 struct l2cap_pinfo *pi = l2cap_pi(sk);
1446 u16 control, fcs;
1448 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1449 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1450 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1451 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1453 if (pi->fcs == L2CAP_FCS_CRC16) {
1454 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1455 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1458 l2cap_do_send(sk, skb);
1460 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1464 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1466 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 struct sk_buff *skb, *tx_skb;
1468 u16 control, fcs;
1470 skb = skb_peek(TX_QUEUE(sk));
1471 if (!skb)
1472 return;
1474 do {
1475 if (bt_cb(skb)->tx_seq == tx_seq)
1476 break;
1478 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1479 return;
1481 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1483 if (pi->remote_max_tx &&
1484 bt_cb(skb)->retries == pi->remote_max_tx) {
1485 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1486 return;
1489 tx_skb = skb_clone(skb, GFP_ATOMIC);
1490 bt_cb(skb)->retries++;
1491 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1493 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1494 control |= L2CAP_CTRL_FINAL;
1495 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1498 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1499 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1501 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1503 if (pi->fcs == L2CAP_FCS_CRC16) {
1504 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1505 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1508 l2cap_do_send(sk, tx_skb);
1511 static int l2cap_ertm_send(struct sock *sk)
1513 struct sk_buff *skb, *tx_skb;
1514 struct l2cap_pinfo *pi = l2cap_pi(sk);
1515 u16 control, fcs;
1516 int nsent = 0;
1518 if (sk->sk_state != BT_CONNECTED)
1519 return -ENOTCONN;
1521 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1523 if (pi->remote_max_tx &&
1524 bt_cb(skb)->retries == pi->remote_max_tx) {
1525 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1526 break;
1529 tx_skb = skb_clone(skb, GFP_ATOMIC);
1531 bt_cb(skb)->retries++;
1533 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1534 control &= L2CAP_CTRL_SAR;
1536 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1537 control |= L2CAP_CTRL_FINAL;
1538 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1540 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1541 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1542 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1545 if (pi->fcs == L2CAP_FCS_CRC16) {
1546 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1547 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1550 l2cap_do_send(sk, tx_skb);
1552 __mod_retrans_timer();
1554 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1555 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1557 pi->unacked_frames++;
1558 pi->frames_sent++;
1560 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1561 sk->sk_send_head = NULL;
1562 else
1563 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1565 nsent++;
1568 return nsent;
1571 static int l2cap_retransmit_frames(struct sock *sk)
1573 struct l2cap_pinfo *pi = l2cap_pi(sk);
1574 int ret;
1576 if (!skb_queue_empty(TX_QUEUE(sk)))
1577 sk->sk_send_head = TX_QUEUE(sk)->next;
1579 pi->next_tx_seq = pi->expected_ack_seq;
1580 ret = l2cap_ertm_send(sk);
1581 return ret;
1584 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1586 struct sock *sk = (struct sock *)pi;
1587 u16 control = 0;
1589 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1591 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1592 control |= L2CAP_SUPER_RCV_NOT_READY;
1593 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1594 l2cap_send_sframe(pi, control);
1595 return;
1598 if (l2cap_ertm_send(sk) > 0)
1599 return;
1601 control |= L2CAP_SUPER_RCV_READY;
1602 l2cap_send_sframe(pi, control);
1605 static void l2cap_send_srejtail(struct sock *sk)
1607 struct srej_list *tail;
1608 u16 control;
1610 control = L2CAP_SUPER_SELECT_REJECT;
1611 control |= L2CAP_CTRL_FINAL;
1613 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1614 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1616 l2cap_send_sframe(l2cap_pi(sk), control);
1619 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1621 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1622 struct sk_buff **frag;
1623 int err, sent = 0;
1625 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1626 return -EFAULT;
1628 sent += count;
1629 len -= count;
1631 /* Continuation fragments (no L2CAP header) */
1632 frag = &skb_shinfo(skb)->frag_list;
1633 while (len) {
1634 count = min_t(unsigned int, conn->mtu, len);
1636 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1637 if (!*frag)
1638 return -EFAULT;
1639 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1640 return -EFAULT;
1642 sent += count;
1643 len -= count;
1645 frag = &(*frag)->next;
1648 return sent;
1651 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1653 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1654 struct sk_buff *skb;
1655 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1656 struct l2cap_hdr *lh;
1658 BT_DBG("sk %p len %d", sk, (int)len);
1660 count = min_t(unsigned int, (conn->mtu - hlen), len);
1661 skb = bt_skb_send_alloc(sk, count + hlen,
1662 msg->msg_flags & MSG_DONTWAIT, &err);
1663 if (!skb)
1664 return ERR_PTR(-ENOMEM);
1666 /* Create L2CAP header */
1667 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1668 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1669 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1670 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1672 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1673 if (unlikely(err < 0)) {
1674 kfree_skb(skb);
1675 return ERR_PTR(err);
1677 return skb;
1680 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1682 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1683 struct sk_buff *skb;
1684 int err, count, hlen = L2CAP_HDR_SIZE;
1685 struct l2cap_hdr *lh;
1687 BT_DBG("sk %p len %d", sk, (int)len);
1689 count = min_t(unsigned int, (conn->mtu - hlen), len);
1690 skb = bt_skb_send_alloc(sk, count + hlen,
1691 msg->msg_flags & MSG_DONTWAIT, &err);
1692 if (!skb)
1693 return ERR_PTR(-ENOMEM);
1695 /* Create L2CAP header */
1696 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1697 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1698 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1700 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1701 if (unlikely(err < 0)) {
1702 kfree_skb(skb);
1703 return ERR_PTR(err);
1705 return skb;
1708 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1711 struct sk_buff *skb;
1712 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1713 struct l2cap_hdr *lh;
1715 BT_DBG("sk %p len %d", sk, (int)len);
1717 if (!conn)
1718 return ERR_PTR(-ENOTCONN);
1720 if (sdulen)
1721 hlen += 2;
1723 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1724 hlen += 2;
1726 count = min_t(unsigned int, (conn->mtu - hlen), len);
1727 skb = bt_skb_send_alloc(sk, count + hlen,
1728 msg->msg_flags & MSG_DONTWAIT, &err);
1729 if (!skb)
1730 return ERR_PTR(-ENOMEM);
1732 /* Create L2CAP header */
1733 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1734 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1735 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1736 put_unaligned_le16(control, skb_put(skb, 2));
1737 if (sdulen)
1738 put_unaligned_le16(sdulen, skb_put(skb, 2));
1740 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1741 if (unlikely(err < 0)) {
1742 kfree_skb(skb);
1743 return ERR_PTR(err);
1746 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1747 put_unaligned_le16(0, skb_put(skb, 2));
1749 bt_cb(skb)->retries = 0;
1750 return skb;
1753 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1755 struct l2cap_pinfo *pi = l2cap_pi(sk);
1756 struct sk_buff *skb;
1757 struct sk_buff_head sar_queue;
1758 u16 control;
1759 size_t size = 0;
1761 skb_queue_head_init(&sar_queue);
1762 control = L2CAP_SDU_START;
1763 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1764 if (IS_ERR(skb))
1765 return PTR_ERR(skb);
1767 __skb_queue_tail(&sar_queue, skb);
1768 len -= pi->remote_mps;
1769 size += pi->remote_mps;
1771 while (len > 0) {
1772 size_t buflen;
1774 if (len > pi->remote_mps) {
1775 control = L2CAP_SDU_CONTINUE;
1776 buflen = pi->remote_mps;
1777 } else {
1778 control = L2CAP_SDU_END;
1779 buflen = len;
1782 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1783 if (IS_ERR(skb)) {
1784 skb_queue_purge(&sar_queue);
1785 return PTR_ERR(skb);
1788 __skb_queue_tail(&sar_queue, skb);
1789 len -= buflen;
1790 size += buflen;
1792 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1793 if (sk->sk_send_head == NULL)
1794 sk->sk_send_head = sar_queue.next;
1796 return size;
1799 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1801 struct sock *sk = sock->sk;
1802 struct l2cap_pinfo *pi = l2cap_pi(sk);
1803 struct sk_buff *skb;
1804 u16 control;
1805 int err;
1807 BT_DBG("sock %p, sk %p", sock, sk);
1809 err = sock_error(sk);
1810 if (err)
1811 return err;
1813 if (msg->msg_flags & MSG_OOB)
1814 return -EOPNOTSUPP;
1816 lock_sock(sk);
1818 if (sk->sk_state != BT_CONNECTED) {
1819 err = -ENOTCONN;
1820 goto done;
1823 /* Connectionless channel */
1824 if (sk->sk_type == SOCK_DGRAM) {
1825 skb = l2cap_create_connless_pdu(sk, msg, len);
1826 if (IS_ERR(skb)) {
1827 err = PTR_ERR(skb);
1828 } else {
1829 l2cap_do_send(sk, skb);
1830 err = len;
1832 goto done;
1835 switch (pi->mode) {
1836 case L2CAP_MODE_BASIC:
1837 /* Check outgoing MTU */
1838 if (len > pi->omtu) {
1839 err = -EMSGSIZE;
1840 goto done;
1843 /* Create a basic PDU */
1844 skb = l2cap_create_basic_pdu(sk, msg, len);
1845 if (IS_ERR(skb)) {
1846 err = PTR_ERR(skb);
1847 goto done;
1850 l2cap_do_send(sk, skb);
1851 err = len;
1852 break;
1854 case L2CAP_MODE_ERTM:
1855 case L2CAP_MODE_STREAMING:
1856 /* Entire SDU fits into one PDU */
1857 if (len <= pi->remote_mps) {
1858 control = L2CAP_SDU_UNSEGMENTED;
1859 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1860 if (IS_ERR(skb)) {
1861 err = PTR_ERR(skb);
1862 goto done;
1864 __skb_queue_tail(TX_QUEUE(sk), skb);
1866 if (sk->sk_send_head == NULL)
1867 sk->sk_send_head = skb;
1869 } else {
1870 /* Segment SDU into multiples PDUs */
1871 err = l2cap_sar_segment_sdu(sk, msg, len);
1872 if (err < 0)
1873 goto done;
1876 if (pi->mode == L2CAP_MODE_STREAMING) {
1877 l2cap_streaming_send(sk);
1878 } else {
1879 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1880 pi->conn_state && L2CAP_CONN_WAIT_F) {
1881 err = len;
1882 break;
1884 err = l2cap_ertm_send(sk);
1887 if (err >= 0)
1888 err = len;
1889 break;
1891 default:
1892 BT_DBG("bad state %1.1x", pi->mode);
1893 err = -EBADFD;
1896 done:
1897 release_sock(sk);
1898 return err;
1901 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1903 struct sock *sk = sock->sk;
1905 lock_sock(sk);
1907 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1908 struct l2cap_conn_rsp rsp;
1909 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1910 u8 buf[128];
1912 sk->sk_state = BT_CONFIG;
1914 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1915 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1916 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1917 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1918 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1919 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1921 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1922 release_sock(sk);
1923 return 0;
1926 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1928 l2cap_build_conf_req(sk, buf), buf);
1929 l2cap_pi(sk)->num_conf_req++;
1931 release_sock(sk);
1932 return 0;
1935 release_sock(sk);
1937 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1940 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1942 struct sock *sk = sock->sk;
1943 struct l2cap_options opts;
1944 int len, err = 0;
1945 u32 opt;
1947 BT_DBG("sk %p", sk);
1949 lock_sock(sk);
1951 switch (optname) {
1952 case L2CAP_OPTIONS:
1953 if (sk->sk_state == BT_CONNECTED) {
1954 err = -EINVAL;
1955 break;
1958 opts.imtu = l2cap_pi(sk)->imtu;
1959 opts.omtu = l2cap_pi(sk)->omtu;
1960 opts.flush_to = l2cap_pi(sk)->flush_to;
1961 opts.mode = l2cap_pi(sk)->mode;
1962 opts.fcs = l2cap_pi(sk)->fcs;
1963 opts.max_tx = l2cap_pi(sk)->max_tx;
1964 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1966 len = min_t(unsigned int, sizeof(opts), optlen);
1967 if (copy_from_user((char *) &opts, optval, len)) {
1968 err = -EFAULT;
1969 break;
1972 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1973 err = -EINVAL;
1974 break;
1977 l2cap_pi(sk)->mode = opts.mode;
1978 switch (l2cap_pi(sk)->mode) {
1979 case L2CAP_MODE_BASIC:
1980 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1981 break;
1982 case L2CAP_MODE_ERTM:
1983 case L2CAP_MODE_STREAMING:
1984 if (!disable_ertm)
1985 break;
1986 /* fall through */
1987 default:
1988 err = -EINVAL;
1989 break;
1992 l2cap_pi(sk)->imtu = opts.imtu;
1993 l2cap_pi(sk)->omtu = opts.omtu;
1994 l2cap_pi(sk)->fcs = opts.fcs;
1995 l2cap_pi(sk)->max_tx = opts.max_tx;
1996 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1997 break;
1999 case L2CAP_LM:
2000 if (get_user(opt, (u32 __user *) optval)) {
2001 err = -EFAULT;
2002 break;
2005 if (opt & L2CAP_LM_AUTH)
2006 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2007 if (opt & L2CAP_LM_ENCRYPT)
2008 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2009 if (opt & L2CAP_LM_SECURE)
2010 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2012 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2013 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2014 break;
2016 default:
2017 err = -ENOPROTOOPT;
2018 break;
2021 release_sock(sk);
2022 return err;
2025 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2027 struct sock *sk = sock->sk;
2028 struct bt_security sec;
2029 int len, err = 0;
2030 u32 opt;
2032 BT_DBG("sk %p", sk);
2034 if (level == SOL_L2CAP)
2035 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2037 if (level != SOL_BLUETOOTH)
2038 return -ENOPROTOOPT;
2040 lock_sock(sk);
2042 switch (optname) {
2043 case BT_SECURITY:
2044 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2045 && sk->sk_type != SOCK_RAW) {
2046 err = -EINVAL;
2047 break;
2050 sec.level = BT_SECURITY_LOW;
2052 len = min_t(unsigned int, sizeof(sec), optlen);
2053 if (copy_from_user((char *) &sec, optval, len)) {
2054 err = -EFAULT;
2055 break;
2058 if (sec.level < BT_SECURITY_LOW ||
2059 sec.level > BT_SECURITY_HIGH) {
2060 err = -EINVAL;
2061 break;
2064 l2cap_pi(sk)->sec_level = sec.level;
2065 break;
2067 case BT_DEFER_SETUP:
2068 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2069 err = -EINVAL;
2070 break;
2073 if (get_user(opt, (u32 __user *) optval)) {
2074 err = -EFAULT;
2075 break;
2078 bt_sk(sk)->defer_setup = opt;
2079 break;
2081 default:
2082 err = -ENOPROTOOPT;
2083 break;
2086 release_sock(sk);
2087 return err;
2090 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2092 struct sock *sk = sock->sk;
2093 struct l2cap_options opts;
2094 struct l2cap_conninfo cinfo;
2095 int len, err = 0;
2096 u32 opt;
2098 BT_DBG("sk %p", sk);
2100 if (get_user(len, optlen))
2101 return -EFAULT;
2103 lock_sock(sk);
2105 switch (optname) {
2106 case L2CAP_OPTIONS:
2107 opts.imtu = l2cap_pi(sk)->imtu;
2108 opts.omtu = l2cap_pi(sk)->omtu;
2109 opts.flush_to = l2cap_pi(sk)->flush_to;
2110 opts.mode = l2cap_pi(sk)->mode;
2111 opts.fcs = l2cap_pi(sk)->fcs;
2112 opts.max_tx = l2cap_pi(sk)->max_tx;
2113 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2115 len = min_t(unsigned int, len, sizeof(opts));
2116 if (copy_to_user(optval, (char *) &opts, len))
2117 err = -EFAULT;
2119 break;
2121 case L2CAP_LM:
2122 switch (l2cap_pi(sk)->sec_level) {
2123 case BT_SECURITY_LOW:
2124 opt = L2CAP_LM_AUTH;
2125 break;
2126 case BT_SECURITY_MEDIUM:
2127 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2128 break;
2129 case BT_SECURITY_HIGH:
2130 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2131 L2CAP_LM_SECURE;
2132 break;
2133 default:
2134 opt = 0;
2135 break;
2138 if (l2cap_pi(sk)->role_switch)
2139 opt |= L2CAP_LM_MASTER;
2141 if (l2cap_pi(sk)->force_reliable)
2142 opt |= L2CAP_LM_RELIABLE;
2144 if (put_user(opt, (u32 __user *) optval))
2145 err = -EFAULT;
2146 break;
2148 case L2CAP_CONNINFO:
2149 if (sk->sk_state != BT_CONNECTED &&
2150 !(sk->sk_state == BT_CONNECT2 &&
2151 bt_sk(sk)->defer_setup)) {
2152 err = -ENOTCONN;
2153 break;
2156 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2157 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2159 len = min_t(unsigned int, len, sizeof(cinfo));
2160 if (copy_to_user(optval, (char *) &cinfo, len))
2161 err = -EFAULT;
2163 break;
2165 default:
2166 err = -ENOPROTOOPT;
2167 break;
2170 release_sock(sk);
2171 return err;
2174 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2176 struct sock *sk = sock->sk;
2177 struct bt_security sec;
2178 int len, err = 0;
2180 BT_DBG("sk %p", sk);
2182 if (level == SOL_L2CAP)
2183 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2185 if (level != SOL_BLUETOOTH)
2186 return -ENOPROTOOPT;
2188 if (get_user(len, optlen))
2189 return -EFAULT;
2191 lock_sock(sk);
2193 switch (optname) {
2194 case BT_SECURITY:
2195 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2196 && sk->sk_type != SOCK_RAW) {
2197 err = -EINVAL;
2198 break;
2201 sec.level = l2cap_pi(sk)->sec_level;
2203 len = min_t(unsigned int, len, sizeof(sec));
2204 if (copy_to_user(optval, (char *) &sec, len))
2205 err = -EFAULT;
2207 break;
2209 case BT_DEFER_SETUP:
2210 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2211 err = -EINVAL;
2212 break;
2215 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2216 err = -EFAULT;
2218 break;
2220 default:
2221 err = -ENOPROTOOPT;
2222 break;
2225 release_sock(sk);
2226 return err;
2229 static int l2cap_sock_shutdown(struct socket *sock, int how)
2231 struct sock *sk = sock->sk;
2232 int err = 0;
2234 BT_DBG("sock %p, sk %p", sock, sk);
2236 if (!sk)
2237 return 0;
2239 lock_sock(sk);
2240 if (!sk->sk_shutdown) {
2241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2242 err = __l2cap_wait_ack(sk);
2244 sk->sk_shutdown = SHUTDOWN_MASK;
2245 l2cap_sock_clear_timer(sk);
2246 __l2cap_sock_close(sk, 0);
2248 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2249 err = bt_sock_wait_state(sk, BT_CLOSED,
2250 sk->sk_lingertime);
2253 if (!err && sk->sk_err)
2254 err = -sk->sk_err;
2256 release_sock(sk);
2257 return err;
2260 static int l2cap_sock_release(struct socket *sock)
2262 struct sock *sk = sock->sk;
2263 int err;
2265 BT_DBG("sock %p, sk %p", sock, sk);
2267 if (!sk)
2268 return 0;
2270 err = l2cap_sock_shutdown(sock, 2);
2272 sock_orphan(sk);
2273 l2cap_sock_kill(sk);
2274 return err;
2277 static void l2cap_chan_ready(struct sock *sk)
2279 struct sock *parent = bt_sk(sk)->parent;
2281 BT_DBG("sk %p, parent %p", sk, parent);
2283 l2cap_pi(sk)->conf_state = 0;
2284 l2cap_sock_clear_timer(sk);
2286 if (!parent) {
2287 /* Outgoing channel.
2288 * Wake up socket sleeping on connect.
2290 sk->sk_state = BT_CONNECTED;
2291 sk->sk_state_change(sk);
2292 } else {
2293 /* Incoming channel.
2294 * Wake up socket sleeping on accept.
2296 parent->sk_data_ready(parent, 0);
2300 /* Copy frame to all raw sockets on that connection */
2301 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2303 struct l2cap_chan_list *l = &conn->chan_list;
2304 struct sk_buff *nskb;
2305 struct sock *sk;
2307 BT_DBG("conn %p", conn);
2309 read_lock(&l->lock);
2310 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2311 if (sk->sk_type != SOCK_RAW)
2312 continue;
2314 /* Don't send frame to the socket it came from */
2315 if (skb->sk == sk)
2316 continue;
2317 nskb = skb_clone(skb, GFP_ATOMIC);
2318 if (!nskb)
2319 continue;
2321 if (sock_queue_rcv_skb(sk, nskb))
2322 kfree_skb(nskb);
2324 read_unlock(&l->lock);
2327 /* ---- L2CAP signalling commands ---- */
2328 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2329 u8 code, u8 ident, u16 dlen, void *data)
2331 struct sk_buff *skb, **frag;
2332 struct l2cap_cmd_hdr *cmd;
2333 struct l2cap_hdr *lh;
2334 int len, count;
2336 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2337 conn, code, ident, dlen);
2339 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2340 count = min_t(unsigned int, conn->mtu, len);
2342 skb = bt_skb_alloc(count, GFP_ATOMIC);
2343 if (!skb)
2344 return NULL;
2346 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2347 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2348 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2350 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2351 cmd->code = code;
2352 cmd->ident = ident;
2353 cmd->len = cpu_to_le16(dlen);
2355 if (dlen) {
2356 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2357 memcpy(skb_put(skb, count), data, count);
2358 data += count;
2361 len -= skb->len;
2363 /* Continuation fragments (no L2CAP header) */
2364 frag = &skb_shinfo(skb)->frag_list;
2365 while (len) {
2366 count = min_t(unsigned int, conn->mtu, len);
2368 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2369 if (!*frag)
2370 goto fail;
2372 memcpy(skb_put(*frag, count), data, count);
2374 len -= count;
2375 data += count;
2377 frag = &(*frag)->next;
2380 return skb;
2382 fail:
2383 kfree_skb(skb);
2384 return NULL;
2387 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2389 struct l2cap_conf_opt *opt = *ptr;
2390 int len;
2392 len = L2CAP_CONF_OPT_SIZE + opt->len;
2393 *ptr += len;
2395 *type = opt->type;
2396 *olen = opt->len;
2398 switch (opt->len) {
2399 case 1:
2400 *val = *((u8 *) opt->val);
2401 break;
2403 case 2:
2404 *val = __le16_to_cpu(*((__le16 *) opt->val));
2405 break;
2407 case 4:
2408 *val = __le32_to_cpu(*((__le32 *) opt->val));
2409 break;
2411 default:
2412 *val = (unsigned long) opt->val;
2413 break;
2416 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2417 return len;
2420 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2422 struct l2cap_conf_opt *opt = *ptr;
2424 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2426 opt->type = type;
2427 opt->len = len;
2429 switch (len) {
2430 case 1:
2431 *((u8 *) opt->val) = val;
2432 break;
2434 case 2:
2435 *((__le16 *) opt->val) = cpu_to_le16(val);
2436 break;
2438 case 4:
2439 *((__le32 *) opt->val) = cpu_to_le32(val);
2440 break;
2442 default:
2443 memcpy(opt->val, (void *) val, len);
2444 break;
2447 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer: the delayed-ack window expired, acknowledge now. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2459 static inline void l2cap_ertm_init(struct sock *sk)
2461 l2cap_pi(sk)->expected_ack_seq = 0;
2462 l2cap_pi(sk)->unacked_frames = 0;
2463 l2cap_pi(sk)->buffer_seq = 0;
2464 l2cap_pi(sk)->num_acked = 0;
2465 l2cap_pi(sk)->frames_sent = 0;
2467 setup_timer(&l2cap_pi(sk)->retrans_timer,
2468 l2cap_retrans_timeout, (unsigned long) sk);
2469 setup_timer(&l2cap_pi(sk)->monitor_timer,
2470 l2cap_monitor_timeout, (unsigned long) sk);
2471 setup_timer(&l2cap_pi(sk)->ack_timer,
2472 l2cap_ack_timeout, (unsigned long) sk);
2474 __skb_queue_head_init(SREJ_QUEUE(sk));
2475 __skb_queue_head_init(BUSY_QUEUE(sk));
2477 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2479 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2482 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2484 switch (mode) {
2485 case L2CAP_MODE_STREAMING:
2486 case L2CAP_MODE_ERTM:
2487 if (l2cap_mode_supported(mode, remote_feat_mask))
2488 return mode;
2489 /* fall through */
2490 default:
2491 return L2CAP_MODE_BASIC;
2495 static int l2cap_build_conf_req(struct sock *sk, void *data)
2497 struct l2cap_pinfo *pi = l2cap_pi(sk);
2498 struct l2cap_conf_req *req = data;
2499 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2500 void *ptr = req->data;
2502 BT_DBG("sk %p", sk);
2504 if (pi->num_conf_req || pi->num_conf_rsp)
2505 goto done;
2507 switch (pi->mode) {
2508 case L2CAP_MODE_STREAMING:
2509 case L2CAP_MODE_ERTM:
2510 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2511 break;
2513 /* fall through */
2514 default:
2515 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2516 break;
2519 done:
2520 switch (pi->mode) {
2521 case L2CAP_MODE_BASIC:
2522 if (pi->imtu != L2CAP_DEFAULT_MTU)
2523 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2525 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2526 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2527 break;
2529 rfc.mode = L2CAP_MODE_BASIC;
2530 rfc.txwin_size = 0;
2531 rfc.max_transmit = 0;
2532 rfc.retrans_timeout = 0;
2533 rfc.monitor_timeout = 0;
2534 rfc.max_pdu_size = 0;
2536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2537 (unsigned long) &rfc);
2538 break;
2540 case L2CAP_MODE_ERTM:
2541 rfc.mode = L2CAP_MODE_ERTM;
2542 rfc.txwin_size = pi->tx_win;
2543 rfc.max_transmit = pi->max_tx;
2544 rfc.retrans_timeout = 0;
2545 rfc.monitor_timeout = 0;
2546 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2547 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2548 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2551 (unsigned long) &rfc);
2553 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2554 break;
2556 if (pi->fcs == L2CAP_FCS_NONE ||
2557 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2558 pi->fcs = L2CAP_FCS_NONE;
2559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2561 break;
2563 case L2CAP_MODE_STREAMING:
2564 rfc.mode = L2CAP_MODE_STREAMING;
2565 rfc.txwin_size = 0;
2566 rfc.max_transmit = 0;
2567 rfc.retrans_timeout = 0;
2568 rfc.monitor_timeout = 0;
2569 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2570 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2571 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2573 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2574 (unsigned long) &rfc);
2576 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2577 break;
2579 if (pi->fcs == L2CAP_FCS_NONE ||
2580 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2581 pi->fcs = L2CAP_FCS_NONE;
2582 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2584 break;
2587 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2588 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2590 req->dcid = cpu_to_le16(pi->dcid);
2591 req->flags = cpu_to_le16(0);
2593 return ptr - data;
/* Parse the peer's accumulated configuration request (pi->conf_req) and
 * build a configuration response into 'data'.  Returns the length of the
 * response written, or -ECONNREFUSED when the requested mode cannot be
 * accepted and no further negotiation is possible. */
2596 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2598 struct l2cap_pinfo *pi = l2cap_pi(sk);
2599 struct l2cap_conf_rsp *rsp = data;
2600 void *ptr = rsp->data;
2601 void *req = pi->conf_req;
2602 int len = pi->conf_len;
2603 int type, hint, olen;
2604 unsigned long val;
2605 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2606 u16 mtu = L2CAP_DEFAULT_MTU;
2607 u16 result = L2CAP_CONF_SUCCESS;
2609 BT_DBG("sk %p", sk);
/* Walk every option in the stored request; l2cap_get_conf_opt advances
 * 'req' and returns the total bytes consumed for this option. */
2611 while (len >= L2CAP_CONF_OPT_SIZE) {
2612 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2614 hint = type & L2CAP_CONF_HINT;
2615 type &= L2CAP_CONF_MASK;
2617 switch (type) {
2618 case L2CAP_CONF_MTU:
2619 mtu = val;
2620 break;
2622 case L2CAP_CONF_FLUSH_TO:
2623 pi->flush_to = val;
2624 break;
2626 case L2CAP_CONF_QOS:
2627 break;
2629 case L2CAP_CONF_RFC:
/* For larger options val is a pointer to the option payload. */
2630 if (olen == sizeof(rfc))
2631 memcpy(&rfc, (void *) val, olen);
2632 break;
2634 case L2CAP_CONF_FCS:
2635 if (val == L2CAP_FCS_NONE)
2636 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2638 break;
2640 default:
/* Hint options may be silently ignored; unknown non-hint options
 * are echoed back with an UNKNOWN result. */
2641 if (hint)
2642 break;
2644 result = L2CAP_CONF_UNKNOWN;
2645 *((u8 *) ptr++) = type;
2646 break;
/* Mode selection only happens on the first exchange. */
2650 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2651 goto done;
2653 switch (pi->mode) {
2654 case L2CAP_MODE_STREAMING:
2655 case L2CAP_MODE_ERTM:
2656 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2657 pi->mode = l2cap_select_mode(rfc.mode,
2658 pi->conn->feat_mask);
2659 break;
/* STATE2 device: the locally-configured mode is mandatory. */
2662 if (pi->mode != rfc.mode)
2663 return -ECONNREFUSED;
2665 break;
2668 done:
2669 if (pi->mode != rfc.mode) {
2670 result = L2CAP_CONF_UNACCEPT;
2671 rfc.mode = pi->mode;
/* Second unaccepted response means negotiation failed. */
2673 if (pi->num_conf_rsp == 1)
2674 return -ECONNREFUSED;
2676 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2677 sizeof(rfc), (unsigned long) &rfc);
2681 if (result == L2CAP_CONF_SUCCESS) {
2682 /* Configure output options and let the other side know
2683 * which ones we don't like. */
2685 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2686 result = L2CAP_CONF_UNACCEPT;
2687 else {
2688 pi->omtu = mtu;
2689 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2691 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2693 switch (rfc.mode) {
2694 case L2CAP_MODE_BASIC:
2695 pi->fcs = L2CAP_FCS_NONE;
2696 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2697 break;
2699 case L2CAP_MODE_ERTM:
2700 pi->remote_tx_win = rfc.txwin_size;
2701 pi->remote_max_tx = rfc.max_transmit;
/* Clamp the PDU size to what fits in the HCI ACL MTU
 * (10 bytes of L2CAP/ERTM/FCS overhead). */
2703 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2704 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2706 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2708 rfc.retrans_timeout =
2709 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2710 rfc.monitor_timeout =
2711 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2713 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2715 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2716 sizeof(rfc), (unsigned long) &rfc);
2718 break;
2720 case L2CAP_MODE_STREAMING:
2721 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2722 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2724 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2726 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2728 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2729 sizeof(rfc), (unsigned long) &rfc);
2731 break;
2733 default:
2734 result = L2CAP_CONF_UNACCEPT;
2736 memset(&rfc, 0, sizeof(rfc));
2737 rfc.mode = pi->mode;
2740 if (result == L2CAP_CONF_SUCCESS)
2741 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
/* 'scid' in the response is the peer's source CID, i.e. our dcid. */
2743 rsp->scid = cpu_to_le16(pi->dcid);
2744 rsp->result = cpu_to_le16(result);
2745 rsp->flags = cpu_to_le16(0x0000);
2747 return ptr - data;
/* Parse a configuration response from the peer and build a follow-up
 * configuration request into 'data'.  *result may be downgraded to
 * UNACCEPT (e.g. too-small MTU).  Returns the length of the new request,
 * or -ECONNREFUSED when the peer proposes an incompatible mode. */
2750 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2752 struct l2cap_pinfo *pi = l2cap_pi(sk);
2753 struct l2cap_conf_req *req = data;
2754 void *ptr = req->data;
2755 int type, olen;
2756 unsigned long val;
2757 struct l2cap_conf_rfc rfc;
2759 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2761 while (len >= L2CAP_CONF_OPT_SIZE) {
2762 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2764 switch (type) {
2765 case L2CAP_CONF_MTU:
/* Do not accept an MTU below the spec minimum. */
2766 if (val < L2CAP_DEFAULT_MIN_MTU) {
2767 *result = L2CAP_CONF_UNACCEPT;
2768 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2769 } else
2770 pi->imtu = val;
2771 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2772 break;
2774 case L2CAP_CONF_FLUSH_TO:
2775 pi->flush_to = val;
2776 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2777 2, pi->flush_to);
2778 break;
2780 case L2CAP_CONF_RFC:
2781 if (olen == sizeof(rfc))
2782 memcpy(&rfc, (void *)val, olen);
/* A STATE2 device's mode is non-negotiable. */
2784 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2785 rfc.mode != pi->mode)
2786 return -ECONNREFUSED;
2788 pi->fcs = 0;
2790 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2791 sizeof(rfc), (unsigned long) &rfc);
2792 break;
/* NOTE(review): 'rfc' is only initialized when an RFC option was
 * present in the response; the checks below appear to rely on the
 * peer always echoing one for non-basic modes. */
2796 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2797 return -ECONNREFUSED;
2799 pi->mode = rfc.mode;
2801 if (*result == L2CAP_CONF_SUCCESS) {
2802 switch (rfc.mode) {
2803 case L2CAP_MODE_ERTM:
2804 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2805 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2806 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2807 break;
2808 case L2CAP_MODE_STREAMING:
2809 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2813 req->dcid = cpu_to_le16(pi->dcid);
2814 req->flags = cpu_to_le16(0x0000);
2816 return ptr - data;
/* Build a minimal (option-less) configuration response with the given
 * result and flags into 'data'; returns its length. */
2819 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2821 struct l2cap_conf_rsp *rsp = data;
2822 void *ptr = rsp->data;
2824 BT_DBG("sk %p", sk);
2826 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2827 rsp->result = cpu_to_le16(result);
2828 rsp->flags = cpu_to_le16(flags);
2830 return ptr - data;
/* Extract the RFC option from an accepted configuration response and
 * latch the negotiated ERTM/streaming timers and MPS into the pinfo.
 * No-op for basic mode channels. */
2833 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2835 struct l2cap_pinfo *pi = l2cap_pi(sk);
2836 int type, olen;
2837 unsigned long val;
2838 struct l2cap_conf_rfc rfc;
2840 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2842 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2843 return;
2845 while (len >= L2CAP_CONF_OPT_SIZE) {
2846 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2848 switch (type) {
2849 case L2CAP_CONF_RFC:
2850 if (olen == sizeof(rfc))
2851 memcpy(&rfc, (void *)val, olen);
/* Only the first RFC option matters; stop scanning. */
2852 goto done;
/* NOTE(review): if no RFC option is present, 'rfc' is read
 * uninitialized below — appears to rely on the peer including it. */
2856 done:
2857 switch (rfc.mode) {
2858 case L2CAP_MODE_ERTM:
2859 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2860 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2861 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2862 break;
2863 case L2CAP_MODE_STREAMING:
2864 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * information request, give up on feature discovery and start the
 * queued channels anyway. */
2868 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2870 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored. */
2872 if (rej->reason != 0x0000)
2873 return 0;
2875 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2876 cmd->ident == conn->info_ident) {
2877 del_timer(&conn->info_timer);
2879 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2880 conn->info_ident = 0;
2882 l2cap_conn_start(conn);
2885 return 0;
/* Handle an incoming Connection Request: find a listening socket for the
 * PSM, perform security checks, allocate and register a child socket, and
 * send a Connection Response (success, pending, or failure).  May also
 * kick off feature-mask discovery or the first configuration request. */
2888 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2890 struct l2cap_chan_list *list = &conn->chan_list;
2891 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2892 struct l2cap_conn_rsp rsp;
2893 struct sock *parent, *sk = NULL;
2894 int result, status = L2CAP_CS_NO_INFO;
2896 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2897 __le16 psm = req->psm;
2899 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2901 /* Check if we have socket listening on psm */
2902 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2903 if (!parent) {
2904 result = L2CAP_CR_BAD_PSM;
2905 goto sendresp;
2908 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from link-mode checks. */
2909 if (psm != cpu_to_le16(0x0001) &&
2910 !hci_conn_check_link_mode(conn->hcon)) {
2911 conn->disc_reason = 0x05;
2912 result = L2CAP_CR_SEC_BLOCK;
2913 goto response;
/* From here on, any bail-out answers "no resources". */
2916 result = L2CAP_CR_NO_MEM;
2918 /* Check for backlog size */
2919 if (sk_acceptq_is_full(parent)) {
2920 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2921 goto response;
2924 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2925 if (!sk)
2926 goto response;
2928 write_lock_bh(&list->lock);
2930 /* Check if we already have channel with that dcid */
2931 if (__l2cap_get_chan_by_dcid(list, scid)) {
2932 write_unlock_bh(&list->lock);
2933 sock_set_flag(sk, SOCK_ZAPPED);
2934 l2cap_sock_kill(sk);
2935 goto response;
2938 hci_conn_hold(conn->hcon);
2940 l2cap_sock_init(sk, parent);
2941 bacpy(&bt_sk(sk)->src, conn->src);
2942 bacpy(&bt_sk(sk)->dst, conn->dst);
2943 l2cap_pi(sk)->psm = psm;
2944 l2cap_pi(sk)->dcid = scid;
2946 __l2cap_chan_add(conn, sk, parent);
/* Our locally-allocated source CID becomes the peer's dcid. */
2947 dcid = l2cap_pi(sk)->scid;
2949 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2951 l2cap_pi(sk)->ident = cmd->ident;
2953 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2954 if (l2cap_check_security(sk)) {
2955 if (bt_sk(sk)->defer_setup) {
2956 sk->sk_state = BT_CONNECT2;
2957 result = L2CAP_CR_PEND;
2958 status = L2CAP_CS_AUTHOR_PEND;
/* Wake the accept()er so userspace can authorize. */
2959 parent->sk_data_ready(parent, 0);
2960 } else {
2961 sk->sk_state = BT_CONFIG;
2962 result = L2CAP_CR_SUCCESS;
2963 status = L2CAP_CS_NO_INFO;
2965 } else {
2966 sk->sk_state = BT_CONNECT2;
2967 result = L2CAP_CR_PEND;
2968 status = L2CAP_CS_AUTHEN_PEND;
2970 } else {
/* Feature discovery not finished yet; answer "pending". */
2971 sk->sk_state = BT_CONNECT2;
2972 result = L2CAP_CR_PEND;
2973 status = L2CAP_CS_NO_INFO;
2976 write_unlock_bh(&list->lock);
2978 response:
2979 bh_unlock_sock(parent);
2981 sendresp:
2982 rsp.scid = cpu_to_le16(scid);
2983 rsp.dcid = cpu_to_le16(dcid);
2984 rsp.result = cpu_to_le16(result);
2985 rsp.status = cpu_to_le16(status);
2986 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2988 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2989 struct l2cap_info_req info;
2990 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2992 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2993 conn->info_ident = l2cap_get_ident(conn);
2995 mod_timer(&conn->info_timer, jiffies +
2996 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2998 l2cap_send_cmd(conn, conn->info_ident,
2999 L2CAP_INFO_REQ, sizeof(info), &info);
/* On immediate success, proactively send our first config request. */
3002 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3003 result == L2CAP_CR_SUCCESS) {
3004 u8 buf[128];
3005 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3006 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3007 l2cap_build_conf_req(sk, buf), buf);
3008 l2cap_pi(sk)->num_conf_req++;
3011 return 0;
/* Handle an incoming Connection Response for an outgoing connect: on
 * success move to BT_CONFIG and send the first configuration request;
 * on pending just mark the state; otherwise tear the channel down. */
3014 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3016 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3017 u16 scid, dcid, result, status;
3018 struct sock *sk;
3019 u8 req[128];
3021 scid = __le16_to_cpu(rsp->scid);
3022 dcid = __le16_to_cpu(rsp->dcid);
3023 result = __le16_to_cpu(rsp->result);
3024 status = __le16_to_cpu(rsp->status);
3026 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid==0 can happen on failure responses; fall back to matching by
 * the command identifier instead of the channel id. */
3028 if (scid) {
3029 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3030 if (!sk)
3031 return -EFAULT;
3032 } else {
3033 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3034 if (!sk)
3035 return -EFAULT;
3038 switch (result) {
3039 case L2CAP_CR_SUCCESS:
3040 sk->sk_state = BT_CONFIG;
3041 l2cap_pi(sk)->ident = 0;
3042 l2cap_pi(sk)->dcid = dcid;
3043 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Avoid sending a duplicate config request. */
3045 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3046 break;
3048 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3050 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3051 l2cap_build_conf_req(sk, req), req);
3052 l2cap_pi(sk)->num_conf_req++;
3053 break;
3055 case L2CAP_CR_PEND:
3056 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3057 break;
3059 default:
3060 l2cap_chan_del(sk, ECONNREFUSED);
3061 break;
3064 bh_unlock_sock(sk);
3065 return 0;
/* Choose the final FCS setting once configuration completes. */
3068 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3070 /* FCS is enabled only in ERTM or streaming mode, if one or both
3071 * sides request it.
3073 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3074 pi->fcs = L2CAP_FCS_NONE;
3075 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3076 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request.  Config data may arrive in
 * several fragments (continuation flag); fragments accumulate in
 * conf_req until complete, then the whole request is parsed and
 * answered.  Brings the channel up when both directions are done. */
3079 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3081 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3082 u16 dcid, flags;
3083 u8 rsp[64];
3084 struct sock *sk;
3085 int len;
3087 dcid = __le16_to_cpu(req->dcid);
3088 flags = __le16_to_cpu(req->flags);
3090 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3092 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3093 if (!sk)
3094 return -ENOENT;
3096 if (sk->sk_state == BT_DISCONN)
3097 goto unlock;
3099 /* Reject if config buffer is too small. */
3100 len = cmd_len - sizeof(*req);
3101 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3102 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3103 l2cap_build_conf_rsp(sk, rsp,
3104 L2CAP_CONF_REJECT, flags), rsp);
3105 goto unlock;
3108 /* Store config. */
3109 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3110 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more fragments follow. */
3112 if (flags & 0x0001) {
3113 /* Incomplete config. Send empty response. */
3114 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3115 l2cap_build_conf_rsp(sk, rsp,
3116 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3117 goto unlock;
3120 /* Complete config. */
3121 len = l2cap_parse_conf_req(sk, rsp);
3122 if (len < 0) {
3123 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3124 goto unlock;
3127 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3128 l2cap_pi(sk)->num_conf_rsp++;
3130 /* Reset config buffer. */
3131 l2cap_pi(sk)->conf_len = 0;
3133 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3134 goto unlock;
/* Both directions configured: channel becomes connected. */
3136 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3137 set_default_fcs(l2cap_pi(sk));
3139 sk->sk_state = BT_CONNECTED;
3141 l2cap_pi(sk)->next_tx_seq = 0;
3142 l2cap_pi(sk)->expected_tx_seq = 0;
3143 __skb_queue_head_init(TX_QUEUE(sk));
3144 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3145 l2cap_ertm_init(sk);
3147 l2cap_chan_ready(sk);
3148 goto unlock;
3151 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3152 u8 buf[64];
3153 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3154 l2cap_build_conf_req(sk, buf), buf);
3155 l2cap_pi(sk)->num_conf_req++;
3158 unlock:
3159 bh_unlock_sock(sk);
3160 return 0;
/* Handle an incoming Configuration Response.  On UNACCEPT, renegotiate
 * (bounded by L2CAP_CONF_MAX_CONF_RSP); on other failures disconnect.
 * Brings the channel up when both directions are configured. */
3163 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3165 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3166 u16 scid, flags, result;
3167 struct sock *sk;
3168 int len = cmd->len - sizeof(*rsp);
3170 scid = __le16_to_cpu(rsp->scid);
3171 flags = __le16_to_cpu(rsp->flags);
3172 result = __le16_to_cpu(rsp->result);
3174 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3175 scid, flags, result);
3177 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3178 if (!sk)
3179 return 0;
3181 switch (result) {
3182 case L2CAP_CONF_SUCCESS:
3183 l2cap_conf_rfc_get(sk, rsp->data, len);
3184 break;
3186 case L2CAP_CONF_UNACCEPT:
3187 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3188 char req[64];
/* Guard against a response that would overflow our request buffer. */
3190 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3191 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3192 goto done;
3195 /* throw out any old stored conf requests */
3196 result = L2CAP_CONF_SUCCESS;
3197 len = l2cap_parse_conf_rsp(sk, rsp->data,
3198 len, req, &result);
3199 if (len < 0) {
3200 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3201 goto done;
3204 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3205 L2CAP_CONF_REQ, len, req);
3206 l2cap_pi(sk)->num_conf_req++;
3207 if (result != L2CAP_CONF_SUCCESS)
3208 goto done;
3209 break;
3212 default:
3213 sk->sk_err = ECONNRESET;
3214 l2cap_sock_set_timer(sk, HZ * 5);
3215 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3216 goto done;
/* Continuation flag: wait for the rest of the response. */
3219 if (flags & 0x01)
3220 goto done;
3222 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3224 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3225 set_default_fcs(l2cap_pi(sk));
3227 sk->sk_state = BT_CONNECTED;
3228 l2cap_pi(sk)->next_tx_seq = 0;
3229 l2cap_pi(sk)->expected_tx_seq = 0;
3230 __skb_queue_head_init(TX_QUEUE(sk));
3231 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3232 l2cap_ertm_init(sk);
3234 l2cap_chan_ready(sk);
3237 done:
3238 bh_unlock_sock(sk);
3239 return 0;
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, then tear down and kill the channel. */
3242 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3244 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3245 struct l2cap_disconn_rsp rsp;
3246 u16 dcid, scid;
3247 struct sock *sk;
3249 scid = __le16_to_cpu(req->scid);
3250 dcid = __le16_to_cpu(req->dcid);
3252 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid; look the channel up by it. */
3254 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3255 if (!sk)
3256 return 0;
3258 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3259 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3260 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3262 sk->sk_shutdown = SHUTDOWN_MASK;
3264 l2cap_chan_del(sk, ECONNRESET);
3265 bh_unlock_sock(sk);
3267 l2cap_sock_kill(sk);
3268 return 0;
/* Handle an incoming Disconnection Response: finish tearing down the
 * channel we asked to disconnect. */
3271 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3273 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3274 u16 dcid, scid;
3275 struct sock *sk;
3277 scid = __le16_to_cpu(rsp->scid);
3278 dcid = __le16_to_cpu(rsp->dcid);
3280 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3282 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3283 if (!sk)
3284 return 0;
/* err 0: this is the expected completion of a local disconnect. */
3286 l2cap_chan_del(sk, 0);
3287 bh_unlock_sock(sk);
3289 l2cap_sock_kill(sk);
3290 return 0;
/* Handle an incoming Information Request: answer with our feature mask,
 * our fixed-channel map, or NOTSUPP for unknown info types. */
3293 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3295 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3296 u16 type;
3298 type = __le16_to_cpu(req->type);
3300 BT_DBG("type 0x%4.4x", type);
3302 if (type == L2CAP_IT_FEAT_MASK) {
3303 u8 buf[8];
3304 u32 feat_mask = l2cap_feat_mask;
3305 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3306 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3307 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Only advertise ERTM/streaming/FCS when ERTM is not disabled. */
3308 if (!disable_ertm)
3309 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3310 | L2CAP_FEAT_FCS;
3311 put_unaligned_le32(feat_mask, rsp->data);
3312 l2cap_send_cmd(conn, cmd->ident,
3313 L2CAP_INFO_RSP, sizeof(buf), buf);
3314 } else if (type == L2CAP_IT_FIXED_CHAN) {
3315 u8 buf[12];
3316 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3317 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3318 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel bitmap follows the 4-byte response header. */
3319 memcpy(buf + 4, l2cap_fixed_chan, 8);
3320 l2cap_send_cmd(conn, cmd->ident,
3321 L2CAP_INFO_RSP, sizeof(buf), buf);
3322 } else {
3323 struct l2cap_info_rsp rsp;
3324 rsp.type = cpu_to_le16(type);
3325 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3326 l2cap_send_cmd(conn, cmd->ident,
3327 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3330 return 0;
/* Handle an incoming Information Response.  Records the peer's feature
 * mask, optionally follows up with a fixed-channel query, and finally
 * starts any channels waiting on feature discovery. */
3333 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3335 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3336 u16 type, result;
3338 type = __le16_to_cpu(rsp->type);
3339 result = __le16_to_cpu(rsp->result);
3341 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3343 del_timer(&conn->info_timer);
3345 if (result != L2CAP_IR_SUCCESS) {
3346 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3347 conn->info_ident = 0;
3349 l2cap_conn_start(conn);
3351 return 0;
3354 if (type == L2CAP_IT_FEAT_MASK) {
3355 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, query those too before
 * declaring discovery done. */
3357 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3358 struct l2cap_info_req req;
3359 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3361 conn->info_ident = l2cap_get_ident(conn);
3363 l2cap_send_cmd(conn, conn->info_ident,
3364 L2CAP_INFO_REQ, sizeof(req), &req);
3365 } else {
3366 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3367 conn->info_ident = 0;
3369 l2cap_conn_start(conn);
3371 } else if (type == L2CAP_IT_FIXED_CHAN) {
3372 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3373 conn->info_ident = 0;
3375 l2cap_conn_start(conn);
3378 return 0;
/* Dispatch all signaling commands contained in one skb received on the
 * L2CAP signaling channel (CID 0x0001).  A handler error triggers a
 * Command Reject back to the peer.  Consumes the skb. */
3381 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3383 u8 *data = skb->data;
3384 int len = skb->len;
3385 struct l2cap_cmd_hdr cmd;
3386 int err = 0;
/* Give raw sockets a copy of the signaling traffic first. */
3388 l2cap_raw_recv(conn, skb);
3390 while (len >= L2CAP_CMD_HDR_SIZE) {
3391 u16 cmd_len;
3392 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3393 data += L2CAP_CMD_HDR_SIZE;
3394 len -= L2CAP_CMD_HDR_SIZE;
3396 cmd_len = le16_to_cpu(cmd.len);
3398 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject malformed input: length past the buffer, or ident 0
 * (invalid per the L2CAP spec). */
3400 if (cmd_len > len || !cmd.ident) {
3401 BT_DBG("corrupted command");
3402 break;
3405 switch (cmd.code) {
3406 case L2CAP_COMMAND_REJ:
3407 l2cap_command_rej(conn, &cmd, data);
3408 break;
3410 case L2CAP_CONN_REQ:
3411 err = l2cap_connect_req(conn, &cmd, data);
3412 break;
3414 case L2CAP_CONN_RSP:
3415 err = l2cap_connect_rsp(conn, &cmd, data);
3416 break;
3418 case L2CAP_CONF_REQ:
3419 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3420 break;
3422 case L2CAP_CONF_RSP:
3423 err = l2cap_config_rsp(conn, &cmd, data);
3424 break;
3426 case L2CAP_DISCONN_REQ:
3427 err = l2cap_disconnect_req(conn, &cmd, data);
3428 break;
3430 case L2CAP_DISCONN_RSP:
3431 err = l2cap_disconnect_rsp(conn, &cmd, data);
3432 break;
3434 case L2CAP_ECHO_REQ:
/* Echo the request payload straight back. */
3435 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3436 break;
3438 case L2CAP_ECHO_RSP:
3439 break;
3441 case L2CAP_INFO_REQ:
3442 err = l2cap_information_req(conn, &cmd, data);
3443 break;
3445 case L2CAP_INFO_RSP:
3446 err = l2cap_information_rsp(conn, &cmd, data);
3447 break;
3449 default:
3450 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3451 err = -EINVAL;
3452 break;
3455 if (err) {
3456 struct l2cap_cmd_rej rej;
3457 BT_DBG("error %d", err);
/* reason 0x0000 = command not understood. */
3459 rej.reason = cpu_to_le16(0);
3460 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3463 data += cmd_len;
3464 len -= cmd_len;
3467 kfree_skb(skb);
/* Verify (and strip) the trailing 2-byte CRC16 FCS of an ERTM/streaming
 * frame when FCS is enabled.  Returns 0 on success or -EBADMSG on a
 * checksum mismatch. */
3470 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3472 u16 our_fcs, rcv_fcs;
/* CRC covers the L2CAP header plus the 2-byte control field. */
3473 int hdr_size = L2CAP_HDR_SIZE + 2;
3475 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Drop the FCS from the payload, then read it from past the end. */
3476 skb_trim(skb, skb->len - 2);
3477 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3478 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3480 if (our_fcs != rcv_fcs)
3481 return -EBADMSG;
3483 return 0;
/* After a poll, answer with whatever is appropriate: an RNR if we are
 * locally busy, retransmitted/pending I-frames otherwise, or a plain RR
 * if nothing at all was sent. */
3486 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3488 struct l2cap_pinfo *pi = l2cap_pi(sk);
3489 u16 control = 0;
3491 pi->frames_sent = 0;
3493 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3495 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3496 control |= L2CAP_SUPER_RCV_NOT_READY;
3497 l2cap_send_sframe(pi, control);
3498 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3501 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3502 l2cap_retransmit_frames(sk);
3504 l2cap_ertm_send(sk);
/* Nothing went out and we are not busy: acknowledge with an RR. */
3506 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3507 pi->frames_sent == 0) {
3508 control |= L2CAP_SUPER_RCV_READY;
3509 l2cap_send_sframe(pi, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq (modulo-64 sequence space).
 * Returns -EINVAL for a duplicate tx_seq, 0 otherwise. */
3513 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3515 struct sk_buff *next_skb;
3516 struct l2cap_pinfo *pi = l2cap_pi(sk);
3517 int tx_seq_offset, next_tx_seq_offset;
3519 bt_cb(skb)->tx_seq = tx_seq;
3520 bt_cb(skb)->sar = sar;
3522 next_skb = skb_peek(SREJ_QUEUE(sk));
3523 if (!next_skb) {
3524 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3525 return 0;
/* Offsets are distances from buffer_seq in the 6-bit sequence space. */
3528 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3529 if (tx_seq_offset < 0)
3530 tx_seq_offset += 64;
3532 do {
3533 if (bt_cb(next_skb)->tx_seq == tx_seq)
3534 return -EINVAL;
3536 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3537 pi->buffer_seq) % 64;
3538 if (next_tx_seq_offset < 0)
3539 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert before it. */
3541 if (next_tx_seq_offset > tx_seq_offset) {
3542 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3543 return 0;
3546 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3547 break;
3549 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Sorts after everything currently queued. */
3551 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3553 return 0;
/* Reassemble an SDU from ERTM I-frames according to the SAR bits in the
 * control field, delivering complete SDUs to the socket receive queue.
 * Protocol violations trigger a disconnect; oversize data is dropped.
 * Returns 0, or a negative error when delivery must be retried later
 * (the SAR_RETRY state keeps the partially-built SDU alive). */
3556 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3558 struct l2cap_pinfo *pi = l2cap_pi(sk);
3559 struct sk_buff *_skb;
3560 int err;
3562 switch (control & L2CAP_CTRL_SAR) {
3563 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame while mid-reassembly is a protocol error. */
3564 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3565 goto drop;
3567 err = sock_queue_rcv_skb(sk, skb);
3568 if (!err)
3569 return err;
3571 break;
3573 case L2CAP_SDU_START:
3574 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3575 goto drop;
/* The first two payload bytes carry the total SDU length. */
3577 pi->sdu_len = get_unaligned_le16(skb->data);
3579 if (pi->sdu_len > pi->imtu)
3580 goto disconnect;
3582 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3583 if (!pi->sdu)
3584 return -ENOMEM;
3586 /* pull sdu_len bytes only after alloc, because of Local Busy
3587 * condition we have to be sure that this will be executed
3588 * only once, i.e., when alloc does not fail */
3589 skb_pull(skb, 2);
3591 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3593 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3594 pi->partial_sdu_len = skb->len;
3595 break;
3597 case L2CAP_SDU_CONTINUE:
3598 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3599 goto disconnect;
3601 if (!pi->sdu)
3602 goto disconnect;
3604 pi->partial_sdu_len += skb->len;
3605 if (pi->partial_sdu_len > pi->sdu_len)
3606 goto drop;
3608 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3610 break;
3612 case L2CAP_SDU_END:
3613 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3614 goto disconnect;
3616 if (!pi->sdu)
3617 goto disconnect;
/* On a retry (previous delivery failed) the data is already in
 * pi->sdu; skip re-appending and length checks. */
3619 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3620 pi->partial_sdu_len += skb->len;
3622 if (pi->partial_sdu_len > pi->imtu)
3623 goto drop;
/* The reassembled total must match the advertised SDU length. */
3625 if (pi->partial_sdu_len != pi->sdu_len)
3626 goto drop;
3628 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3631 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3632 if (!_skb) {
3633 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3634 return -ENOMEM;
3637 err = sock_queue_rcv_skb(sk, _skb);
3638 if (err < 0) {
3639 kfree_skb(_skb);
3640 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3641 return err;
3644 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3645 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3647 kfree_skb(pi->sdu);
3648 break;
3651 kfree_skb(skb);
3652 return 0;
3654 drop:
3655 kfree_skb(pi->sdu);
3656 pi->sdu = NULL;
/* NOTE(review): drop deliberately falls through into disconnect. */
3658 disconnect:
3659 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3660 kfree_skb(skb);
3661 return 0;
/* Try to drain the local-busy backlog queue into the socket.  Returns
 * -EBUSY if delivery stalls again (frame is requeued), or 0 after the
 * queue empties and the local-busy condition is cleared (sending an
 * RR+poll if we had previously sent an RNR). */
3664 static int l2cap_try_push_rx_skb(struct sock *sk)
3666 struct l2cap_pinfo *pi = l2cap_pi(sk);
3667 struct sk_buff *skb;
3668 u16 control;
3669 int err;
3671 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3672 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3673 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3674 if (err < 0) {
/* Still busy: put the frame back at the head and retry later. */
3675 skb_queue_head(BUSY_QUEUE(sk), skb);
3676 return -EBUSY;
3679 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3682 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3683 goto done;
/* We told the peer "not ready" earlier; announce ready with a poll. */
3685 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3686 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3687 l2cap_send_sframe(pi, control);
3688 l2cap_pi(sk)->retry_count = 1;
3690 del_timer(&pi->retrans_timer);
3691 __mod_monitor_timer();
3693 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3695 done:
3696 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3697 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3699 BT_DBG("sk %p, Exit local busy", sk);
3701 return 0;
/* Workqueue handler for the local-busy condition: repeatedly sleep and
 * retry pushing the backlog until it drains, a signal arrives, a socket
 * error occurs, or the retry limit forces a disconnect. */
3704 static void l2cap_busy_work(struct work_struct *work)
3706 DECLARE_WAITQUEUE(wait, current);
3707 struct l2cap_pinfo *pi =
3708 container_of(work, struct l2cap_pinfo, busy_work);
3709 struct sock *sk = (struct sock *)pi;
3710 int n_tries = 0, timeo = HZ/5, err;
3711 struct sk_buff *skb;
3713 lock_sock(sk);
3715 add_wait_queue(sk_sleep(sk), &wait);
3716 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3717 set_current_state(TASK_INTERRUPTIBLE);
/* Too many failed attempts: give up and reset the channel. */
3719 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3720 err = -EBUSY;
3721 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3722 break;
3725 if (!timeo)
3726 timeo = HZ/5;
3728 if (signal_pending(current)) {
3729 err = sock_intr_errno(timeo);
3730 break;
/* Sleep without the socket lock so the receiver can make progress. */
3733 release_sock(sk);
3734 timeo = schedule_timeout(timeo);
3735 lock_sock(sk);
3737 err = sock_error(sk);
3738 if (err)
3739 break;
3741 if (l2cap_try_push_rx_skb(sk) == 0)
3742 break;
3745 set_current_state(TASK_RUNNING);
3746 remove_wait_queue(sk_sleep(sk), &wait);
3748 release_sock(sk);
/* Deliver an in-sequence I-frame.  If already locally busy, append to the
 * backlog and retry the queue; if delivery fails, enter the local-busy
 * state: queue the frame, send an RNR, and schedule the busy worker. */
3751 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3753 struct l2cap_pinfo *pi = l2cap_pi(sk);
3754 int sctrl, err;
3756 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3757 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3758 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3759 return l2cap_try_push_rx_skb(sk);
3764 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3765 if (err >= 0) {
3766 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3767 return err;
3770 /* Busy Condition */
3771 BT_DBG("sk %p, Enter local busy", sk);
3773 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3774 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3775 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending (Receiver Not Ready). */
3777 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3778 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3779 l2cap_send_sframe(pi, sctrl);
3781 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3783 del_timer(&pi->ack_timer);
3785 queue_work(_busy_wq, &pi->busy_work);
3787 return err;
/* Reassemble an SDU from streaming-mode frames.  Unlike ERTM, errors
 * here silently discard the partial SDU — streaming mode is lossy by
 * design.  Returns 0 on success or a negative error code. */
3790 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3792 struct l2cap_pinfo *pi = l2cap_pi(sk);
3793 struct sk_buff *_skb;
3794 int err = -EINVAL;
3797 * TODO: We have to notify the userland if some data is lost with the
3798 * Streaming Mode.
3801 switch (control & L2CAP_CTRL_SAR) {
3802 case L2CAP_SDU_UNSEGMENTED:
/* Abandon any in-progress reassembly when an unsegmented or new
 * START frame shows up mid-SDU. */
3803 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3804 kfree_skb(pi->sdu);
3805 break;
3808 err = sock_queue_rcv_skb(sk, skb);
3809 if (!err)
3810 return 0;
3812 break;
3814 case L2CAP_SDU_START:
3815 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3816 kfree_skb(pi->sdu);
3817 break;
/* First two payload bytes carry the total SDU length. */
3820 pi->sdu_len = get_unaligned_le16(skb->data);
3821 skb_pull(skb, 2);
3823 if (pi->sdu_len > pi->imtu) {
3824 err = -EMSGSIZE;
3825 break;
3828 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3829 if (!pi->sdu) {
3830 err = -ENOMEM;
3831 break;
3834 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3836 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3837 pi->partial_sdu_len = skb->len;
3838 err = 0;
3839 break;
3841 case L2CAP_SDU_CONTINUE:
3842 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3843 break;
3845 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3847 pi->partial_sdu_len += skb->len;
3848 if (pi->partial_sdu_len > pi->sdu_len)
3849 kfree_skb(pi->sdu);
3850 else
3851 err = 0;
3853 break;
3855 case L2CAP_SDU_END:
3856 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3857 break;
3859 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3861 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3862 pi->partial_sdu_len += skb->len;
3864 if (pi->partial_sdu_len > pi->imtu)
3865 goto drop;
/* Deliver only if total length matches the advertised SDU length. */
3867 if (pi->partial_sdu_len == pi->sdu_len) {
3868 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3869 err = sock_queue_rcv_skb(sk, _skb);
3870 if (err < 0)
3871 kfree_skb(_skb);
3873 err = 0;
3875 drop:
3876 kfree_skb(pi->sdu);
3877 break;
3880 kfree_skb(skb);
3881 return err;
/* After a missing frame is recovered, deliver the run of consecutive
 * frames now available at the head of the SREJ queue, advancing
 * buffer_seq_srej for each one. */
3884 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3886 struct sk_buff *skb;
3887 u16 control;
3889 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap in the sequence. */
3890 if (bt_cb(skb)->tx_seq != tx_seq)
3891 break;
3893 skb = skb_dequeue(SREJ_QUEUE(sk));
3894 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3895 l2cap_ertm_reassembly_sdu(sk, skb, control);
3896 l2cap_pi(sk)->buffer_seq_srej =
3897 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3898 tx_seq = (tx_seq + 1) % 64;
/* A duplicate of an already-SREJ'd frame arrived: drop its entry from
 * the SREJ list if it matches, otherwise re-send SREJ frames for the
 * still-missing sequence numbers ahead of it (rotating the list). */
3902 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3904 struct l2cap_pinfo *pi = l2cap_pi(sk);
3905 struct srej_list *l, *tmp;
3906 u16 control;
3908 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3909 if (l->tx_seq == tx_seq) {
/* This gap is now filled; forget it. */
3910 list_del(&l->list);
3911 kfree(l);
3912 return;
3914 control = L2CAP_SUPER_SELECT_REJECT;
3915 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3916 l2cap_send_sframe(pi, control);
/* Move the entry to the tail to preserve request order. */
3917 list_del(&l->list);
3918 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send a Selective Reject for every sequence number between the expected
 * tx_seq and the one actually received, recording each missing frame in
 * the SREJ list, then skip expected_tx_seq past the received frame. */
3922 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3924 struct l2cap_pinfo *pi = l2cap_pi(sk);
3925 struct srej_list *new;
3926 u16 control;
3928 while (tx_seq != pi->expected_tx_seq) {
3929 control = L2CAP_SUPER_SELECT_REJECT;
3930 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3931 l2cap_send_sframe(pi, control);
/* NOTE(review): GFP_ATOMIC allocation result is not checked here;
 * a failed kzalloc would dereference NULL on the next line. */
3933 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3934 new->tx_seq = pi->expected_tx_seq;
3935 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3936 list_add_tail(&new->list, SREJ_LIST(sk));
/* Account for the frame that did arrive (it went to the SREJ queue). */
3938 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Process a received ERTM I-frame.
 *
 * Acknowledges outgoing frames covered by req_seq, then either accepts the
 * frame (tx_seq is the one we expected), buffers/requests it as part of
 * SREJ recovery, or drops it (duplicate, window violation, local busy).
 * Always consumes skb (queued or freed).  Returns 0 in all cases.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	/* Ack after roughly a sixth of the window has been received */
	int num_to_ack = (pi->tx_win/6) + 1;
	int tx_seq_offset, expected_tx_seq_offset;
	int err = 0;

	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
								rx_control);

	/* F-bit answers our poll: stop the monitor timer, resume
	 * retransmission timing if frames are still unacked */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	/* Distance of tx_seq from the receive-buffer base, modulo 64 */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq: outside the receive window — protocol error */
	if (tx_seq_offset >= pi->tx_win) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest requested frame arrived */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* All gaps filled: leave SREJ recovery */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(pi);
				BT_DBG("sk %p, Exit SREJ_SENT", sk);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
				goto drop;

			/* Frame we already requested but out of order:
			 * re-issue SREJs for everything ahead of it */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			/* New gap beyond the current list */
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(pi->expected_tx_seq - pi->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: enter SREJ recovery */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("sk %p, Enter SREJ", sk);

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		__skb_queue_head_init(BUSY_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);

		del_timer(&pi->ack_timer);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* In-recovery: stash in-order frames until gaps close */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	err = l2cap_push_rx_skb(sk, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	}

	__mod_ack_timer();

	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
	if (pi->num_acked == num_to_ack - 1)
		l2cap_send_ack(pi);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Handle a Receiver-Ready (RR) S-frame: peer acknowledges frames up to
 * req_seq and, depending on the P/F bits, polls us, answers our poll, or
 * simply clears its busy condition.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: we must answer with the F-bit set */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Answer to our poll */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		/* Plain RR: peer is no longer busy; resume sending */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
/* Handle a Reject (REJ) S-frame: peer asks for retransmission of all
 * frames starting at req_seq.  Frames before req_seq are implicitly
 * acknowledged.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: this REJ answers our poll; skip the retransmit if
		 * we already acted on an earlier REJ */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we served a REJ while a poll is in flight */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective-Reject (SREJ) S-frame: peer asks for retransmission
 * of exactly one frame (req_seq).  P- and F-bit variants additionally
 * carry poll/answer semantics.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to req_seq */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Answer to our poll: retransmit only if we have not
		 * already served this very SREJ */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a Receiver-Not-Ready (RNR) S-frame: peer acknowledges up to
 * req_seq but signals it cannot accept more I-frames for now.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Not in SREJ recovery: stop retransmitting; answer a
		 * poll with the F-bit set */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: keep requesting missing frames */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4210 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4212 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4214 if (L2CAP_CTRL_FINAL & rx_control &&
4215 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4216 del_timer(&l2cap_pi(sk)->monitor_timer);
4217 if (l2cap_pi(sk)->unacked_frames > 0)
4218 __mod_retrans_timer();
4219 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4222 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4223 case L2CAP_SUPER_RCV_READY:
4224 l2cap_data_channel_rrframe(sk, rx_control);
4225 break;
4227 case L2CAP_SUPER_REJECT:
4228 l2cap_data_channel_rejframe(sk, rx_control);
4229 break;
4231 case L2CAP_SUPER_SELECT_REJECT:
4232 l2cap_data_channel_srejframe(sk, rx_control);
4233 break;
4235 case L2CAP_SUPER_RCV_NOT_READY:
4236 l2cap_data_channel_rnrframe(sk, rx_control);
4237 break;
4240 kfree_skb(skb);
4241 return 0;
/* Validate and dispatch one raw ERTM PDU: strip the 16-bit control field,
 * verify the FCS, sanity-check the payload length and req_seq, then hand
 * the frame to the I-frame or S-frame handler.  Invalid frames are either
 * silently dropped (bad FCS) or trigger a disconnect (protocol violation).
 * Always returns 0; the skb is consumed on every path.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* Start-of-SDU I-frames carry an extra 2-byte SDU length field */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq: peer cannot ack frames never sent */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Deliver a data frame to the channel identified by cid, applying the
 * channel's operating mode (basic / ERTM / streaming).
 *
 * NOTE(review): l2cap_get_chan_by_scid appears to return the socket with
 * its bh lock held — the done path unconditionally bh_unlock_sock()s it;
 * confirm against the helper's definition.  The skb is consumed on every
 * path.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process now if we own the socket, else defer to the
		 * backlog so the owner processes it on release */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode carries no S-frames */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming tolerates loss: just resynchronize tx_seq */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4402 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4404 struct sock *sk;
4406 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4407 if (!sk)
4408 goto drop;
4410 BT_DBG("sk %p, len %d", sk, skb->len);
4412 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4413 goto drop;
4415 if (l2cap_pi(sk)->imtu < skb->len)
4416 goto drop;
4418 if (!sock_queue_rcv_skb(sk, skb))
4419 goto done;
4421 drop:
4422 kfree_skb(skb);
4424 done:
4425 if (sk)
4426 bh_unlock_sock(sk);
4427 return 0;
4430 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4432 struct l2cap_hdr *lh = (void *) skb->data;
4433 u16 cid, len;
4434 __le16 psm;
4436 skb_pull(skb, L2CAP_HDR_SIZE);
4437 cid = __le16_to_cpu(lh->cid);
4438 len = __le16_to_cpu(lh->len);
4440 if (len != skb->len) {
4441 kfree_skb(skb);
4442 return;
4445 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4447 switch (cid) {
4448 case L2CAP_CID_SIGNALING:
4449 l2cap_sig_channel(conn, skb);
4450 break;
4452 case L2CAP_CID_CONN_LESS:
4453 psm = get_unaligned_le16(skb->data);
4454 skb_pull(skb, 2);
4455 l2cap_conless_channel(conn, psm, skb);
4456 break;
4458 default:
4459 l2cap_data_channel(conn, cid, skb);
4460 break;
4464 /* ---- L2CAP interface with lower layer (HCI) ---- */
4466 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4468 int exact = 0, lm1 = 0, lm2 = 0;
4469 register struct sock *sk;
4470 struct hlist_node *node;
4472 if (type != ACL_LINK)
4473 return -EINVAL;
4475 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4477 /* Find listening sockets and check their link_mode */
4478 read_lock(&l2cap_sk_list.lock);
4479 sk_for_each(sk, node, &l2cap_sk_list.head) {
4480 if (sk->sk_state != BT_LISTEN)
4481 continue;
4483 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4484 lm1 |= HCI_LM_ACCEPT;
4485 if (l2cap_pi(sk)->role_switch)
4486 lm1 |= HCI_LM_MASTER;
4487 exact++;
4488 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4489 lm2 |= HCI_LM_ACCEPT;
4490 if (l2cap_pi(sk)->role_switch)
4491 lm2 |= HCI_LM_MASTER;
4494 read_unlock(&l2cap_sk_list.lock);
4496 return exact ? lm1 : lm2;
4499 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4501 struct l2cap_conn *conn;
4503 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4505 if (hcon->type != ACL_LINK)
4506 return -EINVAL;
4508 if (!status) {
4509 conn = l2cap_conn_add(hcon, status);
4510 if (conn)
4511 l2cap_conn_ready(conn);
4512 } else
4513 l2cap_conn_del(hcon, bt_err(status));
4515 return 0;
4518 static int l2cap_disconn_ind(struct hci_conn *hcon)
4520 struct l2cap_conn *conn = hcon->l2cap_data;
4522 BT_DBG("hcon %p", hcon);
4524 if (hcon->type != ACL_LINK || !conn)
4525 return 0x13;
4527 return conn->disc_reason;
4530 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4532 BT_DBG("hcon %p reason %d", hcon, reason);
4534 if (hcon->type != ACL_LINK)
4535 return -EINVAL;
4537 l2cap_conn_del(hcon, bt_err(reason));
4539 return 0;
4542 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4544 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4545 return;
4547 if (encrypt == 0x00) {
4548 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4549 l2cap_sock_clear_timer(sk);
4550 l2cap_sock_set_timer(sk, HZ * 5);
4551 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4552 __l2cap_sock_close(sk, ECONNREFUSED);
4553 } else {
4554 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4555 l2cap_sock_clear_timer(sk);
/* HCI callback: the security (authentication/encryption) procedure on this
 * link finished with 'status'.  Walks every channel on the connection and
 * advances its state machine: connected/configuring channels get their
 * encryption state re-checked, BT_CONNECT channels finally send their
 * Connection Request, and BT_CONNECT2 channels answer the peer's pending
 * Connection Request with success or a security block.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channel still waiting for a connect response; leave it */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * Connection Request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to tear the
				 * channel down soon */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Peer's Connection Request was pending on us */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: one ACL packet (a whole L2CAP frame or a fragment of one)
 * arrived on this connection.
 *
 * ACL_START packets carry the L2CAP header; if the declared length matches
 * the packet, the frame is complete and dispatched immediately, otherwise a
 * reassembly buffer (conn->rx_skb / conn->rx_len) is set up.  Continuation
 * packets are appended until rx_len reaches zero.  Any framing violation
 * marks the connection unreliable and drops the packet.  Always returns 0;
 * the skb is consumed (dispatched ownership passes to l2cap_recv_frame,
 * otherwise it is freed at drop).
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* A previous frame was left unfinished */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4721 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4723 struct sock *sk;
4724 struct hlist_node *node;
4726 read_lock_bh(&l2cap_sk_list.lock);
4728 sk_for_each(sk, node, &l2cap_sk_list.head) {
4729 struct l2cap_pinfo *pi = l2cap_pi(sk);
4731 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4732 batostr(&bt_sk(sk)->src),
4733 batostr(&bt_sk(sk)->dst),
4734 sk->sk_state, __le16_to_cpu(pi->psm),
4735 pi->scid, pi->dcid,
4736 pi->imtu, pi->omtu, pi->sec_level);
4739 read_unlock_bh(&l2cap_sk_list.lock);
4741 return 0;
4744 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4746 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the L2CAP debugfs entry; the read side is the
 * standard single_open()/seq_file machinery. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it */
static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * mmap/socketpair are unsupported (sock_no_*); poll and ioctl use the
 * generic Bluetooth helpers. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH, ...,
 * BTPROTO_L2CAP) dispatches to l2cap_sock_create. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
/* Hooks registered with the HCI core: connection setup/teardown
 * notifications, security results and inbound ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4795 static int __init l2cap_init(void)
4797 int err;
4799 err = proto_register(&l2cap_proto, 0);
4800 if (err < 0)
4801 return err;
4803 _busy_wq = create_singlethread_workqueue("l2cap");
4804 if (!_busy_wq)
4805 goto error;
4807 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4808 if (err < 0) {
4809 BT_ERR("L2CAP socket registration failed");
4810 goto error;
4813 err = hci_register_proto(&l2cap_hci_proto);
4814 if (err < 0) {
4815 BT_ERR("L2CAP protocol registration failed");
4816 bt_sock_unregister(BTPROTO_L2CAP);
4817 goto error;
4820 if (bt_debugfs) {
4821 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4822 bt_debugfs, NULL, &l2cap_debugfs_fops);
4823 if (!l2cap_debugfs)
4824 BT_ERR("Failed to create L2CAP debug file");
4827 BT_INFO("L2CAP ver %s", VERSION);
4828 BT_INFO("L2CAP socket layer initialized");
4830 return 0;
4832 error:
4833 proto_unregister(&l2cap_proto);
4834 return err;
/* Module exit: tear down in rough reverse order of l2cap_init() —
 * debugfs entry, busy workqueue, socket family, HCI hooks, proto. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain pending busy-state work before destroying the queue */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
/* Exported no-op: referencing this symbol lets dependent modules pull in
 * l2cap.ko automatically without using any real symbol from it. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Allow disabling ERTM support at load time (visible/writable in sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");