brd: handle on-demand devices correctly
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob675614e38e149c7d28d13903763b966b3c37e5f8
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
60 static int disable_ertm;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
67 static struct workqueue_struct *_busy_wq;
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
86 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
92 static void l2cap_sock_clear_timer(struct sock *sk)
94 BT_DBG("sock %p state %d", sk, sk->sk_state);
95 sk_stop_timer(sk, &sk->sk_timer);
/* Timer callback: tear the socket down with an error reflecting the state
 * it timed out in.  If the socket is currently owned by a user context,
 * back off and retry shortly instead of racing it. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	/* Established/configuring channels, and outgoing connects that went
	 * past the SDP security level, report ECONNREFUSED; everything else
	 * is a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG ||
			(sk->sk_state == BT_CONNECT &&
			 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP))
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
131 /* ---- L2CAP channels ---- */
132 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
134 struct sock *s;
135 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 if (l2cap_pi(s)->dcid == cid)
137 break;
139 return s;
142 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->scid == cid)
147 break;
149 return s;
152 /* Find channel with given SCID.
153 * Returns locked socket */
154 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
156 struct sock *s;
157 read_lock(&l->lock);
158 s = __l2cap_get_chan_by_scid(l, cid);
159 if (s)
160 bh_lock_sock(s);
161 read_unlock(&l->lock);
162 return s;
165 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
167 struct sock *s;
168 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
169 if (l2cap_pi(s)->ident == ident)
170 break;
172 return s;
175 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
177 struct sock *s;
178 read_lock(&l->lock);
179 s = __l2cap_get_chan_by_ident(l, ident);
180 if (s)
181 bh_lock_sock(s);
182 read_unlock(&l->lock);
183 return s;
186 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
188 u16 cid = L2CAP_CID_DYN_START;
190 for (; cid < L2CAP_CID_DYN_END; cid++) {
191 if (!__l2cap_get_chan_by_scid(l, cid))
192 return cid;
195 return 0;
198 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
200 sock_hold(sk);
202 if (l->head)
203 l2cap_pi(l->head)->prev_c = sk;
205 l2cap_pi(sk)->next_c = l->head;
206 l2cap_pi(sk)->prev_c = NULL;
207 l->head = sk;
210 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
212 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
214 write_lock_bh(&l->lock);
215 if (sk == l->head)
216 l->head = next;
218 if (next)
219 l2cap_pi(next)->prev_c = prev;
220 if (prev)
221 l2cap_pi(prev)->next_c = next;
222 write_unlock_bh(&l->lock);
224 __sock_put(sk);
227 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
229 struct l2cap_chan_list *l = &conn->chan_list;
231 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
232 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
234 conn->disc_reason = 0x13;
236 l2cap_pi(sk)->conn = conn;
238 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
239 /* Alloc CID for connection-oriented socket */
240 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
241 } else if (sk->sk_type == SOCK_DGRAM) {
242 /* Connectionless socket */
243 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
244 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
245 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
246 } else {
247 /* Raw socket can send/recv signalling messages only */
248 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
249 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
250 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
253 __l2cap_chan_link(l, sk);
255 if (parent)
256 bt_accept_enqueue(parent, sk);
259 /* Delete channel.
260 * Must be called on the locked socket. */
261 static void l2cap_chan_del(struct sock *sk, int err)
263 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
264 struct sock *parent = bt_sk(sk)->parent;
266 l2cap_sock_clear_timer(sk);
268 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
270 if (conn) {
271 /* Unlink from channel list */
272 l2cap_chan_unlink(&conn->chan_list, sk);
273 l2cap_pi(sk)->conn = NULL;
274 hci_conn_put(conn->hcon);
277 sk->sk_state = BT_CLOSED;
278 sock_set_flag(sk, SOCK_ZAPPED);
280 if (err)
281 sk->sk_err = err;
283 if (parent) {
284 bt_accept_unlink(sk);
285 parent->sk_data_ready(parent, 0);
286 } else
287 sk->sk_state_change(sk);
289 skb_queue_purge(TX_QUEUE(sk));
291 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
292 struct srej_list *l, *tmp;
294 del_timer(&l2cap_pi(sk)->retrans_timer);
295 del_timer(&l2cap_pi(sk)->monitor_timer);
296 del_timer(&l2cap_pi(sk)->ack_timer);
298 skb_queue_purge(SREJ_QUEUE(sk));
299 skb_queue_purge(BUSY_QUEUE(sk));
301 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
302 list_del(&l->list);
303 kfree(l);
308 static inline u8 l2cap_get_auth_type(struct sock *sk)
310 if (sk->sk_type == SOCK_RAW) {
311 switch (l2cap_pi(sk)->sec_level) {
312 case BT_SECURITY_HIGH:
313 return HCI_AT_DEDICATED_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 return HCI_AT_DEDICATED_BONDING;
316 default:
317 return HCI_AT_NO_BONDING;
319 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
320 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
321 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
323 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
324 return HCI_AT_NO_BONDING_MITM;
325 else
326 return HCI_AT_NO_BONDING;
327 } else {
328 switch (l2cap_pi(sk)->sec_level) {
329 case BT_SECURITY_HIGH:
330 return HCI_AT_GENERAL_BONDING_MITM;
331 case BT_SECURITY_MEDIUM:
332 return HCI_AT_GENERAL_BONDING;
333 default:
334 return HCI_AT_NO_BONDING;
339 /* Service level security */
340 static inline int l2cap_check_security(struct sock *sk)
342 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
343 __u8 auth_type;
345 auth_type = l2cap_get_auth_type(sk);
347 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
348 auth_type);
351 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
353 u8 id;
355 /* Get next available identificator.
356 * 1 - 128 are used by kernel.
357 * 129 - 199 are reserved.
358 * 200 - 254 are used by utilities like l2ping, etc.
361 spin_lock_bh(&conn->lock);
363 if (++conn->tx_ident > 128)
364 conn->tx_ident = 1;
366 id = conn->tx_ident;
368 spin_unlock_bh(&conn->lock);
370 return id;
373 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
375 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
377 BT_DBG("code 0x%2.2x", code);
379 if (!skb)
380 return;
382 hci_send_acl(conn->hcon, skb, 0);
385 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
387 struct sk_buff *skb;
388 struct l2cap_hdr *lh;
389 struct l2cap_conn *conn = pi->conn;
390 struct sock *sk = (struct sock *)pi;
391 int count, hlen = L2CAP_HDR_SIZE + 2;
393 if (sk->sk_state != BT_CONNECTED)
394 return;
396 if (pi->fcs == L2CAP_FCS_CRC16)
397 hlen += 2;
399 BT_DBG("pi %p, control 0x%2.2x", pi, control);
401 count = min_t(unsigned int, conn->mtu, hlen);
402 control |= L2CAP_CTRL_FRAME_TYPE;
404 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
405 control |= L2CAP_CTRL_FINAL;
406 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
409 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
410 control |= L2CAP_CTRL_POLL;
411 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
414 skb = bt_skb_alloc(count, GFP_ATOMIC);
415 if (!skb)
416 return;
418 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
419 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
420 lh->cid = cpu_to_le16(pi->dcid);
421 put_unaligned_le16(control, skb_put(skb, 2));
423 if (pi->fcs == L2CAP_FCS_CRC16) {
424 u16 fcs = crc16(0, (u8 *)lh, count - 2);
425 put_unaligned_le16(fcs, skb_put(skb, 2));
428 hci_send_acl(pi->conn->hcon, skb, 0);
431 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
433 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
434 control |= L2CAP_SUPER_RCV_NOT_READY;
435 pi->conn_state |= L2CAP_CONN_RNR_SENT;
436 } else
437 control |= L2CAP_SUPER_RCV_READY;
439 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
441 l2cap_send_sframe(pi, control);
444 static inline int __l2cap_no_conn_pending(struct sock *sk)
446 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
449 static void l2cap_do_start(struct sock *sk)
451 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
453 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
454 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
455 return;
457 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
458 struct l2cap_conn_req req;
459 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
460 req.psm = l2cap_pi(sk)->psm;
462 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
463 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
465 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
466 L2CAP_CONN_REQ, sizeof(req), &req);
468 } else {
469 struct l2cap_info_req req;
470 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
472 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
473 conn->info_ident = l2cap_get_ident(conn);
475 mod_timer(&conn->info_timer, jiffies +
476 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
478 l2cap_send_cmd(conn, conn->info_ident,
479 L2CAP_INFO_REQ, sizeof(req), &req);
483 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
485 u32 local_feat_mask = l2cap_feat_mask;
486 if (!disable_ertm)
487 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
489 switch (mode) {
490 case L2CAP_MODE_ERTM:
491 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
492 case L2CAP_MODE_STREAMING:
493 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
494 default:
495 return 0x00;
499 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
501 struct l2cap_disconn_req req;
503 if (!conn)
504 return;
506 skb_queue_purge(TX_QUEUE(sk));
508 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
509 del_timer(&l2cap_pi(sk)->retrans_timer);
510 del_timer(&l2cap_pi(sk)->monitor_timer);
511 del_timer(&l2cap_pi(sk)->ack_timer);
514 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
515 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
516 l2cap_send_cmd(conn, l2cap_get_ident(conn),
517 L2CAP_DISCONN_REQ, sizeof(req), &req);
519 sk->sk_state = BT_DISCONN;
520 sk->sk_err = err;
523 /* ---- L2CAP connections ---- */
524 static void l2cap_conn_start(struct l2cap_conn *conn)
526 struct l2cap_chan_list *l = &conn->chan_list;
527 struct sock_del_list del, *tmp1, *tmp2;
528 struct sock *sk;
530 BT_DBG("conn %p", conn);
532 INIT_LIST_HEAD(&del.list);
534 read_lock(&l->lock);
536 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
537 bh_lock_sock(sk);
539 if (sk->sk_type != SOCK_SEQPACKET &&
540 sk->sk_type != SOCK_STREAM) {
541 bh_unlock_sock(sk);
542 continue;
545 if (sk->sk_state == BT_CONNECT) {
546 struct l2cap_conn_req req;
548 if (!l2cap_check_security(sk) ||
549 !__l2cap_no_conn_pending(sk)) {
550 bh_unlock_sock(sk);
551 continue;
554 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
555 conn->feat_mask)
556 && l2cap_pi(sk)->conf_state &
557 L2CAP_CONF_STATE2_DEVICE) {
558 tmp1 = kzalloc(sizeof(struct sock_del_list),
559 GFP_ATOMIC);
560 tmp1->sk = sk;
561 list_add_tail(&tmp1->list, &del.list);
562 bh_unlock_sock(sk);
563 continue;
566 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
567 req.psm = l2cap_pi(sk)->psm;
569 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
570 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
572 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
573 L2CAP_CONN_REQ, sizeof(req), &req);
575 } else if (sk->sk_state == BT_CONNECT2) {
576 struct l2cap_conn_rsp rsp;
577 char buf[128];
578 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
579 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
581 if (l2cap_check_security(sk)) {
582 if (bt_sk(sk)->defer_setup) {
583 struct sock *parent = bt_sk(sk)->parent;
584 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
585 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
586 parent->sk_data_ready(parent, 0);
588 } else {
589 sk->sk_state = BT_CONFIG;
590 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
591 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
593 } else {
594 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
595 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
598 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
599 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
601 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
602 rsp.result != L2CAP_CR_SUCCESS) {
603 bh_unlock_sock(sk);
604 continue;
607 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
608 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
609 l2cap_build_conf_req(sk, buf), buf);
610 l2cap_pi(sk)->num_conf_req++;
613 bh_unlock_sock(sk);
616 read_unlock(&l->lock);
618 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
619 bh_lock_sock(tmp1->sk);
620 __l2cap_sock_close(tmp1->sk, ECONNRESET);
621 bh_unlock_sock(tmp1->sk);
622 list_del(&tmp1->list);
623 kfree(tmp1);
627 static void l2cap_conn_ready(struct l2cap_conn *conn)
629 struct l2cap_chan_list *l = &conn->chan_list;
630 struct sock *sk;
632 BT_DBG("conn %p", conn);
634 read_lock(&l->lock);
636 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
637 bh_lock_sock(sk);
639 if (sk->sk_type != SOCK_SEQPACKET &&
640 sk->sk_type != SOCK_STREAM) {
641 l2cap_sock_clear_timer(sk);
642 sk->sk_state = BT_CONNECTED;
643 sk->sk_state_change(sk);
644 } else if (sk->sk_state == BT_CONNECT)
645 l2cap_do_start(sk);
647 bh_unlock_sock(sk);
650 read_unlock(&l->lock);
653 /* Notify sockets that we cannot guaranty reliability anymore */
654 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
656 struct l2cap_chan_list *l = &conn->chan_list;
657 struct sock *sk;
659 BT_DBG("conn %p", conn);
661 read_lock(&l->lock);
663 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
664 if (l2cap_pi(sk)->force_reliable)
665 sk->sk_err = err;
668 read_unlock(&l->lock);
671 static void l2cap_info_timeout(unsigned long arg)
673 struct l2cap_conn *conn = (void *) arg;
675 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
676 conn->info_ident = 0;
678 l2cap_conn_start(conn);
681 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
683 struct l2cap_conn *conn = hcon->l2cap_data;
685 if (conn || status)
686 return conn;
688 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
689 if (!conn)
690 return NULL;
692 hcon->l2cap_data = conn;
693 conn->hcon = hcon;
695 BT_DBG("hcon %p conn %p", hcon, conn);
697 conn->mtu = hcon->hdev->acl_mtu;
698 conn->src = &hcon->hdev->bdaddr;
699 conn->dst = &hcon->dst;
701 conn->feat_mask = 0;
703 spin_lock_init(&conn->lock);
704 rwlock_init(&conn->chan_list.lock);
706 setup_timer(&conn->info_timer, l2cap_info_timeout,
707 (unsigned long) conn);
709 conn->disc_reason = 0x13;
711 return conn;
714 static void l2cap_conn_del(struct hci_conn *hcon, int err)
716 struct l2cap_conn *conn = hcon->l2cap_data;
717 struct sock *sk;
719 if (!conn)
720 return;
722 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
724 kfree_skb(conn->rx_skb);
726 /* Kill channels */
727 while ((sk = conn->chan_list.head)) {
728 bh_lock_sock(sk);
729 l2cap_chan_del(sk, err);
730 bh_unlock_sock(sk);
731 l2cap_sock_kill(sk);
734 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
735 del_timer_sync(&conn->info_timer);
737 hcon->l2cap_data = NULL;
738 kfree(conn);
741 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
743 struct l2cap_chan_list *l = &conn->chan_list;
744 write_lock_bh(&l->lock);
745 __l2cap_chan_add(conn, sk, parent);
746 write_unlock_bh(&l->lock);
749 /* ---- Socket interface ---- */
750 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
752 struct sock *sk;
753 struct hlist_node *node;
754 sk_for_each(sk, node, &l2cap_sk_list.head)
755 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
756 goto found;
757 sk = NULL;
758 found:
759 return sk;
762 /* Find socket with psm and source bdaddr.
763 * Returns closest match.
765 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
767 struct sock *sk = NULL, *sk1 = NULL;
768 struct hlist_node *node;
770 read_lock(&l2cap_sk_list.lock);
772 sk_for_each(sk, node, &l2cap_sk_list.head) {
773 if (state && sk->sk_state != state)
774 continue;
776 if (l2cap_pi(sk)->psm == psm) {
777 /* Exact match. */
778 if (!bacmp(&bt_sk(sk)->src, src))
779 break;
781 /* Closest match */
782 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
783 sk1 = sk;
787 read_unlock(&l2cap_sk_list.lock);
789 return node ? sk : sk1;
792 static void l2cap_sock_destruct(struct sock *sk)
794 BT_DBG("sk %p", sk);
796 skb_queue_purge(&sk->sk_receive_queue);
797 skb_queue_purge(&sk->sk_write_queue);
800 static void l2cap_sock_cleanup_listen(struct sock *parent)
802 struct sock *sk;
804 BT_DBG("parent %p", parent);
806 /* Close not yet accepted channels */
807 while ((sk = bt_accept_dequeue(parent, NULL)))
808 l2cap_sock_close(sk);
810 parent->sk_state = BT_CLOSED;
811 sock_set_flag(parent, SOCK_ZAPPED);
814 /* Kill socket (only if zapped and orphan)
815 * Must be called on unlocked socket.
817 static void l2cap_sock_kill(struct sock *sk)
819 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
820 return;
822 BT_DBG("sk %p state %d", sk, sk->sk_state);
824 /* Kill poor orphan */
825 bt_sock_unlink(&l2cap_sk_list, sk);
826 sock_set_flag(sk, SOCK_DEAD);
827 sock_put(sk);
830 static void __l2cap_sock_close(struct sock *sk, int reason)
832 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
834 switch (sk->sk_state) {
835 case BT_LISTEN:
836 l2cap_sock_cleanup_listen(sk);
837 break;
839 case BT_CONNECTED:
840 case BT_CONFIG:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
845 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
846 l2cap_send_disconn_req(conn, sk, reason);
847 } else
848 l2cap_chan_del(sk, reason);
849 break;
851 case BT_CONNECT2:
852 if (sk->sk_type == SOCK_SEQPACKET ||
853 sk->sk_type == SOCK_STREAM) {
854 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
855 struct l2cap_conn_rsp rsp;
856 __u16 result;
858 if (bt_sk(sk)->defer_setup)
859 result = L2CAP_CR_SEC_BLOCK;
860 else
861 result = L2CAP_CR_BAD_PSM;
862 sk->sk_state = BT_DISCONN;
864 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
865 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
866 rsp.result = cpu_to_le16(result);
867 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
868 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
869 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
870 } else
871 l2cap_chan_del(sk, reason);
872 break;
874 case BT_CONNECT:
875 case BT_DISCONN:
876 l2cap_chan_del(sk, reason);
877 break;
879 default:
880 sock_set_flag(sk, SOCK_ZAPPED);
881 break;
885 /* Must be called on unlocked socket. */
886 static void l2cap_sock_close(struct sock *sk)
888 l2cap_sock_clear_timer(sk);
889 lock_sock(sk);
890 __l2cap_sock_close(sk, ECONNRESET);
891 release_sock(sk);
892 l2cap_sock_kill(sk);
895 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
897 struct l2cap_pinfo *pi = l2cap_pi(sk);
899 BT_DBG("sk %p", sk);
901 if (parent) {
902 sk->sk_type = parent->sk_type;
903 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
905 pi->imtu = l2cap_pi(parent)->imtu;
906 pi->omtu = l2cap_pi(parent)->omtu;
907 pi->conf_state = l2cap_pi(parent)->conf_state;
908 pi->mode = l2cap_pi(parent)->mode;
909 pi->fcs = l2cap_pi(parent)->fcs;
910 pi->max_tx = l2cap_pi(parent)->max_tx;
911 pi->tx_win = l2cap_pi(parent)->tx_win;
912 pi->sec_level = l2cap_pi(parent)->sec_level;
913 pi->role_switch = l2cap_pi(parent)->role_switch;
914 pi->force_reliable = l2cap_pi(parent)->force_reliable;
915 } else {
916 pi->imtu = L2CAP_DEFAULT_MTU;
917 pi->omtu = 0;
918 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
919 pi->mode = L2CAP_MODE_ERTM;
920 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
921 } else {
922 pi->mode = L2CAP_MODE_BASIC;
924 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
925 pi->fcs = L2CAP_FCS_CRC16;
926 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
927 pi->sec_level = BT_SECURITY_LOW;
928 pi->role_switch = 0;
929 pi->force_reliable = 0;
932 /* Default config options */
933 pi->conf_len = 0;
934 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
935 skb_queue_head_init(TX_QUEUE(sk));
936 skb_queue_head_init(SREJ_QUEUE(sk));
937 skb_queue_head_init(BUSY_QUEUE(sk));
938 INIT_LIST_HEAD(SREJ_LIST(sk));
941 static struct proto l2cap_proto = {
942 .name = "L2CAP",
943 .owner = THIS_MODULE,
944 .obj_size = sizeof(struct l2cap_pinfo)
947 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
949 struct sock *sk;
951 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
952 if (!sk)
953 return NULL;
955 sock_init_data(sock, sk);
956 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
958 sk->sk_destruct = l2cap_sock_destruct;
959 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
961 sock_reset_flag(sk, SOCK_ZAPPED);
963 sk->sk_protocol = proto;
964 sk->sk_state = BT_OPEN;
966 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
968 bt_sock_link(&l2cap_sk_list, sk);
969 return sk;
972 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
973 int kern)
975 struct sock *sk;
977 BT_DBG("sock %p", sock);
979 sock->state = SS_UNCONNECTED;
981 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
982 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
983 return -ESOCKTNOSUPPORT;
985 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
986 return -EPERM;
988 sock->ops = &l2cap_sock_ops;
990 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
991 if (!sk)
992 return -ENOMEM;
994 l2cap_sock_init(sk, NULL);
995 return 0;
998 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
1000 struct sock *sk = sock->sk;
1001 struct sockaddr_l2 la;
1002 int len, err = 0;
1004 BT_DBG("sk %p", sk);
1006 if (!addr || addr->sa_family != AF_BLUETOOTH)
1007 return -EINVAL;
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1013 if (la.l2_cid)
1014 return -EINVAL;
1016 lock_sock(sk);
1018 if (sk->sk_state != BT_OPEN) {
1019 err = -EBADFD;
1020 goto done;
1023 if (la.l2_psm) {
1024 __u16 psm = __le16_to_cpu(la.l2_psm);
1026 /* PSM must be odd and lsb of upper byte must be 0 */
1027 if ((psm & 0x0101) != 0x0001) {
1028 err = -EINVAL;
1029 goto done;
1032 /* Restrict usage of well-known PSMs */
1033 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1034 err = -EACCES;
1035 goto done;
1039 write_lock_bh(&l2cap_sk_list.lock);
1041 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1042 err = -EADDRINUSE;
1043 } else {
1044 /* Save source address */
1045 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1046 l2cap_pi(sk)->psm = la.l2_psm;
1047 l2cap_pi(sk)->sport = la.l2_psm;
1048 sk->sk_state = BT_BOUND;
1050 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1051 __le16_to_cpu(la.l2_psm) == 0x0003)
1052 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1055 write_unlock_bh(&l2cap_sk_list.lock);
1057 done:
1058 release_sock(sk);
1059 return err;
1062 static int l2cap_do_connect(struct sock *sk)
1064 bdaddr_t *src = &bt_sk(sk)->src;
1065 bdaddr_t *dst = &bt_sk(sk)->dst;
1066 struct l2cap_conn *conn;
1067 struct hci_conn *hcon;
1068 struct hci_dev *hdev;
1069 __u8 auth_type;
1070 int err;
1072 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1073 l2cap_pi(sk)->psm);
1075 hdev = hci_get_route(dst, src);
1076 if (!hdev)
1077 return -EHOSTUNREACH;
1079 hci_dev_lock_bh(hdev);
1081 err = -ENOMEM;
1083 auth_type = l2cap_get_auth_type(sk);
1085 hcon = hci_connect(hdev, ACL_LINK, dst,
1086 l2cap_pi(sk)->sec_level, auth_type);
1087 if (!hcon)
1088 goto done;
1090 conn = l2cap_conn_add(hcon, 0);
1091 if (!conn) {
1092 hci_conn_put(hcon);
1093 goto done;
1096 err = 0;
1098 /* Update source addr of the socket */
1099 bacpy(src, conn->src);
1101 l2cap_chan_add(conn, sk, NULL);
1103 sk->sk_state = BT_CONNECT;
1104 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1106 if (hcon->state == BT_CONNECTED) {
1107 if (sk->sk_type != SOCK_SEQPACKET &&
1108 sk->sk_type != SOCK_STREAM) {
1109 l2cap_sock_clear_timer(sk);
1110 if (l2cap_check_security(sk))
1111 sk->sk_state = BT_CONNECTED;
1112 } else
1113 l2cap_do_start(sk);
1116 done:
1117 hci_dev_unlock_bh(hdev);
1118 hci_dev_put(hdev);
1119 return err;
1122 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1124 struct sock *sk = sock->sk;
1125 struct sockaddr_l2 la;
1126 int len, err = 0;
1128 BT_DBG("sk %p", sk);
1130 if (!addr || alen < sizeof(addr->sa_family) ||
1131 addr->sa_family != AF_BLUETOOTH)
1132 return -EINVAL;
1134 memset(&la, 0, sizeof(la));
1135 len = min_t(unsigned int, sizeof(la), alen);
1136 memcpy(&la, addr, len);
1138 if (la.l2_cid)
1139 return -EINVAL;
1141 lock_sock(sk);
1143 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1144 && !la.l2_psm) {
1145 err = -EINVAL;
1146 goto done;
1149 switch (l2cap_pi(sk)->mode) {
1150 case L2CAP_MODE_BASIC:
1151 break;
1152 case L2CAP_MODE_ERTM:
1153 case L2CAP_MODE_STREAMING:
1154 if (!disable_ertm)
1155 break;
1156 /* fall through */
1157 default:
1158 err = -ENOTSUPP;
1159 goto done;
1162 switch (sk->sk_state) {
1163 case BT_CONNECT:
1164 case BT_CONNECT2:
1165 case BT_CONFIG:
1166 /* Already connecting */
1167 goto wait;
1169 case BT_CONNECTED:
1170 /* Already connected */
1171 err = -EISCONN;
1172 goto done;
1174 case BT_OPEN:
1175 case BT_BOUND:
1176 /* Can connect */
1177 break;
1179 default:
1180 err = -EBADFD;
1181 goto done;
1184 /* PSM must be odd and lsb of upper byte must be 0 */
1185 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
1186 sk->sk_type != SOCK_RAW) {
1187 err = -EINVAL;
1188 goto done;
1191 /* Set destination address and psm */
1192 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1193 l2cap_pi(sk)->psm = la.l2_psm;
1195 err = l2cap_do_connect(sk);
1196 if (err)
1197 goto done;
1199 wait:
1200 err = bt_sock_wait_state(sk, BT_CONNECTED,
1201 sock_sndtimeo(sk, flags & O_NONBLOCK));
1202 done:
1203 release_sock(sk);
1204 return err;
1207 static int l2cap_sock_listen(struct socket *sock, int backlog)
1209 struct sock *sk = sock->sk;
1210 int err = 0;
1212 BT_DBG("sk %p backlog %d", sk, backlog);
1214 lock_sock(sk);
1216 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1217 || sk->sk_state != BT_BOUND) {
1218 err = -EBADFD;
1219 goto done;
1222 switch (l2cap_pi(sk)->mode) {
1223 case L2CAP_MODE_BASIC:
1224 break;
1225 case L2CAP_MODE_ERTM:
1226 case L2CAP_MODE_STREAMING:
1227 if (!disable_ertm)
1228 break;
1229 /* fall through */
1230 default:
1231 err = -ENOTSUPP;
1232 goto done;
1235 if (!l2cap_pi(sk)->psm) {
1236 bdaddr_t *src = &bt_sk(sk)->src;
1237 u16 psm;
1239 err = -EINVAL;
1241 write_lock_bh(&l2cap_sk_list.lock);
1243 for (psm = 0x1001; psm < 0x1100; psm += 2)
1244 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1245 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1246 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1247 err = 0;
1248 break;
1251 write_unlock_bh(&l2cap_sk_list.lock);
1253 if (err < 0)
1254 goto done;
1257 sk->sk_max_ack_backlog = backlog;
1258 sk->sk_ack_backlog = 0;
1259 sk->sk_state = BT_LISTEN;
1261 done:
1262 release_sock(sk);
1263 return err;
1266 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1268 DECLARE_WAITQUEUE(wait, current);
1269 struct sock *sk = sock->sk, *nsk;
1270 long timeo;
1271 int err = 0;
1273 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1275 if (sk->sk_state != BT_LISTEN) {
1276 err = -EBADFD;
1277 goto done;
1280 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1282 BT_DBG("sk %p timeo %ld", sk, timeo);
1284 /* Wait for an incoming connection. (wake-one). */
1285 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1286 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1287 set_current_state(TASK_INTERRUPTIBLE);
1288 if (!timeo) {
1289 err = -EAGAIN;
1290 break;
1293 release_sock(sk);
1294 timeo = schedule_timeout(timeo);
1295 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1297 if (sk->sk_state != BT_LISTEN) {
1298 err = -EBADFD;
1299 break;
1302 if (signal_pending(current)) {
1303 err = sock_intr_errno(timeo);
1304 break;
1307 set_current_state(TASK_RUNNING);
1308 remove_wait_queue(sk_sleep(sk), &wait);
1310 if (err)
1311 goto done;
1313 newsock->state = SS_CONNECTED;
1315 BT_DBG("new socket %p", nsk);
1317 done:
1318 release_sock(sk);
1319 return err;
1322 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1324 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1325 struct sock *sk = sock->sk;
1327 BT_DBG("sock %p, sk %p", sock, sk);
1329 addr->sa_family = AF_BLUETOOTH;
1330 *len = sizeof(struct sockaddr_l2);
1332 if (peer) {
1333 la->l2_psm = l2cap_pi(sk)->psm;
1334 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1335 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1336 } else {
1337 la->l2_psm = l2cap_pi(sk)->sport;
1338 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1339 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1342 return 0;
/* Wait until every outstanding ERTM I-frame has been acknowledged.
 *
 * Called with the socket lock held (dropped while sleeping).  Polls in
 * HZ/5 slices until unacked_frames reaches zero or the connection goes
 * away.  Returns 0, a signal errno, or a pending socket error.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the slice so the loop keeps polling rather
		 * than busy-spinning when the timeout expired. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the lock so acks can be processed. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
/* ERTM monitor timer callback (softirq context).
 *
 * Fires while we are waiting for the peer to answer a poll.  Gives up
 * and tears the channel down once remote_max_tx retries are exhausted;
 * otherwise re-arms the timer and polls again with an S-frame.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		/* Peer stopped responding: abort the channel. */
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	/* Poll the peer again (RR or RNR with the P bit set). */
	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
/* ERTM retransmission timer callback (softirq context).
 *
 * No ack arrived in time: start the poll/monitor sequence by sending
 * an S-frame with the P bit and switching to the monitor timer.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Remember that we owe the peer a frame with the F bit set. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
/* Release transmitted I-frames that the peer has acknowledged.
 *
 * Frees queue entries up to (but not including) expected_ack_seq and
 * decrements unacked_frames accordingly.  When nothing remains
 * outstanding the retransmission timer is stopped.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* First still-unacked frame: stop here. */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1431 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1433 struct l2cap_pinfo *pi = l2cap_pi(sk);
1435 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1437 hci_send_acl(pi->conn->hcon, skb, 0);
/* Transmit every queued PDU in streaming mode (no retransmission).
 *
 * Each frame gets the next TxSeq patched into its control field and,
 * when FCS is enabled, a CRC16 over everything but the trailing two
 * FCS bytes.  Sequence numbers wrap modulo 64.
 */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Control field sits right after the basic L2CAP header. */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the frame minus its own 2 bytes. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1462 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1464 struct l2cap_pinfo *pi = l2cap_pi(sk);
1465 struct sk_buff *skb, *tx_skb;
1466 u16 control, fcs;
1468 skb = skb_peek(TX_QUEUE(sk));
1469 if (!skb)
1470 return;
1472 do {
1473 if (bt_cb(skb)->tx_seq == tx_seq)
1474 break;
1476 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1477 return;
1479 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1481 if (pi->remote_max_tx &&
1482 bt_cb(skb)->retries == pi->remote_max_tx) {
1483 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1484 return;
1487 tx_skb = skb_clone(skb, GFP_ATOMIC);
1488 bt_cb(skb)->retries++;
1489 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1491 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1492 control |= L2CAP_CTRL_FINAL;
1493 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1496 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1497 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1499 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1501 if (pi->fcs == L2CAP_FCS_CRC16) {
1502 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1503 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1506 l2cap_do_send(sk, tx_skb);
1509 static int l2cap_ertm_send(struct sock *sk)
1511 struct sk_buff *skb, *tx_skb;
1512 struct l2cap_pinfo *pi = l2cap_pi(sk);
1513 u16 control, fcs;
1514 int nsent = 0;
1516 if (sk->sk_state != BT_CONNECTED)
1517 return -ENOTCONN;
1519 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1521 if (pi->remote_max_tx &&
1522 bt_cb(skb)->retries == pi->remote_max_tx) {
1523 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1524 break;
1527 tx_skb = skb_clone(skb, GFP_ATOMIC);
1529 bt_cb(skb)->retries++;
1531 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1532 control &= L2CAP_CTRL_SAR;
1534 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1535 control |= L2CAP_CTRL_FINAL;
1536 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1538 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1539 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1540 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1543 if (pi->fcs == L2CAP_FCS_CRC16) {
1544 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1545 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1548 l2cap_do_send(sk, tx_skb);
1550 __mod_retrans_timer();
1552 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1553 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1555 pi->unacked_frames++;
1556 pi->frames_sent++;
1558 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1559 sk->sk_send_head = NULL;
1560 else
1561 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1563 nsent++;
1566 return nsent;
1569 static int l2cap_retransmit_frames(struct sock *sk)
1571 struct l2cap_pinfo *pi = l2cap_pi(sk);
1572 int ret;
1574 if (!skb_queue_empty(TX_QUEUE(sk)))
1575 sk->sk_send_head = TX_QUEUE(sk)->next;
1577 pi->next_tx_seq = pi->expected_ack_seq;
1578 ret = l2cap_ertm_send(sk);
1579 return ret;
/* Acknowledge received I-frames.
 *
 * If we are locally busy, send RNR and remember that we did.  Otherwise
 * try to piggyback the ack on pending I-frames via l2cap_ertm_send();
 * only when nothing was sent do we emit an explicit RR S-frame.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	/* l2cap_pinfo is the leading member of the sock's protocol
	 * area, so this cast recovers the owning socket. */
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending any acks implicitly. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1603 static void l2cap_send_srejtail(struct sock *sk)
1605 struct srej_list *tail;
1606 u16 control;
1608 control = L2CAP_SUPER_SELECT_REJECT;
1609 control |= L2CAP_CTRL_FINAL;
1611 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1612 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1614 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into MTU-sized fragment skbs chained
 * on frag_list.
 *
 * Returns the number of bytes copied, -EFAULT on a bad user pointer,
 * or the allocation error from bt_skb_send_alloc().  Partially built
 * fragments are left attached; the caller frees the head skb on error.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header plus a
 * 2-byte PSM, followed by the user payload from @msg.
 *
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb carries at most one ACL MTU; the rest is chained
	 * as fragments by l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload from @msg.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, 2-byte control field (@control, sequence bits
 * filled in at send time), optional 2-byte SDU length (@sdulen, for
 * the first segment of a segmented SDU), payload, and a 2-byte FCS
 * placeholder when CRC16 is enabled (computed at send time).
 *
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SAR start frames carry the SDU length */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real CRC is written just before send. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than the remote MPS into a chain of I-frame
 * PDUs (SAR start / continue / end) and append them to the transmit
 * queue atomically via a private staging queue.
 *
 * Returns the total payload size queued, or a negative errno (nothing
 * is queued on failure).
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* First segment carries the total SDU length. */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop all staged segments; caller sees the error. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
/* sendmsg() for L2CAP sockets.
 *
 * Dispatches on socket type and channel mode:
 *  - SOCK_DGRAM: one connectionless PDU, sent immediately;
 *  - basic mode: one PDU (bounded by the outgoing MTU), sent now;
 *  - ERTM/streaming: SDU is queued as one I-frame or SAR-segmented,
 *    then pushed by the mode-specific sender.
 *
 * Returns the number of bytes accepted or a negative errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* While waiting for the F bit from a busy peer,
			 * leave the data queued; it goes out later. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
/* recvmsg() for L2CAP sockets.
 *
 * With deferred setup, the first read on a BT_CONNECT2 socket is the
 * userspace "accept the connection" signal: it sends the pending
 * connect response (and a config request if none is outstanding) and
 * returns 0 without delivering data.  Otherwise it defers to the
 * generic Bluetooth stream/datagram receive helpers.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		/* Complete the connect handshake we deferred. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 *
 * L2CAP_OPTIONS may only be changed before the channel is connected.
 * Current values are loaded into @opts first so a short user buffer
 * leaves the unspecified fields unchanged.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Pre-fill with current values; a partial copy below
		 * then only overrides what the caller supplied. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): on an invalid mode err is set above but
		 * the remaining options are still applied (and the mode
		 * already stored) — looks intentional-but-fragile;
		 * confirm against upstream before changing. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested security level wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* SOL_BLUETOOTH setsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP requests are routed to the legacy handler. */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2091 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2093 struct sock *sk = sock->sk;
2094 struct l2cap_options opts;
2095 struct l2cap_conninfo cinfo;
2096 int len, err = 0;
2097 u32 opt;
2099 BT_DBG("sk %p", sk);
2101 if (get_user(len, optlen))
2102 return -EFAULT;
2104 lock_sock(sk);
2106 switch (optname) {
2107 case L2CAP_OPTIONS:
2108 opts.imtu = l2cap_pi(sk)->imtu;
2109 opts.omtu = l2cap_pi(sk)->omtu;
2110 opts.flush_to = l2cap_pi(sk)->flush_to;
2111 opts.mode = l2cap_pi(sk)->mode;
2112 opts.fcs = l2cap_pi(sk)->fcs;
2113 opts.max_tx = l2cap_pi(sk)->max_tx;
2114 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2116 len = min_t(unsigned int, len, sizeof(opts));
2117 if (copy_to_user(optval, (char *) &opts, len))
2118 err = -EFAULT;
2120 break;
2122 case L2CAP_LM:
2123 switch (l2cap_pi(sk)->sec_level) {
2124 case BT_SECURITY_LOW:
2125 opt = L2CAP_LM_AUTH;
2126 break;
2127 case BT_SECURITY_MEDIUM:
2128 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2129 break;
2130 case BT_SECURITY_HIGH:
2131 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2132 L2CAP_LM_SECURE;
2133 break;
2134 default:
2135 opt = 0;
2136 break;
2139 if (l2cap_pi(sk)->role_switch)
2140 opt |= L2CAP_LM_MASTER;
2142 if (l2cap_pi(sk)->force_reliable)
2143 opt |= L2CAP_LM_RELIABLE;
2145 if (put_user(opt, (u32 __user *) optval))
2146 err = -EFAULT;
2147 break;
2149 case L2CAP_CONNINFO:
2150 if (sk->sk_state != BT_CONNECTED &&
2151 !(sk->sk_state == BT_CONNECT2 &&
2152 bt_sk(sk)->defer_setup)) {
2153 err = -ENOTCONN;
2154 break;
2157 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2158 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2160 len = min_t(unsigned int, len, sizeof(cinfo));
2161 if (copy_to_user(optval, (char *) &cinfo, len))
2162 err = -EFAULT;
2164 break;
2166 default:
2167 err = -ENOPROTOOPT;
2168 break;
2171 release_sock(sk);
2172 return err;
/* SOL_BLUETOOTH getsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP requests are routed to the legacy handler. */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* shutdown() for L2CAP sockets.
 *
 * On the first shutdown: in ERTM mode waits briefly for outstanding
 * frames to be acked, then closes the channel; with SO_LINGER set it
 * also waits for the socket to reach BT_CLOSED.  Idempotent.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Give the peer a chance to ack in-flight I-frames. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Surface any pending socket error to the caller. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2261 static int l2cap_sock_release(struct socket *sock)
2263 struct sock *sk = sock->sk;
2264 int err;
2266 BT_DBG("sock %p, sk %p", sock, sk);
2268 if (!sk)
2269 return 0;
2271 err = l2cap_sock_shutdown(sock, 2);
2273 sock_orphan(sk);
2274 l2cap_sock_kill(sk);
2275 return err;
2278 static void l2cap_chan_ready(struct sock *sk)
2280 struct sock *parent = bt_sk(sk)->parent;
2282 BT_DBG("sk %p, parent %p", sk, parent);
2284 l2cap_pi(sk)->conf_state = 0;
2285 l2cap_sock_clear_timer(sk);
2287 if (!parent) {
2288 /* Outgoing channel.
2289 * Wake up socket sleeping on connect.
2291 sk->sk_state = BT_CONNECTED;
2292 sk->sk_state_change(sk);
2293 } else {
2294 /* Incoming channel.
2295 * Wake up socket sleeping on accept.
2297 parent->sk_data_ready(parent, 0);
2301 /* Copy frame to all raw sockets on that connection */
2302 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2304 struct l2cap_chan_list *l = &conn->chan_list;
2305 struct sk_buff *nskb;
2306 struct sock *sk;
2308 BT_DBG("conn %p", conn);
2310 read_lock(&l->lock);
2311 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2312 if (sk->sk_type != SOCK_RAW)
2313 continue;
2315 /* Don't send frame to the socket it came from */
2316 if (skb->sk == sk)
2317 continue;
2318 nskb = skb_clone(skb, GFP_ATOMIC);
2319 if (!nskb)
2320 continue;
2322 if (sock_queue_rcv_skb(sk, nskb))
2323 kfree_skb(nskb);
2325 read_unlock(&l->lock);
2328 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command skb.
 *
 * Layout: L2CAP header (CID = signalling channel), command header
 * (@code, @ident, @dlen), then @dlen bytes of @data.  Payload beyond
 * one ACL MTU is chained as fragment skbs with no extra headers.
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* First chunk of the payload shares the head skb. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb releases the chained fragments as well. */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *@ptr and advance the cursor.
 *
 * Stores the option type in *@type and its length in *@olen.  For 1/2/4
 * byte options *@val is the (host-order) value; for anything else it is
 * a pointer to the raw option bytes.  Returns the total encoded size
 * consumed, letting the caller walk a packed option list.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-size option: hand back a pointer instead. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option at *@ptr and advance the cursor.
 *
 * 1/2/4 byte values are stored little-endian; any other @len treats
 * @val as a pointer to @len raw bytes (e.g. an RFC option struct).
 * The caller guarantees the output buffer is large enough.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-size option: val is a pointer to the data. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Ack timer callback (softirq): the acknowledgement we delayed is now
 * due — send it. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
/* Initialize the per-channel ERTM state: sequence counters, the three
 * ERTM timers (retransmission, monitor, delayed ack), the SREJ/busy
 * queues, the local-busy worker, and the ERTM backlog handler. */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	/* ERTM frames need protocol handling even from the backlog. */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2483 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2485 switch (mode) {
2486 case L2CAP_MODE_STREAMING:
2487 case L2CAP_MODE_ERTM:
2488 if (l2cap_mode_supported(mode, remote_feat_mask))
2489 return mode;
2490 /* fall through */
2491 default:
2492 return L2CAP_MODE_BASIC;
/* Build an outgoing configuration request into @data.
 *
 * On the first request the desired mode may be downgraded to what the
 * remote supports (unless the mode was pinned by the user).  Emits the
 * MTU option when non-default, the RFC option describing the selected
 * mode, and an FCS option when turning the checksum off.  Returns the
 * number of bytes written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode negotiation only happens on the very first exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode is mandated; keep it. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		/* Only send an explicit basic-mode RFC option when the
		 * remote understands the enhanced modes at all. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		/* Timeouts are filled in by the responder. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a frame plus ERTM overhead fits
		 * in the ACL MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2598 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2600 struct l2cap_pinfo *pi = l2cap_pi(sk);
2601 struct l2cap_conf_rsp *rsp = data;
2602 void *ptr = rsp->data;
2603 void *req = pi->conf_req;
2604 int len = pi->conf_len;
2605 int type, hint, olen;
2606 unsigned long val;
2607 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2608 u16 mtu = L2CAP_DEFAULT_MTU;
2609 u16 result = L2CAP_CONF_SUCCESS;
2611 BT_DBG("sk %p", sk);
2613 while (len >= L2CAP_CONF_OPT_SIZE) {
2614 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2616 hint = type & L2CAP_CONF_HINT;
2617 type &= L2CAP_CONF_MASK;
2619 switch (type) {
2620 case L2CAP_CONF_MTU:
2621 mtu = val;
2622 break;
2624 case L2CAP_CONF_FLUSH_TO:
2625 pi->flush_to = val;
2626 break;
2628 case L2CAP_CONF_QOS:
2629 break;
2631 case L2CAP_CONF_RFC:
2632 if (olen == sizeof(rfc))
2633 memcpy(&rfc, (void *) val, olen);
2634 break;
2636 case L2CAP_CONF_FCS:
2637 if (val == L2CAP_FCS_NONE)
2638 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2640 break;
2642 default:
2643 if (hint)
2644 break;
2646 result = L2CAP_CONF_UNKNOWN;
2647 *((u8 *) ptr++) = type;
2648 break;
2652 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2653 goto done;
2655 switch (pi->mode) {
2656 case L2CAP_MODE_STREAMING:
2657 case L2CAP_MODE_ERTM:
2658 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2659 pi->mode = l2cap_select_mode(rfc.mode,
2660 pi->conn->feat_mask);
2661 break;
2664 if (pi->mode != rfc.mode)
2665 return -ECONNREFUSED;
2667 break;
2670 done:
2671 if (pi->mode != rfc.mode) {
2672 result = L2CAP_CONF_UNACCEPT;
2673 rfc.mode = pi->mode;
2675 if (pi->num_conf_rsp == 1)
2676 return -ECONNREFUSED;
2678 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2679 sizeof(rfc), (unsigned long) &rfc);
2683 if (result == L2CAP_CONF_SUCCESS) {
2684 /* Configure output options and let the other side know
2685 * which ones we don't like. */
2687 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2688 result = L2CAP_CONF_UNACCEPT;
2689 else {
2690 pi->omtu = mtu;
2691 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2693 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2695 switch (rfc.mode) {
2696 case L2CAP_MODE_BASIC:
2697 pi->fcs = L2CAP_FCS_NONE;
2698 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2699 break;
2701 case L2CAP_MODE_ERTM:
2702 pi->remote_tx_win = rfc.txwin_size;
2703 pi->remote_max_tx = rfc.max_transmit;
2705 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2706 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2708 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2710 rfc.retrans_timeout =
2711 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2712 rfc.monitor_timeout =
2713 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2715 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2717 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2718 sizeof(rfc), (unsigned long) &rfc);
2720 break;
2722 case L2CAP_MODE_STREAMING:
2723 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2724 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2726 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2728 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2730 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2731 sizeof(rfc), (unsigned long) &rfc);
2733 break;
2735 default:
2736 result = L2CAP_CONF_UNACCEPT;
2738 memset(&rfc, 0, sizeof(rfc));
2739 rfc.mode = pi->mode;
2742 if (result == L2CAP_CONF_SUCCESS)
2743 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2745 rsp->scid = cpu_to_le16(pi->dcid);
2746 rsp->result = cpu_to_le16(result);
2747 rsp->flags = cpu_to_le16(0x0000);
2749 return ptr - data;
2752 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2754 struct l2cap_pinfo *pi = l2cap_pi(sk);
2755 struct l2cap_conf_req *req = data;
2756 void *ptr = req->data;
2757 int type, olen;
2758 unsigned long val;
2759 struct l2cap_conf_rfc rfc;
2761 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2763 while (len >= L2CAP_CONF_OPT_SIZE) {
2764 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2766 switch (type) {
2767 case L2CAP_CONF_MTU:
2768 if (val < L2CAP_DEFAULT_MIN_MTU) {
2769 *result = L2CAP_CONF_UNACCEPT;
2770 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2771 } else
2772 pi->imtu = val;
2773 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2774 break;
2776 case L2CAP_CONF_FLUSH_TO:
2777 pi->flush_to = val;
2778 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2779 2, pi->flush_to);
2780 break;
2782 case L2CAP_CONF_RFC:
2783 if (olen == sizeof(rfc))
2784 memcpy(&rfc, (void *)val, olen);
2786 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2787 rfc.mode != pi->mode)
2788 return -ECONNREFUSED;
2790 pi->fcs = 0;
2792 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2793 sizeof(rfc), (unsigned long) &rfc);
2794 break;
2798 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2799 return -ECONNREFUSED;
2801 pi->mode = rfc.mode;
2803 if (*result == L2CAP_CONF_SUCCESS) {
2804 switch (rfc.mode) {
2805 case L2CAP_MODE_ERTM:
2806 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2807 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2808 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2809 break;
2810 case L2CAP_MODE_STREAMING:
2811 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2815 req->dcid = cpu_to_le16(pi->dcid);
2816 req->flags = cpu_to_le16(0x0000);
2818 return ptr - data;
2821 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2823 struct l2cap_conf_rsp *rsp = data;
2824 void *ptr = rsp->data;
2826 BT_DBG("sk %p", sk);
2828 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2829 rsp->result = cpu_to_le16(result);
2830 rsp->flags = cpu_to_le16(flags);
2832 return ptr - data;
2835 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2837 struct l2cap_pinfo *pi = l2cap_pi(sk);
2838 int type, olen;
2839 unsigned long val;
2840 struct l2cap_conf_rfc rfc;
2842 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2844 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2845 return;
2847 while (len >= L2CAP_CONF_OPT_SIZE) {
2848 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2850 switch (type) {
2851 case L2CAP_CONF_RFC:
2852 if (olen == sizeof(rfc))
2853 memcpy(&rfc, (void *)val, olen);
2854 goto done;
2858 done:
2859 switch (rfc.mode) {
2860 case L2CAP_MODE_ERTM:
2861 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2862 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2863 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2864 break;
2865 case L2CAP_MODE_STREAMING:
2866 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2870 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2872 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2874 if (rej->reason != 0x0000)
2875 return 0;
2877 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2878 cmd->ident == conn->info_ident) {
2879 del_timer(&conn->info_timer);
2881 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2882 conn->info_ident = 0;
2884 l2cap_conn_start(conn);
2887 return 0;
/* Handle an incoming Connection Request: find a listening socket on the
 * requested PSM, allocate a child channel, and send a Connection Response
 * (success, pending, or rejection).  Always returns 0; errors are reported
 * to the peer in the response's result field.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	/* dcid stays 0 (invalid) unless a channel is actually created. */
	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	/* Default result for every failure path below. */
	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		/* Mark zapped so the fresh socket can be killed safely. */
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	/* Keep the ACL alive while this channel exists. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	/* Our local CID becomes the peer's destination CID. */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept before we answer. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not done yet: answer "pending". */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Pending with no info: kick off the feature-mask exchange now. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away. */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
/* Handle an incoming Connection Response for one of our outgoing connect
 * attempts.  On success, move to BT_CONFIG and send the first Configure
 * Request; on rejection, tear the channel down.
 *
 * NOTE(review): no bh_lock_sock() appears here, yet bh_unlock_sock(sk) is
 * called at the end — the lookup helpers presumably return with the socket
 * bh-locked; confirm against their definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A zero scid means the peer did not (yet) assign us a CID, so the
	 * channel must be matched by the command identifier instead. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Don't send a second Configure Request. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Defer the teardown via a short timer instead. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3080 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3082 /* FCS is enabled only in ERTM or streaming mode, if one or both
3083 * sides request it.
3085 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3086 pi->fcs = L2CAP_FCS_NONE;
3087 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3088 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Option data may be split across
 * several requests (continuation flag); fragments are accumulated in
 * pi->conf_req until the final one arrives, then parsed as a whole.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): bh_unlock_sock() at unlock: implies this lookup
	 * returns with the socket bh-locked — confirm. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* 0x0002 = invalid CID in request. */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is fully open. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming Configure Response.  On UNACCEPT, renegotiate with a
 * new Configure Request (bounded by L2CAP_CONF_MAX_CONF_RSP attempts);
 * on other failures, disconnect the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	int len = cmd->len - sizeof(*rsp);

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	/* NOTE(review): the bh_unlock_sock() at done: implies this lookup
	 * returns with the socket bh-locked — confirm. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - retries exhausted, treat as failure */

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	/* More response fragments are coming: wait for the last one. */
	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	/* Both directions configured: bring the channel up. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode ==  L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response and tear down the channel (deferred via a short
 * timer if userspace currently owns the socket lock).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our local (source) CID. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Disconnection Response to our own disconnect request:
 * finish tearing down the channel (deferred via a short timer if userspace
 * currently owns the socket lock).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err 0: this is a clean, locally initiated disconnect. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3330 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3332 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3333 u16 type;
3335 type = __le16_to_cpu(req->type);
3337 BT_DBG("type 0x%4.4x", type);
3339 if (type == L2CAP_IT_FEAT_MASK) {
3340 u8 buf[8];
3341 u32 feat_mask = l2cap_feat_mask;
3342 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3343 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3344 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3345 if (!disable_ertm)
3346 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3347 | L2CAP_FEAT_FCS;
3348 put_unaligned_le32(feat_mask, rsp->data);
3349 l2cap_send_cmd(conn, cmd->ident,
3350 L2CAP_INFO_RSP, sizeof(buf), buf);
3351 } else if (type == L2CAP_IT_FIXED_CHAN) {
3352 u8 buf[12];
3353 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3354 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3355 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3356 memcpy(buf + 4, l2cap_fixed_chan, 8);
3357 l2cap_send_cmd(conn, cmd->ident,
3358 L2CAP_INFO_RSP, sizeof(buf), buf);
3359 } else {
3360 struct l2cap_info_rsp rsp;
3361 rsp.type = cpu_to_le16(type);
3362 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3363 l2cap_send_cmd(conn, cmd->ident,
3364 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3367 return 0;
/* Handle an incoming Information Response.  After the feature mask is
 * received, optionally chain a fixed-channel query; once the exchange is
 * over (or failed), mark it done and start pending channels.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer: proceed as if the exchange finished. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query the bitmap
			 * before declaring the exchange complete. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Parse and dispatch every signaling command carried in one skb received
 * on the L2CAP signaling channel (CID 0x0001).  Handlers returning nonzero
 * cause a Command Reject to be sent back.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the request payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one. */
		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
3508 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3510 u16 our_fcs, rcv_fcs;
3511 int hdr_size = L2CAP_HDR_SIZE + 2;
3513 if (pi->fcs == L2CAP_FCS_CRC16) {
3514 skb_trim(skb, skb->len - 2);
3515 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3516 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3518 if (our_fcs != rcv_fcs)
3519 return -EBADMSG;
3521 return 0;
/* After a poll/final exchange, report our receiver state to the peer:
 * send RNR if locally busy, retransmit if the remote was busy, push any
 * pending I-frames, and fall back to a plain RR if nothing was sent.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out and we are not busy: acknowledge with RR so
	 * the peer still learns our receive sequence. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq (modulo-64 window order).
 * Returns 0 on insertion, -EINVAL if the sequence number is a duplicate.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of tx_seq from the window base, normalized to 0..63. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* Duplicate frame: caller drops it. */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* Found the first queued frame that follows us: insert
		 * in front of it to keep the queue ordered. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
/* Reassemble an ERTM SDU from its SAR fragments and deliver it to the
 * socket receive queue.  Returns 0 on success or protocol error (the skb
 * is consumed either way), or a negative errno (-ENOMEM/queue error) when
 * delivery must be retried later under local-busy handling.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame in the middle of a SAR sequence is
		 * a protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry the payload was already appended the first
		 * time around; skip the length checks and copy. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

	/* drop deliberately falls through to disconnect: any SAR
	 * inconsistency both discards the partial SDU and resets the
	 * connection. */
drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
/* Try to drain the local-busy backlog into the socket receive queue.
 * Returns 0 once the backlog is empty (and exits local-busy state), or
 * -EBUSY if the receive queue filled up again mid-drain.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back and try later. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer "not ready" earlier: announce readiness again
	 * with an RR+Poll and wait for the Final response. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
/* Workqueue handler for the local-busy condition: repeatedly try to push
 * the backlogged frames, sleeping between attempts, until the backlog
 * drains, the retry budget runs out (then disconnect), a signal arrives,
 * or the socket reports an error.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* l2cap_pinfo is embedded at the start of the sock. */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the receive path
		 * can make progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		/* Backlog fully drained: done. */
		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
/* Deliver an in-sequence ERTM I-frame: pass it to reassembly directly, or
 * queue it and enter the local-busy state (RNR + busy work) when the
 * receive queue is full.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	/* Already busy: append to the backlog and try to drain it. */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending until we recover. */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
/* Reassemble a streaming-mode SDU from its SAR fragments.  Streaming mode
 * tolerates loss: inconsistent sequences simply discard the partial SDU
 * instead of disconnecting.  Consumes @skb; returns 0 on success or a
 * negative errno.
 */
static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 * Streaming Mode.
	 */

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* A SAR sequence was in progress: abandon it. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			kfree_skb(pi->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len > pi->imtu)
			goto drop;

		/* Only deliver when the declared length matches exactly;
		 * otherwise the SDU is silently discarded below. */
		if (pi->partial_sdu_len == pi->sdu_len) {
			/* NOTE(review): skb_clone() can return NULL under
			 * GFP_ATOMIC and the result is passed straight to
			 * sock_queue_rcv_skb() — verify that helper
			 * tolerates NULL. */
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
			if (err < 0)
				kfree_skb(_skb);
		}
		err = 0;

drop:
		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return err;
}
3922 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3924 struct sk_buff *skb;
3925 u16 control;
3927 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3928 if (bt_cb(skb)->tx_seq != tx_seq)
3929 break;
3931 skb = skb_dequeue(SREJ_QUEUE(sk));
3932 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3933 l2cap_ertm_reassembly_sdu(sk, skb, control);
3934 l2cap_pi(sk)->buffer_seq_srej =
3935 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3936 tx_seq = (tx_seq + 1) % 64;
3940 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3942 struct l2cap_pinfo *pi = l2cap_pi(sk);
3943 struct srej_list *l, *tmp;
3944 u16 control;
3946 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3947 if (l->tx_seq == tx_seq) {
3948 list_del(&l->list);
3949 kfree(l);
3950 return;
3952 control = L2CAP_SUPER_SELECT_REJECT;
3953 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3954 l2cap_send_sframe(pi, control);
3955 list_del(&l->list);
3956 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ S-frame per missing sequence number between
 * expected_tx_seq and @tx_seq, recording each request in the SREJ list,
 * then advance expected_tx_seq past the frame that just arrived.
 */
static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *new;
	u16 control;

	while (tx_seq != pi->expected_tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);

		/* NOTE(review): kzalloc(GFP_ATOMIC) is not checked for
		 * NULL before the dereference below — would oops on
		 * allocation failure; fixing it needs an error path
		 * through the callers. */
		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		new->tx_seq = pi->expected_tx_seq;
		pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		list_add_tail(&new->list, SREJ_LIST(sk));
	}
	/* Step past the frame that triggered this call. */
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
}
/* Handle a received ERTM I-frame (numbered information frame).
 *
 * Acknowledges peer frames via req_seq, detects out-of-sequence frames
 * and drives the SREJ (selective reject) recovery state, and delivers
 * in-sequence frames to reassembly.  Caller holds the socket lock.
 * Always consumes or queues @skb; returns 0.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	/* Ack roughly every tx_win/6 frames to bound peer's window usage. */
	int num_to_ack = (pi->tx_win/6) + 1;
	int err = 0;

	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
								rx_control);

	/* F-bit answering our P-bit poll: stop the monitor timer and
	 * resume retransmission timing if frames are still unacked. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* req_seq acknowledges our transmitted frames. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	/* Offset of this frame within the modulo-64 receive window. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= pi->tx_win) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* NOTE(review): this uses '==', which only matches when LOCAL_BUSY
	 * is the *sole* flag set in conn_state; '&' looks intended —
	 * confirm against the ERTM local-busy handling. */
	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest requested frame arrived: queue it and
			 * flush any in-order run that follows it. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* All gaps filled: leave SREJ recovery. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(pi);
				BT_DBG("sk %p, Exit SREJ_SENT", sk);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
				goto drop;

			/* Frame we already SREJ'd: re-issue earlier SREJs. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			/* New gap past the known ones: request it. */
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(pi->expected_tx_seq - pi->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("sk %p, Enter SREJ", sk);

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		__skb_queue_head_init(BUSY_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);

		del_timer(&pi->ack_timer);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	/* In-sequence frame during SREJ recovery is buffered, not
	 * delivered, until the gaps before it are filled. */
	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	err = l2cap_push_rx_skb(sk, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	}

	__mod_ack_timer();

	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
	if (pi->num_acked == num_to_ack - 1)
		l2cap_send_ack(pi);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Handle a received RR (Receiver Ready) S-frame.
 *
 * req_seq acknowledges our transmitted frames.  A P-bit demands an
 * F-bit reply (SREJ tail or RR/RNR); an F-bit answers a poll of ours
 * and may trigger retransmission; a plain RR clears remote-busy and
 * resumes transmission.  Caller holds the socket lock.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our next response must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Answer to our poll. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
/* Handle a received REJ (Reject) S-frame.
 *
 * The peer rejects everything from req_seq onward: drop what it did
 * acknowledge and retransmit the rest.  An F-bit frame may instead be
 * the answer to a pending REJ we already acted on.  Caller holds the
 * socket lock.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* Only retransmit if this REJ was not already handled. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we acted while a poll is outstanding, so the
		 * eventual F-bit reply is not processed twice. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a received SREJ (Selective Reject) S-frame.
 *
 * The peer requests retransmission of the single frame req_seq.  P-bit
 * variant also acknowledges earlier frames and demands an F-bit reply;
 * F-bit variant may be the answer to an SREJ we already served (tracked
 * via srej_save_reqseq / SREJ_ACT).  Caller holds the socket lock.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if this F-bit answers an SREJ we
		 * already served for the same sequence number. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a received RNR (Receiver Not Ready) S-frame.
 *
 * Marks the peer busy (pausing our transmissions), processes the
 * acknowledgement in req_seq, and answers a P-bit poll with the
 * appropriate F-bit response.  Caller holds the socket lock.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer can't receive: stop retransmitting. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: keep requesting our missing frames. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4247 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4249 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4251 if (L2CAP_CTRL_FINAL & rx_control &&
4252 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4253 del_timer(&l2cap_pi(sk)->monitor_timer);
4254 if (l2cap_pi(sk)->unacked_frames > 0)
4255 __mod_retrans_timer();
4256 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4259 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4260 case L2CAP_SUPER_RCV_READY:
4261 l2cap_data_channel_rrframe(sk, rx_control);
4262 break;
4264 case L2CAP_SUPER_REJECT:
4265 l2cap_data_channel_rejframe(sk, rx_control);
4266 break;
4268 case L2CAP_SUPER_SELECT_REJECT:
4269 l2cap_data_channel_srejframe(sk, rx_control);
4270 break;
4272 case L2CAP_SUPER_RCV_NOT_READY:
4273 l2cap_data_channel_rnrframe(sk, rx_control);
4274 break;
4277 kfree_skb(skb);
4278 return 0;
/* Validate and dispatch one received ERTM frame.
 *
 * Strips the 16-bit control field, checks the FCS, validates payload
 * length against MPS and the req-seq against our transmit window, then
 * routes the frame to the I-frame or S-frame handler.  Invalid frames
 * either just get dropped (bad FCS) or tear the channel down.
 * Caller holds the socket lock.  Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* SDU-start I-frames carry a 2-byte SDU length header. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* The trailing CRC16, if in use, is not payload. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative: frame shorter than its headers. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Deliver a frame received on a connection-oriented channel (by CID).
 *
 * Looks up the channel socket and routes the frame according to the
 * channel mode (basic / ERTM / streaming).  The skb is consumed on
 * every path.  NOTE(review): l2cap_get_chan_by_scid appears to return
 * with the socket bh-locked — hence the bh_unlock_sock at 'done';
 * confirm against its definition.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly unless userspace owns the socket, in
		 * which case defer via the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SDU-start frames carry a 2-byte SDU length header. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never retransmits: on a sequence gap
		 * just resynchronize past the received frame. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4439 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4441 struct sock *sk;
4443 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4444 if (!sk)
4445 goto drop;
4447 bh_lock_sock(sk);
4449 BT_DBG("sk %p, len %d", sk, skb->len);
4451 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4452 goto drop;
4454 if (l2cap_pi(sk)->imtu < skb->len)
4455 goto drop;
4457 if (!sock_queue_rcv_skb(sk, skb))
4458 goto done;
4460 drop:
4461 kfree_skb(skb);
4463 done:
4464 if (sk)
4465 bh_unlock_sock(sk);
4466 return 0;
4469 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4471 struct l2cap_hdr *lh = (void *) skb->data;
4472 u16 cid, len;
4473 __le16 psm;
4475 skb_pull(skb, L2CAP_HDR_SIZE);
4476 cid = __le16_to_cpu(lh->cid);
4477 len = __le16_to_cpu(lh->len);
4479 if (len != skb->len) {
4480 kfree_skb(skb);
4481 return;
4484 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4486 switch (cid) {
4487 case L2CAP_CID_SIGNALING:
4488 l2cap_sig_channel(conn, skb);
4489 break;
4491 case L2CAP_CID_CONN_LESS:
4492 psm = get_unaligned_le16(skb->data);
4493 skb_pull(skb, 2);
4494 l2cap_conless_channel(conn, psm, skb);
4495 break;
4497 default:
4498 l2cap_data_channel(conn, cid, skb);
4499 break;
4503 /* ---- L2CAP interface with lower layer (HCI) ---- */
4505 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4507 int exact = 0, lm1 = 0, lm2 = 0;
4508 register struct sock *sk;
4509 struct hlist_node *node;
4511 if (type != ACL_LINK)
4512 return -EINVAL;
4514 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4516 /* Find listening sockets and check their link_mode */
4517 read_lock(&l2cap_sk_list.lock);
4518 sk_for_each(sk, node, &l2cap_sk_list.head) {
4519 if (sk->sk_state != BT_LISTEN)
4520 continue;
4522 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4523 lm1 |= HCI_LM_ACCEPT;
4524 if (l2cap_pi(sk)->role_switch)
4525 lm1 |= HCI_LM_MASTER;
4526 exact++;
4527 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4528 lm2 |= HCI_LM_ACCEPT;
4529 if (l2cap_pi(sk)->role_switch)
4530 lm2 |= HCI_LM_MASTER;
4533 read_unlock(&l2cap_sk_list.lock);
4535 return exact ? lm1 : lm2;
4538 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4540 struct l2cap_conn *conn;
4542 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4544 if (hcon->type != ACL_LINK)
4545 return -EINVAL;
4547 if (!status) {
4548 conn = l2cap_conn_add(hcon, status);
4549 if (conn)
4550 l2cap_conn_ready(conn);
4551 } else
4552 l2cap_conn_del(hcon, bt_err(status));
4554 return 0;
4557 static int l2cap_disconn_ind(struct hci_conn *hcon)
4559 struct l2cap_conn *conn = hcon->l2cap_data;
4561 BT_DBG("hcon %p", hcon);
4563 if (hcon->type != ACL_LINK || !conn)
4564 return 0x13;
4566 return conn->disc_reason;
4569 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4571 BT_DBG("hcon %p reason %d", hcon, reason);
4573 if (hcon->type != ACL_LINK)
4574 return -EINVAL;
4576 l2cap_conn_del(hcon, bt_err(reason));
4578 return 0;
4581 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4583 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4584 return;
4586 if (encrypt == 0x00) {
4587 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4588 l2cap_sock_clear_timer(sk);
4589 l2cap_sock_set_timer(sk, HZ * 5);
4590 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4591 __l2cap_sock_close(sk, ECONNREFUSED);
4592 } else {
4593 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4594 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure finished on @hcon.
 *
 * Walks every channel on the connection (each locked individually) and
 * advances its state: established channels re-check encryption,
 * security-pending outgoing channels send the deferred Connect Request,
 * and incoming channels answer with success or a security block.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channel still waiting on its own security round-trip:
		 * this result is not for it. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * Connect Request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule a quick
				 * teardown via the socket timer. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: one ACL data packet arrived for @hcon.
 *
 * Reassembles L2CAP frames that span multiple ACL packets: an
 * ACL_START packet carries the basic header, from which the total
 * frame length is known; continuation packets are appended to
 * conn->rx_skb until rx_len reaches zero, then the complete frame is
 * passed to l2cap_recv_frame().  The incoming fragment skb is always
 * consumed (its payload is copied into rx_skb).  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Create the L2CAP connection on demand for incoming data. */
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A new start while reassembly is pending means we lost
		 * fragments: discard the partial frame. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject frames exceeding the channel's receive MTU up
		 * front, before allocating the reassembly buffer.
		 * NOTE(review): the helper appears to return the socket
		 * bh-locked — hence the unlock calls; confirm. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start: protocol error. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4777 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4779 struct sock *sk;
4780 struct hlist_node *node;
4782 read_lock_bh(&l2cap_sk_list.lock);
4784 sk_for_each(sk, node, &l2cap_sk_list.head) {
4785 struct l2cap_pinfo *pi = l2cap_pi(sk);
4787 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4788 batostr(&bt_sk(sk)->src),
4789 batostr(&bt_sk(sk)->dst),
4790 sk->sk_state, __le16_to_cpu(pi->psm),
4791 pi->scid, pi->dcid,
4792 pi->imtu, pi->omtu, pi->sec_level);
4795 read_unlock_bh(&l2cap_sk_list.lock);
4797 return 0;
/* debugfs open: bind the single-shot seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry;
 * read side is handled entirely by the seq_file helpers. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal at module exit. */
static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets;
 * unsupported operations fall back to the sock_no_* stubs. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) creates L2CAP sockets. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
/* HCI protocol hooks: how the HCI core notifies L2CAP about ACL link
 * events and delivers ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4851 static int __init l2cap_init(void)
4853 int err;
4855 err = proto_register(&l2cap_proto, 0);
4856 if (err < 0)
4857 return err;
4859 _busy_wq = create_singlethread_workqueue("l2cap");
4860 if (!_busy_wq) {
4861 proto_unregister(&l2cap_proto);
4862 return -ENOMEM;
4865 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4866 if (err < 0) {
4867 BT_ERR("L2CAP socket registration failed");
4868 goto error;
4871 err = hci_register_proto(&l2cap_hci_proto);
4872 if (err < 0) {
4873 BT_ERR("L2CAP protocol registration failed");
4874 bt_sock_unregister(BTPROTO_L2CAP);
4875 goto error;
4878 if (bt_debugfs) {
4879 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4880 bt_debugfs, NULL, &l2cap_debugfs_fops);
4881 if (!l2cap_debugfs)
4882 BT_ERR("Failed to create L2CAP debug file");
4885 BT_INFO("L2CAP ver %s", VERSION);
4886 BT_INFO("L2CAP socket layer initialized");
4888 return 0;
4890 error:
4891 destroy_workqueue(_busy_wq);
4892 proto_unregister(&l2cap_proto);
4893 return err;
/* Module exit: undo l2cap_init() — remove debugfs, flush and destroy
 * the workqueue, unregister the socket family and HCI hooks, and
 * finally unregister the proto. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Let any queued busy-state work finish before destroying. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

/* Allow runtime opt-out of ERTM (falls back to basic mode). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Matches the alias requested by bt_sock_create() for BTPROTO_L2CAP. */
MODULE_ALIAS("bt-proto-0");