vmalloc: rename temporary variable in __insert_vmap_area()
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blobdaa7a988d9a6b4de7c7767748f1f18e1524cb78f
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
/* Module option: non-zero disables ERTM and streaming modes, forcing
 * basic mode only. Static storage is zero by default; kernel style
 * (checkpatch) forbids the explicit "= 0" initializer. */
static int disable_ertm;
/* Locally supported feature mask advertised in information responses. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel bitmap: bit for the L2CAP signalling channel. */
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
/* Workqueue servicing the ERTM local-busy state machine. */
67 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined later in this file. */
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
/* sk_timer expiry handler: map the socket state to an errno, close the
 * channel and drop the timer's reference on the socket. Runs in timer
 * (softirq) context, hence bh_lock_sock. */
86 static void l2cap_sock_timeout(unsigned long arg)
88 struct sock *sk = (struct sock *) arg;
89 int reason;
91 BT_DBG("sock %p state %d", sk, sk->sk_state);
93 bh_lock_sock(sk);
/* Timing out mid-setup (or mid-connect above SDP security) reads as a
 * refused connection; anything else is a plain timeout. */
95 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
96 reason = ECONNREFUSED;
97 else if (sk->sk_state == BT_CONNECT &&
98 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
99 reason = ECONNREFUSED;
100 else
101 reason = ETIMEDOUT;
103 __l2cap_sock_close(sk, reason);
105 bh_unlock_sock(sk);
/* Kill must run on the unlocked socket; sock_put pairs with the hold
 * taken when the timer was armed. */
107 l2cap_sock_kill(sk);
108 sock_put(sk);
/* (Re)arm the socket timer to fire @timeout jiffies from now. */
111 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
117 static void l2cap_sock_clear_timer(struct sock *sk)
119 BT_DBG("sock %p state %d", sk, sk->sk_state);
120 sk_stop_timer(sk, &sk->sk_timer);
123 /* ---- L2CAP channels ---- */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
129 break;
131 return s;
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 struct sock *s;
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
139 break;
141 return s;
144 /* Find channel with given SCID.
145 * Returns locked socket */
146 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
148 struct sock *s;
/* Lock the socket while still holding the list read lock so it cannot
 * be unlinked between lookup and lock. */
149 read_lock(&l->lock);
150 s = __l2cap_get_chan_by_scid(l, cid);
151 if (s)
152 bh_lock_sock(s);
153 read_unlock(&l->lock);
154 return s;
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 struct sock *s;
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
162 break;
164 return s;
/* Find the channel matching a command identifier; returns it with the
 * socket bh-locked (taken under the list read lock to avoid races). */
167 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
169 struct sock *s;
170 read_lock(&l->lock);
171 s = __l2cap_get_chan_by_ident(l, ident);
172 if (s)
173 bh_lock_sock(s);
174 read_unlock(&l->lock);
175 return s;
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
180 u16 cid = L2CAP_CID_DYN_START;
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
184 return cid;
187 return 0;
/* Push @sk onto the head of the connection's channel list; takes a
 * reference on the socket. Caller holds the list write lock. */
190 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
192 sock_hold(sk);
194 if (l->head)
195 l2cap_pi(l->head)->prev_c = sk;
197 l2cap_pi(sk)->next_c = l->head;
198 l2cap_pi(sk)->prev_c = NULL;
199 l->head = sk;
/* Remove @sk from the connection's doubly-linked channel list and drop
 * the reference taken by __l2cap_chan_link. */
202 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
204 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
206 write_lock_bh(&l->lock);
207 if (sk == l->head)
208 l->head = next;
210 if (next)
211 l2cap_pi(next)->prev_c = prev;
212 if (prev)
213 l2cap_pi(prev)->next_c = next;
214 write_unlock_bh(&l->lock);
216 __sock_put(sk);
/* Attach @sk to @conn: assign CIDs by socket type, link it into the
 * channel list and, for incoming channels, queue it on the listening
 * parent. Caller holds the channel list write lock. */
219 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
221 struct l2cap_chan_list *l = &conn->chan_list;
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
224 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: default disconnect reason "remote user terminated". */
226 conn->disc_reason = 0x13;
228 l2cap_pi(sk)->conn = conn;
230 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
233 } else if (sk->sk_type == SOCK_DGRAM) {
234 /* Connectionless socket */
235 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 } else {
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
245 __l2cap_chan_link(l, sk);
247 if (parent)
248 bt_accept_enqueue(parent, sk);
251 /* Delete channel.
252 * Must be called on the locked socket. */
253 static void l2cap_chan_del(struct sock *sk, int err)
255 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
256 struct sock *parent = bt_sk(sk)->parent;
258 l2cap_sock_clear_timer(sk);
260 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
262 if (conn) {
263 /* Unlink from channel list */
264 l2cap_chan_unlink(&conn->chan_list, sk);
265 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL link reference this channel held. */
266 hci_conn_put(conn->hcon);
269 sk->sk_state = BT_CLOSED;
270 sock_set_flag(sk, SOCK_ZAPPED);
272 if (err)
273 sk->sk_err = err;
/* Wake either the listening parent or this socket's waiters. */
275 if (parent) {
276 bt_accept_unlink(sk);
277 parent->sk_data_ready(parent, 0);
278 } else
279 sk->sk_state_change(sk);
281 skb_queue_purge(TX_QUEUE(sk));
/* ERTM teardown: stop all retransmission machinery and free the
 * SREJ bookkeeping before the socket goes away. */
283 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 struct srej_list *l, *tmp;
286 del_timer(&l2cap_pi(sk)->retrans_timer);
287 del_timer(&l2cap_pi(sk)->monitor_timer);
288 del_timer(&l2cap_pi(sk)->ack_timer);
290 skb_queue_purge(SREJ_QUEUE(sk));
291 skb_queue_purge(BUSY_QUEUE(sk));
293 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
294 list_del(&l->list);
295 kfree(l);
300 /* Service level security */
/* Derive the HCI authentication requirement from the socket's security
 * level (SDP/PSM 0x0001 gets no-bonding semantics) and ask the HCI
 * layer to enforce it on the underlying link. */
301 static inline int l2cap_check_security(struct sock *sk)
303 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
304 __u8 auth_type;
306 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
308 auth_type = HCI_AT_NO_BONDING_MITM;
309 else
310 auth_type = HCI_AT_NO_BONDING;
/* SDP never needs more than its own minimal security level. */
312 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
313 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
314 } else {
315 switch (l2cap_pi(sk)->sec_level) {
316 case BT_SECURITY_HIGH:
317 auth_type = HCI_AT_GENERAL_BONDING_MITM;
318 break;
319 case BT_SECURITY_MEDIUM:
320 auth_type = HCI_AT_GENERAL_BONDING;
321 break;
322 default:
323 auth_type = HCI_AT_NO_BONDING;
324 break;
328 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
329 auth_type);
/* Hand out the next signalling command identifier for @conn, cycling
 * through the kernel-reserved range 1..128 under the connection lock. */
332 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
334 u8 id;
336 /* Get next available identificator.
337 * 1 - 128 are used by kernel.
338 * 129 - 199 are reserved.
339 * 200 - 254 are used by utilities like l2ping, etc.
342 spin_lock_bh(&conn->lock);
344 if (++conn->tx_ident > 128)
345 conn->tx_ident = 1;
347 id = conn->tx_ident;
349 spin_unlock_bh(&conn->lock);
351 return id;
354 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
358 BT_DBG("code 0x%2.2x", code);
360 if (!skb)
361 return;
363 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame carrying @control. Appends F/P
 * bits from pending connection state and a CRC16 FCS when negotiated.
 * Silently does nothing if the channel is not connected or allocation
 * fails. */
366 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
368 struct sk_buff *skb;
369 struct l2cap_hdr *lh;
370 struct l2cap_conn *conn = pi->conn;
371 struct sock *sk = (struct sock *)pi;
372 int count, hlen = L2CAP_HDR_SIZE + 2;
374 if (sk->sk_state != BT_CONNECTED)
375 return;
/* FCS adds two trailing bytes to the frame. */
377 if (pi->fcs == L2CAP_FCS_CRC16)
378 hlen += 2;
380 BT_DBG("pi %p, control 0x%2.2x", pi, control);
382 count = min_t(unsigned int, conn->mtu, hlen);
383 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggyback a pending Final bit, consuming the flag. */
385 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
386 control |= L2CAP_CTRL_FINAL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
390 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
391 control |= L2CAP_CTRL_POLL;
392 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
395 skb = bt_skb_alloc(count, GFP_ATOMIC);
396 if (!skb)
397 return;
399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
400 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
401 lh->cid = cpu_to_le16(pi->dcid);
402 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the FCS field itself. */
404 if (pi->fcs == L2CAP_FCS_CRC16) {
405 u16 fcs = crc16(0, (u8 *)lh, count - 2);
406 put_unaligned_le16(fcs, skb_put(skb, 2));
409 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RNR while the local side is busy (recording that RNR went out),
 * otherwise RR, in either case acknowledging up to buffer_seq. */
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
414 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
415 control |= L2CAP_SUPER_RCV_NOT_READY;
416 pi->conn_state |= L2CAP_CONN_RNR_SENT;
417 } else
418 control |= L2CAP_SUPER_RCV_READY;
420 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
422 l2cap_send_sframe(pi, control);
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: once the remote feature mask is
 * known (and security passes), send a Connection Request; otherwise
 * first issue an Information Request for the feature mask. */
430 static void l2cap_do_start(struct sock *sk)
432 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
434 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange in flight: wait for it to finish. */
435 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
436 return;
438 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
439 struct l2cap_conn_req req;
440 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
441 req.psm = l2cap_pi(sk)->psm;
443 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
446 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
447 L2CAP_CONN_REQ, sizeof(req), &req);
449 } else {
450 struct l2cap_info_req req;
451 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
453 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
454 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the information response. */
456 mod_timer(&conn->info_timer, jiffies +
457 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
459 l2cap_send_cmd(conn, conn->info_ident,
460 L2CAP_INFO_REQ, sizeof(req), &req);
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
/* Abort the channel: flush pending TX, stop ERTM timers, send an L2CAP
 * Disconnection Request and move the socket to BT_DISCONN with @err. */
480 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
482 struct l2cap_disconn_req req;
484 if (!conn)
485 return;
487 skb_queue_purge(TX_QUEUE(sk));
489 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 del_timer(&l2cap_pi(sk)->retrans_timer);
491 del_timer(&l2cap_pi(sk)->monitor_timer);
492 del_timer(&l2cap_pi(sk)->ack_timer);
495 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
497 l2cap_send_cmd(conn, l2cap_get_ident(conn),
498 L2CAP_DISCONN_REQ, sizeof(req), &req);
500 sk->sk_state = BT_DISCONN;
501 sk->sk_err = err;
504 /* ---- L2CAP connections ---- */
/* Drive every channel on @conn forward after the feature exchange:
 * BT_CONNECT channels get a Connection Request (or are queued for
 * closing when their mode is unsupported), BT_CONNECT2 channels get a
 * Connection Response and, on success, the first Configure Request.
 * Sockets to close are deferred to after the list lock is dropped. */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
509 struct sock *sk;
511 BT_DBG("conn %p", conn);
513 INIT_LIST_HEAD(&del.list);
515 read_lock(&l->lock);
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
518 bh_lock_sock(sk);
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
522 bh_unlock_sock(sk);
523 continue;
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
/* Mode locked to the device but unsupported by the peer:
 * defer closing this socket until the lock is released. */
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
/* NOTE(review): kzalloc(GFP_ATOMIC) result is dereferenced
 * without a NULL check below — an atomic allocation failure
 * here would oops. Needs an if (!tmp1) bail-out. */
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
558 char buf[128];
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
562 if (l2cap_check_security(sk)) {
/* Deferred setup: report PENDING and let userspace accept. */
563 if (bt_sk(sk)->defer_setup) {
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
569 } else {
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
574 } else {
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
594 bh_unlock_sock(sk);
597 read_unlock(&l->lock);
/* Now close the sockets collected above, outside the read lock. */
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
/* ACL link is up: mark raw/datagram channels connected immediately and
 * start the L2CAP handshake for connection-oriented ones. */
608 static void l2cap_conn_ready(struct l2cap_conn *conn)
610 struct l2cap_chan_list *l = &conn->chan_list;
611 struct sock *sk;
613 BT_DBG("conn %p", conn);
615 read_lock(&l->lock);
617 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
618 bh_lock_sock(sk);
620 if (sk->sk_type != SOCK_SEQPACKET &&
621 sk->sk_type != SOCK_STREAM) {
622 l2cap_sock_clear_timer(sk);
623 sk->sk_state = BT_CONNECTED;
624 sk->sk_state_change(sk);
625 } else if (sk->sk_state == BT_CONNECT)
626 l2cap_do_start(sk);
628 bh_unlock_sock(sk);
631 read_unlock(&l->lock);
634 /* Notify sockets that we cannot guaranty reliability anymore */
/* Flag @err on every channel that asked for reliable delivery. */
635 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
637 struct l2cap_chan_list *l = &conn->chan_list;
638 struct sock *sk;
640 BT_DBG("conn %p", conn);
642 read_lock(&l->lock);
644 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
645 if (l2cap_pi(sk)->force_reliable)
646 sk->sk_err = err;
649 read_unlock(&l->lock);
652 static void l2cap_info_timeout(unsigned long arg)
654 struct l2cap_conn *conn = (void *) arg;
656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
657 conn->info_ident = 0;
659 l2cap_conn_start(conn);
/* Create and attach the L2CAP connection object for an ACL link.
 * Returns the existing object if already present, NULL on allocation
 * failure or when @status reports an HCI error. */
662 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
664 struct l2cap_conn *conn = hcon->l2cap_data;
666 if (conn || status)
667 return conn;
669 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
670 if (!conn)
671 return NULL;
673 hcon->l2cap_data = conn;
674 conn->hcon = hcon;
676 BT_DBG("hcon %p conn %p", hcon, conn);
678 conn->mtu = hcon->hdev->acl_mtu;
679 conn->src = &hcon->hdev->bdaddr;
680 conn->dst = &hcon->dst;
682 conn->feat_mask = 0;
684 spin_lock_init(&conn->lock);
685 rwlock_init(&conn->chan_list.lock);
687 setup_timer(&conn->info_timer, l2cap_info_timeout,
688 (unsigned long) conn);
/* 0x13: default disconnect reason "remote user terminated". */
690 conn->disc_reason = 0x13;
692 return conn;
/* Tear down an L2CAP connection: delete and kill every channel with
 * @err, stop the info timer and free the connection object. */
695 static void l2cap_conn_del(struct hci_conn *hcon, int err)
697 struct l2cap_conn *conn = hcon->l2cap_data;
698 struct sock *sk;
700 if (!conn)
701 return;
703 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop a partially reassembled frame, if any. */
705 kfree_skb(conn->rx_skb);
707 /* Kill channels */
708 while ((sk = conn->chan_list.head)) {
709 bh_lock_sock(sk);
710 l2cap_chan_del(sk, err);
711 bh_unlock_sock(sk);
712 l2cap_sock_kill(sk);
715 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
716 del_timer_sync(&conn->info_timer);
718 hcon->l2cap_data = NULL;
719 kfree(conn);
722 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
724 struct l2cap_chan_list *l = &conn->chan_list;
725 write_lock_bh(&l->lock);
726 __l2cap_chan_add(conn, sk, parent);
727 write_unlock_bh(&l->lock);
730 /* ---- Socket interface ---- */
/* Find a socket bound to exactly this (source PSM, source bdaddr)
 * pair. Caller holds the l2cap_sk_list lock; NULL when not found. */
731 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
733 struct sock *sk;
734 struct hlist_node *node;
735 sk_for_each(sk, node, &l2cap_sk_list.head)
736 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
737 goto found;
738 sk = NULL;
739 found:
740 return sk;
743 /* Find socket with psm and source bdaddr.
744 * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY binding is kept as the
 * fallback. @node being non-NULL signals the loop broke on an exact
 * match, so the sk/sk1 choice hinges on it. */
746 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
748 struct sock *sk = NULL, *sk1 = NULL;
749 struct hlist_node *node;
751 sk_for_each(sk, node, &l2cap_sk_list.head) {
752 if (state && sk->sk_state != state)
753 continue;
755 if (l2cap_pi(sk)->psm == psm) {
756 /* Exact match. */
757 if (!bacmp(&bt_sk(sk)->src, src))
758 break;
760 /* Closest match */
761 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
762 sk1 = sk;
765 return node ? sk : sk1;
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
770 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
772 struct sock *s;
/* Lock the socket while still under the list read lock so it cannot
 * disappear between lookup and lock. */
773 read_lock(&l2cap_sk_list.lock);
774 s = __l2cap_get_sock_by_psm(state, psm, src);
775 if (s)
776 bh_lock_sock(s);
777 read_unlock(&l2cap_sk_list.lock);
778 return s;
/* sk_destruct callback: free any skbs still queued on the socket. */
781 static void l2cap_sock_destruct(struct sock *sk)
783 BT_DBG("sk %p", sk);
785 skb_queue_purge(&sk->sk_receive_queue);
786 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
789 static void l2cap_sock_cleanup_listen(struct sock *parent)
791 struct sock *sk;
793 BT_DBG("parent %p", parent);
795 /* Close not yet accepted channels */
796 while ((sk = bt_accept_dequeue(parent, NULL)))
797 l2cap_sock_close(sk);
799 parent->sk_state = BT_CLOSED;
800 sock_set_flag(parent, SOCK_ZAPPED);
803 /* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
806 static void l2cap_sock_kill(struct sock *sk)
/* Only fully-detached sockets may die: zapped and no struct socket. */
808 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
809 return;
811 BT_DBG("sk %p state %d", sk, sk->sk_state);
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list, sk);
815 sock_set_flag(sk, SOCK_DEAD);
816 sock_put(sk);
/* State-machine driven close. Caller holds the socket lock. Depending
 * on the state this cleans a listener, sends a Disconnection Request,
 * rejects a pending incoming connection, or just deletes the channel. */
819 static void __l2cap_sock_close(struct sock *sk, int reason)
821 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
823 switch (sk->sk_state) {
824 case BT_LISTEN:
825 l2cap_sock_cleanup_listen(sk);
826 break;
828 case BT_CONNECTED:
829 case BT_CONFIG:
830 if (sk->sk_type == SOCK_SEQPACKET ||
831 sk->sk_type == SOCK_STREAM) {
832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Bound the wait for the peer's Disconnection Response. */
834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
835 l2cap_send_disconn_req(conn, sk, reason);
836 } else
837 l2cap_chan_del(sk, reason);
838 break;
/* Incoming connection still pending: reject it explicitly. */
840 case BT_CONNECT2:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844 struct l2cap_conn_rsp rsp;
845 __u16 result;
847 if (bt_sk(sk)->defer_setup)
848 result = L2CAP_CR_SEC_BLOCK;
849 else
850 result = L2CAP_CR_BAD_PSM;
852 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
853 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
854 rsp.result = cpu_to_le16(result);
855 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
856 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
857 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
858 } else
859 l2cap_chan_del(sk, reason);
860 break;
862 case BT_CONNECT:
863 case BT_DISCONN:
864 l2cap_chan_del(sk, reason);
865 break;
867 default:
868 sock_set_flag(sk, SOCK_ZAPPED);
869 break;
873 /* Must be called on unlocked socket. */
/* Process-context close: stop the timer, run the close state machine
 * under lock, then reap the socket if it became a zapped orphan. */
874 static void l2cap_sock_close(struct sock *sk)
876 l2cap_sock_clear_timer(sk);
877 lock_sock(sk);
878 __l2cap_sock_close(sk, ECONNRESET);
879 release_sock(sk);
880 l2cap_sock_kill(sk);
/* Initialise per-socket L2CAP state. A child of an accepting socket
 * inherits the parent's configuration; a fresh socket gets defaults
 * (ERTM for SOCK_STREAM when ERTM is enabled, basic mode otherwise). */
883 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
885 struct l2cap_pinfo *pi = l2cap_pi(sk);
887 BT_DBG("sk %p", sk);
889 if (parent) {
890 sk->sk_type = parent->sk_type;
891 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
893 pi->imtu = l2cap_pi(parent)->imtu;
894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
896 pi->mode = l2cap_pi(parent)->mode;
897 pi->fcs = l2cap_pi(parent)->fcs;
898 pi->max_tx = l2cap_pi(parent)->max_tx;
899 pi->tx_win = l2cap_pi(parent)->tx_win;
900 pi->sec_level = l2cap_pi(parent)->sec_level;
901 pi->role_switch = l2cap_pi(parent)->role_switch;
902 pi->force_reliable = l2cap_pi(parent)->force_reliable;
903 } else {
904 pi->imtu = L2CAP_DEFAULT_MTU;
905 pi->omtu = 0;
906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 pi->mode = L2CAP_MODE_ERTM;
/* Mode is mandated by this device, not negotiable away. */
908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 } else {
910 pi->mode = L2CAP_MODE_BASIC;
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
913 pi->fcs = L2CAP_FCS_CRC16;
914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
915 pi->sec_level = BT_SECURITY_LOW;
916 pi->role_switch = 0;
917 pi->force_reliable = 0;
920 /* Default config options */
921 pi->conf_len = 0;
922 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
923 skb_queue_head_init(TX_QUEUE(sk));
924 skb_queue_head_init(SREJ_QUEUE(sk));
925 skb_queue_head_init(BUSY_QUEUE(sk));
926 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes each struct sock as an l2cap_pinfo. */
929 static struct proto l2cap_proto = {
930 .name = "L2CAP",
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP socket: destructor, send
 * timeout, timer, and linkage into the global socket list. Returns
 * NULL on allocation failure. */
935 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
937 struct sock *sk;
939 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
940 if (!sk)
941 return NULL;
943 sock_init_data(sock, sk);
944 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
946 sk->sk_destruct = l2cap_sock_destruct;
947 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
949 sock_reset_flag(sk, SOCK_ZAPPED);
951 sk->sk_protocol = proto;
952 sk->sk_state = BT_OPEN;
954 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
956 bt_sock_link(&l2cap_sk_list, sk);
957 return sk;
/* socket(2) backend: validate the socket type (raw requires CAP_NET_RAW
 * unless kernel-internal), allocate and initialise the socket. */
960 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
961 int kern)
963 struct sock *sk;
965 BT_DBG("sock %p", sock);
967 sock->state = SS_UNCONNECTED;
969 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
970 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
971 return -ESOCKTNOSUPPORT;
973 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
974 return -EPERM;
976 sock->ops = &l2cap_sock_ops;
978 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
979 if (!sk)
980 return -ENOMEM;
982 l2cap_sock_init(sk, NULL);
983 return 0;
/* bind(2) backend: validate the PSM (odd, reserved range needs
 * CAP_NET_BIND_SERVICE), reject duplicate (PSM, bdaddr) bindings and
 * record the source address. CID binding is not supported here. */
986 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
988 struct sock *sk = sock->sk;
989 struct sockaddr_l2 la;
990 int len, err = 0;
992 BT_DBG("sk %p", sk);
994 if (!addr || addr->sa_family != AF_BLUETOOTH)
995 return -EINVAL;
/* Copy only what the caller supplied; the rest stays zeroed. */
997 memset(&la, 0, sizeof(la));
998 len = min_t(unsigned int, sizeof(la), alen);
999 memcpy(&la, addr, len);
1001 if (la.l2_cid)
1002 return -EINVAL;
1004 lock_sock(sk);
1006 if (sk->sk_state != BT_OPEN) {
1007 err = -EBADFD;
1008 goto done;
1011 if (la.l2_psm) {
1012 __u16 psm = __le16_to_cpu(la.l2_psm);
1014 /* PSM must be odd and lsb of upper byte must be 0 */
1015 if ((psm & 0x0101) != 0x0001) {
1016 err = -EINVAL;
1017 goto done;
1020 /* Restrict usage of well-known PSMs */
1021 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1022 err = -EACCES;
1023 goto done;
1027 write_lock_bh(&l2cap_sk_list.lock);
1029 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1030 err = -EADDRINUSE;
1031 } else {
1032 /* Save source address */
1033 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1034 l2cap_pi(sk)->psm = la.l2_psm;
1035 l2cap_pi(sk)->sport = la.l2_psm;
1036 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) get minimal security. */
1038 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1039 __le16_to_cpu(la.l2_psm) == 0x0003)
1040 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1043 write_unlock_bh(&l2cap_sk_list.lock);
1045 done:
1046 release_sock(sk);
1047 return err;
/* Establish the outgoing connection: pick a route, derive the HCI
 * authentication requirement from socket type/PSM/security level,
 * create (or reuse) the ACL link, attach the channel and either start
 * the L2CAP handshake or wait for the link to come up. Returns 0 on
 * success or a negative errno. */
1050 static int l2cap_do_connect(struct sock *sk)
1052 bdaddr_t *src = &bt_sk(sk)->src;
1053 bdaddr_t *dst = &bt_sk(sk)->dst;
1054 struct l2cap_conn *conn;
1055 struct hci_conn *hcon;
1056 struct hci_dev *hdev;
1057 __u8 auth_type;
1058 int err;
1060 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1061 l2cap_pi(sk)->psm);
1063 hdev = hci_get_route(dst, src);
1064 if (!hdev)
1065 return -EHOSTUNREACH;
1067 hci_dev_lock_bh(hdev);
1069 err = -ENOMEM;
/* Raw sockets use dedicated bonding; SDP never bonds; everything
 * else maps its security level to general bonding. */
1071 if (sk->sk_type == SOCK_RAW) {
1072 switch (l2cap_pi(sk)->sec_level) {
1073 case BT_SECURITY_HIGH:
1074 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1075 break;
1076 case BT_SECURITY_MEDIUM:
1077 auth_type = HCI_AT_DEDICATED_BONDING;
1078 break;
1079 default:
1080 auth_type = HCI_AT_NO_BONDING;
1081 break;
1083 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1084 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1085 auth_type = HCI_AT_NO_BONDING_MITM;
1086 else
1087 auth_type = HCI_AT_NO_BONDING;
1089 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1090 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1091 } else {
1092 switch (l2cap_pi(sk)->sec_level) {
1093 case BT_SECURITY_HIGH:
1094 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1095 break;
1096 case BT_SECURITY_MEDIUM:
1097 auth_type = HCI_AT_GENERAL_BONDING;
1098 break;
1099 default:
1100 auth_type = HCI_AT_NO_BONDING;
1101 break;
1105 hcon = hci_connect(hdev, ACL_LINK, dst,
1106 l2cap_pi(sk)->sec_level, auth_type);
1107 if (!hcon)
1108 goto done;
1110 conn = l2cap_conn_add(hcon, 0);
1111 if (!conn) {
1112 hci_conn_put(hcon);
1113 goto done;
1116 err = 0;
1118 /* Update source addr of the socket */
1119 bacpy(src, conn->src);
1121 l2cap_chan_add(conn, sk, NULL);
1123 sk->sk_state = BT_CONNECT;
1124 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: raw/dgram are instantly connected, otherwise
 * start the L2CAP channel handshake now. */
1126 if (hcon->state == BT_CONNECTED) {
1127 if (sk->sk_type != SOCK_SEQPACKET &&
1128 sk->sk_type != SOCK_STREAM) {
1129 l2cap_sock_clear_timer(sk);
1130 sk->sk_state = BT_CONNECTED;
1131 } else
1132 l2cap_do_start(sk);
1135 done:
1136 hci_dev_unlock_bh(hdev);
1137 hci_dev_put(hdev);
1138 return err;
/* connect(2) backend: validate address/PSM/mode against socket state,
 * record the destination, start the connection and wait (subject to
 * O_NONBLOCK) for BT_CONNECTED. */
1141 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1143 struct sock *sk = sock->sk;
1144 struct sockaddr_l2 la;
1145 int len, err = 0;
1147 BT_DBG("sk %p", sk);
1149 if (!addr || alen < sizeof(addr->sa_family) ||
1150 addr->sa_family != AF_BLUETOOTH)
1151 return -EINVAL;
1153 memset(&la, 0, sizeof(la));
1154 len = min_t(unsigned int, sizeof(la), alen);
1155 memcpy(&la, addr, len);
1157 if (la.l2_cid)
1158 return -EINVAL;
1160 lock_sock(sk);
/* Connection-oriented sockets must name a PSM. */
1162 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1163 && !la.l2_psm) {
1164 err = -EINVAL;
1165 goto done;
/* ERTM/streaming only when not administratively disabled. */
1168 switch (l2cap_pi(sk)->mode) {
1169 case L2CAP_MODE_BASIC:
1170 break;
1171 case L2CAP_MODE_ERTM:
1172 case L2CAP_MODE_STREAMING:
1173 if (!disable_ertm)
1174 break;
1175 /* fall through */
1176 default:
1177 err = -ENOTSUPP;
1178 goto done;
1181 switch (sk->sk_state) {
1182 case BT_CONNECT:
1183 case BT_CONNECT2:
1184 case BT_CONFIG:
1185 /* Already connecting */
1186 goto wait;
1188 case BT_CONNECTED:
1189 /* Already connected */
1190 err = -EISCONN;
1191 goto done;
1193 case BT_OPEN:
1194 case BT_BOUND:
1195 /* Can connect */
1196 break;
1198 default:
1199 err = -EBADFD;
1200 goto done;
1203 /* PSM must be odd and lsb of upper byte must be 0 */
1204 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
1205 sk->sk_type != SOCK_RAW) {
1206 err = -EINVAL;
1207 goto done;
1210 /* Set destination address and psm */
1211 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1212 l2cap_pi(sk)->psm = la.l2_psm;
1214 err = l2cap_do_connect(sk);
1215 if (err)
1216 goto done;
1218 wait:
1219 err = bt_sock_wait_state(sk, BT_CONNECTED,
1220 sock_sndtimeo(sk, flags & O_NONBLOCK));
1221 done:
1222 release_sock(sk);
1223 return err;
/* listen(2) backend: require a bound connection-oriented socket with a
 * supported mode; auto-assign a free PSM from the dynamic range when
 * the socket was bound without one. */
1226 static int l2cap_sock_listen(struct socket *sock, int backlog)
1228 struct sock *sk = sock->sk;
1229 int err = 0;
1231 BT_DBG("sk %p backlog %d", sk, backlog);
1233 lock_sock(sk);
1235 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1236 || sk->sk_state != BT_BOUND) {
1237 err = -EBADFD;
1238 goto done;
1241 switch (l2cap_pi(sk)->mode) {
1242 case L2CAP_MODE_BASIC:
1243 break;
1244 case L2CAP_MODE_ERTM:
1245 case L2CAP_MODE_STREAMING:
1246 if (!disable_ertm)
1247 break;
1248 /* fall through */
1249 default:
1250 err = -ENOTSUPP;
1251 goto done;
/* No PSM bound yet: claim the first free odd PSM >= 0x1001. */
1254 if (!l2cap_pi(sk)->psm) {
1255 bdaddr_t *src = &bt_sk(sk)->src;
1256 u16 psm;
1258 err = -EINVAL;
1260 write_lock_bh(&l2cap_sk_list.lock);
1262 for (psm = 0x1001; psm < 0x1100; psm += 2)
1263 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1264 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1265 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1266 err = 0;
1267 break;
1270 write_unlock_bh(&l2cap_sk_list.lock);
1272 if (err < 0)
1273 goto done;
1276 sk->sk_max_ack_backlog = backlog;
1277 sk->sk_ack_backlog = 0;
1278 sk->sk_state = BT_LISTEN;
1280 done:
1281 release_sock(sk);
1282 return err;
/* accept(2) backend: sleep on the listener's wait queue (wake-one)
 * until a child connection is ready, honouring O_NONBLOCK, signals and
 * the listener leaving BT_LISTEN. */
1285 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1287 DECLARE_WAITQUEUE(wait, current);
1288 struct sock *sk = sock->sk, *nsk;
1289 long timeo;
1290 int err = 0;
1292 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1294 if (sk->sk_state != BT_LISTEN) {
1295 err = -EBADFD;
1296 goto done;
1299 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1301 BT_DBG("sk %p timeo %ld", sk, timeo);
1303 /* Wait for an incoming connection. (wake-one). */
1304 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1305 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1306 set_current_state(TASK_INTERRUPTIBLE);
1307 if (!timeo) {
1308 err = -EAGAIN;
1309 break;
/* Drop the socket lock while sleeping. */
1312 release_sock(sk);
1313 timeo = schedule_timeout(timeo);
1314 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1316 if (sk->sk_state != BT_LISTEN) {
1317 err = -EBADFD;
1318 break;
1321 if (signal_pending(current)) {
1322 err = sock_intr_errno(timeo);
1323 break;
1326 set_current_state(TASK_RUNNING);
1327 remove_wait_queue(sk_sleep(sk), &wait);
1329 if (err)
1330 goto done;
1332 newsock->state = SS_CONNECTED;
1334 BT_DBG("new socket %p", nsk);
1336 done:
1337 release_sock(sk);
1338 return err;
1341 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1343 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1344 struct sock *sk = sock->sk;
1346 BT_DBG("sock %p, sk %p", sock, sk);
1348 addr->sa_family = AF_BLUETOOTH;
1349 *len = sizeof(struct sockaddr_l2);
1351 if (peer) {
1352 la->l2_psm = l2cap_pi(sk)->psm;
1353 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1354 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1355 } else {
1356 la->l2_psm = l2cap_pi(sk)->sport;
1357 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1358 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1361 return 0;
1364 static int __l2cap_wait_ack(struct sock *sk)
1366 DECLARE_WAITQUEUE(wait, current);
1367 int err = 0;
1368 int timeo = HZ/5;
1370 add_wait_queue(sk_sleep(sk), &wait);
1371 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1372 set_current_state(TASK_INTERRUPTIBLE);
1374 if (!timeo)
1375 timeo = HZ/5;
1377 if (signal_pending(current)) {
1378 err = sock_intr_errno(timeo);
1379 break;
1382 release_sock(sk);
1383 timeo = schedule_timeout(timeo);
1384 lock_sock(sk);
1386 err = sock_error(sk);
1387 if (err)
1388 break;
1390 set_current_state(TASK_RUNNING);
1391 remove_wait_queue(sk_sleep(sk), &wait);
1392 return err;
1395 static void l2cap_monitor_timeout(unsigned long arg)
1397 struct sock *sk = (void *) arg;
1399 BT_DBG("sk %p", sk);
1401 bh_lock_sock(sk);
1402 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1403 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1404 bh_unlock_sock(sk);
1405 return;
1408 l2cap_pi(sk)->retry_count++;
1409 __mod_monitor_timer();
1411 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1412 bh_unlock_sock(sk);
1415 static void l2cap_retrans_timeout(unsigned long arg)
1417 struct sock *sk = (void *) arg;
1419 BT_DBG("sk %p", sk);
1421 bh_lock_sock(sk);
1422 l2cap_pi(sk)->retry_count = 1;
1423 __mod_monitor_timer();
1425 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1427 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1428 bh_unlock_sock(sk);
1431 static void l2cap_drop_acked_frames(struct sock *sk)
1433 struct sk_buff *skb;
1435 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1436 l2cap_pi(sk)->unacked_frames) {
1437 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1438 break;
1440 skb = skb_dequeue(TX_QUEUE(sk));
1441 kfree_skb(skb);
1443 l2cap_pi(sk)->unacked_frames--;
1446 if (!l2cap_pi(sk)->unacked_frames)
1447 del_timer(&l2cap_pi(sk)->retrans_timer);
1450 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1452 struct l2cap_pinfo *pi = l2cap_pi(sk);
1454 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1456 hci_send_acl(pi->conn->hcon, skb, 0);
1459 static void l2cap_streaming_send(struct sock *sk)
1461 struct sk_buff *skb;
1462 struct l2cap_pinfo *pi = l2cap_pi(sk);
1463 u16 control, fcs;
1465 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1466 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1467 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1468 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1470 if (pi->fcs == L2CAP_FCS_CRC16) {
1471 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1472 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1475 l2cap_do_send(sk, skb);
1477 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1481 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1483 struct l2cap_pinfo *pi = l2cap_pi(sk);
1484 struct sk_buff *skb, *tx_skb;
1485 u16 control, fcs;
1487 skb = skb_peek(TX_QUEUE(sk));
1488 if (!skb)
1489 return;
1491 do {
1492 if (bt_cb(skb)->tx_seq == tx_seq)
1493 break;
1495 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1496 return;
1498 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1500 if (pi->remote_max_tx &&
1501 bt_cb(skb)->retries == pi->remote_max_tx) {
1502 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1503 return;
1506 tx_skb = skb_clone(skb, GFP_ATOMIC);
1507 bt_cb(skb)->retries++;
1508 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1510 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1511 control |= L2CAP_CTRL_FINAL;
1512 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1515 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1516 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1518 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1520 if (pi->fcs == L2CAP_FCS_CRC16) {
1521 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1522 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1525 l2cap_do_send(sk, tx_skb);
1528 static int l2cap_ertm_send(struct sock *sk)
1530 struct sk_buff *skb, *tx_skb;
1531 struct l2cap_pinfo *pi = l2cap_pi(sk);
1532 u16 control, fcs;
1533 int nsent = 0;
1535 if (sk->sk_state != BT_CONNECTED)
1536 return -ENOTCONN;
1538 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1540 if (pi->remote_max_tx &&
1541 bt_cb(skb)->retries == pi->remote_max_tx) {
1542 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1543 break;
1546 tx_skb = skb_clone(skb, GFP_ATOMIC);
1548 bt_cb(skb)->retries++;
1550 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1551 control &= L2CAP_CTRL_SAR;
1553 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1554 control |= L2CAP_CTRL_FINAL;
1555 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1557 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1558 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1559 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1562 if (pi->fcs == L2CAP_FCS_CRC16) {
1563 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1564 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1567 l2cap_do_send(sk, tx_skb);
1569 __mod_retrans_timer();
1571 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1572 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1574 pi->unacked_frames++;
1575 pi->frames_sent++;
1577 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1578 sk->sk_send_head = NULL;
1579 else
1580 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1582 nsent++;
1585 return nsent;
1588 static int l2cap_retransmit_frames(struct sock *sk)
1590 struct l2cap_pinfo *pi = l2cap_pi(sk);
1591 int ret;
1593 if (!skb_queue_empty(TX_QUEUE(sk)))
1594 sk->sk_send_head = TX_QUEUE(sk)->next;
1596 pi->next_tx_seq = pi->expected_ack_seq;
1597 ret = l2cap_ertm_send(sk);
1598 return ret;
1601 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1603 struct sock *sk = (struct sock *)pi;
1604 u16 control = 0;
1606 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1608 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1609 control |= L2CAP_SUPER_RCV_NOT_READY;
1610 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1611 l2cap_send_sframe(pi, control);
1612 return;
1615 if (l2cap_ertm_send(sk) > 0)
1616 return;
1618 control |= L2CAP_SUPER_RCV_READY;
1619 l2cap_send_sframe(pi, control);
1622 static void l2cap_send_srejtail(struct sock *sk)
1624 struct srej_list *tail;
1625 u16 control;
1627 control = L2CAP_SUPER_SELECT_REJECT;
1628 control |= L2CAP_CTRL_FINAL;
1630 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1631 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1633 l2cap_send_sframe(l2cap_pi(sk), control);
1636 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1638 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1639 struct sk_buff **frag;
1640 int err, sent = 0;
1642 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1643 return -EFAULT;
1645 sent += count;
1646 len -= count;
1648 /* Continuation fragments (no L2CAP header) */
1649 frag = &skb_shinfo(skb)->frag_list;
1650 while (len) {
1651 count = min_t(unsigned int, conn->mtu, len);
1653 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1654 if (!*frag)
1655 return err;
1656 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1657 return -EFAULT;
1659 sent += count;
1660 len -= count;
1662 frag = &(*frag)->next;
1665 return sent;
1668 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1670 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1671 struct sk_buff *skb;
1672 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1673 struct l2cap_hdr *lh;
1675 BT_DBG("sk %p len %d", sk, (int)len);
1677 count = min_t(unsigned int, (conn->mtu - hlen), len);
1678 skb = bt_skb_send_alloc(sk, count + hlen,
1679 msg->msg_flags & MSG_DONTWAIT, &err);
1680 if (!skb)
1681 return ERR_PTR(err);
1683 /* Create L2CAP header */
1684 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1685 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1686 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1687 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1689 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1690 if (unlikely(err < 0)) {
1691 kfree_skb(skb);
1692 return ERR_PTR(err);
1694 return skb;
1697 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1699 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1700 struct sk_buff *skb;
1701 int err, count, hlen = L2CAP_HDR_SIZE;
1702 struct l2cap_hdr *lh;
1704 BT_DBG("sk %p len %d", sk, (int)len);
1706 count = min_t(unsigned int, (conn->mtu - hlen), len);
1707 skb = bt_skb_send_alloc(sk, count + hlen,
1708 msg->msg_flags & MSG_DONTWAIT, &err);
1709 if (!skb)
1710 return ERR_PTR(err);
1712 /* Create L2CAP header */
1713 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1714 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1715 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1717 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1718 if (unlikely(err < 0)) {
1719 kfree_skb(skb);
1720 return ERR_PTR(err);
1722 return skb;
1725 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1727 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1728 struct sk_buff *skb;
1729 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1730 struct l2cap_hdr *lh;
1732 BT_DBG("sk %p len %d", sk, (int)len);
1734 if (!conn)
1735 return ERR_PTR(-ENOTCONN);
1737 if (sdulen)
1738 hlen += 2;
1740 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1741 hlen += 2;
1743 count = min_t(unsigned int, (conn->mtu - hlen), len);
1744 skb = bt_skb_send_alloc(sk, count + hlen,
1745 msg->msg_flags & MSG_DONTWAIT, &err);
1746 if (!skb)
1747 return ERR_PTR(err);
1749 /* Create L2CAP header */
1750 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1751 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1752 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1753 put_unaligned_le16(control, skb_put(skb, 2));
1754 if (sdulen)
1755 put_unaligned_le16(sdulen, skb_put(skb, 2));
1757 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1758 if (unlikely(err < 0)) {
1759 kfree_skb(skb);
1760 return ERR_PTR(err);
1763 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1764 put_unaligned_le16(0, skb_put(skb, 2));
1766 bt_cb(skb)->retries = 0;
1767 return skb;
1770 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1772 struct l2cap_pinfo *pi = l2cap_pi(sk);
1773 struct sk_buff *skb;
1774 struct sk_buff_head sar_queue;
1775 u16 control;
1776 size_t size = 0;
1778 skb_queue_head_init(&sar_queue);
1779 control = L2CAP_SDU_START;
1780 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1781 if (IS_ERR(skb))
1782 return PTR_ERR(skb);
1784 __skb_queue_tail(&sar_queue, skb);
1785 len -= pi->remote_mps;
1786 size += pi->remote_mps;
1788 while (len > 0) {
1789 size_t buflen;
1791 if (len > pi->remote_mps) {
1792 control = L2CAP_SDU_CONTINUE;
1793 buflen = pi->remote_mps;
1794 } else {
1795 control = L2CAP_SDU_END;
1796 buflen = len;
1799 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1800 if (IS_ERR(skb)) {
1801 skb_queue_purge(&sar_queue);
1802 return PTR_ERR(skb);
1805 __skb_queue_tail(&sar_queue, skb);
1806 len -= buflen;
1807 size += buflen;
1809 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1810 if (sk->sk_send_head == NULL)
1811 sk->sk_send_head = sar_queue.next;
1813 return size;
1816 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1818 struct sock *sk = sock->sk;
1819 struct l2cap_pinfo *pi = l2cap_pi(sk);
1820 struct sk_buff *skb;
1821 u16 control;
1822 int err;
1824 BT_DBG("sock %p, sk %p", sock, sk);
1826 err = sock_error(sk);
1827 if (err)
1828 return err;
1830 if (msg->msg_flags & MSG_OOB)
1831 return -EOPNOTSUPP;
1833 lock_sock(sk);
1835 if (sk->sk_state != BT_CONNECTED) {
1836 err = -ENOTCONN;
1837 goto done;
1840 /* Connectionless channel */
1841 if (sk->sk_type == SOCK_DGRAM) {
1842 skb = l2cap_create_connless_pdu(sk, msg, len);
1843 if (IS_ERR(skb)) {
1844 err = PTR_ERR(skb);
1845 } else {
1846 l2cap_do_send(sk, skb);
1847 err = len;
1849 goto done;
1852 switch (pi->mode) {
1853 case L2CAP_MODE_BASIC:
1854 /* Check outgoing MTU */
1855 if (len > pi->omtu) {
1856 err = -EMSGSIZE;
1857 goto done;
1860 /* Create a basic PDU */
1861 skb = l2cap_create_basic_pdu(sk, msg, len);
1862 if (IS_ERR(skb)) {
1863 err = PTR_ERR(skb);
1864 goto done;
1867 l2cap_do_send(sk, skb);
1868 err = len;
1869 break;
1871 case L2CAP_MODE_ERTM:
1872 case L2CAP_MODE_STREAMING:
1873 /* Entire SDU fits into one PDU */
1874 if (len <= pi->remote_mps) {
1875 control = L2CAP_SDU_UNSEGMENTED;
1876 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1877 if (IS_ERR(skb)) {
1878 err = PTR_ERR(skb);
1879 goto done;
1881 __skb_queue_tail(TX_QUEUE(sk), skb);
1883 if (sk->sk_send_head == NULL)
1884 sk->sk_send_head = skb;
1886 } else {
1887 /* Segment SDU into multiples PDUs */
1888 err = l2cap_sar_segment_sdu(sk, msg, len);
1889 if (err < 0)
1890 goto done;
1893 if (pi->mode == L2CAP_MODE_STREAMING) {
1894 l2cap_streaming_send(sk);
1895 } else {
1896 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1897 pi->conn_state && L2CAP_CONN_WAIT_F) {
1898 err = len;
1899 break;
1901 err = l2cap_ertm_send(sk);
1904 if (err >= 0)
1905 err = len;
1906 break;
1908 default:
1909 BT_DBG("bad state %1.1x", pi->mode);
1910 err = -EBADFD;
1913 done:
1914 release_sock(sk);
1915 return err;
1918 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1920 struct sock *sk = sock->sk;
1922 lock_sock(sk);
1924 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1925 struct l2cap_conn_rsp rsp;
1926 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1927 u8 buf[128];
1929 sk->sk_state = BT_CONFIG;
1931 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1932 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1933 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1934 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1935 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1936 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1938 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1939 release_sock(sk);
1940 return 0;
1943 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1944 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1945 l2cap_build_conf_req(sk, buf), buf);
1946 l2cap_pi(sk)->num_conf_req++;
1948 release_sock(sk);
1949 return 0;
1952 release_sock(sk);
1954 if (sock->type == SOCK_STREAM)
1955 return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
1957 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1960 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1962 struct sock *sk = sock->sk;
1963 struct l2cap_options opts;
1964 int len, err = 0;
1965 u32 opt;
1967 BT_DBG("sk %p", sk);
1969 lock_sock(sk);
1971 switch (optname) {
1972 case L2CAP_OPTIONS:
1973 if (sk->sk_state == BT_CONNECTED) {
1974 err = -EINVAL;
1975 break;
1978 opts.imtu = l2cap_pi(sk)->imtu;
1979 opts.omtu = l2cap_pi(sk)->omtu;
1980 opts.flush_to = l2cap_pi(sk)->flush_to;
1981 opts.mode = l2cap_pi(sk)->mode;
1982 opts.fcs = l2cap_pi(sk)->fcs;
1983 opts.max_tx = l2cap_pi(sk)->max_tx;
1984 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1986 len = min_t(unsigned int, sizeof(opts), optlen);
1987 if (copy_from_user((char *) &opts, optval, len)) {
1988 err = -EFAULT;
1989 break;
1992 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1993 err = -EINVAL;
1994 break;
1997 l2cap_pi(sk)->mode = opts.mode;
1998 switch (l2cap_pi(sk)->mode) {
1999 case L2CAP_MODE_BASIC:
2000 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
2001 break;
2002 case L2CAP_MODE_ERTM:
2003 case L2CAP_MODE_STREAMING:
2004 if (!disable_ertm)
2005 break;
2006 /* fall through */
2007 default:
2008 err = -EINVAL;
2009 break;
2012 l2cap_pi(sk)->imtu = opts.imtu;
2013 l2cap_pi(sk)->omtu = opts.omtu;
2014 l2cap_pi(sk)->fcs = opts.fcs;
2015 l2cap_pi(sk)->max_tx = opts.max_tx;
2016 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
2017 break;
2019 case L2CAP_LM:
2020 if (get_user(opt, (u32 __user *) optval)) {
2021 err = -EFAULT;
2022 break;
2025 if (opt & L2CAP_LM_AUTH)
2026 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2027 if (opt & L2CAP_LM_ENCRYPT)
2028 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2029 if (opt & L2CAP_LM_SECURE)
2030 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2032 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2033 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2034 break;
2036 default:
2037 err = -ENOPROTOOPT;
2038 break;
2041 release_sock(sk);
2042 return err;
2045 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2047 struct sock *sk = sock->sk;
2048 struct bt_security sec;
2049 int len, err = 0;
2050 u32 opt;
2052 BT_DBG("sk %p", sk);
2054 if (level == SOL_L2CAP)
2055 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2057 if (level != SOL_BLUETOOTH)
2058 return -ENOPROTOOPT;
2060 lock_sock(sk);
2062 switch (optname) {
2063 case BT_SECURITY:
2064 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2065 && sk->sk_type != SOCK_RAW) {
2066 err = -EINVAL;
2067 break;
2070 sec.level = BT_SECURITY_LOW;
2072 len = min_t(unsigned int, sizeof(sec), optlen);
2073 if (copy_from_user((char *) &sec, optval, len)) {
2074 err = -EFAULT;
2075 break;
2078 if (sec.level < BT_SECURITY_LOW ||
2079 sec.level > BT_SECURITY_HIGH) {
2080 err = -EINVAL;
2081 break;
2084 l2cap_pi(sk)->sec_level = sec.level;
2085 break;
2087 case BT_DEFER_SETUP:
2088 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2089 err = -EINVAL;
2090 break;
2093 if (get_user(opt, (u32 __user *) optval)) {
2094 err = -EFAULT;
2095 break;
2098 bt_sk(sk)->defer_setup = opt;
2099 break;
2101 default:
2102 err = -ENOPROTOOPT;
2103 break;
2106 release_sock(sk);
2107 return err;
2110 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2112 struct sock *sk = sock->sk;
2113 struct l2cap_options opts;
2114 struct l2cap_conninfo cinfo;
2115 int len, err = 0;
2116 u32 opt;
2118 BT_DBG("sk %p", sk);
2120 if (get_user(len, optlen))
2121 return -EFAULT;
2123 lock_sock(sk);
2125 switch (optname) {
2126 case L2CAP_OPTIONS:
2127 opts.imtu = l2cap_pi(sk)->imtu;
2128 opts.omtu = l2cap_pi(sk)->omtu;
2129 opts.flush_to = l2cap_pi(sk)->flush_to;
2130 opts.mode = l2cap_pi(sk)->mode;
2131 opts.fcs = l2cap_pi(sk)->fcs;
2132 opts.max_tx = l2cap_pi(sk)->max_tx;
2133 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2135 len = min_t(unsigned int, len, sizeof(opts));
2136 if (copy_to_user(optval, (char *) &opts, len))
2137 err = -EFAULT;
2139 break;
2141 case L2CAP_LM:
2142 switch (l2cap_pi(sk)->sec_level) {
2143 case BT_SECURITY_LOW:
2144 opt = L2CAP_LM_AUTH;
2145 break;
2146 case BT_SECURITY_MEDIUM:
2147 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2148 break;
2149 case BT_SECURITY_HIGH:
2150 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2151 L2CAP_LM_SECURE;
2152 break;
2153 default:
2154 opt = 0;
2155 break;
2158 if (l2cap_pi(sk)->role_switch)
2159 opt |= L2CAP_LM_MASTER;
2161 if (l2cap_pi(sk)->force_reliable)
2162 opt |= L2CAP_LM_RELIABLE;
2164 if (put_user(opt, (u32 __user *) optval))
2165 err = -EFAULT;
2166 break;
2168 case L2CAP_CONNINFO:
2169 if (sk->sk_state != BT_CONNECTED &&
2170 !(sk->sk_state == BT_CONNECT2 &&
2171 bt_sk(sk)->defer_setup)) {
2172 err = -ENOTCONN;
2173 break;
2176 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2177 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2179 len = min_t(unsigned int, len, sizeof(cinfo));
2180 if (copy_to_user(optval, (char *) &cinfo, len))
2181 err = -EFAULT;
2183 break;
2185 default:
2186 err = -ENOPROTOOPT;
2187 break;
2190 release_sock(sk);
2191 return err;
2194 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2196 struct sock *sk = sock->sk;
2197 struct bt_security sec;
2198 int len, err = 0;
2200 BT_DBG("sk %p", sk);
2202 if (level == SOL_L2CAP)
2203 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2205 if (level != SOL_BLUETOOTH)
2206 return -ENOPROTOOPT;
2208 if (get_user(len, optlen))
2209 return -EFAULT;
2211 lock_sock(sk);
2213 switch (optname) {
2214 case BT_SECURITY:
2215 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2216 && sk->sk_type != SOCK_RAW) {
2217 err = -EINVAL;
2218 break;
2221 sec.level = l2cap_pi(sk)->sec_level;
2223 len = min_t(unsigned int, len, sizeof(sec));
2224 if (copy_to_user(optval, (char *) &sec, len))
2225 err = -EFAULT;
2227 break;
2229 case BT_DEFER_SETUP:
2230 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2231 err = -EINVAL;
2232 break;
2235 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2236 err = -EFAULT;
2238 break;
2240 default:
2241 err = -ENOPROTOOPT;
2242 break;
2245 release_sock(sk);
2246 return err;
2249 static int l2cap_sock_shutdown(struct socket *sock, int how)
2251 struct sock *sk = sock->sk;
2252 int err = 0;
2254 BT_DBG("sock %p, sk %p", sock, sk);
2256 if (!sk)
2257 return 0;
2259 lock_sock(sk);
2260 if (!sk->sk_shutdown) {
2261 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2262 err = __l2cap_wait_ack(sk);
2264 sk->sk_shutdown = SHUTDOWN_MASK;
2265 l2cap_sock_clear_timer(sk);
2266 __l2cap_sock_close(sk, 0);
2268 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2269 err = bt_sock_wait_state(sk, BT_CLOSED,
2270 sk->sk_lingertime);
2273 if (!err && sk->sk_err)
2274 err = -sk->sk_err;
2276 release_sock(sk);
2277 return err;
2280 static int l2cap_sock_release(struct socket *sock)
2282 struct sock *sk = sock->sk;
2283 int err;
2285 BT_DBG("sock %p, sk %p", sock, sk);
2287 if (!sk)
2288 return 0;
2290 err = l2cap_sock_shutdown(sock, 2);
2292 sock_orphan(sk);
2293 l2cap_sock_kill(sk);
2294 return err;
2297 static void l2cap_chan_ready(struct sock *sk)
2299 struct sock *parent = bt_sk(sk)->parent;
2301 BT_DBG("sk %p, parent %p", sk, parent);
2303 l2cap_pi(sk)->conf_state = 0;
2304 l2cap_sock_clear_timer(sk);
2306 if (!parent) {
2307 /* Outgoing channel.
2308 * Wake up socket sleeping on connect.
2310 sk->sk_state = BT_CONNECTED;
2311 sk->sk_state_change(sk);
2312 } else {
2313 /* Incoming channel.
2314 * Wake up socket sleeping on accept.
2316 parent->sk_data_ready(parent, 0);
2320 /* Copy frame to all raw sockets on that connection */
2321 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2323 struct l2cap_chan_list *l = &conn->chan_list;
2324 struct sk_buff *nskb;
2325 struct sock *sk;
2327 BT_DBG("conn %p", conn);
2329 read_lock(&l->lock);
2330 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2331 if (sk->sk_type != SOCK_RAW)
2332 continue;
2334 /* Don't send frame to the socket it came from */
2335 if (skb->sk == sk)
2336 continue;
2337 nskb = skb_clone(skb, GFP_ATOMIC);
2338 if (!nskb)
2339 continue;
2341 if (sock_queue_rcv_skb(sk, nskb))
2342 kfree_skb(nskb);
2344 read_unlock(&l->lock);
2347 /* ---- L2CAP signalling commands ---- */
2348 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2349 u8 code, u8 ident, u16 dlen, void *data)
2351 struct sk_buff *skb, **frag;
2352 struct l2cap_cmd_hdr *cmd;
2353 struct l2cap_hdr *lh;
2354 int len, count;
2356 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2357 conn, code, ident, dlen);
2359 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2360 count = min_t(unsigned int, conn->mtu, len);
2362 skb = bt_skb_alloc(count, GFP_ATOMIC);
2363 if (!skb)
2364 return NULL;
2366 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2367 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2368 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2370 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2371 cmd->code = code;
2372 cmd->ident = ident;
2373 cmd->len = cpu_to_le16(dlen);
2375 if (dlen) {
2376 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2377 memcpy(skb_put(skb, count), data, count);
2378 data += count;
2381 len -= skb->len;
2383 /* Continuation fragments (no L2CAP header) */
2384 frag = &skb_shinfo(skb)->frag_list;
2385 while (len) {
2386 count = min_t(unsigned int, conn->mtu, len);
2388 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2389 if (!*frag)
2390 goto fail;
2392 memcpy(skb_put(*frag, count), data, count);
2394 len -= count;
2395 data += count;
2397 frag = &(*frag)->next;
2400 return skb;
2402 fail:
2403 kfree_skb(skb);
2404 return NULL;
2407 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2409 struct l2cap_conf_opt *opt = *ptr;
2410 int len;
2412 len = L2CAP_CONF_OPT_SIZE + opt->len;
2413 *ptr += len;
2415 *type = opt->type;
2416 *olen = opt->len;
2418 switch (opt->len) {
2419 case 1:
2420 *val = *((u8 *) opt->val);
2421 break;
2423 case 2:
2424 *val = __le16_to_cpu(*((__le16 *) opt->val));
2425 break;
2427 case 4:
2428 *val = __le32_to_cpu(*((__le32 *) opt->val));
2429 break;
2431 default:
2432 *val = (unsigned long) opt->val;
2433 break;
2436 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2437 return len;
2440 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2442 struct l2cap_conf_opt *opt = *ptr;
2444 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2446 opt->type = type;
2447 opt->len = len;
2449 switch (len) {
2450 case 1:
2451 *((u8 *) opt->val) = val;
2452 break;
2454 case 2:
2455 *((__le16 *) opt->val) = cpu_to_le16(val);
2456 break;
2458 case 4:
2459 *((__le32 *) opt->val) = cpu_to_le32(val);
2460 break;
2462 default:
2463 memcpy(opt->val, (void *) val, len);
2464 break;
2467 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Delayed-ack timer: acknowledge received I-frames that have not yet
 * been acked by outgoing traffic. Timer (softirq) context.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2479 static inline void l2cap_ertm_init(struct sock *sk)
2481 l2cap_pi(sk)->expected_ack_seq = 0;
2482 l2cap_pi(sk)->unacked_frames = 0;
2483 l2cap_pi(sk)->buffer_seq = 0;
2484 l2cap_pi(sk)->num_acked = 0;
2485 l2cap_pi(sk)->frames_sent = 0;
2487 setup_timer(&l2cap_pi(sk)->retrans_timer,
2488 l2cap_retrans_timeout, (unsigned long) sk);
2489 setup_timer(&l2cap_pi(sk)->monitor_timer,
2490 l2cap_monitor_timeout, (unsigned long) sk);
2491 setup_timer(&l2cap_pi(sk)->ack_timer,
2492 l2cap_ack_timeout, (unsigned long) sk);
2494 __skb_queue_head_init(SREJ_QUEUE(sk));
2495 __skb_queue_head_init(BUSY_QUEUE(sk));
2497 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2499 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2502 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2504 switch (mode) {
2505 case L2CAP_MODE_STREAMING:
2506 case L2CAP_MODE_ERTM:
2507 if (l2cap_mode_supported(mode, remote_feat_mask))
2508 return mode;
2509 /* fall through */
2510 default:
2511 return L2CAP_MODE_BASIC;
2515 static int l2cap_build_conf_req(struct sock *sk, void *data)
2517 struct l2cap_pinfo *pi = l2cap_pi(sk);
2518 struct l2cap_conf_req *req = data;
2519 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2520 void *ptr = req->data;
2522 BT_DBG("sk %p", sk);
2524 if (pi->num_conf_req || pi->num_conf_rsp)
2525 goto done;
2527 switch (pi->mode) {
2528 case L2CAP_MODE_STREAMING:
2529 case L2CAP_MODE_ERTM:
2530 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2531 break;
2533 /* fall through */
2534 default:
2535 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2536 break;
2539 done:
2540 switch (pi->mode) {
2541 case L2CAP_MODE_BASIC:
2542 if (pi->imtu != L2CAP_DEFAULT_MTU)
2543 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2545 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2546 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2547 break;
2549 rfc.mode = L2CAP_MODE_BASIC;
2550 rfc.txwin_size = 0;
2551 rfc.max_transmit = 0;
2552 rfc.retrans_timeout = 0;
2553 rfc.monitor_timeout = 0;
2554 rfc.max_pdu_size = 0;
2556 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2557 (unsigned long) &rfc);
2558 break;
2560 case L2CAP_MODE_ERTM:
2561 rfc.mode = L2CAP_MODE_ERTM;
2562 rfc.txwin_size = pi->tx_win;
2563 rfc.max_transmit = pi->max_tx;
2564 rfc.retrans_timeout = 0;
2565 rfc.monitor_timeout = 0;
2566 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2567 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2568 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2570 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2571 (unsigned long) &rfc);
2573 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2574 break;
2576 if (pi->fcs == L2CAP_FCS_NONE ||
2577 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2578 pi->fcs = L2CAP_FCS_NONE;
2579 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2581 break;
2583 case L2CAP_MODE_STREAMING:
2584 rfc.mode = L2CAP_MODE_STREAMING;
2585 rfc.txwin_size = 0;
2586 rfc.max_transmit = 0;
2587 rfc.retrans_timeout = 0;
2588 rfc.monitor_timeout = 0;
2589 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2590 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2591 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2593 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2594 (unsigned long) &rfc);
2596 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2597 break;
2599 if (pi->fcs == L2CAP_FCS_NONE ||
2600 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2601 pi->fcs = L2CAP_FCS_NONE;
2602 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2604 break;
2607 /* FIXME: Need actual value of the flush timeout */
2608 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2609 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2611 req->dcid = cpu_to_le16(pi->dcid);
2612 req->flags = cpu_to_le16(0);
2614 return ptr - data;
/* Parse the peer's buffered Configure Request options (pi->conf_req,
 * pi->conf_len) and build our Configure Response into 'data'.
 * Negotiates MTU, retransmission/flow-control mode and FCS.
 * Returns the number of response bytes written at 'data', or
 * -ECONNREFUSED when the channel must be dropped.
 *
 * Fix: rfc.retrans_timeout/rfc.monitor_timeout are little-endian wire
 * fields being assigned from CPU-order constants, so the conversion must
 * be cpu_to_le16(), not le16_to_cpu().  Both macros are identity on
 * little-endian hosts, so behaviour there is unchanged; on big-endian
 * hosts the old code happened to byte-swap as well, but the intent (and
 * sparse annotation) requires cpu_to_le16(). */
2617 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2619 struct l2cap_pinfo *pi = l2cap_pi(sk);
2620 struct l2cap_conf_rsp *rsp = data;
2621 void *ptr = rsp->data;
2622 void *req = pi->conf_req;
2623 int len = pi->conf_len;
2624 int type, hint, olen;
2625 unsigned long val;
2626 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2627 u16 mtu = L2CAP_DEFAULT_MTU;
2628 u16 result = L2CAP_CONF_SUCCESS;
2630 BT_DBG("sk %p", sk);
/* Walk every option in the stored request. */
2632 while (len >= L2CAP_CONF_OPT_SIZE) {
2633 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2635 hint = type & L2CAP_CONF_HINT;
2636 type &= L2CAP_CONF_MASK;
2638 switch (type) {
2639 case L2CAP_CONF_MTU:
2640 mtu = val;
2641 break;
2643 case L2CAP_CONF_FLUSH_TO:
2644 pi->flush_to = val;
2645 break;
2647 case L2CAP_CONF_QOS:
2648 break;
2650 case L2CAP_CONF_RFC:
2651 if (olen == sizeof(rfc))
2652 memcpy(&rfc, (void *) val, olen);
2653 break;
2655 case L2CAP_CONF_FCS:
2656 if (val == L2CAP_FCS_NONE)
2657 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2659 break;
/* Unknown non-hint option: reject and echo the offending type back. */
2661 default:
2662 if (hint)
2663 break;
2665 result = L2CAP_CONF_UNKNOWN;
2666 *((u8 *) ptr++) = type;
2667 break;
/* Mode is only (re)selected on the first config round. */
2671 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2672 goto done;
2674 switch (pi->mode) {
2675 case L2CAP_MODE_STREAMING:
2676 case L2CAP_MODE_ERTM:
2677 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2678 pi->mode = l2cap_select_mode(rfc.mode,
2679 pi->conn->feat_mask);
2680 break;
2683 if (pi->mode != rfc.mode)
2684 return -ECONNREFUSED;
2686 break;
2689 done:
2690 if (pi->mode != rfc.mode) {
2691 result = L2CAP_CONF_UNACCEPT;
2692 rfc.mode = pi->mode;
2694 if (pi->num_conf_rsp == 1)
2695 return -ECONNREFUSED;
2697 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2698 sizeof(rfc), (unsigned long) &rfc);
2702 if (result == L2CAP_CONF_SUCCESS) {
2703 /* Configure output options and let the other side know
2704 * which ones we don't like. */
2706 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2707 result = L2CAP_CONF_UNACCEPT;
2708 else {
2709 pi->omtu = mtu;
2710 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2712 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2714 switch (rfc.mode) {
2715 case L2CAP_MODE_BASIC:
2716 pi->fcs = L2CAP_FCS_NONE;
2717 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2718 break;
2720 case L2CAP_MODE_ERTM:
2721 pi->remote_tx_win = rfc.txwin_size;
2722 pi->remote_max_tx = rfc.max_transmit;
/* Clamp peer's MPS to what fits in our ACL MTU (10 = L2CAP+control+FCS
 * overhead -- presumably; confirm against frame layout). */
2724 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2725 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2727 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* Wire fields are little-endian: convert from CPU order. */
2729 rfc.retrans_timeout =
2730 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2731 rfc.monitor_timeout =
2732 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2734 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2736 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2737 sizeof(rfc), (unsigned long) &rfc);
2739 break;
2741 case L2CAP_MODE_STREAMING:
2742 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2743 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2745 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2747 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2749 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2750 sizeof(rfc), (unsigned long) &rfc);
2752 break;
2754 default:
2755 result = L2CAP_CONF_UNACCEPT;
2757 memset(&rfc, 0, sizeof(rfc));
2758 rfc.mode = pi->mode;
2761 if (result == L2CAP_CONF_SUCCESS)
2762 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2764 rsp->scid = cpu_to_le16(pi->dcid);
2765 rsp->result = cpu_to_le16(result);
2766 rsp->flags = cpu_to_le16(0x0000);
2768 return ptr - data;
2771 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2773 struct l2cap_pinfo *pi = l2cap_pi(sk);
2774 struct l2cap_conf_req *req = data;
2775 void *ptr = req->data;
2776 int type, olen;
2777 unsigned long val;
2778 struct l2cap_conf_rfc rfc;
2780 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2782 while (len >= L2CAP_CONF_OPT_SIZE) {
2783 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2785 switch (type) {
2786 case L2CAP_CONF_MTU:
2787 if (val < L2CAP_DEFAULT_MIN_MTU) {
2788 *result = L2CAP_CONF_UNACCEPT;
2789 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2790 } else
2791 pi->imtu = val;
2792 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2793 break;
2795 case L2CAP_CONF_FLUSH_TO:
2796 pi->flush_to = val;
2797 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2798 2, pi->flush_to);
2799 break;
2801 case L2CAP_CONF_RFC:
2802 if (olen == sizeof(rfc))
2803 memcpy(&rfc, (void *)val, olen);
2805 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2806 rfc.mode != pi->mode)
2807 return -ECONNREFUSED;
2809 pi->fcs = 0;
2811 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2812 sizeof(rfc), (unsigned long) &rfc);
2813 break;
2817 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2818 return -ECONNREFUSED;
2820 pi->mode = rfc.mode;
2822 if (*result == L2CAP_CONF_SUCCESS) {
2823 switch (rfc.mode) {
2824 case L2CAP_MODE_ERTM:
2825 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2826 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2827 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2828 break;
2829 case L2CAP_MODE_STREAMING:
2830 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2834 req->dcid = cpu_to_le16(pi->dcid);
2835 req->flags = cpu_to_le16(0x0000);
2837 return ptr - data;
/* Build a minimal (option-less) Configure Response into 'data' with the
 * given result and continuation flags.  Returns the response length. */
2840 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2842 struct l2cap_conf_rsp *rsp = data;
2843 void *ptr = rsp->data;
2845 BT_DBG("sk %p", sk);
/* Our dcid is the peer's source CID, hence rsp->scid. */
2847 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2848 rsp->result = cpu_to_le16(result);
2849 rsp->flags = cpu_to_le16(flags);
2851 return ptr - data;
2854 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2856 struct l2cap_pinfo *pi = l2cap_pi(sk);
2857 int type, olen;
2858 unsigned long val;
2859 struct l2cap_conf_rfc rfc;
2861 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2863 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2864 return;
2866 while (len >= L2CAP_CONF_OPT_SIZE) {
2867 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2869 switch (type) {
2870 case L2CAP_CONF_RFC:
2871 if (olen == sizeof(rfc))
2872 memcpy(&rfc, (void *)val, olen);
2873 goto done;
2877 done:
2878 switch (rfc.mode) {
2879 case L2CAP_MODE_ERTM:
2880 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2881 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2882 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2883 break;
2884 case L2CAP_MODE_STREAMING:
2885 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming L2CAP Command Reject.  If it rejects the feature
 * mask Information Request we sent (matched by ident), stop the info
 * timer, mark the feature exchange done and start pending channels. */
2889 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2891 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only reason 0x0000 ("command not understood") is acted upon. */
2893 if (rej->reason != 0x0000)
2894 return 0;
2896 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2897 cmd->ident == conn->info_ident) {
2898 del_timer(&conn->info_timer);
2900 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2901 conn->info_ident = 0;
2903 l2cap_conn_start(conn);
2906 return 0;
/* Handle an incoming L2CAP Connection Request: find a listening socket
 * for the PSM, check link security and backlog, allocate and register a
 * child socket, then answer with a Connection Response.  May also kick
 * off the information (feature mask) exchange and the first Configure
 * Request.  Always returns 0; errors are reported via rsp.result. */
2909 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2911 struct l2cap_chan_list *list = &conn->chan_list;
2912 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2913 struct l2cap_conn_rsp rsp;
2914 struct sock *parent, *sk = NULL;
2915 int result, status = L2CAP_CS_NO_INFO;
2917 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2918 __le16 psm = req->psm;
2920 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2922 /* Check if we have socket listening on psm */
2923 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2924 if (!parent) {
2925 result = L2CAP_CR_BAD_PSM;
2926 goto sendresp;
2929 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM; disc_reason 0x05 presumably maps to a
 * security-block disconnect reason -- TODO confirm against spec. */
2930 if (psm != cpu_to_le16(0x0001) &&
2931 !hci_conn_check_link_mode(conn->hcon)) {
2932 conn->disc_reason = 0x05;
2933 result = L2CAP_CR_SEC_BLOCK;
2934 goto response;
2937 result = L2CAP_CR_NO_MEM;
2939 /* Check for backlog size */
2940 if (sk_acceptq_is_full(parent)) {
2941 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2942 goto response;
2945 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2946 if (!sk)
2947 goto response;
2949 write_lock_bh(&list->lock);
2951 /* Check if we already have channel with that dcid */
2952 if (__l2cap_get_chan_by_dcid(list, scid)) {
2953 write_unlock_bh(&list->lock);
2954 sock_set_flag(sk, SOCK_ZAPPED);
2955 l2cap_sock_kill(sk);
2956 goto response;
2959 hci_conn_hold(conn->hcon);
2961 l2cap_sock_init(sk, parent);
2962 bacpy(&bt_sk(sk)->src, conn->src);
2963 bacpy(&bt_sk(sk)->dst, conn->dst);
2964 l2cap_pi(sk)->psm = psm;
2965 l2cap_pi(sk)->dcid = scid;
2967 __l2cap_chan_add(conn, sk, parent);
2968 dcid = l2cap_pi(sk)->scid;
2970 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2972 l2cap_pi(sk)->ident = cmd->ident;
/* Decide the response based on feature-exchange and security state:
 * done+secure -> success (or pend if userspace defers accept),
 * done+insecure -> pend on authentication,
 * not done -> pend until the info exchange completes. */
2974 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2975 if (l2cap_check_security(sk)) {
2976 if (bt_sk(sk)->defer_setup) {
2977 sk->sk_state = BT_CONNECT2;
2978 result = L2CAP_CR_PEND;
2979 status = L2CAP_CS_AUTHOR_PEND;
2980 parent->sk_data_ready(parent, 0);
2981 } else {
2982 sk->sk_state = BT_CONFIG;
2983 result = L2CAP_CR_SUCCESS;
2984 status = L2CAP_CS_NO_INFO;
2986 } else {
2987 sk->sk_state = BT_CONNECT2;
2988 result = L2CAP_CR_PEND;
2989 status = L2CAP_CS_AUTHEN_PEND;
2991 } else {
2992 sk->sk_state = BT_CONNECT2;
2993 result = L2CAP_CR_PEND;
2994 status = L2CAP_CS_NO_INFO;
2997 write_unlock_bh(&list->lock);
2999 response:
3000 bh_unlock_sock(parent);
3002 sendresp:
3003 rsp.scid = cpu_to_le16(scid);
3004 rsp.dcid = cpu_to_le16(dcid);
3005 rsp.result = cpu_to_le16(result);
3006 rsp.status = cpu_to_le16(status);
3007 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: trigger the feature mask exchange. */
3009 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3010 struct l2cap_info_req info;
3011 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3013 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3014 conn->info_ident = l2cap_get_ident(conn);
3016 mod_timer(&conn->info_timer, jiffies +
3017 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
3019 l2cap_send_cmd(conn, conn->info_ident,
3020 L2CAP_INFO_REQ, sizeof(info), &info);
/* Accepted immediately: start configuration right away. */
3023 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3024 result == L2CAP_CR_SUCCESS) {
3025 u8 buf[128];
3026 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3027 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3028 l2cap_build_conf_req(sk, buf), buf);
3029 l2cap_pi(sk)->num_conf_req++;
3032 return 0;
/* Handle an incoming L2CAP Connection Response.  Looks the channel up by
 * our source CID (or by request ident when scid is 0), then on success
 * moves it to BT_CONFIG and sends the first Configure Request; on a
 * pending result just flags the channel; anything else tears it down. */
3035 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3037 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3038 u16 scid, dcid, result, status;
3039 struct sock *sk;
3040 u8 req[128];
3042 scid = __le16_to_cpu(rsp->scid);
3043 dcid = __le16_to_cpu(rsp->dcid);
3044 result = __le16_to_cpu(rsp->result);
3045 status = __le16_to_cpu(rsp->status);
3047 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
3049 if (scid) {
3050 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3051 if (!sk)
3052 return -EFAULT;
3053 } else {
3054 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3055 if (!sk)
3056 return -EFAULT;
3059 switch (result) {
3060 case L2CAP_CR_SUCCESS:
3061 sk->sk_state = BT_CONFIG;
3062 l2cap_pi(sk)->ident = 0;
3063 l2cap_pi(sk)->dcid = dcid;
3064 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Don't send a duplicate Configure Request. */
3066 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3067 break;
3069 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3071 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3072 l2cap_build_conf_req(sk, req), req);
3073 l2cap_pi(sk)->num_conf_req++;
3074 break;
3076 case L2CAP_CR_PEND:
3077 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3078 break;
3080 default:
3081 l2cap_chan_del(sk, ECONNREFUSED);
3082 break;
3085 bh_unlock_sock(sk);
3086 return 0;
/* Choose the channel's FCS setting once configuration is complete:
 * none outside ERTM/streaming, CRC16 unless the peer opted out. */
3089 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3091 /* FCS is enabled only in ERTM or streaming mode, if one or both
3092 * sides request it.
3094 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3095 pi->fcs = L2CAP_FCS_NONE;
3096 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3097 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Option data may be split over
 * several requests (continuation flag 0x0001); fragments are accumulated
 * in pi->conf_req and parsed once the final fragment arrives.  On a
 * complete, accepted config the channel may become BT_CONNECTED. */
3100 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3102 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3103 u16 dcid, flags;
3104 u8 rsp[64];
3105 struct sock *sk;
3106 int len;
3108 dcid = __le16_to_cpu(req->dcid);
3109 flags = __le16_to_cpu(req->flags);
3111 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3113 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3114 if (!sk)
3115 return -ENOENT;
3117 if (sk->sk_state == BT_DISCONN)
3118 goto unlock;
3120 /* Reject if config buffer is too small. */
3121 len = cmd_len - sizeof(*req);
3122 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3123 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3124 l2cap_build_conf_rsp(sk, rsp,
3125 L2CAP_CONF_REJECT, flags), rsp);
3126 goto unlock;
3129 /* Store config. */
3130 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3131 l2cap_pi(sk)->conf_len += len;
3133 if (flags & 0x0001) {
3134 /* Incomplete config. Send empty response. */
3135 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3136 l2cap_build_conf_rsp(sk, rsp,
3137 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3138 goto unlock;
3141 /* Complete config. */
3142 len = l2cap_parse_conf_req(sk, rsp);
3143 if (len < 0) {
3144 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3145 goto unlock;
3148 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3149 l2cap_pi(sk)->num_conf_rsp++;
3151 /* Reset config buffer. */
3152 l2cap_pi(sk)->conf_len = 0;
3154 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3155 goto unlock;
/* Both directions configured: channel is ready for data. */
3157 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3158 set_default_fcs(l2cap_pi(sk));
3160 sk->sk_state = BT_CONNECTED;
3162 l2cap_pi(sk)->next_tx_seq = 0;
3163 l2cap_pi(sk)->expected_tx_seq = 0;
3164 __skb_queue_head_init(TX_QUEUE(sk));
3165 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3166 l2cap_ertm_init(sk);
3168 l2cap_chan_ready(sk);
3169 goto unlock;
/* We haven't sent our own Configure Request yet: do it now. */
3172 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3173 u8 buf[64];
3174 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3175 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3176 l2cap_build_conf_req(sk, buf), buf);
3177 l2cap_pi(sk)->num_conf_req++;
3180 unlock:
3181 bh_unlock_sock(sk);
3182 return 0;
3185 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3187 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3188 u16 scid, flags, result;
3189 struct sock *sk;
3190 int len = cmd->len - sizeof(*rsp);
3192 scid = __le16_to_cpu(rsp->scid);
3193 flags = __le16_to_cpu(rsp->flags);
3194 result = __le16_to_cpu(rsp->result);
3196 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3197 scid, flags, result);
3199 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3200 if (!sk)
3201 return 0;
3203 switch (result) {
3204 case L2CAP_CONF_SUCCESS:
3205 l2cap_conf_rfc_get(sk, rsp->data, len);
3206 break;
3208 case L2CAP_CONF_UNACCEPT:
3209 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3210 char req[64];
3212 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3213 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3214 goto done;
3217 /* throw out any old stored conf requests */
3218 result = L2CAP_CONF_SUCCESS;
3219 len = l2cap_parse_conf_rsp(sk, rsp->data,
3220 len, req, &result);
3221 if (len < 0) {
3222 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3223 goto done;
3226 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3227 L2CAP_CONF_REQ, len, req);
3228 l2cap_pi(sk)->num_conf_req++;
3229 if (result != L2CAP_CONF_SUCCESS)
3230 goto done;
3231 break;
3234 default:
3235 sk->sk_err = ECONNRESET;
3236 l2cap_sock_set_timer(sk, HZ * 5);
3237 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3238 goto done;
3241 if (flags & 0x01)
3242 goto done;
3244 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3246 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3247 set_default_fcs(l2cap_pi(sk));
3249 sk->sk_state = BT_CONNECTED;
3250 l2cap_pi(sk)->next_tx_seq = 0;
3251 l2cap_pi(sk)->expected_tx_seq = 0;
3252 __skb_queue_head_init(TX_QUEUE(sk));
3253 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3254 l2cap_ertm_init(sk);
3256 l2cap_chan_ready(sk);
3259 done:
3260 bh_unlock_sock(sk);
3261 return 0;
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down and remove the channel. */
3264 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3266 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3267 struct l2cap_disconn_rsp rsp;
3268 u16 dcid, scid;
3269 struct sock *sk;
3271 scid = __le16_to_cpu(req->scid);
3272 dcid = __le16_to_cpu(req->dcid);
3274 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid, so look the channel up by it. */
3276 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3277 if (!sk)
3278 return 0;
3280 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3281 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3282 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3284 sk->sk_shutdown = SHUTDOWN_MASK;
3286 l2cap_chan_del(sk, ECONNRESET);
3287 bh_unlock_sock(sk);
3289 l2cap_sock_kill(sk);
3290 return 0;
/* Handle an incoming Disconnection Response: the disconnect we requested
 * is confirmed, so remove the channel (no error) and kill the socket. */
3293 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3295 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3296 u16 dcid, scid;
3297 struct sock *sk;
3299 scid = __le16_to_cpu(rsp->scid);
3300 dcid = __le16_to_cpu(rsp->dcid);
3302 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3304 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3305 if (!sk)
3306 return 0;
3308 l2cap_chan_del(sk, 0);
3309 bh_unlock_sock(sk);
3311 l2cap_sock_kill(sk);
3312 return 0;
/* Handle an incoming Information Request and answer it: feature mask
 * (extended with ERTM/streaming/FCS unless disable_ertm), fixed channel
 * map, or "not supported" for anything else. */
3315 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3317 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3318 u16 type;
3320 type = __le16_to_cpu(req->type);
3322 BT_DBG("type 0x%4.4x", type);
3324 if (type == L2CAP_IT_FEAT_MASK) {
3325 u8 buf[8];
3326 u32 feat_mask = l2cap_feat_mask;
3327 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3328 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3329 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3330 if (!disable_ertm)
3331 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3332 | L2CAP_FEAT_FCS;
3333 put_unaligned_le32(feat_mask, rsp->data);
3334 l2cap_send_cmd(conn, cmd->ident,
3335 L2CAP_INFO_RSP, sizeof(buf), buf);
3336 } else if (type == L2CAP_IT_FIXED_CHAN) {
3337 u8 buf[12];
3338 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3339 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3340 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel bitmap follows the 4-byte rsp header. */
3341 memcpy(buf + 4, l2cap_fixed_chan, 8);
3342 l2cap_send_cmd(conn, cmd->ident,
3343 L2CAP_INFO_RSP, sizeof(buf), buf);
3344 } else {
3345 struct l2cap_info_rsp rsp;
3346 rsp.type = cpu_to_le16(type);
3347 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3348 l2cap_send_cmd(conn, cmd->ident,
3349 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3352 return 0;
/* Handle an incoming Information Response.  A feature mask reply may
 * chain into a fixed-channel query; otherwise mark the info exchange
 * done and start any channels that were waiting on it. */
3355 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3357 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3358 u16 type, result;
3360 type = __le16_to_cpu(rsp->type);
3361 result = __le16_to_cpu(rsp->result);
3363 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3365 del_timer(&conn->info_timer);
/* Failed query: give up on the exchange and proceed without it. */
3367 if (result != L2CAP_IR_SUCCESS) {
3368 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3369 conn->info_ident = 0;
3371 l2cap_conn_start(conn);
3373 return 0;
3376 if (type == L2CAP_IT_FEAT_MASK) {
3377 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its channel map next. */
3379 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3380 struct l2cap_info_req req;
3381 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3383 conn->info_ident = l2cap_get_ident(conn);
3385 l2cap_send_cmd(conn, conn->info_ident,
3386 L2CAP_INFO_REQ, sizeof(req), &req);
3387 } else {
3388 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3389 conn->info_ident = 0;
3391 l2cap_conn_start(conn);
3393 } else if (type == L2CAP_IT_FIXED_CHAN) {
3394 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3395 conn->info_ident = 0;
3397 l2cap_conn_start(conn);
3400 return 0;
/* Demultiplex the L2CAP signalling channel: iterate over every command
 * in the skb, dispatch to the matching handler, and answer failures with
 * a Command Reject.  Consumes the skb. */
3403 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3405 u8 *data = skb->data;
3406 int len = skb->len;
3407 struct l2cap_cmd_hdr cmd;
3408 int err = 0;
3410 l2cap_raw_recv(conn, skb);
3412 while (len >= L2CAP_CMD_HDR_SIZE) {
3413 u16 cmd_len;
3414 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3415 data += L2CAP_CMD_HDR_SIZE;
3416 len -= L2CAP_CMD_HDR_SIZE;
3418 cmd_len = le16_to_cpu(cmd.len);
3420 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Truncated command or reserved ident 0: stop parsing this skb. */
3422 if (cmd_len > len || !cmd.ident) {
3423 BT_DBG("corrupted command");
3424 break;
3427 switch (cmd.code) {
3428 case L2CAP_COMMAND_REJ:
3429 l2cap_command_rej(conn, &cmd, data);
3430 break;
3432 case L2CAP_CONN_REQ:
3433 err = l2cap_connect_req(conn, &cmd, data);
3434 break;
3436 case L2CAP_CONN_RSP:
3437 err = l2cap_connect_rsp(conn, &cmd, data);
3438 break;
3440 case L2CAP_CONF_REQ:
3441 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3442 break;
3444 case L2CAP_CONF_RSP:
3445 err = l2cap_config_rsp(conn, &cmd, data);
3446 break;
3448 case L2CAP_DISCONN_REQ:
3449 err = l2cap_disconnect_req(conn, &cmd, data);
3450 break;
3452 case L2CAP_DISCONN_RSP:
3453 err = l2cap_disconnect_rsp(conn, &cmd, data);
3454 break;
3456 case L2CAP_ECHO_REQ:
3457 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3458 break;
3460 case L2CAP_ECHO_RSP:
3461 break;
3463 case L2CAP_INFO_REQ:
3464 err = l2cap_information_req(conn, &cmd, data);
3465 break;
3467 case L2CAP_INFO_RSP:
3468 err = l2cap_information_rsp(conn, &cmd, data);
3469 break;
3471 default:
3472 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3473 err = -EINVAL;
3474 break;
3477 if (err) {
3478 struct l2cap_cmd_rej rej;
3479 BT_DBG("error %d", err);
3481 /* FIXME: Map err to a valid reason */
3482 rej.reason = cpu_to_le16(0);
3483 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3486 data += cmd_len;
3487 len -= cmd_len;
3490 kfree_skb(skb);
/* Verify (and strip) the 2-byte trailing FCS of an ERTM/streaming frame
 * when CRC16 is enabled.  The CRC covers the L2CAP header plus control
 * field (hdr_size) and the payload.  Returns 0 on match, -EBADMSG on a
 * bad checksum. */
3493 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3495 u16 our_fcs, rcv_fcs;
3496 int hdr_size = L2CAP_HDR_SIZE + 2;
3498 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
3499 skb_trim(skb, skb->len - 2);
3500 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3501 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3503 if (our_fcs != rcv_fcs)
3504 return -EBADMSG;
3506 return 0;
/* After a poll/final exchange: report local busy with an RNR, retransmit
 * if the peer was busy, push pending I-frames, and fall back to an RR
 * when nothing was sent so the peer still gets an acknowledgement. */
3509 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3511 struct l2cap_pinfo *pi = l2cap_pi(sk);
3512 u16 control = 0;
3514 pi->frames_sent = 0;
3516 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3518 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3519 control |= L2CAP_SUPER_RCV_NOT_READY;
3520 l2cap_send_sframe(pi, control);
3521 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3524 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3525 l2cap_retransmit_frames(sk);
3527 l2cap_ertm_send(sk);
/* No I-frame carried the ack: send an explicit RR. */
3529 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3530 pi->frames_sent == 0) {
3531 control |= L2CAP_SUPER_RCV_READY;
3532 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (modulo-64 sequence
 * space).  Returns 0 on insert, -EINVAL for a duplicate tx_seq. */
3536 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3538 struct sk_buff *next_skb;
3539 struct l2cap_pinfo *pi = l2cap_pi(sk);
3540 int tx_seq_offset, next_tx_seq_offset;
3542 bt_cb(skb)->tx_seq = tx_seq;
3543 bt_cb(skb)->sar = sar;
3545 next_skb = skb_peek(SREJ_QUEUE(sk));
3546 if (!next_skb) {
3547 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3548 return 0;
/* Offsets are distances from buffer_seq in the mod-64 sequence space. */
3551 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3552 if (tx_seq_offset < 0)
3553 tx_seq_offset += 64;
3555 do {
3556 if (bt_cb(next_skb)->tx_seq == tx_seq)
3557 return -EINVAL;
3559 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3560 pi->buffer_seq) % 64;
3561 if (next_tx_seq_offset < 0)
3562 next_tx_seq_offset += 64;
3564 if (next_tx_seq_offset > tx_seq_offset) {
3565 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3566 return 0;
3569 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3570 break;
3572 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3574 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3576 return 0;
/* Reassemble an SDU from ERTM I-frames according to the SAR bits in
 * 'control': unsegmented frames are queued directly; START allocates
 * pi->sdu and records the expected length; CONTINUE/END append, and END
 * delivers a clone to the socket.  SAR state violations disconnect the
 * channel; oversized/短 SDUs are dropped.  Returns 0, or a negative
 * errno when the receive queue is full / allocation fails (caller then
 * enters local-busy handling). */
3579 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3581 struct l2cap_pinfo *pi = l2cap_pi(sk);
3582 struct sk_buff *_skb;
3583 int err;
3585 switch (control & L2CAP_CTRL_SAR) {
3586 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of a segmented SDU is invalid. */
3587 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3588 goto drop;
3590 err = sock_queue_rcv_skb(sk, skb);
3591 if (!err)
3592 return err;
3594 break;
3596 case L2CAP_SDU_START:
3597 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3598 goto drop;
/* First two payload bytes carry the total SDU length. */
3600 pi->sdu_len = get_unaligned_le16(skb->data);
3602 if (pi->sdu_len > pi->imtu)
3603 goto disconnect;
3605 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3606 if (!pi->sdu)
3607 return -ENOMEM;
3609 /* pull sdu_len bytes only after alloc, because of Local Busy
3610 * condition we have to be sure that this will be executed
3611 * only once, i.e., when alloc does not fail */
3612 skb_pull(skb, 2);
3614 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3616 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3617 pi->partial_sdu_len = skb->len;
3618 break;
3620 case L2CAP_SDU_CONTINUE:
3621 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3622 goto disconnect;
3624 if (!pi->sdu)
3625 goto disconnect;
3627 pi->partial_sdu_len += skb->len;
3628 if (pi->partial_sdu_len > pi->sdu_len)
3629 goto drop;
3631 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3633 break;
3635 case L2CAP_SDU_END:
3636 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3637 goto disconnect;
3639 if (!pi->sdu)
3640 goto disconnect;
/* On a retry (after a failed deliver) the data was already copied. */
3642 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3643 pi->partial_sdu_len += skb->len;
3645 if (pi->partial_sdu_len > pi->imtu)
3646 goto drop;
3648 if (pi->partial_sdu_len != pi->sdu_len)
3649 goto drop;
3651 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3654 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3655 if (!_skb) {
3656 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3657 return -ENOMEM;
3660 err = sock_queue_rcv_skb(sk, _skb);
3661 if (err < 0) {
3662 kfree_skb(_skb);
3663 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3664 return err;
3667 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3668 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3670 kfree_skb(pi->sdu);
3671 break;
3674 kfree_skb(skb);
3675 return 0;
3677 drop:
3678 kfree_skb(pi->sdu);
3679 pi->sdu = NULL;
/* NOTE(review): drop falls through into disconnect here -- a dropped
 * SDU also tears the channel down; confirm this is intended. */
3681 disconnect:
3682 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3683 kfree_skb(skb);
3684 return 0;
/* Drain the local-busy queue into the socket.  Returns -EBUSY (frame
 * put back) while the receiver is still congested; on full drain, clear
 * the local-busy state and, if an RNR was sent, poll the peer with an
 * RR+P and start the monitor timer.  Returns 0 when busy is exited. */
3687 static int l2cap_try_push_rx_skb(struct sock *sk)
3689 struct l2cap_pinfo *pi = l2cap_pi(sk);
3690 struct sk_buff *skb;
3691 u16 control;
3692 int err;
3694 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3695 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3696 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3697 if (err < 0) {
/* Still busy: requeue at the head to preserve ordering. */
3698 skb_queue_head(BUSY_QUEUE(sk), skb);
3699 return -EBUSY;
3702 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3705 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3706 goto done;
/* We told the peer "not ready" earlier; poll it back to life. */
3708 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3709 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3710 l2cap_send_sframe(pi, control);
3711 l2cap_pi(sk)->retry_count = 1;
3713 del_timer(&pi->retrans_timer);
3714 __mod_monitor_timer();
3716 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3718 done:
3719 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3720 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3722 BT_DBG("sk %p, Exit local busy", sk);
3724 return 0;
/* Workqueue handler for the local-busy condition: repeatedly sleep and
 * retry pushing queued frames to the socket until the queue drains, a
 * signal/socket error occurs, or L2CAP_LOCAL_BUSY_TRIES is exceeded
 * (which disconnects the channel). */
3727 static void l2cap_busy_work(struct work_struct *work)
3729 DECLARE_WAITQUEUE(wait, current);
3730 struct l2cap_pinfo *pi =
3731 container_of(work, struct l2cap_pinfo, busy_work);
3732 struct sock *sk = (struct sock *)pi;
3733 int n_tries = 0, timeo = HZ/5, err;
3734 struct sk_buff *skb;
3736 lock_sock(sk);
3738 add_wait_queue(sk_sleep(sk), &wait);
3739 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3740 set_current_state(TASK_INTERRUPTIBLE);
3742 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3743 err = -EBUSY;
3744 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3745 break;
3748 if (!timeo)
3749 timeo = HZ/5;
3751 if (signal_pending(current)) {
3752 err = sock_intr_errno(timeo);
3753 break;
/* Drop the lock while sleeping so the receive path can make
 * progress emptying the socket queue. */
3756 release_sock(sk);
3757 timeo = schedule_timeout(timeo);
3758 lock_sock(sk);
3760 err = sock_error(sk);
3761 if (err)
3762 break;
3764 if (l2cap_try_push_rx_skb(sk) == 0)
3765 break;
3768 set_current_state(TASK_RUNNING);
3769 remove_wait_queue(sk_sleep(sk), &wait);
3771 release_sock(sk);
/* Deliver an in-sequence I-frame to the SDU reassembler.  While already
 * in local busy, just append to the busy queue and retry the drain.  If
 * delivery fails (queue full/OOM), enter local busy: queue the frame,
 * send an RNR, stop the ack timer and schedule the busy worker. */
3774 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3776 struct l2cap_pinfo *pi = l2cap_pi(sk);
3777 int sctrl, err;
3779 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3780 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3781 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3782 return l2cap_try_push_rx_skb(sk);
3787 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3788 if (err >= 0) {
3789 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3790 return err;
3793 /* Busy Condition */
3794 BT_DBG("sk %p, Enter local busy", sk);
3796 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3797 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3798 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3800 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3801 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3802 l2cap_send_sframe(pi, sctrl);
3804 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3806 del_timer(&pi->ack_timer);
3808 queue_work(_busy_wq, &pi->busy_work);
3810 return err;
/* Reassemble an SDU from streaming-mode frames.  Unlike ERTM there is
 * no retransmission: SAR violations and oversized SDUs just discard the
 * partial SDU instead of disconnecting.  Consumes 'skb'; returns 0 on
 * success or a negative errno (default -EINVAL for protocol errors). */
3813 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3815 struct l2cap_pinfo *pi = l2cap_pi(sk);
3816 struct sk_buff *_skb;
3817 int err = -EINVAL;
3820 * TODO: We have to notify the userland if some data is lost with the
3821 * Streaming Mode.
3824 switch (control & L2CAP_CTRL_SAR) {
3825 case L2CAP_SDU_UNSEGMENTED:
/* Abandon any partial SDU that was in flight. */
3826 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3827 kfree_skb(pi->sdu);
3828 break;
3831 err = sock_queue_rcv_skb(sk, skb);
3832 if (!err)
3833 return 0;
3835 break;
3837 case L2CAP_SDU_START:
3838 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3839 kfree_skb(pi->sdu);
3840 break;
/* First two payload bytes carry the total SDU length. */
3843 pi->sdu_len = get_unaligned_le16(skb->data);
3844 skb_pull(skb, 2);
3846 if (pi->sdu_len > pi->imtu) {
3847 err = -EMSGSIZE;
3848 break;
3851 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3852 if (!pi->sdu) {
3853 err = -ENOMEM;
3854 break;
3857 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3859 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3860 pi->partial_sdu_len = skb->len;
3861 err = 0;
3862 break;
3864 case L2CAP_SDU_CONTINUE:
3865 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3866 break;
3868 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3870 pi->partial_sdu_len += skb->len;
3871 if (pi->partial_sdu_len > pi->sdu_len)
3872 kfree_skb(pi->sdu);
3873 else
3874 err = 0;
3876 break;
3878 case L2CAP_SDU_END:
3879 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3880 break;
3882 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3884 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3885 pi->partial_sdu_len += skb->len;
3887 if (pi->partial_sdu_len > pi->imtu)
3888 goto drop;
/* Deliver only if the reassembled length matches the announced one. */
3890 if (pi->partial_sdu_len == pi->sdu_len) {
3891 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3892 err = sock_queue_rcv_skb(sk, _skb);
3893 if (err < 0)
3894 kfree_skb(_skb);
3896 err = 0;
3898 drop:
3899 kfree_skb(pi->sdu);
3900 break;
3903 kfree_skb(skb);
3904 return err;
/* After a missing frame arrives, deliver the now-contiguous run of
 * buffered frames from the SREJ queue, advancing buffer_seq_srej and
 * the expected tx_seq (mod 64) as each one is consumed. */
3907 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3909 struct sk_buff *skb;
3910 u16 control;
/* Stop at the first gap in the sequence. */
3912 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3913 if (bt_cb(skb)->tx_seq != tx_seq)
3914 break;
3916 skb = skb_dequeue(SREJ_QUEUE(sk));
3917 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3918 l2cap_ertm_reassembly_sdu(sk, skb, control);
3919 l2cap_pi(sk)->buffer_seq_srej =
3920 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3921 tx_seq = (tx_seq + 1) % 64;
/* A selectively-rejected frame with 'tx_seq' was received: drop its
 * entry from the SREJ list, and re-send an SREJ for every entry that
 * precedes it (rotating each entry to the list tail). */
3925 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3927 struct l2cap_pinfo *pi = l2cap_pi(sk);
3928 struct srej_list *l, *tmp;
3929 u16 control;
3931 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Found the satisfied request: remove it and stop. */
3932 if (l->tx_seq == tx_seq) {
3933 list_del(&l->list);
3934 kfree(l);
3935 return;
3937 control = L2CAP_SUPER_SELECT_REJECT;
3938 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3939 l2cap_send_sframe(pi, control);
3940 list_del(&l->list);
3941 list_add_tail(&l->list, SREJ_LIST(sk));
/* A frame arrived ahead of expected_tx_seq: send a Selective Reject for
 * every missing sequence number up to (but excluding) tx_seq, recording
 * each in the SREJ list, then skip expected_tx_seq past tx_seq.
 *
 * Fix: the kzalloc(GFP_ATOMIC) result was dereferenced without a NULL
 * check, oopsing under memory pressure; bail out early instead (best
 * effort -- the peer's retransmission will retrigger the SREJ path). */
3945 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3947 struct l2cap_pinfo *pi = l2cap_pi(sk);
3948 struct srej_list *new;
3949 u16 control;
3951 while (tx_seq != pi->expected_tx_seq) {
3952 control = L2CAP_SUPER_SELECT_REJECT;
3953 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3954 l2cap_send_sframe(pi, control);
3956 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
if (!new)
return;
3957 new->tx_seq = pi->expected_tx_seq;
3958 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3959 list_add_tail(&new->list, SREJ_LIST(sk));
/* Step past the frame that triggered the SREJ. */
3961 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Handle a received ERTM I-frame.
 *
 * Validates tx_seq against the receive window, acknowledges frames the
 * peer reports via req_seq, and runs the SREJ (selective reject) recovery
 * state machine for out-of-sequence frames.  In-sequence frames are
 * pushed up for reassembly and batched acknowledgements are scheduled.
 * Returns 0 in all cases; the skb is consumed (queued or freed).
 * Sequence arithmetic is modulo 64 throughout.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	/* Ack after roughly a sixth of the tx window has accumulated. */
	int num_to_ack = (pi->tx_win/6) + 1;
	int tx_seq_offset, expected_tx_seq_offset;
	int err = 0;

	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
								rx_control);

	/* F-bit answers our poll while waiting in WAIT_F: stop the monitor
	 * timer and fall back to the retransmission timer if needed. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* req_seq acknowledges our transmitted frames up to that number. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= pi->tx_win) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* NOTE(review): compares the whole state word with '==', so this only
	 * matches when LOCAL_BUSY is the sole flag set — confirm intended. */
	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest requested frame arrived: buffer it, drain any
			 * now-contiguous frames, and retire the SREJ entry. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* All gaps filled: leave SREJ recovery. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(pi);
				BT_DBG("sk %p, Exit SREJ_SENT", sk);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
				goto drop;

			/* Already requested?  Then resend older SREJs instead. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(pi->expected_tx_seq - pi->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("sk %p, Enter SREJ", sk);

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		__skb_queue_head_init(BUSY_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);

		del_timer(&pi->ack_timer);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* While recovering, in-sequence frames are buffered too. */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	err = l2cap_push_rx_skb(sk, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	}

	__mod_ack_timer();

	/* Send an explicit ack once enough frames have accumulated. */
	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
	if (pi->num_acked == num_to_ack - 1)
		l2cap_send_ack(pi);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Handle a Receiver-Ready (RR) supervisory frame.
 *
 * Acknowledges our outstanding frames up to req_seq, then reacts to the
 * P/F bits: a poll demands an F-bit response (SREJ tail or RR/RNR/I),
 * a final bit may trigger retransmission after a REJ exchange, and a
 * plain RR clears remote-busy and resumes transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polled us: we must answer with the F-bit set. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* A pending REJ action absorbs this F-bit; otherwise the
		 * F-bit means we should retransmit unacked frames. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
/* Handle a Reject (REJ) supervisory frame.
 *
 * The peer asks for a go-back-N retransmission starting at req_seq.
 * Frames below req_seq are acknowledged and dropped; the rest are
 * retransmitted.  When the F-bit is set and a REJ action is already
 * pending, the retransmission was done and the flag is just cleared.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember the REJ so the F-bit reply isn't retransmitted twice. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) supervisory frame.
 *
 * The peer requests retransmission of the single frame req_seq.  A poll
 * additionally acknowledges everything below req_seq and resumes normal
 * transmission; a final bit may instead confirm an earlier SREJ action.
 * srej_save_reqseq records the request so a later F-bit can be matched.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit matching a recorded SREJ: already retransmitted. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a Receiver-Not-Ready (RNR) supervisory frame.
 *
 * Marks the peer busy, acknowledges frames up to req_seq, and stops the
 * retransmission timer unless we are inside SREJ recovery.  A poll bit
 * is answered with an F-bit RR/RNR (or SREJ tail when recovering).
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
/* Dispatch a supervisory (S) frame to its type-specific handler.
 *
 * First resolves a pending WAIT_F state if the F-bit answers our poll,
 * then demultiplexes on the supervise field (RR / REJ / SREJ / RNR).
 * The skb carries no payload beyond the control field and is freed here.
 * Always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
/* Validate and dispatch one received ERTM PDU.
 *
 * Pulls the 16-bit control field, checks the FCS, adjusts the payload
 * length for SAR-start headers and CRC16 trailers, and validates both
 * the payload size (against MPS) and the req-seq window.  Well-formed
 * I-frames and S-frames are then routed to their handlers; anything
 * invalid is dropped (and for protocol violations the channel is
 * disconnected).  Always returns 0; the skb is consumed.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* SAR-start I-frames carry a 2-byte SDU length header. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* CRC16 FCS occupies the last 2 bytes. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Deliver an skb to the L2CAP channel identified by cid.
 *
 * Looks up the connected channel and hands the data to the mode-specific
 * path: basic mode queues straight to the socket, ERTM runs the full
 * reliability state machine (via the backlog when the socket is owned by
 * user context), and streaming mode does FCS/length checks plus
 * best-effort reassembly.  Unroutable or invalid data is freed.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked — it is unlocked at 'done' below and in
 * l2cap_recv_acldata() — confirm against its definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly in softirq unless user context holds
		 * the socket; then defer via the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode tolerates loss: just resync the counter. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless (PSM-addressed) datagram.
 *
 * Finds a bound/connected socket for the PSM on our source address,
 * enforces the incoming MTU, and queues the skb; everything else is
 * dropped.  Always returns 0; the skb is consumed either way.
 *
 * NOTE(review): l2cap_get_sock_by_psm() appears to return a bh-locked
 * socket (unlocked at 'done') — confirm in its definition.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
4453 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4455 struct l2cap_hdr *lh = (void *) skb->data;
4456 u16 cid, len;
4457 __le16 psm;
4459 skb_pull(skb, L2CAP_HDR_SIZE);
4460 cid = __le16_to_cpu(lh->cid);
4461 len = __le16_to_cpu(lh->len);
4463 if (len != skb->len) {
4464 kfree_skb(skb);
4465 return;
4468 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4470 switch (cid) {
4471 case L2CAP_CID_SIGNALING:
4472 l2cap_sig_channel(conn, skb);
4473 break;
4475 case L2CAP_CID_CONN_LESS:
4476 psm = get_unaligned_le16(skb->data);
4477 skb_pull(skb, 2);
4478 l2cap_conless_channel(conn, psm, skb);
4479 break;
4481 default:
4482 l2cap_data_channel(conn, cid, skb);
4483 break;
4487 /* ---- L2CAP interface with lower layer (HCI) ---- */
4489 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4491 int exact = 0, lm1 = 0, lm2 = 0;
4492 register struct sock *sk;
4493 struct hlist_node *node;
4495 if (type != ACL_LINK)
4496 return -EINVAL;
4498 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4500 /* Find listening sockets and check their link_mode */
4501 read_lock(&l2cap_sk_list.lock);
4502 sk_for_each(sk, node, &l2cap_sk_list.head) {
4503 if (sk->sk_state != BT_LISTEN)
4504 continue;
4506 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4507 lm1 |= HCI_LM_ACCEPT;
4508 if (l2cap_pi(sk)->role_switch)
4509 lm1 |= HCI_LM_MASTER;
4510 exact++;
4511 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4512 lm2 |= HCI_LM_ACCEPT;
4513 if (l2cap_pi(sk)->role_switch)
4514 lm2 |= HCI_LM_MASTER;
4517 read_unlock(&l2cap_sk_list.lock);
4519 return exact ? lm1 : lm2;
4522 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4524 struct l2cap_conn *conn;
4526 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4528 if (hcon->type != ACL_LINK)
4529 return -EINVAL;
4531 if (!status) {
4532 conn = l2cap_conn_add(hcon, status);
4533 if (conn)
4534 l2cap_conn_ready(conn);
4535 } else
4536 l2cap_conn_del(hcon, bt_err(status));
4538 return 0;
4541 static int l2cap_disconn_ind(struct hci_conn *hcon)
4543 struct l2cap_conn *conn = hcon->l2cap_data;
4545 BT_DBG("hcon %p", hcon);
4547 if (hcon->type != ACL_LINK || !conn)
4548 return 0x13;
4550 return conn->disc_reason;
4553 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4555 BT_DBG("hcon %p reason %d", hcon, reason);
4557 if (hcon->type != ACL_LINK)
4558 return -EINVAL;
4560 l2cap_conn_del(hcon, bt_err(reason));
4562 return 0;
/* React to an encryption change on a SEQPACKET/STREAM L2CAP socket.
 *
 * When encryption drops: medium-security channels get a 5-second grace
 * timer (re-encryption may follow), high-security channels are closed
 * immediately.  When encryption comes up, a pending medium-security
 * grace timer is cancelled.  Other socket types are ignored.
 */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
		return;

	if (encrypt == 0x00) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);
	} else {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
	}
}
/* HCI callback: authentication/encryption completed for hcon.
 *
 * Walks every channel on the connection and advances its state machine:
 * established channels re-check encryption requirements, BT_CONNECT
 * channels either send the deferred Connect Request (success) or are
 * scheduled for teardown, and BT_CONNECT2 channels answer the pending
 * incoming Connect Request with success or a security block.
 * Channels still waiting on their own security round (CONNECT_PEND)
 * are skipped.  Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred Connect Request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer triggers teardown. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * ACL_START fragments carry the basic L2CAP header, which gives the
 * total frame length; a complete frame is dispatched immediately, a
 * partial one starts accumulation in conn->rx_skb.  Continuation
 * fragments are appended until rx_len reaches zero, then the assembled
 * frame is dispatched.  Any framing violation (unexpected start or
 * continuation, short/overlong fragments, frames exceeding the channel
 * MTU) marks the connection unreliable and drops the data.
 * Always returns 0; the input skb is consumed on every path.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		if (conn->rx_len) {
			/* Previous frame never completed: discard it and
			 * treat this start fragment as a fresh frame. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject early if the frame cannot fit the channel's MTU.
		 * NOTE(review): the lookup appears to return sk bh-locked,
		 * hence the bh_unlock_sock() calls — confirm. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
/* seq_file show handler: dump one line per L2CAP socket to debugfs.
 * Columns: src dst state psm scid dcid imtu omtu sec_level.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
/* debugfs open: bind the seq_file single-show handler. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal at module exit. */
static struct dentry *l2cap_debugfs;
/* Socket-layer operations exposed for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
/* Registered with the Bluetooth socket layer to create L2CAP sockets. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
/* HCI protocol hooks: how the HCI core delivers ACL events/data to L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4835 static int __init l2cap_init(void)
4837 int err;
4839 err = proto_register(&l2cap_proto, 0);
4840 if (err < 0)
4841 return err;
4843 _busy_wq = create_singlethread_workqueue("l2cap");
4844 if (!_busy_wq)
4845 goto error;
4847 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4848 if (err < 0) {
4849 BT_ERR("L2CAP socket registration failed");
4850 goto error;
4853 err = hci_register_proto(&l2cap_hci_proto);
4854 if (err < 0) {
4855 BT_ERR("L2CAP protocol registration failed");
4856 bt_sock_unregister(BTPROTO_L2CAP);
4857 goto error;
4860 if (bt_debugfs) {
4861 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4862 bt_debugfs, NULL, &l2cap_debugfs_fops);
4863 if (!l2cap_debugfs)
4864 BT_ERR("Failed to create L2CAP debug file");
4867 BT_INFO("L2CAP ver %s", VERSION);
4868 BT_INFO("L2CAP socket layer initialized");
4870 return 0;
4872 error:
4873 proto_unregister(&l2cap_proto);
4874 return err;
/* Module exit: tear everything down in reverse order of l2cap_init(). */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Finish any queued busy-work before destroying the queue. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Module parameter: allows ERTM support to be disabled (mode 0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");