btrfs: restructure find_free_dev_extent()
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob0b54b7dd84010a52147a54155c4f8db2b61752a7
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core and sockets. */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
60 static int disable_ertm = 0;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
67 static struct workqueue_struct *_busy_wq;
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP timers ---- */

/* sk_timer expiry handler: close the socket with an error that reflects
 * how far the connection setup had progressed. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Reap the socket if it is already zapped and orphaned. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
111 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
117 static void l2cap_sock_clear_timer(struct sock *sk)
119 BT_DBG("sock %p state %d", sk, sk->sk_state);
120 sk_stop_timer(sk, &sk->sk_timer);
123 /* ---- L2CAP channels ---- */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
129 break;
131 return s;
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 struct sock *s;
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
139 break;
141 return s;
144 /* Find channel with given SCID.
145 * Returns locked socket */
146 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
148 struct sock *s;
149 read_lock(&l->lock);
150 s = __l2cap_get_chan_by_scid(l, cid);
151 if (s)
152 bh_lock_sock(s);
153 read_unlock(&l->lock);
154 return s;
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 struct sock *s;
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
162 break;
164 return s;
167 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
169 struct sock *s;
170 read_lock(&l->lock);
171 s = __l2cap_get_chan_by_ident(l, ident);
172 if (s)
173 bh_lock_sock(s);
174 read_unlock(&l->lock);
175 return s;
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
180 u16 cid = L2CAP_CID_DYN_START;
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
184 return cid;
187 return 0;
190 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
192 sock_hold(sk);
194 if (l->head)
195 l2cap_pi(l->head)->prev_c = sk;
197 l2cap_pi(sk)->next_c = l->head;
198 l2cap_pi(sk)->prev_c = NULL;
199 l->head = sk;
202 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
204 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
206 write_lock_bh(&l->lock);
207 if (sk == l->head)
208 l->head = next;
210 if (next)
211 l2cap_pi(next)->prev_c = prev;
212 if (prev)
213 l2cap_pi(prev)->next_c = next;
214 write_unlock_bh(&l->lock);
216 __sock_put(sk);
219 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
221 struct l2cap_chan_list *l = &conn->chan_list;
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
224 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
226 conn->disc_reason = 0x13;
228 l2cap_pi(sk)->conn = conn;
230 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
233 } else if (sk->sk_type == SOCK_DGRAM) {
234 /* Connectionless socket */
235 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 } else {
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
245 __l2cap_chan_link(l, sk);
247 if (parent)
248 bt_accept_enqueue(parent, sk);
251 /* Delete channel.
252 * Must be called on the locked socket. */
253 static void l2cap_chan_del(struct sock *sk, int err)
255 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
256 struct sock *parent = bt_sk(sk)->parent;
258 l2cap_sock_clear_timer(sk);
260 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
262 if (conn) {
263 /* Unlink from channel list */
264 l2cap_chan_unlink(&conn->chan_list, sk);
265 l2cap_pi(sk)->conn = NULL;
266 hci_conn_put(conn->hcon);
269 sk->sk_state = BT_CLOSED;
270 sock_set_flag(sk, SOCK_ZAPPED);
272 if (err)
273 sk->sk_err = err;
275 if (parent) {
276 bt_accept_unlink(sk);
277 parent->sk_data_ready(parent, 0);
278 } else
279 sk->sk_state_change(sk);
281 skb_queue_purge(TX_QUEUE(sk));
283 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 struct srej_list *l, *tmp;
286 del_timer(&l2cap_pi(sk)->retrans_timer);
287 del_timer(&l2cap_pi(sk)->monitor_timer);
288 del_timer(&l2cap_pi(sk)->ack_timer);
290 skb_queue_purge(SREJ_QUEUE(sk));
291 skb_queue_purge(BUSY_QUEUE(sk));
293 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
294 list_del(&l->list);
295 kfree(l);
300 /* Service level security */
301 static inline int l2cap_check_security(struct sock *sk)
303 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
304 __u8 auth_type;
306 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
308 auth_type = HCI_AT_NO_BONDING_MITM;
309 else
310 auth_type = HCI_AT_NO_BONDING;
312 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
313 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
314 } else {
315 switch (l2cap_pi(sk)->sec_level) {
316 case BT_SECURITY_HIGH:
317 auth_type = HCI_AT_GENERAL_BONDING_MITM;
318 break;
319 case BT_SECURITY_MEDIUM:
320 auth_type = HCI_AT_GENERAL_BONDING;
321 break;
322 default:
323 auth_type = HCI_AT_NO_BONDING;
324 break;
328 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
329 auth_type);
332 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
334 u8 id;
336 /* Get next available identificator.
337 * 1 - 128 are used by kernel.
338 * 129 - 199 are reserved.
339 * 200 - 254 are used by utilities like l2ping, etc.
342 spin_lock_bh(&conn->lock);
344 if (++conn->tx_ident > 128)
345 conn->tx_ident = 1;
347 id = conn->tx_ident;
349 spin_unlock_bh(&conn->lock);
351 return id;
354 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
358 BT_DBG("code 0x%2.2x", code);
360 if (!skb)
361 return;
363 hci_send_acl(conn->hcon, skb, 0);
366 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
368 struct sk_buff *skb;
369 struct l2cap_hdr *lh;
370 struct l2cap_conn *conn = pi->conn;
371 struct sock *sk = (struct sock *)pi;
372 int count, hlen = L2CAP_HDR_SIZE + 2;
374 if (sk->sk_state != BT_CONNECTED)
375 return;
377 if (pi->fcs == L2CAP_FCS_CRC16)
378 hlen += 2;
380 BT_DBG("pi %p, control 0x%2.2x", pi, control);
382 count = min_t(unsigned int, conn->mtu, hlen);
383 control |= L2CAP_CTRL_FRAME_TYPE;
385 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
386 control |= L2CAP_CTRL_FINAL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
390 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
391 control |= L2CAP_CTRL_POLL;
392 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
395 skb = bt_skb_alloc(count, GFP_ATOMIC);
396 if (!skb)
397 return;
399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
400 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
401 lh->cid = cpu_to_le16(pi->dcid);
402 put_unaligned_le16(control, skb_put(skb, 2));
404 if (pi->fcs == L2CAP_FCS_CRC16) {
405 u16 fcs = crc16(0, (u8 *)lh, count - 2);
406 put_unaligned_le16(fcs, skb_put(skb, 2));
409 hci_send_acl(pi->conn->hcon, skb, 0);
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
414 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
415 control |= L2CAP_SUPER_RCV_NOT_READY;
416 pi->conn_state |= L2CAP_CONN_RNR_SENT;
417 } else
418 control |= L2CAP_SUPER_RCV_READY;
420 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
422 l2cap_send_sframe(pi, control);
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
430 static void l2cap_do_start(struct sock *sk)
432 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
434 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
435 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
436 return;
438 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
439 struct l2cap_conn_req req;
440 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
441 req.psm = l2cap_pi(sk)->psm;
443 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
446 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
447 L2CAP_CONN_REQ, sizeof(req), &req);
449 } else {
450 struct l2cap_info_req req;
451 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
453 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
454 conn->info_ident = l2cap_get_ident(conn);
456 mod_timer(&conn->info_timer, jiffies +
457 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
459 l2cap_send_cmd(conn, conn->info_ident,
460 L2CAP_INFO_REQ, sizeof(req), &req);
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
480 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
482 struct l2cap_disconn_req req;
484 if (!conn)
485 return;
487 skb_queue_purge(TX_QUEUE(sk));
489 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 del_timer(&l2cap_pi(sk)->retrans_timer);
491 del_timer(&l2cap_pi(sk)->monitor_timer);
492 del_timer(&l2cap_pi(sk)->ack_timer);
495 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
497 l2cap_send_cmd(conn, l2cap_get_ident(conn),
498 L2CAP_DISCONN_REQ, sizeof(req), &req);
500 sk->sk_state = BT_DISCONN;
501 sk->sk_err = err;
504 /* ---- L2CAP connections ---- */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
509 struct sock *sk;
511 BT_DBG("conn %p", conn);
513 INIT_LIST_HEAD(&del.list);
515 read_lock(&l->lock);
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
518 bh_lock_sock(sk);
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
522 bh_unlock_sock(sk);
523 continue;
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
558 char buf[128];
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
562 if (l2cap_check_security(sk)) {
563 if (bt_sk(sk)->defer_setup) {
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
569 } else {
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
574 } else {
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
594 bh_unlock_sock(sk);
597 read_unlock(&l->lock);
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
608 static void l2cap_conn_ready(struct l2cap_conn *conn)
610 struct l2cap_chan_list *l = &conn->chan_list;
611 struct sock *sk;
613 BT_DBG("conn %p", conn);
615 read_lock(&l->lock);
617 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
618 bh_lock_sock(sk);
620 if (sk->sk_type != SOCK_SEQPACKET &&
621 sk->sk_type != SOCK_STREAM) {
622 l2cap_sock_clear_timer(sk);
623 sk->sk_state = BT_CONNECTED;
624 sk->sk_state_change(sk);
625 } else if (sk->sk_state == BT_CONNECT)
626 l2cap_do_start(sk);
628 bh_unlock_sock(sk);
631 read_unlock(&l->lock);
634 /* Notify sockets that we cannot guaranty reliability anymore */
635 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
637 struct l2cap_chan_list *l = &conn->chan_list;
638 struct sock *sk;
640 BT_DBG("conn %p", conn);
642 read_lock(&l->lock);
644 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
645 if (l2cap_pi(sk)->force_reliable)
646 sk->sk_err = err;
649 read_unlock(&l->lock);
652 static void l2cap_info_timeout(unsigned long arg)
654 struct l2cap_conn *conn = (void *) arg;
656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
657 conn->info_ident = 0;
659 l2cap_conn_start(conn);
662 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
664 struct l2cap_conn *conn = hcon->l2cap_data;
666 if (conn || status)
667 return conn;
669 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
670 if (!conn)
671 return NULL;
673 hcon->l2cap_data = conn;
674 conn->hcon = hcon;
676 BT_DBG("hcon %p conn %p", hcon, conn);
678 conn->mtu = hcon->hdev->acl_mtu;
679 conn->src = &hcon->hdev->bdaddr;
680 conn->dst = &hcon->dst;
682 conn->feat_mask = 0;
684 spin_lock_init(&conn->lock);
685 rwlock_init(&conn->chan_list.lock);
687 setup_timer(&conn->info_timer, l2cap_info_timeout,
688 (unsigned long) conn);
690 conn->disc_reason = 0x13;
692 return conn;
695 static void l2cap_conn_del(struct hci_conn *hcon, int err)
697 struct l2cap_conn *conn = hcon->l2cap_data;
698 struct sock *sk;
700 if (!conn)
701 return;
703 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
705 kfree_skb(conn->rx_skb);
707 /* Kill channels */
708 while ((sk = conn->chan_list.head)) {
709 bh_lock_sock(sk);
710 l2cap_chan_del(sk, err);
711 bh_unlock_sock(sk);
712 l2cap_sock_kill(sk);
715 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
716 del_timer_sync(&conn->info_timer);
718 hcon->l2cap_data = NULL;
719 kfree(conn);
722 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
724 struct l2cap_chan_list *l = &conn->chan_list;
725 write_lock_bh(&l->lock);
726 __l2cap_chan_add(conn, sk, parent);
727 write_unlock_bh(&l->lock);
730 /* ---- Socket interface ---- */
731 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
733 struct sock *sk;
734 struct hlist_node *node;
735 sk_for_each(sk, node, &l2cap_sk_list.head)
736 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
737 goto found;
738 sk = NULL;
739 found:
740 return sk;
743 /* Find socket with psm and source bdaddr.
744 * Returns closest match.
746 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
748 struct sock *sk = NULL, *sk1 = NULL;
749 struct hlist_node *node;
751 sk_for_each(sk, node, &l2cap_sk_list.head) {
752 if (state && sk->sk_state != state)
753 continue;
755 if (l2cap_pi(sk)->psm == psm) {
756 /* Exact match. */
757 if (!bacmp(&bt_sk(sk)->src, src))
758 break;
760 /* Closest match */
761 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
762 sk1 = sk;
765 return node ? sk : sk1;
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
770 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
772 struct sock *s;
773 read_lock(&l2cap_sk_list.lock);
774 s = __l2cap_get_sock_by_psm(state, psm, src);
775 if (s)
776 bh_lock_sock(s);
777 read_unlock(&l2cap_sk_list.lock);
778 return s;
781 static void l2cap_sock_destruct(struct sock *sk)
783 BT_DBG("sk %p", sk);
785 skb_queue_purge(&sk->sk_receive_queue);
786 skb_queue_purge(&sk->sk_write_queue);
789 static void l2cap_sock_cleanup_listen(struct sock *parent)
791 struct sock *sk;
793 BT_DBG("parent %p", parent);
795 /* Close not yet accepted channels */
796 while ((sk = bt_accept_dequeue(parent, NULL)))
797 l2cap_sock_close(sk);
799 parent->sk_state = BT_CLOSED;
800 sock_set_flag(parent, SOCK_ZAPPED);
803 /* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
806 static void l2cap_sock_kill(struct sock *sk)
808 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
809 return;
811 BT_DBG("sk %p state %d", sk, sk->sk_state);
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list, sk);
815 sock_set_flag(sk, SOCK_DEAD);
816 sock_put(sk);
819 static void __l2cap_sock_close(struct sock *sk, int reason)
821 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
823 switch (sk->sk_state) {
824 case BT_LISTEN:
825 l2cap_sock_cleanup_listen(sk);
826 break;
828 case BT_CONNECTED:
829 case BT_CONFIG:
830 if (sk->sk_type == SOCK_SEQPACKET ||
831 sk->sk_type == SOCK_STREAM) {
832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
835 l2cap_send_disconn_req(conn, sk, reason);
836 } else
837 l2cap_chan_del(sk, reason);
838 break;
840 case BT_CONNECT2:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844 struct l2cap_conn_rsp rsp;
845 __u16 result;
847 if (bt_sk(sk)->defer_setup)
848 result = L2CAP_CR_SEC_BLOCK;
849 else
850 result = L2CAP_CR_BAD_PSM;
852 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
853 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
854 rsp.result = cpu_to_le16(result);
855 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
856 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
857 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
858 } else
859 l2cap_chan_del(sk, reason);
860 break;
862 case BT_CONNECT:
863 case BT_DISCONN:
864 l2cap_chan_del(sk, reason);
865 break;
867 default:
868 sock_set_flag(sk, SOCK_ZAPPED);
869 break;
873 /* Must be called on unlocked socket. */
874 static void l2cap_sock_close(struct sock *sk)
876 l2cap_sock_clear_timer(sk);
877 lock_sock(sk);
878 __l2cap_sock_close(sk, ECONNRESET);
879 release_sock(sk);
880 l2cap_sock_kill(sk);
883 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
885 struct l2cap_pinfo *pi = l2cap_pi(sk);
887 BT_DBG("sk %p", sk);
889 if (parent) {
890 sk->sk_type = parent->sk_type;
891 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
893 pi->imtu = l2cap_pi(parent)->imtu;
894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
896 pi->mode = l2cap_pi(parent)->mode;
897 pi->fcs = l2cap_pi(parent)->fcs;
898 pi->max_tx = l2cap_pi(parent)->max_tx;
899 pi->tx_win = l2cap_pi(parent)->tx_win;
900 pi->sec_level = l2cap_pi(parent)->sec_level;
901 pi->role_switch = l2cap_pi(parent)->role_switch;
902 pi->force_reliable = l2cap_pi(parent)->force_reliable;
903 } else {
904 pi->imtu = L2CAP_DEFAULT_MTU;
905 pi->omtu = 0;
906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 pi->mode = L2CAP_MODE_ERTM;
908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 } else {
910 pi->mode = L2CAP_MODE_BASIC;
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
913 pi->fcs = L2CAP_FCS_CRC16;
914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
915 pi->sec_level = BT_SECURITY_LOW;
916 pi->role_switch = 0;
917 pi->force_reliable = 0;
920 /* Default config options */
921 pi->conf_len = 0;
922 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
923 skb_queue_head_init(TX_QUEUE(sk));
924 skb_queue_head_init(SREJ_QUEUE(sk));
925 skb_queue_head_init(BUSY_QUEUE(sk));
926 INIT_LIST_HEAD(SREJ_LIST(sk));
929 static struct proto l2cap_proto = {
930 .name = "L2CAP",
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct l2cap_pinfo)
935 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
937 struct sock *sk;
939 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
940 if (!sk)
941 return NULL;
943 sock_init_data(sock, sk);
944 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
946 sk->sk_destruct = l2cap_sock_destruct;
947 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
949 sock_reset_flag(sk, SOCK_ZAPPED);
951 sk->sk_protocol = proto;
952 sk->sk_state = BT_OPEN;
954 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
956 bt_sock_link(&l2cap_sk_list, sk);
957 return sk;
960 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
961 int kern)
963 struct sock *sk;
965 BT_DBG("sock %p", sock);
967 sock->state = SS_UNCONNECTED;
969 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
970 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
971 return -ESOCKTNOSUPPORT;
973 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
974 return -EPERM;
976 sock->ops = &l2cap_sock_ops;
978 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
979 if (!sk)
980 return -ENOMEM;
982 l2cap_sock_init(sk, NULL);
983 return 0;
986 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
988 struct sock *sk = sock->sk;
989 struct sockaddr_l2 la;
990 int len, err = 0;
992 BT_DBG("sk %p", sk);
994 if (!addr || addr->sa_family != AF_BLUETOOTH)
995 return -EINVAL;
997 memset(&la, 0, sizeof(la));
998 len = min_t(unsigned int, sizeof(la), alen);
999 memcpy(&la, addr, len);
1001 if (la.l2_cid)
1002 return -EINVAL;
1004 lock_sock(sk);
1006 if (sk->sk_state != BT_OPEN) {
1007 err = -EBADFD;
1008 goto done;
1011 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
1012 !capable(CAP_NET_BIND_SERVICE)) {
1013 err = -EACCES;
1014 goto done;
1017 write_lock_bh(&l2cap_sk_list.lock);
1019 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1020 err = -EADDRINUSE;
1021 } else {
1022 /* Save source address */
1023 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1024 l2cap_pi(sk)->psm = la.l2_psm;
1025 l2cap_pi(sk)->sport = la.l2_psm;
1026 sk->sk_state = BT_BOUND;
1028 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1029 __le16_to_cpu(la.l2_psm) == 0x0003)
1030 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1033 write_unlock_bh(&l2cap_sk_list.lock);
1035 done:
1036 release_sock(sk);
1037 return err;
1040 static int l2cap_do_connect(struct sock *sk)
1042 bdaddr_t *src = &bt_sk(sk)->src;
1043 bdaddr_t *dst = &bt_sk(sk)->dst;
1044 struct l2cap_conn *conn;
1045 struct hci_conn *hcon;
1046 struct hci_dev *hdev;
1047 __u8 auth_type;
1048 int err;
1050 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1051 l2cap_pi(sk)->psm);
1053 hdev = hci_get_route(dst, src);
1054 if (!hdev)
1055 return -EHOSTUNREACH;
1057 hci_dev_lock_bh(hdev);
1059 err = -ENOMEM;
1061 if (sk->sk_type == SOCK_RAW) {
1062 switch (l2cap_pi(sk)->sec_level) {
1063 case BT_SECURITY_HIGH:
1064 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1065 break;
1066 case BT_SECURITY_MEDIUM:
1067 auth_type = HCI_AT_DEDICATED_BONDING;
1068 break;
1069 default:
1070 auth_type = HCI_AT_NO_BONDING;
1071 break;
1073 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1074 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1075 auth_type = HCI_AT_NO_BONDING_MITM;
1076 else
1077 auth_type = HCI_AT_NO_BONDING;
1079 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1080 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1081 } else {
1082 switch (l2cap_pi(sk)->sec_level) {
1083 case BT_SECURITY_HIGH:
1084 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1085 break;
1086 case BT_SECURITY_MEDIUM:
1087 auth_type = HCI_AT_GENERAL_BONDING;
1088 break;
1089 default:
1090 auth_type = HCI_AT_NO_BONDING;
1091 break;
1095 hcon = hci_connect(hdev, ACL_LINK, dst,
1096 l2cap_pi(sk)->sec_level, auth_type);
1097 if (!hcon)
1098 goto done;
1100 conn = l2cap_conn_add(hcon, 0);
1101 if (!conn) {
1102 hci_conn_put(hcon);
1103 goto done;
1106 err = 0;
1108 /* Update source addr of the socket */
1109 bacpy(src, conn->src);
1111 l2cap_chan_add(conn, sk, NULL);
1113 sk->sk_state = BT_CONNECT;
1114 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1116 if (hcon->state == BT_CONNECTED) {
1117 if (sk->sk_type != SOCK_SEQPACKET &&
1118 sk->sk_type != SOCK_STREAM) {
1119 l2cap_sock_clear_timer(sk);
1120 sk->sk_state = BT_CONNECTED;
1121 } else
1122 l2cap_do_start(sk);
1125 done:
1126 hci_dev_unlock_bh(hdev);
1127 hci_dev_put(hdev);
1128 return err;
1131 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1133 struct sock *sk = sock->sk;
1134 struct sockaddr_l2 la;
1135 int len, err = 0;
1137 BT_DBG("sk %p", sk);
1139 if (!addr || alen < sizeof(addr->sa_family) ||
1140 addr->sa_family != AF_BLUETOOTH)
1141 return -EINVAL;
1143 memset(&la, 0, sizeof(la));
1144 len = min_t(unsigned int, sizeof(la), alen);
1145 memcpy(&la, addr, len);
1147 if (la.l2_cid)
1148 return -EINVAL;
1150 lock_sock(sk);
1152 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1153 && !la.l2_psm) {
1154 err = -EINVAL;
1155 goto done;
1158 switch (l2cap_pi(sk)->mode) {
1159 case L2CAP_MODE_BASIC:
1160 break;
1161 case L2CAP_MODE_ERTM:
1162 case L2CAP_MODE_STREAMING:
1163 if (!disable_ertm)
1164 break;
1165 /* fall through */
1166 default:
1167 err = -ENOTSUPP;
1168 goto done;
1171 switch (sk->sk_state) {
1172 case BT_CONNECT:
1173 case BT_CONNECT2:
1174 case BT_CONFIG:
1175 /* Already connecting */
1176 goto wait;
1178 case BT_CONNECTED:
1179 /* Already connected */
1180 err = -EISCONN;
1181 goto done;
1183 case BT_OPEN:
1184 case BT_BOUND:
1185 /* Can connect */
1186 break;
1188 default:
1189 err = -EBADFD;
1190 goto done;
1193 /* Set destination address and psm */
1194 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1195 l2cap_pi(sk)->psm = la.l2_psm;
1197 err = l2cap_do_connect(sk);
1198 if (err)
1199 goto done;
1201 wait:
1202 err = bt_sock_wait_state(sk, BT_CONNECTED,
1203 sock_sndtimeo(sk, flags & O_NONBLOCK));
1204 done:
1205 release_sock(sk);
1206 return err;
1209 static int l2cap_sock_listen(struct socket *sock, int backlog)
1211 struct sock *sk = sock->sk;
1212 int err = 0;
1214 BT_DBG("sk %p backlog %d", sk, backlog);
1216 lock_sock(sk);
1218 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1219 || sk->sk_state != BT_BOUND) {
1220 err = -EBADFD;
1221 goto done;
1224 switch (l2cap_pi(sk)->mode) {
1225 case L2CAP_MODE_BASIC:
1226 break;
1227 case L2CAP_MODE_ERTM:
1228 case L2CAP_MODE_STREAMING:
1229 if (!disable_ertm)
1230 break;
1231 /* fall through */
1232 default:
1233 err = -ENOTSUPP;
1234 goto done;
1237 if (!l2cap_pi(sk)->psm) {
1238 bdaddr_t *src = &bt_sk(sk)->src;
1239 u16 psm;
1241 err = -EINVAL;
1243 write_lock_bh(&l2cap_sk_list.lock);
1245 for (psm = 0x1001; psm < 0x1100; psm += 2)
1246 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1247 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1248 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1249 err = 0;
1250 break;
1253 write_unlock_bh(&l2cap_sk_list.lock);
1255 if (err < 0)
1256 goto done;
1259 sk->sk_max_ack_backlog = backlog;
1260 sk->sk_ack_backlog = 0;
1261 sk->sk_state = BT_LISTEN;
1263 done:
1264 release_sock(sk);
1265 return err;
1268 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1270 DECLARE_WAITQUEUE(wait, current);
1271 struct sock *sk = sock->sk, *nsk;
1272 long timeo;
1273 int err = 0;
1275 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1277 if (sk->sk_state != BT_LISTEN) {
1278 err = -EBADFD;
1279 goto done;
1282 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1284 BT_DBG("sk %p timeo %ld", sk, timeo);
1286 /* Wait for an incoming connection. (wake-one). */
1287 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1288 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1289 set_current_state(TASK_INTERRUPTIBLE);
1290 if (!timeo) {
1291 err = -EAGAIN;
1292 break;
1295 release_sock(sk);
1296 timeo = schedule_timeout(timeo);
1297 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1299 if (sk->sk_state != BT_LISTEN) {
1300 err = -EBADFD;
1301 break;
1304 if (signal_pending(current)) {
1305 err = sock_intr_errno(timeo);
1306 break;
1309 set_current_state(TASK_RUNNING);
1310 remove_wait_queue(sk_sleep(sk), &wait);
1312 if (err)
1313 goto done;
1315 newsock->state = SS_CONNECTED;
1317 BT_DBG("new socket %p", nsk);
1319 done:
1320 release_sock(sk);
1321 return err;
1324 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1327 struct sock *sk = sock->sk;
1329 BT_DBG("sock %p, sk %p", sock, sk);
1331 addr->sa_family = AF_BLUETOOTH;
1332 *len = sizeof(struct sockaddr_l2);
1334 if (peer) {
1335 la->l2_psm = l2cap_pi(sk)->psm;
1336 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1337 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1338 } else {
1339 la->l2_psm = l2cap_pi(sk)->sport;
1340 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1341 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1344 return 0;
/* Wait (in HZ/5 slices) until every outstanding ERTM I-frame has been
 * acknowledged by the peer or the connection goes away.  Called with
 * the socket locked; the lock is dropped around each sleep.  Returns
 * 0, a pending socket error, or a signal errno. */
1347 static int __l2cap_wait_ack(struct sock *sk)
1349 DECLARE_WAITQUEUE(wait, current);
1350 int err = 0;
1351 int timeo = HZ/5;
1353 add_wait_queue(sk_sleep(sk), &wait);
1354 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1355 set_current_state(TASK_INTERRUPTIBLE);
/* Re-arm the slice: this loop deliberately polls instead of
 * sleeping indefinitely on the wait queue. */
1357 if (!timeo)
1358 timeo = HZ/5;
1360 if (signal_pending(current)) {
1361 err = sock_intr_errno(timeo);
1362 break;
/* Sleep without the socket lock so acks can be processed. */
1365 release_sock(sk);
1366 timeo = schedule_timeout(timeo);
1367 lock_sock(sk);
1369 err = sock_error(sk);
1370 if (err)
1371 break;
1373 set_current_state(TASK_RUNNING);
1374 remove_wait_queue(sk_sleep(sk), &wait);
1375 return err;
1378 static void l2cap_monitor_timeout(unsigned long arg)
1380 struct sock *sk = (void *) arg;
1382 BT_DBG("sk %p", sk);
1384 bh_lock_sock(sk);
1385 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1386 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1387 bh_unlock_sock(sk);
1388 return;
1391 l2cap_pi(sk)->retry_count++;
1392 __mod_monitor_timer();
1394 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1395 bh_unlock_sock(sk);
1398 static void l2cap_retrans_timeout(unsigned long arg)
1400 struct sock *sk = (void *) arg;
1402 BT_DBG("sk %p", sk);
1404 bh_lock_sock(sk);
1405 l2cap_pi(sk)->retry_count = 1;
1406 __mod_monitor_timer();
1408 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1410 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1411 bh_unlock_sock(sk);
/* Free acknowledged I-frames from the head of the TX queue, stopping
 * at the first frame whose TxSeq equals expected_ack_seq (that one and
 * everything after it are still unacked).  Kills the retransmission
 * timer once no unacked frames remain. */
1414 static void l2cap_drop_acked_frames(struct sock *sk)
1416 struct sk_buff *skb;
1418 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1419 l2cap_pi(sk)->unacked_frames) {
/* Head frame is the first not-yet-acked one: stop here. */
1420 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1421 break;
1423 skb = skb_dequeue(TX_QUEUE(sk));
1424 kfree_skb(skb);
1426 l2cap_pi(sk)->unacked_frames--;
1429 if (!l2cap_pi(sk)->unacked_frames)
1430 del_timer(&l2cap_pi(sk)->retrans_timer);
1433 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1435 struct l2cap_pinfo *pi = l2cap_pi(sk);
1437 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1439 hci_send_acl(pi->conn->hcon, skb, 0);
/* Transmit every queued PDU in streaming mode: stamp the next TxSeq
 * into the control field, append the CRC16 FCS when configured, and
 * send.  No copy is kept -- streaming mode never retransmits. */
1442 static void l2cap_streaming_send(struct sock *sk)
1444 struct sk_buff *skb;
1445 struct l2cap_pinfo *pi = l2cap_pi(sk);
1446 u16 control, fcs;
1448 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1449 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1450 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1451 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* The FCS covers the whole PDU except its own trailing 2 bytes. */
1453 if (pi->fcs == L2CAP_FCS_CRC16) {
1454 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1455 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1458 l2cap_do_send(sk, skb);
/* TxSeq lives in a modulo-64 sequence space. */
1460 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Retransmit the single queued I-frame whose TxSeq matches tx_seq
 * (SREJ recovery).  Aborts the connection when the frame has already
 * been sent remote_max_tx times. */
1464 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1466 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 struct sk_buff *skb, *tx_skb;
1468 u16 control, fcs;
1470 skb = skb_peek(TX_QUEUE(sk));
1471 if (!skb)
1472 return;
/* Linear scan of the TX queue for the requested sequence number;
 * silently returns if it is no longer queued. */
1474 do {
1475 if (bt_cb(skb)->tx_seq == tx_seq)
1476 break;
1478 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1479 return;
1481 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1483 if (pi->remote_max_tx &&
1484 bt_cb(skb)->retries == pi->remote_max_tx) {
1485 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1486 return;
/* Transmit a clone so the original stays queued for further
 * retries.  NOTE(review): skb_clone() can return NULL under memory
 * pressure and is dereferenced unchecked below -- verify/fix. */
1489 tx_skb = skb_clone(skb, GFP_ATOMIC);
1490 bt_cb(skb)->retries++;
1491 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1493 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1494 control |= L2CAP_CTRL_FINAL;
1495 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Refresh ReqSeq and TxSeq in the control field before resending. */
1498 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1499 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1501 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1503 if (pi->fcs == L2CAP_FCS_CRC16) {
1504 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1505 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1508 l2cap_do_send(sk, tx_skb);
1511 static int l2cap_ertm_send(struct sock *sk)
1513 struct sk_buff *skb, *tx_skb;
1514 struct l2cap_pinfo *pi = l2cap_pi(sk);
1515 u16 control, fcs;
1516 int nsent = 0;
1518 if (sk->sk_state != BT_CONNECTED)
1519 return -ENOTCONN;
1521 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1523 if (pi->remote_max_tx &&
1524 bt_cb(skb)->retries == pi->remote_max_tx) {
1525 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1526 break;
1529 tx_skb = skb_clone(skb, GFP_ATOMIC);
1531 bt_cb(skb)->retries++;
1533 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1534 control &= L2CAP_CTRL_SAR;
1536 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1537 control |= L2CAP_CTRL_FINAL;
1538 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1540 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1541 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1542 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1545 if (pi->fcs == L2CAP_FCS_CRC16) {
1546 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1547 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1550 l2cap_do_send(sk, tx_skb);
1552 __mod_retrans_timer();
1554 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1555 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1557 pi->unacked_frames++;
1558 pi->frames_sent++;
1560 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1561 sk->sk_send_head = NULL;
1562 else
1563 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1565 nsent++;
1568 return nsent;
1571 static int l2cap_retransmit_frames(struct sock *sk)
1573 struct l2cap_pinfo *pi = l2cap_pi(sk);
1574 int ret;
1576 if (!skb_queue_empty(TX_QUEUE(sk)))
1577 sk->sk_send_head = TX_QUEUE(sk)->next;
1579 pi->next_tx_seq = pi->expected_ack_seq;
1580 ret = l2cap_ertm_send(sk);
1581 return ret;
1584 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1586 struct sock *sk = (struct sock *)pi;
1587 u16 control = 0;
1589 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1591 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1592 control |= L2CAP_SUPER_RCV_NOT_READY;
1593 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1594 l2cap_send_sframe(pi, control);
1595 return;
1598 if (l2cap_ertm_send(sk) > 0)
1599 return;
1601 control |= L2CAP_SUPER_RCV_READY;
1602 l2cap_send_sframe(pi, control);
1605 static void l2cap_send_srejtail(struct sock *sk)
1607 struct srej_list *tail;
1608 u16 control;
1610 control = L2CAP_SUPER_SELECT_REJECT;
1611 control |= L2CAP_CTRL_FINAL;
1613 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1614 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1616 l2cap_send_sframe(l2cap_pi(sk), control);
1619 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1621 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1622 struct sk_buff **frag;
1623 int err, sent = 0;
1625 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1626 return -EFAULT;
1628 sent += count;
1629 len -= count;
1631 /* Continuation fragments (no L2CAP header) */
1632 frag = &skb_shinfo(skb)->frag_list;
1633 while (len) {
1634 count = min_t(unsigned int, conn->mtu, len);
1636 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1637 if (!*frag)
1638 return -EFAULT;
1639 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1640 return -EFAULT;
1642 sent += count;
1643 len -= count;
1645 frag = &(*frag)->next;
1648 return sent;
1651 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1653 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1654 struct sk_buff *skb;
1655 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1656 struct l2cap_hdr *lh;
1658 BT_DBG("sk %p len %d", sk, (int)len);
1660 count = min_t(unsigned int, (conn->mtu - hlen), len);
1661 skb = bt_skb_send_alloc(sk, count + hlen,
1662 msg->msg_flags & MSG_DONTWAIT, &err);
1663 if (!skb)
1664 return ERR_PTR(-ENOMEM);
1666 /* Create L2CAP header */
1667 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1668 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1669 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1670 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1672 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1673 if (unlikely(err < 0)) {
1674 kfree_skb(skb);
1675 return ERR_PTR(err);
1677 return skb;
1680 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1682 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1683 struct sk_buff *skb;
1684 int err, count, hlen = L2CAP_HDR_SIZE;
1685 struct l2cap_hdr *lh;
1687 BT_DBG("sk %p len %d", sk, (int)len);
1689 count = min_t(unsigned int, (conn->mtu - hlen), len);
1690 skb = bt_skb_send_alloc(sk, count + hlen,
1691 msg->msg_flags & MSG_DONTWAIT, &err);
1692 if (!skb)
1693 return ERR_PTR(-ENOMEM);
1695 /* Create L2CAP header */
1696 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1697 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1698 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1700 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1701 if (unlikely(err < 0)) {
1702 kfree_skb(skb);
1703 return ERR_PTR(err);
1705 return skb;
1708 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1711 struct sk_buff *skb;
1712 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1713 struct l2cap_hdr *lh;
1715 BT_DBG("sk %p len %d", sk, (int)len);
1717 if (!conn)
1718 return ERR_PTR(-ENOTCONN);
1720 if (sdulen)
1721 hlen += 2;
1723 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1724 hlen += 2;
1726 count = min_t(unsigned int, (conn->mtu - hlen), len);
1727 skb = bt_skb_send_alloc(sk, count + hlen,
1728 msg->msg_flags & MSG_DONTWAIT, &err);
1729 if (!skb)
1730 return ERR_PTR(-ENOMEM);
1732 /* Create L2CAP header */
1733 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1734 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1735 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1736 put_unaligned_le16(control, skb_put(skb, 2));
1737 if (sdulen)
1738 put_unaligned_le16(sdulen, skb_put(skb, 2));
1740 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1741 if (unlikely(err < 0)) {
1742 kfree_skb(skb);
1743 return ERR_PTR(err);
1746 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1747 put_unaligned_le16(0, skb_put(skb, 2));
1749 bt_cb(skb)->retries = 0;
1750 return skb;
1753 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1755 struct l2cap_pinfo *pi = l2cap_pi(sk);
1756 struct sk_buff *skb;
1757 struct sk_buff_head sar_queue;
1758 u16 control;
1759 size_t size = 0;
1761 skb_queue_head_init(&sar_queue);
1762 control = L2CAP_SDU_START;
1763 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1764 if (IS_ERR(skb))
1765 return PTR_ERR(skb);
1767 __skb_queue_tail(&sar_queue, skb);
1768 len -= pi->remote_mps;
1769 size += pi->remote_mps;
1771 while (len > 0) {
1772 size_t buflen;
1774 if (len > pi->remote_mps) {
1775 control = L2CAP_SDU_CONTINUE;
1776 buflen = pi->remote_mps;
1777 } else {
1778 control = L2CAP_SDU_END;
1779 buflen = len;
1782 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1783 if (IS_ERR(skb)) {
1784 skb_queue_purge(&sar_queue);
1785 return PTR_ERR(skb);
1788 __skb_queue_tail(&sar_queue, skb);
1789 len -= buflen;
1790 size += buflen;
1792 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1793 if (sk->sk_send_head == NULL)
1794 sk->sk_send_head = sar_queue.next;
1796 return size;
1799 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1801 struct sock *sk = sock->sk;
1802 struct l2cap_pinfo *pi = l2cap_pi(sk);
1803 struct sk_buff *skb;
1804 u16 control;
1805 int err;
1807 BT_DBG("sock %p, sk %p", sock, sk);
1809 err = sock_error(sk);
1810 if (err)
1811 return err;
1813 if (msg->msg_flags & MSG_OOB)
1814 return -EOPNOTSUPP;
1816 lock_sock(sk);
1818 if (sk->sk_state != BT_CONNECTED) {
1819 err = -ENOTCONN;
1820 goto done;
1823 /* Connectionless channel */
1824 if (sk->sk_type == SOCK_DGRAM) {
1825 skb = l2cap_create_connless_pdu(sk, msg, len);
1826 if (IS_ERR(skb)) {
1827 err = PTR_ERR(skb);
1828 } else {
1829 l2cap_do_send(sk, skb);
1830 err = len;
1832 goto done;
1835 switch (pi->mode) {
1836 case L2CAP_MODE_BASIC:
1837 /* Check outgoing MTU */
1838 if (len > pi->omtu) {
1839 err = -EMSGSIZE;
1840 goto done;
1843 /* Create a basic PDU */
1844 skb = l2cap_create_basic_pdu(sk, msg, len);
1845 if (IS_ERR(skb)) {
1846 err = PTR_ERR(skb);
1847 goto done;
1850 l2cap_do_send(sk, skb);
1851 err = len;
1852 break;
1854 case L2CAP_MODE_ERTM:
1855 case L2CAP_MODE_STREAMING:
1856 /* Entire SDU fits into one PDU */
1857 if (len <= pi->remote_mps) {
1858 control = L2CAP_SDU_UNSEGMENTED;
1859 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1860 if (IS_ERR(skb)) {
1861 err = PTR_ERR(skb);
1862 goto done;
1864 __skb_queue_tail(TX_QUEUE(sk), skb);
1866 if (sk->sk_send_head == NULL)
1867 sk->sk_send_head = skb;
1869 } else {
1870 /* Segment SDU into multiples PDUs */
1871 err = l2cap_sar_segment_sdu(sk, msg, len);
1872 if (err < 0)
1873 goto done;
1876 if (pi->mode == L2CAP_MODE_STREAMING) {
1877 l2cap_streaming_send(sk);
1878 } else {
1879 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1880 pi->conn_state && L2CAP_CONN_WAIT_F) {
1881 err = len;
1882 break;
1884 err = l2cap_ertm_send(sk);
1887 if (err >= 0)
1888 err = len;
1889 break;
1891 default:
1892 BT_DBG("bad state %1.1x", pi->mode);
1893 err = -EBADFD;
1896 done:
1897 release_sock(sk);
1898 return err;
1901 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1903 struct sock *sk = sock->sk;
1905 lock_sock(sk);
1907 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1908 struct l2cap_conn_rsp rsp;
1909 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1910 u8 buf[128];
1912 sk->sk_state = BT_CONFIG;
1914 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1915 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1916 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1917 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1918 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1919 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1921 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1922 release_sock(sk);
1923 return 0;
1926 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1928 l2cap_build_conf_req(sk, buf), buf);
1929 l2cap_pi(sk)->num_conf_req++;
1931 release_sock(sk);
1932 return 0;
1935 release_sock(sk);
1937 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1940 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1942 struct sock *sk = sock->sk;
1943 struct l2cap_options opts;
1944 int len, err = 0;
1945 u32 opt;
1947 BT_DBG("sk %p", sk);
1949 lock_sock(sk);
1951 switch (optname) {
1952 case L2CAP_OPTIONS:
1953 if (sk->sk_state == BT_CONNECTED) {
1954 err = -EINVAL;
1955 break;
1958 opts.imtu = l2cap_pi(sk)->imtu;
1959 opts.omtu = l2cap_pi(sk)->omtu;
1960 opts.flush_to = l2cap_pi(sk)->flush_to;
1961 opts.mode = l2cap_pi(sk)->mode;
1962 opts.fcs = l2cap_pi(sk)->fcs;
1963 opts.max_tx = l2cap_pi(sk)->max_tx;
1964 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1966 len = min_t(unsigned int, sizeof(opts), optlen);
1967 if (copy_from_user((char *) &opts, optval, len)) {
1968 err = -EFAULT;
1969 break;
1972 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1973 err = -EINVAL;
1974 break;
1977 l2cap_pi(sk)->mode = opts.mode;
1978 switch (l2cap_pi(sk)->mode) {
1979 case L2CAP_MODE_BASIC:
1980 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1981 break;
1982 case L2CAP_MODE_ERTM:
1983 case L2CAP_MODE_STREAMING:
1984 if (!disable_ertm)
1985 break;
1986 /* fall through */
1987 default:
1988 err = -EINVAL;
1989 break;
1992 l2cap_pi(sk)->imtu = opts.imtu;
1993 l2cap_pi(sk)->omtu = opts.omtu;
1994 l2cap_pi(sk)->fcs = opts.fcs;
1995 l2cap_pi(sk)->max_tx = opts.max_tx;
1996 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1997 break;
1999 case L2CAP_LM:
2000 if (get_user(opt, (u32 __user *) optval)) {
2001 err = -EFAULT;
2002 break;
2005 if (opt & L2CAP_LM_AUTH)
2006 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2007 if (opt & L2CAP_LM_ENCRYPT)
2008 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2009 if (opt & L2CAP_LM_SECURE)
2010 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2012 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2013 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2014 break;
2016 default:
2017 err = -ENOPROTOOPT;
2018 break;
2021 release_sock(sk);
2022 return err;
2025 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2027 struct sock *sk = sock->sk;
2028 struct bt_security sec;
2029 int len, err = 0;
2030 u32 opt;
2032 BT_DBG("sk %p", sk);
2034 if (level == SOL_L2CAP)
2035 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2037 if (level != SOL_BLUETOOTH)
2038 return -ENOPROTOOPT;
2040 lock_sock(sk);
2042 switch (optname) {
2043 case BT_SECURITY:
2044 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2045 && sk->sk_type != SOCK_RAW) {
2046 err = -EINVAL;
2047 break;
2050 sec.level = BT_SECURITY_LOW;
2052 len = min_t(unsigned int, sizeof(sec), optlen);
2053 if (copy_from_user((char *) &sec, optval, len)) {
2054 err = -EFAULT;
2055 break;
2058 if (sec.level < BT_SECURITY_LOW ||
2059 sec.level > BT_SECURITY_HIGH) {
2060 err = -EINVAL;
2061 break;
2064 l2cap_pi(sk)->sec_level = sec.level;
2065 break;
2067 case BT_DEFER_SETUP:
2068 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2069 err = -EINVAL;
2070 break;
2073 if (get_user(opt, (u32 __user *) optval)) {
2074 err = -EFAULT;
2075 break;
2078 bt_sk(sk)->defer_setup = opt;
2079 break;
2081 default:
2082 err = -ENOPROTOOPT;
2083 break;
2086 release_sock(sk);
2087 return err;
2090 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2092 struct sock *sk = sock->sk;
2093 struct l2cap_options opts;
2094 struct l2cap_conninfo cinfo;
2095 int len, err = 0;
2096 u32 opt;
2098 BT_DBG("sk %p", sk);
2100 if (get_user(len, optlen))
2101 return -EFAULT;
2103 lock_sock(sk);
2105 switch (optname) {
2106 case L2CAP_OPTIONS:
2107 opts.imtu = l2cap_pi(sk)->imtu;
2108 opts.omtu = l2cap_pi(sk)->omtu;
2109 opts.flush_to = l2cap_pi(sk)->flush_to;
2110 opts.mode = l2cap_pi(sk)->mode;
2111 opts.fcs = l2cap_pi(sk)->fcs;
2112 opts.max_tx = l2cap_pi(sk)->max_tx;
2113 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2115 len = min_t(unsigned int, len, sizeof(opts));
2116 if (copy_to_user(optval, (char *) &opts, len))
2117 err = -EFAULT;
2119 break;
2121 case L2CAP_LM:
2122 switch (l2cap_pi(sk)->sec_level) {
2123 case BT_SECURITY_LOW:
2124 opt = L2CAP_LM_AUTH;
2125 break;
2126 case BT_SECURITY_MEDIUM:
2127 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2128 break;
2129 case BT_SECURITY_HIGH:
2130 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2131 L2CAP_LM_SECURE;
2132 break;
2133 default:
2134 opt = 0;
2135 break;
2138 if (l2cap_pi(sk)->role_switch)
2139 opt |= L2CAP_LM_MASTER;
2141 if (l2cap_pi(sk)->force_reliable)
2142 opt |= L2CAP_LM_RELIABLE;
2144 if (put_user(opt, (u32 __user *) optval))
2145 err = -EFAULT;
2146 break;
2148 case L2CAP_CONNINFO:
2149 if (sk->sk_state != BT_CONNECTED &&
2150 !(sk->sk_state == BT_CONNECT2 &&
2151 bt_sk(sk)->defer_setup)) {
2152 err = -ENOTCONN;
2153 break;
2156 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2157 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2159 len = min_t(unsigned int, len, sizeof(cinfo));
2160 if (copy_to_user(optval, (char *) &cinfo, len))
2161 err = -EFAULT;
2163 break;
2165 default:
2166 err = -ENOPROTOOPT;
2167 break;
2170 release_sock(sk);
2171 return err;
2174 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2176 struct sock *sk = sock->sk;
2177 struct bt_security sec;
2178 int len, err = 0;
2180 BT_DBG("sk %p", sk);
2182 if (level == SOL_L2CAP)
2183 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2185 if (level != SOL_BLUETOOTH)
2186 return -ENOPROTOOPT;
2188 if (get_user(len, optlen))
2189 return -EFAULT;
2191 lock_sock(sk);
2193 switch (optname) {
2194 case BT_SECURITY:
2195 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2196 && sk->sk_type != SOCK_RAW) {
2197 err = -EINVAL;
2198 break;
2201 sec.level = l2cap_pi(sk)->sec_level;
2203 len = min_t(unsigned int, len, sizeof(sec));
2204 if (copy_to_user(optval, (char *) &sec, len))
2205 err = -EFAULT;
2207 break;
2209 case BT_DEFER_SETUP:
2210 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2211 err = -EINVAL;
2212 break;
2215 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2216 err = -EFAULT;
2218 break;
2220 default:
2221 err = -ENOPROTOOPT;
2222 break;
2225 release_sock(sk);
2226 return err;
2229 static int l2cap_sock_shutdown(struct socket *sock, int how)
2231 struct sock *sk = sock->sk;
2232 int err = 0;
2234 BT_DBG("sock %p, sk %p", sock, sk);
2236 if (!sk)
2237 return 0;
2239 lock_sock(sk);
2240 if (!sk->sk_shutdown) {
2241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2242 err = __l2cap_wait_ack(sk);
2244 sk->sk_shutdown = SHUTDOWN_MASK;
2245 l2cap_sock_clear_timer(sk);
2246 __l2cap_sock_close(sk, 0);
2248 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2249 err = bt_sock_wait_state(sk, BT_CLOSED,
2250 sk->sk_lingertime);
2253 if (!err && sk->sk_err)
2254 err = -sk->sk_err;
2256 release_sock(sk);
2257 return err;
2260 static int l2cap_sock_release(struct socket *sock)
2262 struct sock *sk = sock->sk;
2263 int err;
2265 BT_DBG("sock %p, sk %p", sock, sk);
2267 if (!sk)
2268 return 0;
2270 err = l2cap_sock_shutdown(sock, 2);
2272 sock_orphan(sk);
2273 l2cap_sock_kill(sk);
2274 return err;
2277 static void l2cap_chan_ready(struct sock *sk)
2279 struct sock *parent = bt_sk(sk)->parent;
2281 BT_DBG("sk %p, parent %p", sk, parent);
2283 l2cap_pi(sk)->conf_state = 0;
2284 l2cap_sock_clear_timer(sk);
2286 if (!parent) {
2287 /* Outgoing channel.
2288 * Wake up socket sleeping on connect.
2290 sk->sk_state = BT_CONNECTED;
2291 sk->sk_state_change(sk);
2292 } else {
2293 /* Incoming channel.
2294 * Wake up socket sleeping on accept.
2296 parent->sk_data_ready(parent, 0);
2300 /* Copy frame to all raw sockets on that connection */
2301 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2303 struct l2cap_chan_list *l = &conn->chan_list;
2304 struct sk_buff *nskb;
2305 struct sock *sk;
2307 BT_DBG("conn %p", conn);
2309 read_lock(&l->lock);
2310 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2311 if (sk->sk_type != SOCK_RAW)
2312 continue;
2314 /* Don't send frame to the socket it came from */
2315 if (skb->sk == sk)
2316 continue;
2317 nskb = skb_clone(skb, GFP_ATOMIC);
2318 if (!nskb)
2319 continue;
2321 if (sock_queue_rcv_skb(sk, nskb))
2322 kfree_skb(nskb);
2324 read_unlock(&l->lock);
2327 /* ---- L2CAP signalling commands ---- */
2328 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2329 u8 code, u8 ident, u16 dlen, void *data)
2331 struct sk_buff *skb, **frag;
2332 struct l2cap_cmd_hdr *cmd;
2333 struct l2cap_hdr *lh;
2334 int len, count;
2336 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2337 conn, code, ident, dlen);
2339 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2340 count = min_t(unsigned int, conn->mtu, len);
2342 skb = bt_skb_alloc(count, GFP_ATOMIC);
2343 if (!skb)
2344 return NULL;
2346 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2347 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2348 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2350 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2351 cmd->code = code;
2352 cmd->ident = ident;
2353 cmd->len = cpu_to_le16(dlen);
2355 if (dlen) {
2356 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2357 memcpy(skb_put(skb, count), data, count);
2358 data += count;
2361 len -= skb->len;
2363 /* Continuation fragments (no L2CAP header) */
2364 frag = &skb_shinfo(skb)->frag_list;
2365 while (len) {
2366 count = min_t(unsigned int, conn->mtu, len);
2368 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2369 if (!*frag)
2370 goto fail;
2372 memcpy(skb_put(*frag, count), data, count);
2374 len -= count;
2375 data += count;
2377 frag = &(*frag)->next;
2380 return skb;
2382 fail:
2383 kfree_skb(skb);
2384 return NULL;
2387 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2389 struct l2cap_conf_opt *opt = *ptr;
2390 int len;
2392 len = L2CAP_CONF_OPT_SIZE + opt->len;
2393 *ptr += len;
2395 *type = opt->type;
2396 *olen = opt->len;
2398 switch (opt->len) {
2399 case 1:
2400 *val = *((u8 *) opt->val);
2401 break;
2403 case 2:
2404 *val = __le16_to_cpu(*((__le16 *) opt->val));
2405 break;
2407 case 4:
2408 *val = __le32_to_cpu(*((__le32 *) opt->val));
2409 break;
2411 default:
2412 *val = (unsigned long) opt->val;
2413 break;
2416 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2417 return len;
2420 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2422 struct l2cap_conf_opt *opt = *ptr;
2424 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2426 opt->type = type;
2427 opt->len = len;
2429 switch (len) {
2430 case 1:
2431 *((u8 *) opt->val) = val;
2432 break;
2434 case 2:
2435 *((__le16 *) opt->val) = cpu_to_le16(val);
2436 break;
2438 case 4:
2439 *((__le32 *) opt->val) = cpu_to_le32(val);
2440 break;
2442 default:
2443 memcpy(opt->val, (void *) val, len);
2444 break;
2447 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Delayed-ack timer: send a pending acknowledgement for received
 * I-frames.  Runs in timer (BH) context. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2459 static inline void l2cap_ertm_init(struct sock *sk)
2461 l2cap_pi(sk)->expected_ack_seq = 0;
2462 l2cap_pi(sk)->unacked_frames = 0;
2463 l2cap_pi(sk)->buffer_seq = 0;
2464 l2cap_pi(sk)->num_acked = 0;
2465 l2cap_pi(sk)->frames_sent = 0;
2467 setup_timer(&l2cap_pi(sk)->retrans_timer,
2468 l2cap_retrans_timeout, (unsigned long) sk);
2469 setup_timer(&l2cap_pi(sk)->monitor_timer,
2470 l2cap_monitor_timeout, (unsigned long) sk);
2471 setup_timer(&l2cap_pi(sk)->ack_timer,
2472 l2cap_ack_timeout, (unsigned long) sk);
2474 __skb_queue_head_init(SREJ_QUEUE(sk));
2475 __skb_queue_head_init(BUSY_QUEUE(sk));
2477 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2479 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2482 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2484 switch (mode) {
2485 case L2CAP_MODE_STREAMING:
2486 case L2CAP_MODE_ERTM:
2487 if (l2cap_mode_supported(mode, remote_feat_mask))
2488 return mode;
2489 /* fall through */
2490 default:
2491 return L2CAP_MODE_BASIC;
2495 static int l2cap_build_conf_req(struct sock *sk, void *data)
2497 struct l2cap_pinfo *pi = l2cap_pi(sk);
2498 struct l2cap_conf_req *req = data;
2499 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2500 void *ptr = req->data;
2502 BT_DBG("sk %p", sk);
2504 if (pi->num_conf_req || pi->num_conf_rsp)
2505 goto done;
2507 switch (pi->mode) {
2508 case L2CAP_MODE_STREAMING:
2509 case L2CAP_MODE_ERTM:
2510 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2511 break;
2513 /* fall through */
2514 default:
2515 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2516 break;
2519 done:
2520 switch (pi->mode) {
2521 case L2CAP_MODE_BASIC:
2522 if (pi->imtu != L2CAP_DEFAULT_MTU)
2523 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2525 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2526 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2527 break;
2529 rfc.mode = L2CAP_MODE_BASIC;
2530 rfc.txwin_size = 0;
2531 rfc.max_transmit = 0;
2532 rfc.retrans_timeout = 0;
2533 rfc.monitor_timeout = 0;
2534 rfc.max_pdu_size = 0;
2536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2537 (unsigned long) &rfc);
2538 break;
2540 case L2CAP_MODE_ERTM:
2541 rfc.mode = L2CAP_MODE_ERTM;
2542 rfc.txwin_size = pi->tx_win;
2543 rfc.max_transmit = pi->max_tx;
2544 rfc.retrans_timeout = 0;
2545 rfc.monitor_timeout = 0;
2546 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2547 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2548 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2551 (unsigned long) &rfc);
2553 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2554 break;
2556 if (pi->fcs == L2CAP_FCS_NONE ||
2557 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2558 pi->fcs = L2CAP_FCS_NONE;
2559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2561 break;
2563 case L2CAP_MODE_STREAMING:
2564 rfc.mode = L2CAP_MODE_STREAMING;
2565 rfc.txwin_size = 0;
2566 rfc.max_transmit = 0;
2567 rfc.retrans_timeout = 0;
2568 rfc.monitor_timeout = 0;
2569 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2570 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2571 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2573 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2574 (unsigned long) &rfc);
2576 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2577 break;
2579 if (pi->fcs == L2CAP_FCS_NONE ||
2580 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2581 pi->fcs = L2CAP_FCS_NONE;
2582 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2584 break;
2587 /* FIXME: Need actual value of the flush timeout */
2588 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2589 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2591 req->dcid = cpu_to_le16(pi->dcid);
2592 req->flags = cpu_to_le16(0);
2594 return ptr - data;
2597 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2599 struct l2cap_pinfo *pi = l2cap_pi(sk);
2600 struct l2cap_conf_rsp *rsp = data;
2601 void *ptr = rsp->data;
2602 void *req = pi->conf_req;
2603 int len = pi->conf_len;
2604 int type, hint, olen;
2605 unsigned long val;
2606 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2607 u16 mtu = L2CAP_DEFAULT_MTU;
2608 u16 result = L2CAP_CONF_SUCCESS;
2610 BT_DBG("sk %p", sk);
2612 while (len >= L2CAP_CONF_OPT_SIZE) {
2613 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2615 hint = type & L2CAP_CONF_HINT;
2616 type &= L2CAP_CONF_MASK;
2618 switch (type) {
2619 case L2CAP_CONF_MTU:
2620 mtu = val;
2621 break;
2623 case L2CAP_CONF_FLUSH_TO:
2624 pi->flush_to = val;
2625 break;
2627 case L2CAP_CONF_QOS:
2628 break;
2630 case L2CAP_CONF_RFC:
2631 if (olen == sizeof(rfc))
2632 memcpy(&rfc, (void *) val, olen);
2633 break;
2635 case L2CAP_CONF_FCS:
2636 if (val == L2CAP_FCS_NONE)
2637 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2639 break;
2641 default:
2642 if (hint)
2643 break;
2645 result = L2CAP_CONF_UNKNOWN;
2646 *((u8 *) ptr++) = type;
2647 break;
2651 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2652 goto done;
2654 switch (pi->mode) {
2655 case L2CAP_MODE_STREAMING:
2656 case L2CAP_MODE_ERTM:
2657 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2658 pi->mode = l2cap_select_mode(rfc.mode,
2659 pi->conn->feat_mask);
2660 break;
2663 if (pi->mode != rfc.mode)
2664 return -ECONNREFUSED;
2666 break;
2669 done:
2670 if (pi->mode != rfc.mode) {
2671 result = L2CAP_CONF_UNACCEPT;
2672 rfc.mode = pi->mode;
2674 if (pi->num_conf_rsp == 1)
2675 return -ECONNREFUSED;
2677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2678 sizeof(rfc), (unsigned long) &rfc);
2682 if (result == L2CAP_CONF_SUCCESS) {
2683 /* Configure output options and let the other side know
2684 * which ones we don't like. */
2686 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2687 result = L2CAP_CONF_UNACCEPT;
2688 else {
2689 pi->omtu = mtu;
2690 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2692 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2694 switch (rfc.mode) {
2695 case L2CAP_MODE_BASIC:
2696 pi->fcs = L2CAP_FCS_NONE;
2697 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2698 break;
2700 case L2CAP_MODE_ERTM:
2701 pi->remote_tx_win = rfc.txwin_size;
2702 pi->remote_max_tx = rfc.max_transmit;
2704 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2705 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2707 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2709 rfc.retrans_timeout =
2710 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2711 rfc.monitor_timeout =
2712 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2714 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2717 sizeof(rfc), (unsigned long) &rfc);
2719 break;
2721 case L2CAP_MODE_STREAMING:
2722 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2723 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2725 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2727 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2729 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2730 sizeof(rfc), (unsigned long) &rfc);
2732 break;
2734 default:
2735 result = L2CAP_CONF_UNACCEPT;
2737 memset(&rfc, 0, sizeof(rfc));
2738 rfc.mode = pi->mode;
2741 if (result == L2CAP_CONF_SUCCESS)
2742 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2744 rsp->scid = cpu_to_le16(pi->dcid);
2745 rsp->result = cpu_to_le16(result);
2746 rsp->flags = cpu_to_le16(0x0000);
2748 return ptr - data;
2751 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2753 struct l2cap_pinfo *pi = l2cap_pi(sk);
2754 struct l2cap_conf_req *req = data;
2755 void *ptr = req->data;
2756 int type, olen;
2757 unsigned long val;
2758 struct l2cap_conf_rfc rfc;
2760 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2762 while (len >= L2CAP_CONF_OPT_SIZE) {
2763 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2765 switch (type) {
2766 case L2CAP_CONF_MTU:
2767 if (val < L2CAP_DEFAULT_MIN_MTU) {
2768 *result = L2CAP_CONF_UNACCEPT;
2769 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2770 } else
2771 pi->imtu = val;
2772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2773 break;
2775 case L2CAP_CONF_FLUSH_TO:
2776 pi->flush_to = val;
2777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2778 2, pi->flush_to);
2779 break;
2781 case L2CAP_CONF_RFC:
2782 if (olen == sizeof(rfc))
2783 memcpy(&rfc, (void *)val, olen);
2785 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2786 rfc.mode != pi->mode)
2787 return -ECONNREFUSED;
2789 pi->fcs = 0;
2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2792 sizeof(rfc), (unsigned long) &rfc);
2793 break;
2797 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2798 return -ECONNREFUSED;
2800 pi->mode = rfc.mode;
2802 if (*result == L2CAP_CONF_SUCCESS) {
2803 switch (rfc.mode) {
2804 case L2CAP_MODE_ERTM:
2805 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2806 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2807 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2808 break;
2809 case L2CAP_MODE_STREAMING:
2810 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2814 req->dcid = cpu_to_le16(pi->dcid);
2815 req->flags = cpu_to_le16(0x0000);
2817 return ptr - data;
2820 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2822 struct l2cap_conf_rsp *rsp = data;
2823 void *ptr = rsp->data;
2825 BT_DBG("sk %p", sk);
2827 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2828 rsp->result = cpu_to_le16(result);
2829 rsp->flags = cpu_to_le16(flags);
2831 return ptr - data;
2834 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2836 struct l2cap_pinfo *pi = l2cap_pi(sk);
2837 int type, olen;
2838 unsigned long val;
2839 struct l2cap_conf_rfc rfc;
2841 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2843 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2844 return;
2846 while (len >= L2CAP_CONF_OPT_SIZE) {
2847 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2849 switch (type) {
2850 case L2CAP_CONF_RFC:
2851 if (olen == sizeof(rfc))
2852 memcpy(&rfc, (void *)val, olen);
2853 goto done;
2857 done:
2858 switch (rfc.mode) {
2859 case L2CAP_MODE_ERTM:
2860 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2861 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2862 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2863 break;
2864 case L2CAP_MODE_STREAMING:
2865 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2869 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2871 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2873 if (rej->reason != 0x0000)
2874 return 0;
2876 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2877 cmd->ident == conn->info_ident) {
2878 del_timer(&conn->info_timer);
2880 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2881 conn->info_ident = 0;
2883 l2cap_conn_start(conn);
2886 return 0;
2889 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2891 struct l2cap_chan_list *list = &conn->chan_list;
2892 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2893 struct l2cap_conn_rsp rsp;
2894 struct sock *parent, *uninitialized_var(sk);
2895 int result, status = L2CAP_CS_NO_INFO;
2897 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2898 __le16 psm = req->psm;
2900 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2902 /* Check if we have socket listening on psm */
2903 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2904 if (!parent) {
2905 result = L2CAP_CR_BAD_PSM;
2906 goto sendresp;
2909 /* Check if the ACL is secure enough (if not SDP) */
2910 if (psm != cpu_to_le16(0x0001) &&
2911 !hci_conn_check_link_mode(conn->hcon)) {
2912 conn->disc_reason = 0x05;
2913 result = L2CAP_CR_SEC_BLOCK;
2914 goto response;
2917 result = L2CAP_CR_NO_MEM;
2919 /* Check for backlog size */
2920 if (sk_acceptq_is_full(parent)) {
2921 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2922 goto response;
2925 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2926 if (!sk)
2927 goto response;
2929 write_lock_bh(&list->lock);
2931 /* Check if we already have channel with that dcid */
2932 if (__l2cap_get_chan_by_dcid(list, scid)) {
2933 write_unlock_bh(&list->lock);
2934 sock_set_flag(sk, SOCK_ZAPPED);
2935 l2cap_sock_kill(sk);
2936 goto response;
2939 hci_conn_hold(conn->hcon);
2941 l2cap_sock_init(sk, parent);
2942 bacpy(&bt_sk(sk)->src, conn->src);
2943 bacpy(&bt_sk(sk)->dst, conn->dst);
2944 l2cap_pi(sk)->psm = psm;
2945 l2cap_pi(sk)->dcid = scid;
2947 __l2cap_chan_add(conn, sk, parent);
2948 dcid = l2cap_pi(sk)->scid;
2950 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2952 l2cap_pi(sk)->ident = cmd->ident;
2954 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2955 if (l2cap_check_security(sk)) {
2956 if (bt_sk(sk)->defer_setup) {
2957 sk->sk_state = BT_CONNECT2;
2958 result = L2CAP_CR_PEND;
2959 status = L2CAP_CS_AUTHOR_PEND;
2960 parent->sk_data_ready(parent, 0);
2961 } else {
2962 sk->sk_state = BT_CONFIG;
2963 result = L2CAP_CR_SUCCESS;
2964 status = L2CAP_CS_NO_INFO;
2966 } else {
2967 sk->sk_state = BT_CONNECT2;
2968 result = L2CAP_CR_PEND;
2969 status = L2CAP_CS_AUTHEN_PEND;
2971 } else {
2972 sk->sk_state = BT_CONNECT2;
2973 result = L2CAP_CR_PEND;
2974 status = L2CAP_CS_NO_INFO;
2977 write_unlock_bh(&list->lock);
2979 response:
2980 bh_unlock_sock(parent);
2982 sendresp:
2983 rsp.scid = cpu_to_le16(scid);
2984 rsp.dcid = cpu_to_le16(dcid);
2985 rsp.result = cpu_to_le16(result);
2986 rsp.status = cpu_to_le16(status);
2987 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2989 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2990 struct l2cap_info_req info;
2991 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2993 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2994 conn->info_ident = l2cap_get_ident(conn);
2996 mod_timer(&conn->info_timer, jiffies +
2997 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2999 l2cap_send_cmd(conn, conn->info_ident,
3000 L2CAP_INFO_REQ, sizeof(info), &info);
3003 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3004 result == L2CAP_CR_SUCCESS) {
3005 u8 buf[128];
3006 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3007 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3008 l2cap_build_conf_req(sk, buf), buf);
3009 l2cap_pi(sk)->num_conf_req++;
3012 return 0;
3015 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3017 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3018 u16 scid, dcid, result, status;
3019 struct sock *sk;
3020 u8 req[128];
3022 scid = __le16_to_cpu(rsp->scid);
3023 dcid = __le16_to_cpu(rsp->dcid);
3024 result = __le16_to_cpu(rsp->result);
3025 status = __le16_to_cpu(rsp->status);
3027 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
3029 if (scid) {
3030 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3031 if (!sk)
3032 return -EFAULT;
3033 } else {
3034 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3035 if (!sk)
3036 return -EFAULT;
3039 switch (result) {
3040 case L2CAP_CR_SUCCESS:
3041 sk->sk_state = BT_CONFIG;
3042 l2cap_pi(sk)->ident = 0;
3043 l2cap_pi(sk)->dcid = dcid;
3044 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3046 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3047 break;
3049 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3051 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3052 l2cap_build_conf_req(sk, req), req);
3053 l2cap_pi(sk)->num_conf_req++;
3054 break;
3056 case L2CAP_CR_PEND:
3057 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3058 break;
3060 default:
3061 l2cap_chan_del(sk, ECONNREFUSED);
3062 break;
3065 bh_unlock_sock(sk);
3066 return 0;
3069 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3071 /* FCS is enabled only in ERTM or streaming mode, if one or both
3072 * sides request it.
3074 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3075 pi->fcs = L2CAP_FCS_NONE;
3076 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3077 pi->fcs = L2CAP_FCS_CRC16;
3080 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3082 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3083 u16 dcid, flags;
3084 u8 rsp[64];
3085 struct sock *sk;
3086 int len;
3088 dcid = __le16_to_cpu(req->dcid);
3089 flags = __le16_to_cpu(req->flags);
3091 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3093 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3094 if (!sk)
3095 return -ENOENT;
3097 if (sk->sk_state == BT_DISCONN)
3098 goto unlock;
3100 /* Reject if config buffer is too small. */
3101 len = cmd_len - sizeof(*req);
3102 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3103 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3104 l2cap_build_conf_rsp(sk, rsp,
3105 L2CAP_CONF_REJECT, flags), rsp);
3106 goto unlock;
3109 /* Store config. */
3110 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3111 l2cap_pi(sk)->conf_len += len;
3113 if (flags & 0x0001) {
3114 /* Incomplete config. Send empty response. */
3115 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3116 l2cap_build_conf_rsp(sk, rsp,
3117 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3118 goto unlock;
3121 /* Complete config. */
3122 len = l2cap_parse_conf_req(sk, rsp);
3123 if (len < 0) {
3124 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3125 goto unlock;
3128 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3129 l2cap_pi(sk)->num_conf_rsp++;
3131 /* Reset config buffer. */
3132 l2cap_pi(sk)->conf_len = 0;
3134 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3135 goto unlock;
3137 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3138 set_default_fcs(l2cap_pi(sk));
3140 sk->sk_state = BT_CONNECTED;
3142 l2cap_pi(sk)->next_tx_seq = 0;
3143 l2cap_pi(sk)->expected_tx_seq = 0;
3144 __skb_queue_head_init(TX_QUEUE(sk));
3145 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3146 l2cap_ertm_init(sk);
3148 l2cap_chan_ready(sk);
3149 goto unlock;
3152 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3153 u8 buf[64];
3154 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3155 l2cap_build_conf_req(sk, buf), buf);
3156 l2cap_pi(sk)->num_conf_req++;
3159 unlock:
3160 bh_unlock_sock(sk);
3161 return 0;
3164 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3166 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3167 u16 scid, flags, result;
3168 struct sock *sk;
3169 int len = cmd->len - sizeof(*rsp);
3171 scid = __le16_to_cpu(rsp->scid);
3172 flags = __le16_to_cpu(rsp->flags);
3173 result = __le16_to_cpu(rsp->result);
3175 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3176 scid, flags, result);
3178 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3179 if (!sk)
3180 return 0;
3182 switch (result) {
3183 case L2CAP_CONF_SUCCESS:
3184 l2cap_conf_rfc_get(sk, rsp->data, len);
3185 break;
3187 case L2CAP_CONF_UNACCEPT:
3188 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3189 char req[64];
3191 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3192 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3193 goto done;
3196 /* throw out any old stored conf requests */
3197 result = L2CAP_CONF_SUCCESS;
3198 len = l2cap_parse_conf_rsp(sk, rsp->data,
3199 len, req, &result);
3200 if (len < 0) {
3201 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3202 goto done;
3205 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3206 L2CAP_CONF_REQ, len, req);
3207 l2cap_pi(sk)->num_conf_req++;
3208 if (result != L2CAP_CONF_SUCCESS)
3209 goto done;
3210 break;
3213 default:
3214 sk->sk_err = ECONNRESET;
3215 l2cap_sock_set_timer(sk, HZ * 5);
3216 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3217 goto done;
3220 if (flags & 0x01)
3221 goto done;
3223 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3225 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3226 set_default_fcs(l2cap_pi(sk));
3228 sk->sk_state = BT_CONNECTED;
3229 l2cap_pi(sk)->next_tx_seq = 0;
3230 l2cap_pi(sk)->expected_tx_seq = 0;
3231 __skb_queue_head_init(TX_QUEUE(sk));
3232 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3233 l2cap_ertm_init(sk);
3235 l2cap_chan_ready(sk);
3238 done:
3239 bh_unlock_sock(sk);
3240 return 0;
3243 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3245 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3246 struct l2cap_disconn_rsp rsp;
3247 u16 dcid, scid;
3248 struct sock *sk;
3250 scid = __le16_to_cpu(req->scid);
3251 dcid = __le16_to_cpu(req->dcid);
3253 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3255 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3256 if (!sk)
3257 return 0;
3259 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3260 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3261 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3263 sk->sk_shutdown = SHUTDOWN_MASK;
3265 l2cap_chan_del(sk, ECONNRESET);
3266 bh_unlock_sock(sk);
3268 l2cap_sock_kill(sk);
3269 return 0;
3272 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3274 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3275 u16 dcid, scid;
3276 struct sock *sk;
3278 scid = __le16_to_cpu(rsp->scid);
3279 dcid = __le16_to_cpu(rsp->dcid);
3281 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3283 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3284 if (!sk)
3285 return 0;
3287 l2cap_chan_del(sk, 0);
3288 bh_unlock_sock(sk);
3290 l2cap_sock_kill(sk);
3291 return 0;
3294 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3296 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3297 u16 type;
3299 type = __le16_to_cpu(req->type);
3301 BT_DBG("type 0x%4.4x", type);
3303 if (type == L2CAP_IT_FEAT_MASK) {
3304 u8 buf[8];
3305 u32 feat_mask = l2cap_feat_mask;
3306 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3307 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3308 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3309 if (!disable_ertm)
3310 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3311 | L2CAP_FEAT_FCS;
3312 put_unaligned_le32(feat_mask, rsp->data);
3313 l2cap_send_cmd(conn, cmd->ident,
3314 L2CAP_INFO_RSP, sizeof(buf), buf);
3315 } else if (type == L2CAP_IT_FIXED_CHAN) {
3316 u8 buf[12];
3317 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3318 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3319 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3320 memcpy(buf + 4, l2cap_fixed_chan, 8);
3321 l2cap_send_cmd(conn, cmd->ident,
3322 L2CAP_INFO_RSP, sizeof(buf), buf);
3323 } else {
3324 struct l2cap_info_rsp rsp;
3325 rsp.type = cpu_to_le16(type);
3326 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3327 l2cap_send_cmd(conn, cmd->ident,
3328 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3331 return 0;
3334 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3336 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3337 u16 type, result;
3339 type = __le16_to_cpu(rsp->type);
3340 result = __le16_to_cpu(rsp->result);
3342 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3344 del_timer(&conn->info_timer);
3346 if (result != L2CAP_IR_SUCCESS) {
3347 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3348 conn->info_ident = 0;
3350 l2cap_conn_start(conn);
3352 return 0;
3355 if (type == L2CAP_IT_FEAT_MASK) {
3356 conn->feat_mask = get_unaligned_le32(rsp->data);
3358 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3359 struct l2cap_info_req req;
3360 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3362 conn->info_ident = l2cap_get_ident(conn);
3364 l2cap_send_cmd(conn, conn->info_ident,
3365 L2CAP_INFO_REQ, sizeof(req), &req);
3366 } else {
3367 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3368 conn->info_ident = 0;
3370 l2cap_conn_start(conn);
3372 } else if (type == L2CAP_IT_FIXED_CHAN) {
3373 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3374 conn->info_ident = 0;
3376 l2cap_conn_start(conn);
3379 return 0;
3382 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3384 u8 *data = skb->data;
3385 int len = skb->len;
3386 struct l2cap_cmd_hdr cmd;
3387 int err = 0;
3389 l2cap_raw_recv(conn, skb);
3391 while (len >= L2CAP_CMD_HDR_SIZE) {
3392 u16 cmd_len;
3393 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3394 data += L2CAP_CMD_HDR_SIZE;
3395 len -= L2CAP_CMD_HDR_SIZE;
3397 cmd_len = le16_to_cpu(cmd.len);
3399 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3401 if (cmd_len > len || !cmd.ident) {
3402 BT_DBG("corrupted command");
3403 break;
3406 switch (cmd.code) {
3407 case L2CAP_COMMAND_REJ:
3408 l2cap_command_rej(conn, &cmd, data);
3409 break;
3411 case L2CAP_CONN_REQ:
3412 err = l2cap_connect_req(conn, &cmd, data);
3413 break;
3415 case L2CAP_CONN_RSP:
3416 err = l2cap_connect_rsp(conn, &cmd, data);
3417 break;
3419 case L2CAP_CONF_REQ:
3420 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3421 break;
3423 case L2CAP_CONF_RSP:
3424 err = l2cap_config_rsp(conn, &cmd, data);
3425 break;
3427 case L2CAP_DISCONN_REQ:
3428 err = l2cap_disconnect_req(conn, &cmd, data);
3429 break;
3431 case L2CAP_DISCONN_RSP:
3432 err = l2cap_disconnect_rsp(conn, &cmd, data);
3433 break;
3435 case L2CAP_ECHO_REQ:
3436 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3437 break;
3439 case L2CAP_ECHO_RSP:
3440 break;
3442 case L2CAP_INFO_REQ:
3443 err = l2cap_information_req(conn, &cmd, data);
3444 break;
3446 case L2CAP_INFO_RSP:
3447 err = l2cap_information_rsp(conn, &cmd, data);
3448 break;
3450 default:
3451 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3452 err = -EINVAL;
3453 break;
3456 if (err) {
3457 struct l2cap_cmd_rej rej;
3458 BT_DBG("error %d", err);
3460 /* FIXME: Map err to a valid reason */
3461 rej.reason = cpu_to_le16(0);
3462 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3465 data += cmd_len;
3466 len -= cmd_len;
3469 kfree_skb(skb);
3472 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3474 u16 our_fcs, rcv_fcs;
3475 int hdr_size = L2CAP_HDR_SIZE + 2;
3477 if (pi->fcs == L2CAP_FCS_CRC16) {
3478 skb_trim(skb, skb->len - 2);
3479 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3480 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3482 if (our_fcs != rcv_fcs)
3483 return -EBADMSG;
3485 return 0;
3488 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3490 struct l2cap_pinfo *pi = l2cap_pi(sk);
3491 u16 control = 0;
3493 pi->frames_sent = 0;
3495 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3497 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3498 control |= L2CAP_SUPER_RCV_NOT_READY;
3499 l2cap_send_sframe(pi, control);
3500 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3503 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3504 l2cap_retransmit_frames(sk);
3506 l2cap_ertm_send(sk);
3508 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3509 pi->frames_sent == 0) {
3510 control |= L2CAP_SUPER_RCV_READY;
3511 l2cap_send_sframe(pi, control);
3515 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3517 struct sk_buff *next_skb;
3518 struct l2cap_pinfo *pi = l2cap_pi(sk);
3519 int tx_seq_offset, next_tx_seq_offset;
3521 bt_cb(skb)->tx_seq = tx_seq;
3522 bt_cb(skb)->sar = sar;
3524 next_skb = skb_peek(SREJ_QUEUE(sk));
3525 if (!next_skb) {
3526 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3527 return 0;
3530 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3531 if (tx_seq_offset < 0)
3532 tx_seq_offset += 64;
3534 do {
3535 if (bt_cb(next_skb)->tx_seq == tx_seq)
3536 return -EINVAL;
3538 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3539 pi->buffer_seq) % 64;
3540 if (next_tx_seq_offset < 0)
3541 next_tx_seq_offset += 64;
3543 if (next_tx_seq_offset > tx_seq_offset) {
3544 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3545 return 0;
3548 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3549 break;
3551 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3553 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3555 return 0;
3558 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3560 struct l2cap_pinfo *pi = l2cap_pi(sk);
3561 struct sk_buff *_skb;
3562 int err;
3564 switch (control & L2CAP_CTRL_SAR) {
3565 case L2CAP_SDU_UNSEGMENTED:
3566 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3567 goto drop;
3569 err = sock_queue_rcv_skb(sk, skb);
3570 if (!err)
3571 return err;
3573 break;
3575 case L2CAP_SDU_START:
3576 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3577 goto drop;
3579 pi->sdu_len = get_unaligned_le16(skb->data);
3581 if (pi->sdu_len > pi->imtu)
3582 goto disconnect;
3584 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3585 if (!pi->sdu)
3586 return -ENOMEM;
3588 /* pull sdu_len bytes only after alloc, because of Local Busy
3589 * condition we have to be sure that this will be executed
3590 * only once, i.e., when alloc does not fail */
3591 skb_pull(skb, 2);
3593 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3595 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3596 pi->partial_sdu_len = skb->len;
3597 break;
3599 case L2CAP_SDU_CONTINUE:
3600 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3601 goto disconnect;
3603 if (!pi->sdu)
3604 goto disconnect;
3606 pi->partial_sdu_len += skb->len;
3607 if (pi->partial_sdu_len > pi->sdu_len)
3608 goto drop;
3610 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3612 break;
3614 case L2CAP_SDU_END:
3615 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3616 goto disconnect;
3618 if (!pi->sdu)
3619 goto disconnect;
3621 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3622 pi->partial_sdu_len += skb->len;
3624 if (pi->partial_sdu_len > pi->imtu)
3625 goto drop;
3627 if (pi->partial_sdu_len != pi->sdu_len)
3628 goto drop;
3630 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3633 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3634 if (!_skb) {
3635 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3636 return -ENOMEM;
3639 err = sock_queue_rcv_skb(sk, _skb);
3640 if (err < 0) {
3641 kfree_skb(_skb);
3642 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3643 return err;
3646 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3647 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3649 kfree_skb(pi->sdu);
3650 break;
3653 kfree_skb(skb);
3654 return 0;
3656 drop:
3657 kfree_skb(pi->sdu);
3658 pi->sdu = NULL;
3660 disconnect:
3661 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3662 kfree_skb(skb);
3663 return 0;
3666 static int l2cap_try_push_rx_skb(struct sock *sk)
3668 struct l2cap_pinfo *pi = l2cap_pi(sk);
3669 struct sk_buff *skb;
3670 u16 control;
3671 int err;
3673 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3674 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3675 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3676 if (err < 0) {
3677 skb_queue_head(BUSY_QUEUE(sk), skb);
3678 return -EBUSY;
3681 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3684 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3685 goto done;
3687 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3688 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3689 l2cap_send_sframe(pi, control);
3690 l2cap_pi(sk)->retry_count = 1;
3692 del_timer(&pi->retrans_timer);
3693 __mod_monitor_timer();
3695 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3697 done:
3698 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3699 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3701 BT_DBG("sk %p, Exit local busy", sk);
3703 return 0;
3706 static void l2cap_busy_work(struct work_struct *work)
3708 DECLARE_WAITQUEUE(wait, current);
3709 struct l2cap_pinfo *pi =
3710 container_of(work, struct l2cap_pinfo, busy_work);
3711 struct sock *sk = (struct sock *)pi;
3712 int n_tries = 0, timeo = HZ/5, err;
3713 struct sk_buff *skb;
3715 lock_sock(sk);
3717 add_wait_queue(sk_sleep(sk), &wait);
3718 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3719 set_current_state(TASK_INTERRUPTIBLE);
3721 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3722 err = -EBUSY;
3723 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3724 break;
3727 if (!timeo)
3728 timeo = HZ/5;
3730 if (signal_pending(current)) {
3731 err = sock_intr_errno(timeo);
3732 break;
3735 release_sock(sk);
3736 timeo = schedule_timeout(timeo);
3737 lock_sock(sk);
3739 err = sock_error(sk);
3740 if (err)
3741 break;
3743 if (l2cap_try_push_rx_skb(sk) == 0)
3744 break;
3747 set_current_state(TASK_RUNNING);
3748 remove_wait_queue(sk_sleep(sk), &wait);
3750 release_sock(sk);
3753 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3755 struct l2cap_pinfo *pi = l2cap_pi(sk);
3756 int sctrl, err;
3758 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3759 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3760 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3761 return l2cap_try_push_rx_skb(sk);
3766 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3767 if (err >= 0) {
3768 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3769 return err;
3772 /* Busy Condition */
3773 BT_DBG("sk %p, Enter local busy", sk);
3775 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3776 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3777 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3779 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3780 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3781 l2cap_send_sframe(pi, sctrl);
3783 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3785 del_timer(&pi->ack_timer);
3787 queue_work(_busy_wq, &pi->busy_work);
3789 return err;
3792 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3794 struct l2cap_pinfo *pi = l2cap_pi(sk);
3795 struct sk_buff *_skb;
3796 int err = -EINVAL;
3799 * TODO: We have to notify the userland if some data is lost with the
3800 * Streaming Mode.
3803 switch (control & L2CAP_CTRL_SAR) {
3804 case L2CAP_SDU_UNSEGMENTED:
3805 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3806 kfree_skb(pi->sdu);
3807 break;
3810 err = sock_queue_rcv_skb(sk, skb);
3811 if (!err)
3812 return 0;
3814 break;
3816 case L2CAP_SDU_START:
3817 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3818 kfree_skb(pi->sdu);
3819 break;
3822 pi->sdu_len = get_unaligned_le16(skb->data);
3823 skb_pull(skb, 2);
3825 if (pi->sdu_len > pi->imtu) {
3826 err = -EMSGSIZE;
3827 break;
3830 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3831 if (!pi->sdu) {
3832 err = -ENOMEM;
3833 break;
3836 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3838 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3839 pi->partial_sdu_len = skb->len;
3840 err = 0;
3841 break;
3843 case L2CAP_SDU_CONTINUE:
3844 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3845 break;
3847 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3849 pi->partial_sdu_len += skb->len;
3850 if (pi->partial_sdu_len > pi->sdu_len)
3851 kfree_skb(pi->sdu);
3852 else
3853 err = 0;
3855 break;
3857 case L2CAP_SDU_END:
3858 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3859 break;
3861 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3863 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3864 pi->partial_sdu_len += skb->len;
3866 if (pi->partial_sdu_len > pi->imtu)
3867 goto drop;
3869 if (pi->partial_sdu_len == pi->sdu_len) {
3870 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3871 err = sock_queue_rcv_skb(sk, _skb);
3872 if (err < 0)
3873 kfree_skb(_skb);
3875 err = 0;
3877 drop:
3878 kfree_skb(pi->sdu);
3879 break;
3882 kfree_skb(skb);
3883 return err;
3886 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3888 struct sk_buff *skb;
3889 u16 control;
3891 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3892 if (bt_cb(skb)->tx_seq != tx_seq)
3893 break;
3895 skb = skb_dequeue(SREJ_QUEUE(sk));
3896 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3897 l2cap_ertm_reassembly_sdu(sk, skb, control);
3898 l2cap_pi(sk)->buffer_seq_srej =
3899 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3900 tx_seq = (tx_seq + 1) % 64;
3904 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3906 struct l2cap_pinfo *pi = l2cap_pi(sk);
3907 struct srej_list *l, *tmp;
3908 u16 control;
3910 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3911 if (l->tx_seq == tx_seq) {
3912 list_del(&l->list);
3913 kfree(l);
3914 return;
3916 control = L2CAP_SUPER_SELECT_REJECT;
3917 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3918 l2cap_send_sframe(pi, control);
3919 list_del(&l->list);
3920 list_add_tail(&l->list, SREJ_LIST(sk));
3924 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3927 struct srej_list *new;
3928 u16 control;
3930 while (tx_seq != pi->expected_tx_seq) {
3931 control = L2CAP_SUPER_SELECT_REJECT;
3932 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3933 l2cap_send_sframe(pi, control);
3935 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3936 new->tx_seq = pi->expected_tx_seq;
3937 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3938 list_add_tail(&new->list, SREJ_LIST(sk));
3940 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/*
 * ERTM receive path for a single I-frame.
 *
 * Handles, in order: the F-bit answer to a pending poll (WAIT_F),
 * acknowledgement bookkeeping via req_seq, in-sequence delivery, and
 * SREJ-based recovery for out-of-sequence frames.  Always consumes or
 * queues @skb; returns 0 in all visible paths.
 *
 * NOTE(review): closing-brace-only lines appear stripped from this
 * extraction; code left byte-identical.
 */
3943 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3945 struct l2cap_pinfo *pi = l2cap_pi(sk);
3946 u8 tx_seq = __get_txseq(rx_control);
3947 u8 req_seq = __get_reqseq(rx_control);
3948 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3949 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames so the peer's window keeps moving. */
3950 int num_to_ack = (pi->tx_win/6) + 1;
3951 int err = 0;
3953 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3954 rx_control);
/* F-bit set while waiting for a poll answer: stop the monitor timer. */
3956 if (L2CAP_CTRL_FINAL & rx_control &&
3957 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3958 del_timer(&pi->monitor_timer);
3959 if (pi->unacked_frames > 0)
3960 __mod_retrans_timer();
3961 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* req_seq acknowledges our transmitted frames up to that number. */
3964 pi->expected_ack_seq = req_seq;
3965 l2cap_drop_acked_frames(sk);
3967 if (tx_seq == pi->expected_tx_seq)
3968 goto expected;
/* Distance of tx_seq from buffer_seq in the modulo-64 sequence space. */
3970 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3971 if (tx_seq_offset < 0)
3972 tx_seq_offset += 64;
3974 /* invalid tx_seq */
3975 if (tx_seq_offset >= pi->tx_win) {
3976 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3977 goto drop;
3980 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3981 goto drop;
/* Already in SREJ recovery: match against the outstanding SREJ list. */
3983 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3984 struct srej_list *first;
3986 first = list_first_entry(SREJ_LIST(sk),
3987 struct srej_list, list);
3988 if (tx_seq == first->tx_seq) {
/* Oldest requested frame arrived: buffer it and try to close the gap. */
3989 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3990 l2cap_check_srej_gap(sk, tx_seq);
3992 list_del(&first->list);
3993 kfree(first);
/* All requested frames received: leave SREJ_SENT state. */
3995 if (list_empty(SREJ_LIST(sk))) {
3996 pi->buffer_seq = pi->buffer_seq_srej;
3997 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3998 l2cap_send_ack(pi);
3999 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4001 } else {
4002 struct srej_list *l;
4004 /* duplicated tx_seq */
4005 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4006 goto drop;
4008 list_for_each_entry(l, SREJ_LIST(sk), list) {
4009 if (l->tx_seq == tx_seq) {
4010 l2cap_resend_srejframe(sk, tx_seq);
4011 return 0;
/* New hole beyond the known ones: request it too. */
4014 l2cap_send_srejframe(sk, tx_seq);
4016 } else {
/* First out-of-sequence frame: enter SREJ recovery. */
4017 expected_tx_seq_offset =
4018 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4019 if (expected_tx_seq_offset < 0)
4020 expected_tx_seq_offset += 64;
4022 /* duplicated tx_seq */
4023 if (tx_seq_offset < expected_tx_seq_offset)
4024 goto drop;
4026 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4028 BT_DBG("sk %p, Enter SREJ", sk);
4030 INIT_LIST_HEAD(SREJ_LIST(sk));
4031 pi->buffer_seq_srej = pi->buffer_seq;
4033 __skb_queue_head_init(SREJ_QUEUE(sk));
4034 __skb_queue_head_init(BUSY_QUEUE(sk));
4035 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4037 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4039 l2cap_send_srejframe(sk, tx_seq);
4041 del_timer(&pi->ack_timer);
4043 return 0;
/* In-sequence frame: deliver (or buffer during SREJ recovery). */
4045 expected:
4046 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4048 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4049 bt_cb(skb)->tx_seq = tx_seq;
4050 bt_cb(skb)->sar = sar;
4051 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4052 return 0;
4055 err = l2cap_push_rx_skb(sk, skb, rx_control);
4056 if (err < 0)
4057 return 0;
4059 if (rx_control & L2CAP_CTRL_FINAL) {
4060 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4061 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4062 else
4063 l2cap_retransmit_frames(sk);
4066 __mod_ack_timer();
4068 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4069 if (pi->num_acked == num_to_ack - 1)
4070 l2cap_send_ack(pi);
4072 return 0;
4074 drop:
4075 kfree_skb(skb);
4076 return 0;
/*
 * Handle a Receiver Ready (RR) S-frame: acknowledge our transmitted
 * frames up to req_seq, then react to the P/F bits — answer a poll,
 * process a final bit (possibly triggering retransmission), or simply
 * resume sending now that the peer is no longer busy.
 */
4079 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4081 struct l2cap_pinfo *pi = l2cap_pi(sk);
4083 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4084 rx_control);
4086 pi->expected_ack_seq = __get_reqseq(rx_control);
4087 l2cap_drop_acked_frames(sk);
/* Poll (P-bit): we must answer with the F-bit set. */
4089 if (rx_control & L2CAP_CTRL_POLL) {
4090 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4091 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4092 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4093 (pi->unacked_frames > 0))
4094 __mod_retrans_timer();
4096 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4097 l2cap_send_srejtail(sk);
4098 } else {
4099 l2cap_send_i_or_rr_or_rnr(sk);
/* Final (F-bit): answer to our earlier poll. */
4102 } else if (rx_control & L2CAP_CTRL_FINAL) {
4103 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4105 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4106 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4107 else
4108 l2cap_retransmit_frames(sk);
/* Plain RR: peer ready again, resume/ack as appropriate. */
4110 } else {
4111 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4112 (pi->unacked_frames > 0))
4113 __mod_retrans_timer();
4115 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4116 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4117 l2cap_send_ack(pi);
4118 } else {
4119 l2cap_ertm_send(sk);
/*
 * Handle a Reject (REJ) S-frame: the peer asks for retransmission of
 * everything from req_seq onward.  Frames below req_seq are treated as
 * acknowledged; REJ_ACT suppresses a duplicate retransmission when the
 * F-bit answers a poll we already acted on.
 */
4124 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4126 struct l2cap_pinfo *pi = l2cap_pi(sk);
4127 u8 tx_seq = __get_reqseq(rx_control);
4129 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4131 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4133 pi->expected_ack_seq = tx_seq;
4134 l2cap_drop_acked_frames(sk);
4136 if (rx_control & L2CAP_CTRL_FINAL) {
4137 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4138 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4139 else
4140 l2cap_retransmit_frames(sk);
4141 } else {
4142 l2cap_retransmit_frames(sk);
/* Remember we already retransmitted, so the F-bit reply is a no-op. */
4144 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4145 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject (SREJ) S-frame: retransmit the single
 * frame with sequence number req_seq.  The P/F-bit handling mirrors
 * the REJ case, with SREJ_ACT/srej_save_reqseq guarding against a
 * duplicate retransmission when the F-bit answers our poll.
 */
4148 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4150 struct l2cap_pinfo *pi = l2cap_pi(sk);
4151 u8 tx_seq = __get_reqseq(rx_control);
4153 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4155 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Poll: SREJ with P-bit also acknowledges up to tx_seq. */
4157 if (rx_control & L2CAP_CTRL_POLL) {
4158 pi->expected_ack_seq = tx_seq;
4159 l2cap_drop_acked_frames(sk);
4161 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4162 l2cap_retransmit_one_frame(sk, tx_seq);
4164 l2cap_ertm_send(sk);
4166 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4167 pi->srej_save_reqseq = tx_seq;
4168 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4170 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Skip the retransmit if we already did it for this req_seq. */
4171 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4172 pi->srej_save_reqseq == tx_seq)
4173 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4174 else
4175 l2cap_retransmit_one_frame(sk, tx_seq);
4176 } else {
4177 l2cap_retransmit_one_frame(sk, tx_seq);
4178 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4179 pi->srej_save_reqseq = tx_seq;
4180 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy,
 * acknowledge up to req_seq, stop our retransmission timer (unless we
 * are mid-SREJ recovery), and answer a poll if the P-bit is set.
 */
4185 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4187 struct l2cap_pinfo *pi = l2cap_pi(sk);
4188 u8 tx_seq = __get_reqseq(rx_control);
4190 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4192 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4193 pi->expected_ack_seq = tx_seq;
4194 l2cap_drop_acked_frames(sk);
4196 if (rx_control & L2CAP_CTRL_POLL)
4197 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* Not in SREJ recovery: just stop retransmitting while peer is busy. */
4199 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4200 del_timer(&pi->retrans_timer);
4201 if (rx_control & L2CAP_CTRL_POLL)
4202 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4203 return;
/* In SREJ recovery: keep requesting the missing frames. */
4206 if (rx_control & L2CAP_CTRL_POLL)
4207 l2cap_send_srejtail(sk);
4208 else
4209 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch an incoming S-frame (supervisory frame) to the RR/REJ/
 * SREJ/RNR handler after common F-bit processing, then free the skb.
 * Always returns 0.
 */
4212 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4214 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answering our poll: stop the monitor timer, leave WAIT_F. */
4216 if (L2CAP_CTRL_FINAL & rx_control &&
4217 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4218 del_timer(&l2cap_pi(sk)->monitor_timer);
4219 if (l2cap_pi(sk)->unacked_frames > 0)
4220 __mod_retrans_timer();
4221 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4224 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4225 case L2CAP_SUPER_RCV_READY:
4226 l2cap_data_channel_rrframe(sk, rx_control);
4227 break;
4229 case L2CAP_SUPER_REJECT:
4230 l2cap_data_channel_rejframe(sk, rx_control);
4231 break;
4233 case L2CAP_SUPER_SELECT_REJECT:
4234 l2cap_data_channel_srejframe(sk, rx_control);
4235 break;
4237 case L2CAP_SUPER_RCV_NOT_READY:
4238 l2cap_data_channel_rnrframe(sk, rx_control);
4239 break;
/* S-frames carry no payload to deliver; skb is always consumed here. */
4242 kfree_skb(skb);
4243 return 0;
/*
 * Validate and dispatch one received ERTM PDU: strip the 16-bit
 * control field, check FCS, payload length against MPS, and req_seq
 * sanity, then hand off to the I-frame or S-frame handler.  Invalid
 * frames are dropped (bad req_seq / oversized payload additionally
 * trigger a disconnect request).  Always returns 0.
 */
4246 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4248 struct l2cap_pinfo *pi = l2cap_pi(sk);
4249 u16 control;
4250 u8 req_seq;
4251 int len, next_tx_seq_offset, req_seq_offset;
4253 control = get_unaligned_le16(skb->data);
4254 skb_pull(skb, 2);
4255 len = skb->len;
4258 * We can just drop the corrupted I-frame here.
4259 * Receiver will miss it and start proper recovery
4260 * procedures and ask retransmission.
4262 if (l2cap_check_fcs(pi, skb))
4263 goto drop;
/* SAR-start I-frames carry a 2-byte SDU length header. */
4265 if (__is_sar_start(control) && __is_iframe(control))
4266 len -= 2;
/* FCS trailer, if configured, is not payload. */
4268 if (pi->fcs == L2CAP_FCS_CRC16)
4269 len -= 2;
4271 if (len > pi->mps) {
4272 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4273 goto drop;
/* req_seq must lie between expected_ack_seq and next_tx_seq (mod 64). */
4276 req_seq = __get_reqseq(control);
4277 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4278 if (req_seq_offset < 0)
4279 req_seq_offset += 64;
4281 next_tx_seq_offset =
4282 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4283 if (next_tx_seq_offset < 0)
4284 next_tx_seq_offset += 64;
4286 /* check for invalid req-seq */
4287 if (req_seq_offset > next_tx_seq_offset) {
4288 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4289 goto drop;
4292 if (__is_iframe(control)) {
4293 if (len < 0) {
4294 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4295 goto drop;
4298 l2cap_data_channel_iframe(sk, control, skb);
4299 } else {
/* S-frames must have no payload at all. */
4300 if (len != 0) {
4301 BT_ERR("%d", len);
4302 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4303 goto drop;
4306 l2cap_data_channel_sframe(sk, control, skb);
4309 return 0;
4311 drop:
4312 kfree_skb(skb);
4313 return 0;
/*
 * Route an incoming data frame to the channel identified by @cid and
 * process it according to the channel mode (basic / ERTM / streaming).
 * The skb is consumed on every path.  Always returns 0.
 *
 * NOTE(review): the socket appears to come back locked from
 * l2cap_get_chan_by_scid() — bh_unlock_sock() at "done" pairs with
 * that; confirm against the helper's definition (outside this chunk).
 */
4316 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4318 struct sock *sk;
4319 struct l2cap_pinfo *pi;
4320 u16 control;
4321 u8 tx_seq;
4322 int len;
4324 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4325 if (!sk) {
4326 BT_DBG("unknown cid 0x%4.4x", cid);
4327 goto drop;
4330 pi = l2cap_pi(sk);
4332 BT_DBG("sk %p, len %d", sk, skb->len);
4334 if (sk->sk_state != BT_CONNECTED)
4335 goto drop;
4337 switch (pi->mode) {
4338 case L2CAP_MODE_BASIC:
4339 /* If socket recv buffers overflows we drop data here
4340 * which is *bad* because L2CAP has to be reliable.
4341 * But we don't have any other choice. L2CAP doesn't
4342 * provide flow control mechanism. */
4344 if (pi->imtu < skb->len)
4345 goto drop;
4347 if (!sock_queue_rcv_skb(sk, skb))
4348 goto done;
4349 break;
/* ERTM: process now, or defer to the backlog if a user owns the sock. */
4351 case L2CAP_MODE_ERTM:
4352 if (!sock_owned_by_user(sk)) {
4353 l2cap_ertm_data_rcv(sk, skb);
4354 } else {
4355 if (sk_add_backlog(sk, skb))
4356 goto drop;
4359 goto done;
/* Streaming: like ERTM framing but no retransmission — gaps allowed. */
4361 case L2CAP_MODE_STREAMING:
4362 control = get_unaligned_le16(skb->data);
4363 skb_pull(skb, 2);
4364 len = skb->len;
4366 if (l2cap_check_fcs(pi, skb))
4367 goto drop;
4369 if (__is_sar_start(control))
4370 len -= 2;
4372 if (pi->fcs == L2CAP_FCS_CRC16)
4373 len -= 2;
4375 if (len > pi->mps || len < 0 || __is_sframe(control))
4376 goto drop;
4378 tx_seq = __get_txseq(control);
4380 if (pi->expected_tx_seq == tx_seq)
4381 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4382 else
4383 pi->expected_tx_seq = (tx_seq + 1) % 64;
4385 l2cap_streaming_reassembly_sdu(sk, skb, control);
4387 goto done;
4389 default:
4390 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4391 break;
4394 drop:
4395 kfree_skb(skb);
4397 done:
4398 if (sk)
4399 bh_unlock_sock(sk);
4401 return 0;
/*
 * Deliver a connectionless (group) frame to the socket bound to @psm,
 * enforcing state and incoming-MTU checks.  The skb is consumed on
 * every path.  Always returns 0.
 */
4404 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4406 struct sock *sk;
4408 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4409 if (!sk)
4410 goto drop;
4412 BT_DBG("sk %p, len %d", sk, skb->len);
4414 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4415 goto drop;
4417 if (l2cap_pi(sk)->imtu < skb->len)
4418 goto drop;
4420 if (!sock_queue_rcv_skb(sk, skb))
4421 goto done;
4423 drop:
4424 kfree_skb(skb);
4426 done:
/* sk, when found, came back locked from l2cap_get_sock_by_psm(). */
4427 if (sk)
4428 bh_unlock_sock(sk);
4429 return 0;
/*
 * Demultiplex one complete L2CAP frame by channel id: signalling,
 * connectionless, or a connection-oriented data channel.  Frames whose
 * header length disagrees with the skb length are discarded.
 */
4432 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4434 struct l2cap_hdr *lh = (void *) skb->data;
4435 u16 cid, len;
4436 __le16 psm;
4438 skb_pull(skb, L2CAP_HDR_SIZE);
4439 cid = __le16_to_cpu(lh->cid);
4440 len = __le16_to_cpu(lh->len);
4442 if (len != skb->len) {
4443 kfree_skb(skb);
4444 return;
4447 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4449 switch (cid) {
4450 case L2CAP_CID_SIGNALING:
4451 l2cap_sig_channel(conn, skb);
4452 break;
/* Connectionless frames carry the PSM in the first two payload bytes. */
4454 case L2CAP_CID_CONN_LESS:
4455 psm = get_unaligned_le16(skb->data);
4456 skb_pull(skb, 2);
4457 l2cap_conless_channel(conn, psm, skb);
4458 break;
4460 default:
4461 l2cap_data_channel(conn, cid, skb);
4462 break;
4466 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an ACL connection is being requested from @bdaddr.
 * Scan listening L2CAP sockets and compute the link-mode answer —
 * sockets bound to this exact adapter address (lm1) take precedence
 * over wildcard BDADDR_ANY listeners (lm2).  Non-ACL links are
 * rejected with -EINVAL.
 */
4468 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4470 int exact = 0, lm1 = 0, lm2 = 0;
4471 register struct sock *sk;
4472 struct hlist_node *node;
4474 if (type != ACL_LINK)
4475 return -EINVAL;
4477 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4479 /* Find listening sockets and check their link_mode */
4480 read_lock(&l2cap_sk_list.lock);
4481 sk_for_each(sk, node, &l2cap_sk_list.head) {
4482 if (sk->sk_state != BT_LISTEN)
4483 continue;
4485 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4486 lm1 |= HCI_LM_ACCEPT;
4487 if (l2cap_pi(sk)->role_switch)
4488 lm1 |= HCI_LM_MASTER;
4489 exact++;
4490 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4491 lm2 |= HCI_LM_ACCEPT;
4492 if (l2cap_pi(sk)->role_switch)
4493 lm2 |= HCI_LM_MASTER;
4496 read_unlock(&l2cap_sk_list.lock);
4498 return exact ? lm1 : lm2;
/*
 * HCI callback: ACL connection attempt completed.  On success attach
 * (or create) the L2CAP connection object and kick off channel setup;
 * on failure tear down any associated L2CAP state.
 */
4501 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4503 struct l2cap_conn *conn;
4505 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4507 if (hcon->type != ACL_LINK)
4508 return -EINVAL;
4510 if (!status) {
4511 conn = l2cap_conn_add(hcon, status);
4512 if (conn)
4513 l2cap_conn_ready(conn);
4514 } else
/* bt_err() maps the HCI status code to a negative errno. */
4515 l2cap_conn_del(hcon, bt_err(status));
4517 return 0;
/*
 * HCI callback: report the reason code to use when disconnecting this
 * link.  0x13 ("remote user terminated") is the fallback when no
 * L2CAP connection state exists or the link is not ACL.
 */
4520 static int l2cap_disconn_ind(struct hci_conn *hcon)
4522 struct l2cap_conn *conn = hcon->l2cap_data;
4524 BT_DBG("hcon %p", hcon);
4526 if (hcon->type != ACL_LINK || !conn)
4527 return 0x13;
4529 return conn->disc_reason;
/*
 * HCI callback: the ACL link went down — tear down all L2CAP state
 * for it, translating the HCI reason into an errno via bt_err().
 */
4532 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4534 BT_DBG("hcon %p reason %d", hcon, reason);
4536 if (hcon->type != ACL_LINK)
4537 return -EINVAL;
4539 l2cap_conn_del(hcon, bt_err(reason));
4541 return 0;
/*
 * React to a link-encryption change for one channel socket.  Only
 * SEQPACKET/STREAM (connection-oriented) sockets care.  Losing
 * encryption gives MEDIUM-security channels a 5 s grace timer and
 * closes HIGH-security channels outright; regaining it clears the
 * pending timer for MEDIUM-security channels.
 */
4544 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4546 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4547 return;
4549 if (encrypt == 0x00) {
4550 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4551 l2cap_sock_clear_timer(sk);
4552 l2cap_sock_set_timer(sk, HZ * 5);
4553 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4554 __l2cap_sock_close(sk, ECONNREFUSED);
4555 } else {
4556 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4557 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: authentication/encryption procedure finished with
 * @status for the whole ACL link.  Walk every channel on the
 * connection and advance its state machine: established channels get
 * an encryption check, BT_CONNECT channels send their deferred
 * Connection Request (or schedule teardown), and BT_CONNECT2 channels
 * answer the peer's pending Connection Request with success or
 * security-block.
 */
4561 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4563 struct l2cap_chan_list *l;
4564 struct l2cap_conn *conn = hcon->l2cap_data;
4565 struct sock *sk;
4567 if (!conn)
4568 return 0;
4570 l = &conn->chan_list;
4572 BT_DBG("conn %p", conn);
4574 read_lock(&l->lock);
4576 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4577 bh_lock_sock(sk);
/* A connect is already pending for this channel; leave it alone. */
4579 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4580 bh_unlock_sock(sk);
4581 continue;
4584 if (!status && (sk->sk_state == BT_CONNECTED ||
4585 sk->sk_state == BT_CONFIG)) {
4586 l2cap_check_encryption(sk, encrypt);
4587 bh_unlock_sock(sk);
4588 continue;
/* Outgoing channel waited for security: now send the Conn Request. */
4591 if (sk->sk_state == BT_CONNECT) {
4592 if (!status) {
4593 struct l2cap_conn_req req;
4594 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4595 req.psm = l2cap_pi(sk)->psm;
4597 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4598 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4600 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4601 L2CAP_CONN_REQ, sizeof(req), &req);
4602 } else {
/* Security failed: short timer so teardown happens soon. */
4603 l2cap_sock_clear_timer(sk);
4604 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel waited for security: answer the peer now. */
4606 } else if (sk->sk_state == BT_CONNECT2) {
4607 struct l2cap_conn_rsp rsp;
4608 __u16 result;
4610 if (!status) {
4611 sk->sk_state = BT_CONFIG;
4612 result = L2CAP_CR_SUCCESS;
4613 } else {
4614 sk->sk_state = BT_DISCONN;
4615 l2cap_sock_set_timer(sk, HZ / 10);
4616 result = L2CAP_CR_SEC_BLOCK;
4619 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4620 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4621 rsp.result = cpu_to_le16(result);
4622 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4623 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4624 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4627 bh_unlock_sock(sk);
4630 read_unlock(&l->lock);
4632 return 0;
/*
 * HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * ACL_START fragments carry the L2CAP header, from which the total
 * frame length is learned; continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, then the complete frame is
 * handed to l2cap_recv_frame().  Malformed sequences mark the
 * connection unreliable (ECOMM) and drop the fragment.  The incoming
 * skb is always consumed.  Always returns 0.
 */
4635 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4637 struct l2cap_conn *conn = hcon->l2cap_data;
4639 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4640 goto drop;
4642 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4644 if (flags & ACL_START) {
4645 struct l2cap_hdr *hdr;
4646 int len;
/* A start frame while reassembly is in progress: abandon the old one. */
4648 if (conn->rx_len) {
4649 BT_ERR("Unexpected start frame (len %d)", skb->len);
4650 kfree_skb(conn->rx_skb);
4651 conn->rx_skb = NULL;
4652 conn->rx_len = 0;
4653 l2cap_conn_unreliable(conn, ECOMM);
/* Need at least the 2-byte length field of the L2CAP header. */
4656 if (skb->len < 2) {
4657 BT_ERR("Frame is too short (len %d)", skb->len);
4658 l2cap_conn_unreliable(conn, ECOMM);
4659 goto drop;
4662 hdr = (struct l2cap_hdr *) skb->data;
4663 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4665 if (len == skb->len) {
4666 /* Complete frame received */
4667 l2cap_recv_frame(conn, skb);
4668 return 0;
4671 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4673 if (skb->len > len) {
4674 BT_ERR("Frame is too long (len %d, expected len %d)",
4675 skb->len, len);
4676 l2cap_conn_unreliable(conn, ECOMM);
4677 goto drop;
4680 /* Allocate skb for the complete frame (with header) */
4681 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4682 if (!conn->rx_skb)
4683 goto drop;
4685 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4686 skb->len);
4687 conn->rx_len = len - skb->len;
4688 } else {
4689 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without a pending start frame. */
4691 if (!conn->rx_len) {
4692 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4693 l2cap_conn_unreliable(conn, ECOMM);
4694 goto drop;
4697 if (skb->len > conn->rx_len) {
4698 BT_ERR("Fragment is too long (len %d, expected %d)",
4699 skb->len, conn->rx_len);
4700 kfree_skb(conn->rx_skb);
4701 conn->rx_skb = NULL;
4702 conn->rx_len = 0;
4703 l2cap_conn_unreliable(conn, ECOMM);
4704 goto drop;
4707 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4708 skb->len);
4709 conn->rx_len -= skb->len;
4711 if (!conn->rx_len) {
4712 /* Complete frame received */
4713 l2cap_recv_frame(conn, conn->rx_skb);
4714 conn->rx_skb = NULL;
4718 drop:
4719 kfree_skb(skb);
4720 return 0;
/*
 * seq_file show callback: dump one line per L2CAP socket (addresses,
 * state, PSM, CIDs, MTUs, security level) for the debugfs "l2cap"
 * file.
 */
4723 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4725 struct sock *sk;
4726 struct hlist_node *node;
4728 read_lock_bh(&l2cap_sk_list.lock);
4730 sk_for_each(sk, node, &l2cap_sk_list.head) {
4731 struct l2cap_pinfo *pi = l2cap_pi(sk);
4733 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4734 batostr(&bt_sk(sk)->src),
4735 batostr(&bt_sk(sk)->dst),
4736 sk->sk_state, __le16_to_cpu(pi->psm),
4737 pi->scid, pi->dcid,
4738 pi->imtu, pi->omtu, pi->sec_level);
4741 read_unlock_bh(&l2cap_sk_list.lock);
4743 return 0;
/* debugfs open callback: bind the single-record seq_file show routine. */
4746 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4748 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the debugfs "l2cap" file (read-only seq_file). */
4751 static const struct file_operations l2cap_debugfs_fops = {
4752 .open = l2cap_debugfs_open,
4753 .read = seq_read,
4754 .llseek = seq_lseek,
4755 .release = single_release,
/* Dentry of the debugfs file; created in l2cap_init(), removed on exit. */
4758 static struct dentry *l2cap_debugfs;
/* Socket-layer operations exposed for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4760 static const struct proto_ops l2cap_sock_ops = {
4761 .family = PF_BLUETOOTH,
4762 .owner = THIS_MODULE,
4763 .release = l2cap_sock_release,
4764 .bind = l2cap_sock_bind,
4765 .connect = l2cap_sock_connect,
4766 .listen = l2cap_sock_listen,
4767 .accept = l2cap_sock_accept,
4768 .getname = l2cap_sock_getname,
4769 .sendmsg = l2cap_sock_sendmsg,
4770 .recvmsg = l2cap_sock_recvmsg,
4771 .poll = bt_sock_poll,
4772 .ioctl = bt_sock_ioctl,
4773 .mmap = sock_no_mmap,
4774 .socketpair = sock_no_socketpair,
4775 .shutdown = l2cap_sock_shutdown,
4776 .getsockopt = l2cap_sock_getsockopt
/* Registration record: how the Bluetooth family creates L2CAP sockets. */
4780 static const struct net_proto_family l2cap_sock_family_ops = {
4781 .family = PF_BLUETOOTH,
4782 .owner = THIS_MODULE,
4783 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core delivers events/data to L2CAP. */
4786 static struct hci_proto l2cap_hci_proto = {
4787 .name = "L2CAP",
4788 .id = HCI_PROTO_L2CAP,
4789 .connect_ind = l2cap_connect_ind,
4790 .connect_cfm = l2cap_connect_cfm,
4791 .disconn_ind = l2cap_disconn_ind,
4792 .disconn_cfm = l2cap_disconn_cfm,
4793 .security_cfm = l2cap_security_cfm,
4794 .recv_acldata = l2cap_recv_acldata
4797 static int __init l2cap_init(void)
4799 int err;
4801 err = proto_register(&l2cap_proto, 0);
4802 if (err < 0)
4803 return err;
4805 _busy_wq = create_singlethread_workqueue("l2cap");
4806 if (!_busy_wq)
4807 goto error;
4809 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4810 if (err < 0) {
4811 BT_ERR("L2CAP socket registration failed");
4812 goto error;
4815 err = hci_register_proto(&l2cap_hci_proto);
4816 if (err < 0) {
4817 BT_ERR("L2CAP protocol registration failed");
4818 bt_sock_unregister(BTPROTO_L2CAP);
4819 goto error;
4822 if (bt_debugfs) {
4823 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4824 bt_debugfs, NULL, &l2cap_debugfs_fops);
4825 if (!l2cap_debugfs)
4826 BT_ERR("Failed to create L2CAP debug file");
4829 BT_INFO("L2CAP ver %s", VERSION);
4830 BT_INFO("L2CAP socket layer initialized");
4832 return 0;
4834 error:
4835 proto_unregister(&l2cap_proto);
4836 return err;
/*
 * Module exit: undo l2cap_init() — remove the debugfs file, drain and
 * destroy the workqueue, then unregister the socket family, the HCI
 * protocol hooks, and finally the proto itself.
 */
4839 static void __exit l2cap_exit(void)
4841 debugfs_remove(l2cap_debugfs);
4843 flush_workqueue(_busy_wq);
4844 destroy_workqueue(_busy_wq);
4846 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4847 BT_ERR("L2CAP socket unregistration failed");
4849 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4850 BT_ERR("L2CAP protocol unregistration failed");
4852 proto_unregister(&l2cap_proto);
/*
 * Intentionally empty, exported symbol: referencing it from another
 * module forces this module to be loaded by the module loader.
 */
4855 void l2cap_load(void)
4857 /* Dummy function to trigger automatic L2CAP module loading by
4858 * other modules that use L2CAP sockets but don't use any other
4859 * symbols from it. */
4861 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit points, parameters, and metadata. */
4863 module_init(l2cap_init);
4864 module_exit(l2cap_exit);
/* Runtime knob (0644: root-writable via sysfs) to force basic mode only. */
4866 module_param(disable_ertm, bool, 0644);
4867 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4869 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4870 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4871 MODULE_VERSION(VERSION);
4872 MODULE_LICENSE("GPL");
4873 MODULE_ALIAS("bt-proto-0");