[DNS RESOLVER] Minor typo correction
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob9ba1e8eee37c7b1ae49c3a242bdefa176462659f
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* NOTE(review): this file is a web-scrape dump — blank lines and many
 * closing braces were lost in extraction, and original line numbers are
 * embedded in the text. Comments below annotate intent only. */
58 #define VERSION "2.15"
/* Module parameter: non-zero disables ERTM and streaming modes. */
60 static int disable_ertm = 0;
/* Locally supported L2CAP feature mask; ERTM/streaming bits added at
 * runtime when not disabled (see l2cap_mode_supported). */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used by the ERTM local-busy recovery path. */
67 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined later in this file. */
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
/* sk_timer callback: picks an error reason based on the socket state,
 * closes the channel and drops the timer's socket reference. Runs in
 * timer (softirq) context, hence bh_lock_sock. */
86 static void l2cap_sock_timeout(unsigned long arg)
88 struct sock *sk = (struct sock *) arg;
89 int reason;
91 BT_DBG("sock %p state %d", sk, sk->sk_state);
93 bh_lock_sock(sk);
95 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
96 reason = ECONNREFUSED;
97 else if (sk->sk_state == BT_CONNECT &&
98 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
99 reason = ECONNREFUSED;
100 else
101 reason = ETIMEDOUT;
103 __l2cap_sock_close(sk, reason);
105 bh_unlock_sock(sk);
/* Kill must run on the unlocked socket; sock_put pairs with the hold
 * taken when the timer was armed. */
107 l2cap_sock_kill(sk);
108 sock_put(sk);
/* Arm sk_timer to fire 'timeout' jiffies from now. */
111 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
117 static void l2cap_sock_clear_timer(struct sock *sk)
119 BT_DBG("sock %p state %d", sk, sk->sk_state);
120 sk_stop_timer(sk, &sk->sk_timer);
123 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a channel whose destination
 * CID matches. Caller must hold l->lock; returns NULL if not found. */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
129 break;
131 return s;
/* Same as above but matches the source CID. Caller holds l->lock. */
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 struct sock *s;
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
139 break;
141 return s;
144 /* Find channel with given SCID.
145 * Returns locked socket */
146 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
148 struct sock *s;
149 read_lock(&l->lock);
150 s = __l2cap_get_chan_by_scid(l, cid);
151 if (s)
152 bh_lock_sock(s);
153 read_unlock(&l->lock);
154 return s;
/* Find the channel with a pending signalling request matching 'ident'.
 * Caller holds l->lock. */
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 struct sock *s;
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
162 break;
164 return s;
/* Locked-socket wrapper around the ident lookup above. */
167 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
169 struct sock *s;
170 read_lock(&l->lock);
171 s = __l2cap_get_chan_by_ident(l, ident);
172 if (s)
173 bh_lock_sock(s);
174 read_unlock(&l->lock);
175 return s;
/* Allocate the first free dynamic CID on this connection, or 0 when
 * the dynamic range is exhausted. */
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
180 u16 cid = L2CAP_CID_DYN_START;
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
184 return cid;
187 return 0;
/* Prepend sk to the connection's doubly linked channel list, taking a
 * socket reference. Caller must hold the list's write lock. */
190 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
192 sock_hold(sk);
194 if (l->head)
195 l2cap_pi(l->head)->prev_c = sk;
197 l2cap_pi(sk)->next_c = l->head;
198 l2cap_pi(sk)->prev_c = NULL;
199 l->head = sk;
/* Unlink sk from the channel list and drop the reference taken by
 * __l2cap_chan_link. Takes the list's write lock itself. */
202 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
204 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
206 write_lock_bh(&l->lock);
207 if (sk == l->head)
208 l->head = next;
210 if (next)
211 l2cap_pi(next)->prev_c = prev;
212 if (prev)
213 l2cap_pi(prev)->next_c = next;
214 write_unlock_bh(&l->lock);
216 __sock_put(sk);
/* Attach sk to conn: assign CIDs according to socket type, link it
 * into the channel list and queue it on the parent's accept queue for
 * incoming connections. Caller holds the channel-list write lock. */
219 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
221 struct l2cap_chan_list *l = &conn->chan_list;
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
224 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: HCI "Remote User Terminated Connection" default disc reason. */
226 conn->disc_reason = 0x13;
228 l2cap_pi(sk)->conn = conn;
230 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
233 } else if (sk->sk_type == SOCK_DGRAM) {
234 /* Connectionless socket */
235 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 } else {
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
245 __l2cap_chan_link(l, sk);
247 if (parent)
248 bt_accept_enqueue(parent, sk);
251 /* Delete channel.
252 * Must be called on the locked socket. */
/* Detaches the channel from its connection, marks the socket closed
 * and zapped, wakes the owner (or the listening parent), and tears
 * down all ERTM state: pending timers, queues and the SREJ list. */
253 static void l2cap_chan_del(struct sock *sk, int err)
255 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
256 struct sock *parent = bt_sk(sk)->parent;
258 l2cap_sock_clear_timer(sk);
260 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
262 if (conn) {
263 /* Unlink from channel list */
264 l2cap_chan_unlink(&conn->chan_list, sk);
265 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL link reference held on behalf of this channel. */
266 hci_conn_put(conn->hcon);
269 sk->sk_state = BT_CLOSED;
270 sock_set_flag(sk, SOCK_ZAPPED);
272 if (err)
273 sk->sk_err = err;
275 if (parent) {
276 bt_accept_unlink(sk);
277 parent->sk_data_ready(parent, 0);
278 } else
279 sk->sk_state_change(sk);
281 skb_queue_purge(TX_QUEUE(sk));
283 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 struct srej_list *l, *tmp;
286 del_timer(&l2cap_pi(sk)->retrans_timer);
287 del_timer(&l2cap_pi(sk)->monitor_timer);
288 del_timer(&l2cap_pi(sk)->ack_timer);
290 skb_queue_purge(SREJ_QUEUE(sk));
291 skb_queue_purge(BUSY_QUEUE(sk));
/* Free any outstanding selective-reject bookkeeping entries. */
293 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
294 list_del(&l->list);
295 kfree(l);
300 /* Service level security */
/* Map the channel's security level to an HCI authentication type and
 * ask the HCI layer to enforce it on the underlying ACL link.
 * PSM 0x0001 (SDP) uses no-bonding types and is demoted to the SDP
 * security level when set to LOW. */
301 static inline int l2cap_check_security(struct sock *sk)
303 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
304 __u8 auth_type;
306 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
308 auth_type = HCI_AT_NO_BONDING_MITM;
309 else
310 auth_type = HCI_AT_NO_BONDING;
312 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
313 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
314 } else {
315 switch (l2cap_pi(sk)->sec_level) {
316 case BT_SECURITY_HIGH:
317 auth_type = HCI_AT_GENERAL_BONDING_MITM;
318 break;
319 case BT_SECURITY_MEDIUM:
320 auth_type = HCI_AT_GENERAL_BONDING;
321 break;
322 default:
323 auth_type = HCI_AT_NO_BONDING;
324 break;
328 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
329 auth_type);
/* Allocate the next signalling-command identifier for this connection.
 * Wraps within 1..128 (the kernel's slice of the ident space). */
332 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
334 u8 id;
336 /* Get next available identificator.
337 * 1 - 128 are used by kernel.
338 * 129 - 199 are reserved.
339 * 200 - 254 are used by utilities like l2ping, etc.
342 spin_lock_bh(&conn->lock);
344 if (++conn->tx_ident > 128)
345 conn->tx_ident = 1;
347 id = conn->tx_ident;
349 spin_unlock_bh(&conn->lock);
351 return id;
/* Build an L2CAP signalling command and send it on the ACL link.
 * Silently drops the command if the skb cannot be built. */
354 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
358 BT_DBG("code 0x%2.2x", code);
360 if (!skb)
361 return;
363 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM supervisory (S-)frame with the given
 * control field, consuming any pending F-bit / P-bit state and
 * appending an FCS when the channel uses CRC16. No-op unless the
 * socket is connected. */
366 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
368 struct sk_buff *skb;
369 struct l2cap_hdr *lh;
370 struct l2cap_conn *conn = pi->conn;
371 struct sock *sk = (struct sock *)pi;
/* S-frame payload is the 2-byte control field (+2 FCS if enabled). */
372 int count, hlen = L2CAP_HDR_SIZE + 2;
374 if (sk->sk_state != BT_CONNECTED)
375 return;
377 if (pi->fcs == L2CAP_FCS_CRC16)
378 hlen += 2;
380 BT_DBG("pi %p, control 0x%2.2x", pi, control);
382 count = min_t(unsigned int, conn->mtu, hlen);
383 control |= L2CAP_CTRL_FRAME_TYPE;
385 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
386 control |= L2CAP_CTRL_FINAL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
390 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
391 control |= L2CAP_CTRL_POLL;
392 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
395 skb = bt_skb_alloc(count, GFP_ATOMIC);
396 if (!skb)
397 return;
399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
400 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
401 lh->cid = cpu_to_le16(pi->dcid);
402 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control, i.e. everything before the FCS field. */
404 if (pi->fcs == L2CAP_FCS_CRC16) {
405 u16 fcs = crc16(0, (u8 *)lh, count - 2);
406 put_unaligned_le16(fcs, skb_put(skb, 2));
409 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR, or RNR when locally busy, acknowledging buffer_seq. */
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
414 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
415 control |= L2CAP_SUPER_RCV_NOT_READY;
416 pi->conn_state |= L2CAP_CONN_RNR_SENT;
417 } else
418 control |= L2CAP_SUPER_RCV_READY;
420 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
422 l2cap_send_sframe(pi, control);
/* True when no Connection Request is already outstanding for sk. */
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: send a Connection Request if the
 * remote feature mask is already known (and security passes), else
 * first issue an Information Request for the feature mask and start
 * the info timer. Waits silently while a feature exchange is
 * in flight. */
430 static void l2cap_do_start(struct sock *sk)
432 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
434 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
435 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
436 return;
438 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
439 struct l2cap_conn_req req;
440 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
441 req.psm = l2cap_pi(sk)->psm;
443 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
446 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
447 L2CAP_CONN_REQ, sizeof(req), &req);
449 } else {
450 struct l2cap_info_req req;
451 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
453 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
454 conn->info_ident = l2cap_get_ident(conn);
456 mod_timer(&conn->info_timer, jiffies +
457 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
459 l2cap_send_cmd(conn, conn->info_ident,
460 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero iff 'mode' (ERTM or streaming) is supported by both the
 * remote feature mask and our local one; BASIC falls to default 0. */
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
/* Purge pending TX, stop ERTM timers, send a Disconnection Request
 * and move the socket to BT_DISCONN, recording 'err' for the owner. */
480 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
482 struct l2cap_disconn_req req;
484 if (!conn)
485 return;
487 skb_queue_purge(TX_QUEUE(sk));
489 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 del_timer(&l2cap_pi(sk)->retrans_timer);
491 del_timer(&l2cap_pi(sk)->monitor_timer);
492 del_timer(&l2cap_pi(sk)->ack_timer);
495 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
497 l2cap_send_cmd(conn, l2cap_get_ident(conn),
498 L2CAP_DISCONN_REQ, sizeof(req), &req);
500 sk->sk_state = BT_DISCONN;
501 sk->sk_err = err;
504 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on 'conn' once the link is
 * usable: BT_CONNECT channels get a Connection Request (or are queued
 * for closing when their mode is unsupported and STATE2_DEVICE is
 * set); BT_CONNECT2 channels get a Connection Response and, on
 * success, the first Configure Request. Channels that must be closed
 * are collected on a local list and closed after the read lock is
 * dropped, since __l2cap_sock_close cannot run under it. */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
509 struct sock *sk;
511 BT_DBG("conn %p", conn);
513 INIT_LIST_HEAD(&del.list);
515 read_lock(&l->lock);
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
518 bh_lock_sock(sk);
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
522 bh_unlock_sock(sk);
523 continue;
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
/* NOTE(review): kzalloc result (tmp1) is dereferenced without a NULL
 * check under GFP_ATOMIC — potential NULL deref on allocation failure;
 * verify against upstream history. */
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
558 char buf[128];
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
562 if (l2cap_check_security(sk)) {
563 if (bt_sk(sk)->defer_setup) {
/* Deferred setup: report "authorization pending" and let the
 * listening owner accept or reject via the parent socket. */
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
569 } else {
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
574 } else {
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
594 bh_unlock_sock(sk);
597 read_unlock(&l->lock);
/* Now close the channels collected above, outside the read lock. */
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
/* ACL link is up: mark raw/dgram channels connected immediately and
 * start the connect handshake for connection-oriented ones. */
608 static void l2cap_conn_ready(struct l2cap_conn *conn)
610 struct l2cap_chan_list *l = &conn->chan_list;
611 struct sock *sk;
613 BT_DBG("conn %p", conn);
615 read_lock(&l->lock);
617 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
618 bh_lock_sock(sk);
620 if (sk->sk_type != SOCK_SEQPACKET &&
621 sk->sk_type != SOCK_STREAM) {
622 l2cap_sock_clear_timer(sk);
623 sk->sk_state = BT_CONNECTED;
624 sk->sk_state_change(sk);
625 } else if (sk->sk_state == BT_CONNECT)
626 l2cap_do_start(sk);
628 bh_unlock_sock(sk);
631 read_unlock(&l->lock);
634 /* Notify sockets that we cannot guaranty reliability anymore */
/* Flags 'err' on every channel that asked for reliable delivery. */
635 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
637 struct l2cap_chan_list *l = &conn->chan_list;
638 struct sock *sk;
640 BT_DBG("conn %p", conn);
642 read_lock(&l->lock);
644 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
645 if (l2cap_pi(sk)->force_reliable)
646 sk->sk_err = err;
649 read_unlock(&l->lock);
/* Information Request timed out: treat feature exchange as done and
 * proceed with channel establishment using defaults. */
652 static void l2cap_info_timeout(unsigned long arg)
654 struct l2cap_conn *conn = (void *) arg;
656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
657 conn->info_ident = 0;
659 l2cap_conn_start(conn);
/* Allocate and initialise the per-ACL-link L2CAP state. Returns the
 * existing conn if one is already attached, NULL on allocation
 * failure, and the existing/NULL conn unchanged when 'status' signals
 * an HCI error. */
662 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
664 struct l2cap_conn *conn = hcon->l2cap_data;
666 if (conn || status)
667 return conn;
669 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
670 if (!conn)
671 return NULL;
673 hcon->l2cap_data = conn;
674 conn->hcon = hcon;
676 BT_DBG("hcon %p conn %p", hcon, conn);
678 conn->mtu = hcon->hdev->acl_mtu;
679 conn->src = &hcon->hdev->bdaddr;
680 conn->dst = &hcon->dst;
682 conn->feat_mask = 0;
684 spin_lock_init(&conn->lock);
685 rwlock_init(&conn->chan_list.lock);
687 setup_timer(&conn->info_timer, l2cap_info_timeout,
688 (unsigned long) conn);
/* 0x13: default disconnect reason (remote user terminated). */
690 conn->disc_reason = 0x13;
692 return conn;
/* Tear down the L2CAP state of a dying ACL link: kill every channel
 * with 'err', stop the info timer and free the conn. */
695 static void l2cap_conn_del(struct hci_conn *hcon, int err)
697 struct l2cap_conn *conn = hcon->l2cap_data;
698 struct sock *sk;
700 if (!conn)
701 return;
703 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled incoming frame. */
705 kfree_skb(conn->rx_skb);
707 /* Kill channels */
708 while ((sk = conn->chan_list.head)) {
709 bh_lock_sock(sk);
710 l2cap_chan_del(sk, err);
711 bh_unlock_sock(sk);
712 l2cap_sock_kill(sk);
715 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
716 del_timer_sync(&conn->info_timer);
718 hcon->l2cap_data = NULL;
719 kfree(conn);
/* Locked wrapper around __l2cap_chan_add. */
722 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
724 struct l2cap_chan_list *l = &conn->chan_list;
725 write_lock_bh(&l->lock);
726 __l2cap_chan_add(conn, sk, parent);
727 write_unlock_bh(&l->lock);
730 /* ---- Socket interface ---- */
/* Exact-match lookup of a bound socket by source PSM ('sport') and
 * local bdaddr. Caller must hold l2cap_sk_list.lock. */
731 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
733 struct sock *sk;
734 struct hlist_node *node;
735 sk_for_each(sk, node, &l2cap_sk_list.head)
736 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
737 goto found;
738 sk = NULL;
739 found:
740 return sk;
743 /* Find socket with psm and source bdaddr.
744 * Returns closest match.
/* Exact src match wins; otherwise fall back to a BDADDR_ANY listener
 * (sk1). 'state', when non-zero, filters by socket state. Caller
 * must hold l2cap_sk_list.lock. */
746 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
748 struct sock *sk = NULL, *sk1 = NULL;
749 struct hlist_node *node;
751 sk_for_each(sk, node, &l2cap_sk_list.head) {
752 if (state && sk->sk_state != state)
753 continue;
755 if (l2cap_pi(sk)->psm == psm) {
756 /* Exact match. */
757 if (!bacmp(&bt_sk(sk)->src, src))
758 break;
760 /* Closest match */
761 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
762 sk1 = sk;
/* node is non-NULL only when the loop broke on an exact match. */
765 return node ? sk : sk1;
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
770 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
772 struct sock *s;
773 read_lock(&l2cap_sk_list.lock);
774 s = __l2cap_get_sock_by_psm(state, psm, src);
775 if (s)
776 bh_lock_sock(s);
777 read_unlock(&l2cap_sk_list.lock);
778 return s;
/* sk_destruct callback: free any skbs still queued on the socket. */
781 static void l2cap_sock_destruct(struct sock *sk)
783 BT_DBG("sk %p", sk);
785 skb_queue_purge(&sk->sk_receive_queue);
786 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
789 static void l2cap_sock_cleanup_listen(struct sock *parent)
791 struct sock *sk;
793 BT_DBG("parent %p", parent);
795 /* Close not yet accepted channels */
796 while ((sk = bt_accept_dequeue(parent, NULL)))
797 l2cap_sock_close(sk);
799 parent->sk_state = BT_CLOSED;
800 sock_set_flag(parent, SOCK_ZAPPED);
803 /* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
/* Unlink from the global socket list and drop the final reference;
 * no-op unless the socket is both zapped and orphaned. */
806 static void l2cap_sock_kill(struct sock *sk)
808 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
809 return;
811 BT_DBG("sk %p state %d", sk, sk->sk_state);
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list, sk);
815 sock_set_flag(sk, SOCK_DEAD);
816 sock_put(sk);
/* State-machine-driven close. Connected channels send a Disconnection
 * Request (with a timer in case the peer never answers); half-open
 * incoming channels (BT_CONNECT2) are rejected with an appropriate
 * result; everything else is deleted or just zapped. Caller holds
 * the socket lock. */
819 static void __l2cap_sock_close(struct sock *sk, int reason)
821 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
823 switch (sk->sk_state) {
824 case BT_LISTEN:
825 l2cap_sock_cleanup_listen(sk);
826 break;
828 case BT_CONNECTED:
829 case BT_CONFIG:
830 if (sk->sk_type == SOCK_SEQPACKET ||
831 sk->sk_type == SOCK_STREAM) {
832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
835 l2cap_send_disconn_req(conn, sk, reason);
836 } else
837 l2cap_chan_del(sk, reason);
838 break;
840 case BT_CONNECT2:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844 struct l2cap_conn_rsp rsp;
845 __u16 result;
/* Deferred-setup rejection looks like a security block to the peer;
 * otherwise report the PSM as unsupported. */
847 if (bt_sk(sk)->defer_setup)
848 result = L2CAP_CR_SEC_BLOCK;
849 else
850 result = L2CAP_CR_BAD_PSM;
852 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
853 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
854 rsp.result = cpu_to_le16(result);
855 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
856 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
857 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
858 } else
859 l2cap_chan_del(sk, reason);
860 break;
862 case BT_CONNECT:
863 case BT_DISCONN:
864 l2cap_chan_del(sk, reason);
865 break;
867 default:
868 sock_set_flag(sk, SOCK_ZAPPED);
869 break;
873 /* Must be called on unlocked socket. */
/* Public close: stop the timer, close under lock, then kill. */
874 static void l2cap_sock_close(struct sock *sk)
876 l2cap_sock_clear_timer(sk);
877 lock_sock(sk);
878 __l2cap_sock_close(sk, ECONNRESET);
879 release_sock(sk);
880 l2cap_sock_kill(sk);
/* Initialise per-socket L2CAP state. Children of a listening socket
 * inherit the parent's configuration; fresh sockets get defaults
 * (ERTM for SOCK_STREAM unless disabled, BASIC otherwise). */
883 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
885 struct l2cap_pinfo *pi = l2cap_pi(sk);
887 BT_DBG("sk %p", sk);
889 if (parent) {
890 sk->sk_type = parent->sk_type;
891 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
893 pi->imtu = l2cap_pi(parent)->imtu;
894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
896 pi->mode = l2cap_pi(parent)->mode;
897 pi->fcs = l2cap_pi(parent)->fcs;
898 pi->max_tx = l2cap_pi(parent)->max_tx;
899 pi->tx_win = l2cap_pi(parent)->tx_win;
900 pi->sec_level = l2cap_pi(parent)->sec_level;
901 pi->role_switch = l2cap_pi(parent)->role_switch;
902 pi->force_reliable = l2cap_pi(parent)->force_reliable;
903 } else {
904 pi->imtu = L2CAP_DEFAULT_MTU;
905 pi->omtu = 0;
906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 pi->mode = L2CAP_MODE_ERTM;
/* STATE2_DEVICE: refuse to fall back to BASIC during config. */
908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 } else {
910 pi->mode = L2CAP_MODE_BASIC;
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
913 pi->fcs = L2CAP_FCS_CRC16;
914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
915 pi->sec_level = BT_SECURITY_LOW;
916 pi->role_switch = 0;
917 pi->force_reliable = 0;
920 /* Default config options */
921 pi->conf_len = 0;
922 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
923 skb_queue_head_init(TX_QUEUE(sk));
924 skb_queue_head_init(SREJ_QUEUE(sk));
925 skb_queue_head_init(BUSY_QUEUE(sk));
926 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor registered with the Bluetooth socket layer. */
929 static struct proto l2cap_proto = {
930 .name = "L2CAP",
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate a struct sock for the L2CAP proto, wire up destructor,
 * timeout and timer, and link it into the global socket list. */
935 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
937 struct sock *sk;
939 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
940 if (!sk)
941 return NULL;
943 sock_init_data(sock, sk);
944 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
946 sk->sk_destruct = l2cap_sock_destruct;
947 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
949 sock_reset_flag(sk, SOCK_ZAPPED);
951 sk->sk_protocol = proto;
952 sk->sk_state = BT_OPEN;
954 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
956 bt_sock_link(&l2cap_sk_list, sk);
957 return sk;
/* socket(2) backend: validate the socket type, require CAP_NET_RAW
 * for user-space raw sockets, then allocate and initialise. */
960 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
961 int kern)
963 struct sock *sk;
965 BT_DBG("sock %p", sock);
967 sock->state = SS_UNCONNECTED;
969 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
970 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
971 return -ESOCKTNOSUPPORT;
973 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
974 return -EPERM;
976 sock->ops = &l2cap_sock_ops;
978 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
979 if (!sk)
980 return -ENOMEM;
982 l2cap_sock_init(sk, NULL);
983 return 0;
/* bind(2) backend: copy in the (possibly short) sockaddr_l2, reject
 * CID binds and privileged PSMs without CAP_NET_BIND_SERVICE, then
 * claim the (psm, bdaddr) pair under the socket-list write lock.
 * PSMs 0x0001 (SDP) and 0x0003 (RFCOMM) get SDP-level security. */
986 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
988 struct sock *sk = sock->sk;
989 struct sockaddr_l2 la;
990 int len, err = 0;
992 BT_DBG("sk %p", sk);
994 if (!addr || addr->sa_family != AF_BLUETOOTH)
995 return -EINVAL;
997 memset(&la, 0, sizeof(la));
998 len = min_t(unsigned int, sizeof(la), alen);
999 memcpy(&la, addr, len);
1001 if (la.l2_cid)
1002 return -EINVAL;
1004 lock_sock(sk);
1006 if (sk->sk_state != BT_OPEN) {
1007 err = -EBADFD;
1008 goto done;
/* PSMs below 0x1001 are reserved/well-known — privileged. */
1011 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
1012 !capable(CAP_NET_BIND_SERVICE)) {
1013 err = -EACCES;
1014 goto done;
1017 write_lock_bh(&l2cap_sk_list.lock);
1019 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1020 err = -EADDRINUSE;
1021 } else {
1022 /* Save source address */
1023 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1024 l2cap_pi(sk)->psm = la.l2_psm;
1025 l2cap_pi(sk)->sport = la.l2_psm;
1026 sk->sk_state = BT_BOUND;
1028 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1029 __le16_to_cpu(la.l2_psm) == 0x0003)
1030 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1033 write_unlock_bh(&l2cap_sk_list.lock);
1035 done:
1036 release_sock(sk);
1037 return err;
/* Establish (or join) the ACL link to the destination and attach this
 * channel to it. Picks an HCI auth type from socket type/PSM/security
 * level, creates the hci_conn, adds the l2cap_conn, and either marks
 * the channel connected (raw/dgram on an already-up link) or starts
 * the L2CAP connect handshake. Returns 0 or a negative errno. */
1040 static int l2cap_do_connect(struct sock *sk)
1042 bdaddr_t *src = &bt_sk(sk)->src;
1043 bdaddr_t *dst = &bt_sk(sk)->dst;
1044 struct l2cap_conn *conn;
1045 struct hci_conn *hcon;
1046 struct hci_dev *hdev;
1047 __u8 auth_type;
1048 int err;
1050 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1051 l2cap_pi(sk)->psm);
1053 hdev = hci_get_route(dst, src);
1054 if (!hdev)
1055 return -EHOSTUNREACH;
1057 hci_dev_lock_bh(hdev);
1059 err = -ENOMEM;
/* Raw sockets use dedicated bonding; PSM 0x0001 (SDP) no bonding;
 * everything else general bonding — mirrored in
 * l2cap_check_security(). */
1061 if (sk->sk_type == SOCK_RAW) {
1062 switch (l2cap_pi(sk)->sec_level) {
1063 case BT_SECURITY_HIGH:
1064 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1065 break;
1066 case BT_SECURITY_MEDIUM:
1067 auth_type = HCI_AT_DEDICATED_BONDING;
1068 break;
1069 default:
1070 auth_type = HCI_AT_NO_BONDING;
1071 break;
1073 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1074 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1075 auth_type = HCI_AT_NO_BONDING_MITM;
1076 else
1077 auth_type = HCI_AT_NO_BONDING;
1079 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1080 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1081 } else {
1082 switch (l2cap_pi(sk)->sec_level) {
1083 case BT_SECURITY_HIGH:
1084 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1085 break;
1086 case BT_SECURITY_MEDIUM:
1087 auth_type = HCI_AT_GENERAL_BONDING;
1088 break;
1089 default:
1090 auth_type = HCI_AT_NO_BONDING;
1091 break;
1095 hcon = hci_connect(hdev, ACL_LINK, dst,
1096 l2cap_pi(sk)->sec_level, auth_type);
1097 if (!hcon)
1098 goto done;
1100 conn = l2cap_conn_add(hcon, 0);
1101 if (!conn) {
1102 hci_conn_put(hcon);
1103 goto done;
1106 err = 0;
1108 /* Update source addr of the socket */
1109 bacpy(src, conn->src);
1111 l2cap_chan_add(conn, sk, NULL);
1113 sk->sk_state = BT_CONNECT;
1114 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link may already be up (shared ACL): finish immediately. */
1116 if (hcon->state == BT_CONNECTED) {
1117 if (sk->sk_type != SOCK_SEQPACKET &&
1118 sk->sk_type != SOCK_STREAM) {
1119 l2cap_sock_clear_timer(sk);
1120 sk->sk_state = BT_CONNECTED;
1121 } else
1122 l2cap_do_start(sk);
1125 done:
1126 hci_dev_unlock_bh(hdev);
1127 hci_dev_put(hdev);
1128 return err;
/* connect(2) backend: validate the address and channel mode, check
 * the socket state, record destination (bdaddr, psm), kick off
 * l2cap_do_connect and wait (honouring O_NONBLOCK) for BT_CONNECTED. */
1131 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1133 struct sock *sk = sock->sk;
1134 struct sockaddr_l2 la;
1135 int len, err = 0;
1137 BT_DBG("sk %p", sk);
1139 if (!addr || alen < sizeof(addr->sa_family) ||
1140 addr->sa_family != AF_BLUETOOTH)
1141 return -EINVAL;
1143 memset(&la, 0, sizeof(la));
1144 len = min_t(unsigned int, sizeof(la), alen);
1145 memcpy(&la, addr, len);
1147 if (la.l2_cid)
1148 return -EINVAL;
1150 lock_sock(sk);
/* Connection-oriented sockets must name a PSM. */
1152 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1153 && !la.l2_psm) {
1154 err = -EINVAL;
1155 goto done;
1158 switch (l2cap_pi(sk)->mode) {
1159 case L2CAP_MODE_BASIC:
1160 break;
1161 case L2CAP_MODE_ERTM:
1162 case L2CAP_MODE_STREAMING:
1163 if (!disable_ertm)
1164 break;
1165 /* fall through */
1166 default:
1167 err = -ENOTSUPP;
1168 goto done;
1171 switch (sk->sk_state) {
1172 case BT_CONNECT:
1173 case BT_CONNECT2:
1174 case BT_CONFIG:
1175 /* Already connecting */
1176 goto wait;
1178 case BT_CONNECTED:
1179 /* Already connected */
1180 err = -EISCONN;
1181 goto done;
1183 case BT_OPEN:
1184 case BT_BOUND:
1185 /* Can connect */
1186 break;
1188 default:
1189 err = -EBADFD;
1190 goto done;
1193 /* Set destination address and psm */
1194 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1195 l2cap_pi(sk)->psm = la.l2_psm;
1197 err = l2cap_do_connect(sk);
1198 if (err)
1199 goto done;
1201 wait:
1202 err = bt_sock_wait_state(sk, BT_CONNECTED,
1203 sock_sndtimeo(sk, flags & O_NONBLOCK));
1204 done:
1205 release_sock(sk);
1206 return err;
/* listen(2) backend: only bound SEQPACKET/STREAM sockets in a
 * supported mode may listen. A socket bound without a PSM gets one
 * auto-assigned from the odd dynamic range 0x1001..0x10ff. */
1209 static int l2cap_sock_listen(struct socket *sock, int backlog)
1211 struct sock *sk = sock->sk;
1212 int err = 0;
1214 BT_DBG("sk %p backlog %d", sk, backlog);
1216 lock_sock(sk);
1218 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1219 || sk->sk_state != BT_BOUND) {
1220 err = -EBADFD;
1221 goto done;
1224 switch (l2cap_pi(sk)->mode) {
1225 case L2CAP_MODE_BASIC:
1226 break;
1227 case L2CAP_MODE_ERTM:
1228 case L2CAP_MODE_STREAMING:
1229 if (!disable_ertm)
1230 break;
1231 /* fall through */
1232 default:
1233 err = -ENOTSUPP;
1234 goto done;
1237 if (!l2cap_pi(sk)->psm) {
1238 bdaddr_t *src = &bt_sk(sk)->src;
1239 u16 psm;
1241 err = -EINVAL;
1243 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (LSB of the low octet set). */
1245 for (psm = 0x1001; psm < 0x1100; psm += 2)
1246 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1247 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1248 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1249 err = 0;
1250 break;
1253 write_unlock_bh(&l2cap_sk_list.lock);
1255 if (err < 0)
1256 goto done;
1259 sk->sk_max_ack_backlog = backlog;
1260 sk->sk_ack_backlog = 0;
1261 sk->sk_state = BT_LISTEN;
1263 done:
1264 release_sock(sk);
1265 return err;
/* accept(2) backend: classic wake-one wait loop on the listening
 * socket's accept queue. Drops the socket lock while sleeping and
 * re-validates the listen state after reacquiring it. */
1268 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1270 DECLARE_WAITQUEUE(wait, current);
1271 struct sock *sk = sock->sk, *nsk;
1272 long timeo;
1273 int err = 0;
1275 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1277 if (sk->sk_state != BT_LISTEN) {
1278 err = -EBADFD;
1279 goto done;
1282 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1284 BT_DBG("sk %p timeo %ld", sk, timeo);
1286 /* Wait for an incoming connection. (wake-one). */
1287 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1288 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1289 set_current_state(TASK_INTERRUPTIBLE);
1290 if (!timeo) {
1291 err = -EAGAIN;
1292 break;
1295 release_sock(sk);
1296 timeo = schedule_timeout(timeo);
1297 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listening may have stopped while we slept. */
1299 if (sk->sk_state != BT_LISTEN) {
1300 err = -EBADFD;
1301 break;
1304 if (signal_pending(current)) {
1305 err = sock_intr_errno(timeo);
1306 break;
1309 set_current_state(TASK_RUNNING);
1310 remove_wait_queue(sk_sleep(sk), &wait);
1312 if (err)
1313 goto done;
1315 newsock->state = SS_CONNECTED;
1317 BT_DBG("new socket %p", nsk);
1319 done:
1320 release_sock(sk);
1321 return err;
1324 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1327 struct sock *sk = sock->sk;
1329 BT_DBG("sock %p, sk %p", sock, sk);
1331 addr->sa_family = AF_BLUETOOTH;
1332 *len = sizeof(struct sockaddr_l2);
1334 if (peer) {
1335 la->l2_psm = l2cap_pi(sk)->psm;
1336 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1337 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1338 } else {
1339 la->l2_psm = l2cap_pi(sk)->sport;
1340 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1341 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1344 return 0;
/* Wait until all outstanding ERTM I-frames have been acked by the peer
 * (or the connection goes away / a signal arrives).  Called with the
 * socket locked; temporarily releases the lock while sleeping. */
1347 static int __l2cap_wait_ack(struct sock *sk)
1349 DECLARE_WAITQUEUE(wait, current);
1350 int err = 0;
/* Poll in HZ/5 slices rather than sleeping indefinitely. */
1351 int timeo = HZ/5;
1353 add_wait_queue(sk_sleep(sk), &wait);
1354 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1355 set_current_state(TASK_INTERRUPTIBLE);
/* Re-arm the slice if the previous one fully elapsed. */
1357 if (!timeo)
1358 timeo = HZ/5;
1360 if (signal_pending(current)) {
1361 err = sock_intr_errno(timeo);
1362 break;
1365 release_sock(sk);
1366 timeo = schedule_timeout(timeo);
1367 lock_sock(sk);
1369 err = sock_error(sk);
1370 if (err)
1371 break;
1373 set_current_state(TASK_RUNNING);
1374 remove_wait_queue(sk_sleep(sk), &wait);
1375 return err;
1378 static void l2cap_monitor_timeout(unsigned long arg)
1380 struct sock *sk = (void *) arg;
1382 BT_DBG("sk %p", sk);
1384 bh_lock_sock(sk);
1385 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1386 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1387 bh_unlock_sock(sk);
1388 return;
1391 l2cap_pi(sk)->retry_count++;
1392 __mod_monitor_timer();
1394 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1395 bh_unlock_sock(sk);
1398 static void l2cap_retrans_timeout(unsigned long arg)
1400 struct sock *sk = (void *) arg;
1402 BT_DBG("sk %p", sk);
1404 bh_lock_sock(sk);
1405 l2cap_pi(sk)->retry_count = 1;
1406 __mod_monitor_timer();
1408 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1410 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1411 bh_unlock_sock(sk);
1414 static void l2cap_drop_acked_frames(struct sock *sk)
1416 struct sk_buff *skb;
1418 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1419 l2cap_pi(sk)->unacked_frames) {
1420 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1421 break;
1423 skb = skb_dequeue(TX_QUEUE(sk));
1424 kfree_skb(skb);
1426 l2cap_pi(sk)->unacked_frames--;
1429 if (!l2cap_pi(sk)->unacked_frames)
1430 del_timer(&l2cap_pi(sk)->retrans_timer);
1433 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1435 struct l2cap_pinfo *pi = l2cap_pi(sk);
1437 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1439 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: send every queued PDU immediately, stamping
 * the TxSeq and (optionally) the FCS into a clone, then freeing the
 * original — streaming mode never retransmits. */
1442 static void l2cap_streaming_send(struct sock *sk)
1444 struct sk_buff *skb, *tx_skb;
1445 struct l2cap_pinfo *pi = l2cap_pi(sk);
1446 u16 control, fcs;
1448 while ((skb = sk->sk_send_head)) {
1449 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Patch the sequence number into the control field of the clone. */
1451 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1452 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1453 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything but its own trailing 2 bytes. */
1455 if (pi->fcs == L2CAP_FCS_CRC16) {
1456 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1457 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1460 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM/streaming control field. */
1462 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1464 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1465 sk->sk_send_head = NULL;
1466 else
1467 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Original is no longer needed once the clone is on the wire. */
1469 skb = skb_dequeue(TX_QUEUE(sk));
1470 kfree_skb(skb);
/* Retransmit the single queued I-frame whose TxSeq equals tx_seq
 * (used for SREJ recovery).  Disconnects if the frame already hit the
 * peer's max-transmit limit. */
1474 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1476 struct l2cap_pinfo *pi = l2cap_pi(sk);
1477 struct sk_buff *skb, *tx_skb;
1478 u16 control, fcs;
1480 skb = skb_peek(TX_QUEUE(sk));
1481 if (!skb)
1482 return;
/* Linear search of the pending-ack queue for the requested TxSeq. */
1484 do {
1485 if (bt_cb(skb)->tx_seq == tx_seq)
1486 break;
/* Requested sequence number is not queued — nothing to resend. */
1488 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1489 return;
1491 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1493 if (pi->remote_max_tx &&
1494 bt_cb(skb)->retries == pi->remote_max_tx) {
1495 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1496 return;
1499 tx_skb = skb_clone(skb, GFP_ATOMIC);
1500 bt_cb(skb)->retries++;
1501 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Piggy-back the F bit if one is owed to the peer. */
1503 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1504 control |= L2CAP_CTRL_FINAL;
1505 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Refresh ReqSeq/TxSeq in the control field of the clone. */
1508 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1509 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1511 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS over the patched clone (excludes its own 2 bytes). */
1513 if (pi->fcs == L2CAP_FCS_CRC16) {
1514 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1515 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1518 l2cap_do_send(sk, tx_skb);
1521 static int l2cap_ertm_send(struct sock *sk)
1523 struct sk_buff *skb, *tx_skb;
1524 struct l2cap_pinfo *pi = l2cap_pi(sk);
1525 u16 control, fcs;
1526 int nsent = 0;
1528 if (sk->sk_state != BT_CONNECTED)
1529 return -ENOTCONN;
1531 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1533 if (pi->remote_max_tx &&
1534 bt_cb(skb)->retries == pi->remote_max_tx) {
1535 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1536 break;
1539 tx_skb = skb_clone(skb, GFP_ATOMIC);
1541 bt_cb(skb)->retries++;
1543 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1544 control &= L2CAP_CTRL_SAR;
1546 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1547 control |= L2CAP_CTRL_FINAL;
1548 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1550 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1551 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1552 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1555 if (pi->fcs == L2CAP_FCS_CRC16) {
1556 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1557 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1560 l2cap_do_send(sk, tx_skb);
1562 __mod_retrans_timer();
1564 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1565 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1567 pi->unacked_frames++;
1568 pi->frames_sent++;
1570 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1571 sk->sk_send_head = NULL;
1572 else
1573 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1575 nsent++;
1578 return nsent;
1581 static int l2cap_retransmit_frames(struct sock *sk)
1583 struct l2cap_pinfo *pi = l2cap_pi(sk);
1584 int ret;
1586 if (!skb_queue_empty(TX_QUEUE(sk)))
1587 sk->sk_send_head = TX_QUEUE(sk)->next;
1589 pi->next_tx_seq = pi->expected_ack_seq;
1590 ret = l2cap_ertm_send(sk);
1591 return ret;
1594 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1596 struct sock *sk = (struct sock *)pi;
1597 u16 control = 0;
1599 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1601 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1602 control |= L2CAP_SUPER_RCV_NOT_READY;
1603 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1604 l2cap_send_sframe(pi, control);
1605 return;
1608 if (l2cap_ertm_send(sk) > 0)
1609 return;
1611 control |= L2CAP_SUPER_RCV_READY;
1612 l2cap_send_sframe(pi, control);
1615 static void l2cap_send_srejtail(struct sock *sk)
1617 struct srej_list *tail;
1618 u16 control;
1620 control = L2CAP_SUPER_SELECT_REJECT;
1621 control |= L2CAP_CTRL_FINAL;
1623 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1624 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1626 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy up to len bytes of user iovec data into skb: the first `count`
 * bytes into skb itself, the remainder into MTU-sized fragment skbs
 * chained on skb's frag_list.  Returns bytes copied or -EFAULT. */
1629 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1631 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1632 struct sk_buff **frag;
1633 int err, sent = 0;
1635 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1636 return -EFAULT;
1638 sent += count;
1639 len -= count;
1641 /* Continuation fragments (no L2CAP header) */
1642 frag = &skb_shinfo(skb)->frag_list;
1643 while (len) {
/* Each fragment is capped at the ACL connection MTU. */
1644 count = min_t(unsigned int, conn->mtu, len);
1646 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1647 if (!*frag)
/* NOTE(review): allocation failure is reported as -EFAULT here,
 * not -ENOMEM / err — matches the historical behavior. */
1648 return -EFAULT;
1649 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1650 return -EFAULT;
1652 sent += count;
1653 len -= count;
1655 frag = &(*frag)->next;
1658 return sent;
1661 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1663 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1664 struct sk_buff *skb;
1665 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1666 struct l2cap_hdr *lh;
1668 BT_DBG("sk %p len %d", sk, (int)len);
1670 count = min_t(unsigned int, (conn->mtu - hlen), len);
1671 skb = bt_skb_send_alloc(sk, count + hlen,
1672 msg->msg_flags & MSG_DONTWAIT, &err);
1673 if (!skb)
1674 return ERR_PTR(-ENOMEM);
1676 /* Create L2CAP header */
1677 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1678 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1679 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1680 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1682 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1683 if (unlikely(err < 0)) {
1684 kfree_skb(skb);
1685 return ERR_PTR(err);
1687 return skb;
1690 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1692 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1693 struct sk_buff *skb;
1694 int err, count, hlen = L2CAP_HDR_SIZE;
1695 struct l2cap_hdr *lh;
1697 BT_DBG("sk %p len %d", sk, (int)len);
1699 count = min_t(unsigned int, (conn->mtu - hlen), len);
1700 skb = bt_skb_send_alloc(sk, count + hlen,
1701 msg->msg_flags & MSG_DONTWAIT, &err);
1702 if (!skb)
1703 return ERR_PTR(-ENOMEM);
1705 /* Create L2CAP header */
1706 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1707 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1708 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1710 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1711 if (unlikely(err < 0)) {
1712 kfree_skb(skb);
1713 return ERR_PTR(err);
1715 return skb;
1718 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1720 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1721 struct sk_buff *skb;
1722 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1723 struct l2cap_hdr *lh;
1725 BT_DBG("sk %p len %d", sk, (int)len);
1727 if (!conn)
1728 return ERR_PTR(-ENOTCONN);
1730 if (sdulen)
1731 hlen += 2;
1733 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1734 hlen += 2;
1736 count = min_t(unsigned int, (conn->mtu - hlen), len);
1737 skb = bt_skb_send_alloc(sk, count + hlen,
1738 msg->msg_flags & MSG_DONTWAIT, &err);
1739 if (!skb)
1740 return ERR_PTR(-ENOMEM);
1742 /* Create L2CAP header */
1743 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1744 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1745 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1746 put_unaligned_le16(control, skb_put(skb, 2));
1747 if (sdulen)
1748 put_unaligned_le16(sdulen, skb_put(skb, 2));
1750 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1751 if (unlikely(err < 0)) {
1752 kfree_skb(skb);
1753 return ERR_PTR(err);
1756 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1757 put_unaligned_le16(0, skb_put(skb, 2));
1759 bt_cb(skb)->retries = 0;
1760 return skb;
/* Segment an SDU larger than the peer's MPS into a START / CONTINUE* /
 * END sequence of I-frames, queue them on the tx queue, and return the
 * total payload size queued (or a negative errno). */
1763 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1765 struct l2cap_pinfo *pi = l2cap_pi(sk);
1766 struct sk_buff *skb;
1767 struct sk_buff_head sar_queue;
1768 u16 control;
1769 size_t size = 0;
/* Build the whole sequence on a private queue first so a mid-way
 * failure leaves the socket's tx queue untouched. */
1771 skb_queue_head_init(&sar_queue);
1772 control = L2CAP_SDU_START;
/* The START frame also carries the total SDU length (len). */
1773 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1774 if (IS_ERR(skb))
1775 return PTR_ERR(skb);
1777 __skb_queue_tail(&sar_queue, skb);
1778 len -= pi->remote_mps;
1779 size += pi->remote_mps;
1781 while (len > 0) {
1782 size_t buflen;
1784 if (len > pi->remote_mps) {
1785 control = L2CAP_SDU_CONTINUE;
1786 buflen = pi->remote_mps;
1787 } else {
1788 control = L2CAP_SDU_END;
1789 buflen = len;
1792 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1793 if (IS_ERR(skb)) {
1794 skb_queue_purge(&sar_queue);
1795 return PTR_ERR(skb);
1798 __skb_queue_tail(&sar_queue, skb);
1799 len -= buflen;
1800 size += buflen;
/* Commit the complete sequence to the socket's tx queue. */
1802 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1803 if (sk->sk_send_head == NULL)
1804 sk->sk_send_head = sar_queue.next;
1806 return size;
1809 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1811 struct sock *sk = sock->sk;
1812 struct l2cap_pinfo *pi = l2cap_pi(sk);
1813 struct sk_buff *skb;
1814 u16 control;
1815 int err;
1817 BT_DBG("sock %p, sk %p", sock, sk);
1819 err = sock_error(sk);
1820 if (err)
1821 return err;
1823 if (msg->msg_flags & MSG_OOB)
1824 return -EOPNOTSUPP;
1826 lock_sock(sk);
1828 if (sk->sk_state != BT_CONNECTED) {
1829 err = -ENOTCONN;
1830 goto done;
1833 /* Connectionless channel */
1834 if (sk->sk_type == SOCK_DGRAM) {
1835 skb = l2cap_create_connless_pdu(sk, msg, len);
1836 if (IS_ERR(skb)) {
1837 err = PTR_ERR(skb);
1838 } else {
1839 l2cap_do_send(sk, skb);
1840 err = len;
1842 goto done;
1845 switch (pi->mode) {
1846 case L2CAP_MODE_BASIC:
1847 /* Check outgoing MTU */
1848 if (len > pi->omtu) {
1849 err = -EMSGSIZE;
1850 goto done;
1853 /* Create a basic PDU */
1854 skb = l2cap_create_basic_pdu(sk, msg, len);
1855 if (IS_ERR(skb)) {
1856 err = PTR_ERR(skb);
1857 goto done;
1860 l2cap_do_send(sk, skb);
1861 err = len;
1862 break;
1864 case L2CAP_MODE_ERTM:
1865 case L2CAP_MODE_STREAMING:
1866 /* Entire SDU fits into one PDU */
1867 if (len <= pi->remote_mps) {
1868 control = L2CAP_SDU_UNSEGMENTED;
1869 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1870 if (IS_ERR(skb)) {
1871 err = PTR_ERR(skb);
1872 goto done;
1874 __skb_queue_tail(TX_QUEUE(sk), skb);
1876 if (sk->sk_send_head == NULL)
1877 sk->sk_send_head = skb;
1879 } else {
1880 /* Segment SDU into multiples PDUs */
1881 err = l2cap_sar_segment_sdu(sk, msg, len);
1882 if (err < 0)
1883 goto done;
1886 if (pi->mode == L2CAP_MODE_STREAMING) {
1887 l2cap_streaming_send(sk);
1888 } else {
1889 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1890 pi->conn_state && L2CAP_CONN_WAIT_F) {
1891 err = len;
1892 break;
1894 err = l2cap_ertm_send(sk);
1897 if (err >= 0)
1898 err = len;
1899 break;
1901 default:
1902 BT_DBG("bad state %1.1x", pi->mode);
1903 err = -EBADFD;
1906 done:
1907 release_sock(sk);
1908 return err;
/* recvmsg() for L2CAP sockets.  A read on a deferred-setup socket in
 * BT_CONNECT2 completes the connection (sends the connect response and
 * kicks off configuration) instead of returning data; otherwise the
 * generic bt_sock_recvmsg() does the work. */
1911 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1913 struct sock *sk = sock->sk;
1915 lock_sock(sk);
1917 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1918 struct l2cap_conn_rsp rsp;
1919 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1920 u8 buf[128];
1922 sk->sk_state = BT_CONFIG;
/* Accept the pending connection we previously deferred. */
1924 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1925 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1926 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1927 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1928 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1929 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Configuration request already on the wire — nothing more to do. */
1931 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1932 release_sock(sk);
1933 return 0;
1936 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1937 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1938 l2cap_build_conf_req(sk, buf), buf);
1939 l2cap_pi(sk)->num_conf_req++;
1941 release_sock(sk);
1942 return 0;
1945 release_sock(sk);
1947 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu/mode/fcs/
 * tx window) and L2CAP_LM (link-mode security flags). */
1950 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1952 struct sock *sk = sock->sk;
1953 struct l2cap_options opts;
1954 int len, err = 0;
1955 u32 opt;
1957 BT_DBG("sk %p", sk);
1959 lock_sock(sk);
1961 switch (optname) {
1962 case L2CAP_OPTIONS:
/* Seed with current values so a short copy keeps the rest intact. */
1963 opts.imtu = l2cap_pi(sk)->imtu;
1964 opts.omtu = l2cap_pi(sk)->omtu;
1965 opts.flush_to = l2cap_pi(sk)->flush_to;
1966 opts.mode = l2cap_pi(sk)->mode;
1967 opts.fcs = l2cap_pi(sk)->fcs;
1968 opts.max_tx = l2cap_pi(sk)->max_tx;
1969 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1971 len = min_t(unsigned int, sizeof(opts), optlen);
1972 if (copy_from_user((char *) &opts, optval, len)) {
1973 err = -EFAULT;
1974 break;
1977 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1978 err = -EINVAL;
1979 break;
1982 l2cap_pi(sk)->mode = opts.mode;
1983 switch (l2cap_pi(sk)->mode) {
1984 case L2CAP_MODE_BASIC:
1985 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1986 break;
1987 case L2CAP_MODE_ERTM:
1988 case L2CAP_MODE_STREAMING:
1989 if (!disable_ertm)
1990 break;
1991 /* fall through */
1992 default:
1993 err = -EINVAL;
1994 break;
1997 l2cap_pi(sk)->imtu = opts.imtu;
1998 l2cap_pi(sk)->omtu = opts.omtu;
1999 l2cap_pi(sk)->fcs = opts.fcs;
2000 l2cap_pi(sk)->max_tx = opts.max_tx;
2001 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
2002 break;
2004 case L2CAP_LM:
2005 if (get_user(opt, (u32 __user *) optval)) {
2006 err = -EFAULT;
2007 break;
/* Flags escalate: SECURE implies the strongest level wins. */
2010 if (opt & L2CAP_LM_AUTH)
2011 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2012 if (opt & L2CAP_LM_ENCRYPT)
2013 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2014 if (opt & L2CAP_LM_SECURE)
2015 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2017 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2018 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2019 break;
2021 default:
2022 err = -ENOPROTOOPT;
2023 break;
2026 release_sock(sk);
2027 return err;
2030 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2032 struct sock *sk = sock->sk;
2033 struct bt_security sec;
2034 int len, err = 0;
2035 u32 opt;
2037 BT_DBG("sk %p", sk);
2039 if (level == SOL_L2CAP)
2040 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2042 if (level != SOL_BLUETOOTH)
2043 return -ENOPROTOOPT;
2045 lock_sock(sk);
2047 switch (optname) {
2048 case BT_SECURITY:
2049 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2050 && sk->sk_type != SOCK_RAW) {
2051 err = -EINVAL;
2052 break;
2055 sec.level = BT_SECURITY_LOW;
2057 len = min_t(unsigned int, sizeof(sec), optlen);
2058 if (copy_from_user((char *) &sec, optval, len)) {
2059 err = -EFAULT;
2060 break;
2063 if (sec.level < BT_SECURITY_LOW ||
2064 sec.level > BT_SECURITY_HIGH) {
2065 err = -EINVAL;
2066 break;
2069 l2cap_pi(sk)->sec_level = sec.level;
2070 break;
2072 case BT_DEFER_SETUP:
2073 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2074 err = -EINVAL;
2075 break;
2078 if (get_user(opt, (u32 __user *) optval)) {
2079 err = -EFAULT;
2080 break;
2083 bt_sk(sk)->defer_setup = opt;
2084 break;
2086 default:
2087 err = -ENOPROTOOPT;
2088 break;
2091 release_sock(sk);
2092 return err;
/* Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM and
 * L2CAP_CONNINFO (HCI handle + remote device class). */
2095 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2097 struct sock *sk = sock->sk;
2098 struct l2cap_options opts;
2099 struct l2cap_conninfo cinfo;
2100 int len, err = 0;
2101 u32 opt;
2103 BT_DBG("sk %p", sk);
2105 if (get_user(len, optlen))
2106 return -EFAULT;
2108 lock_sock(sk);
2110 switch (optname) {
2111 case L2CAP_OPTIONS:
2112 opts.imtu = l2cap_pi(sk)->imtu;
2113 opts.omtu = l2cap_pi(sk)->omtu;
2114 opts.flush_to = l2cap_pi(sk)->flush_to;
2115 opts.mode = l2cap_pi(sk)->mode;
2116 opts.fcs = l2cap_pi(sk)->fcs;
2117 opts.max_tx = l2cap_pi(sk)->max_tx;
2118 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2120 len = min_t(unsigned int, len, sizeof(opts));
2121 if (copy_to_user(optval, (char *) &opts, len))
2122 err = -EFAULT;
2124 break;
2126 case L2CAP_LM:
/* Map the stored security level back onto legacy LM flags. */
2127 switch (l2cap_pi(sk)->sec_level) {
2128 case BT_SECURITY_LOW:
2129 opt = L2CAP_LM_AUTH;
2130 break;
2131 case BT_SECURITY_MEDIUM:
2132 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2133 break;
2134 case BT_SECURITY_HIGH:
2135 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2136 L2CAP_LM_SECURE;
2137 break;
2138 default:
2139 opt = 0;
2140 break;
2143 if (l2cap_pi(sk)->role_switch)
2144 opt |= L2CAP_LM_MASTER;
2146 if (l2cap_pi(sk)->force_reliable)
2147 opt |= L2CAP_LM_RELIABLE;
2149 if (put_user(opt, (u32 __user *) optval))
2150 err = -EFAULT;
2151 break;
2153 case L2CAP_CONNINFO:
/* Valid while connected, or in CONNECT2 with deferred setup. */
2154 if (sk->sk_state != BT_CONNECTED &&
2155 !(sk->sk_state == BT_CONNECT2 &&
2156 bt_sk(sk)->defer_setup)) {
2157 err = -ENOTCONN;
2158 break;
2161 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2162 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2164 len = min_t(unsigned int, len, sizeof(cinfo));
2165 if (copy_to_user(optval, (char *) &cinfo, len))
2166 err = -EFAULT;
2168 break;
2170 default:
2171 err = -ENOPROTOOPT;
2172 break;
2175 release_sock(sk);
2176 return err;
2179 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2181 struct sock *sk = sock->sk;
2182 struct bt_security sec;
2183 int len, err = 0;
2185 BT_DBG("sk %p", sk);
2187 if (level == SOL_L2CAP)
2188 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2190 if (level != SOL_BLUETOOTH)
2191 return -ENOPROTOOPT;
2193 if (get_user(len, optlen))
2194 return -EFAULT;
2196 lock_sock(sk);
2198 switch (optname) {
2199 case BT_SECURITY:
2200 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2201 && sk->sk_type != SOCK_RAW) {
2202 err = -EINVAL;
2203 break;
2206 sec.level = l2cap_pi(sk)->sec_level;
2208 len = min_t(unsigned int, len, sizeof(sec));
2209 if (copy_to_user(optval, (char *) &sec, len))
2210 err = -EFAULT;
2212 break;
2214 case BT_DEFER_SETUP:
2215 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2216 err = -EINVAL;
2217 break;
2220 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2221 err = -EFAULT;
2223 break;
2225 default:
2226 err = -ENOPROTOOPT;
2227 break;
2230 release_sock(sk);
2231 return err;
2234 static int l2cap_sock_shutdown(struct socket *sock, int how)
2236 struct sock *sk = sock->sk;
2237 int err = 0;
2239 BT_DBG("sock %p, sk %p", sock, sk);
2241 if (!sk)
2242 return 0;
2244 lock_sock(sk);
2245 if (!sk->sk_shutdown) {
2246 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2247 err = __l2cap_wait_ack(sk);
2249 sk->sk_shutdown = SHUTDOWN_MASK;
2250 l2cap_sock_clear_timer(sk);
2251 __l2cap_sock_close(sk, 0);
2253 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2254 err = bt_sock_wait_state(sk, BT_CLOSED,
2255 sk->sk_lingertime);
2258 if (!err && sk->sk_err)
2259 err = -sk->sk_err;
2261 release_sock(sk);
2262 return err;
2265 static int l2cap_sock_release(struct socket *sock)
2267 struct sock *sk = sock->sk;
2268 int err;
2270 BT_DBG("sock %p, sk %p", sock, sk);
2272 if (!sk)
2273 return 0;
2275 err = l2cap_sock_shutdown(sock, 2);
2277 sock_orphan(sk);
2278 l2cap_sock_kill(sk);
2279 return err;
2282 static void l2cap_chan_ready(struct sock *sk)
2284 struct sock *parent = bt_sk(sk)->parent;
2286 BT_DBG("sk %p, parent %p", sk, parent);
2288 l2cap_pi(sk)->conf_state = 0;
2289 l2cap_sock_clear_timer(sk);
2291 if (!parent) {
2292 /* Outgoing channel.
2293 * Wake up socket sleeping on connect.
2295 sk->sk_state = BT_CONNECTED;
2296 sk->sk_state_change(sk);
2297 } else {
2298 /* Incoming channel.
2299 * Wake up socket sleeping on accept.
2301 parent->sk_data_ready(parent, 0);
2305 /* Copy frame to all raw sockets on that connection */
2306 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2308 struct l2cap_chan_list *l = &conn->chan_list;
2309 struct sk_buff *nskb;
2310 struct sock *sk;
2312 BT_DBG("conn %p", conn);
2314 read_lock(&l->lock);
2315 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2316 if (sk->sk_type != SOCK_RAW)
2317 continue;
2319 /* Don't send frame to the socket it came from */
2320 if (skb->sk == sk)
2321 continue;
2322 nskb = skb_clone(skb, GFP_ATOMIC);
2323 if (!nskb)
2324 continue;
2326 if (sock_queue_rcv_skb(sk, nskb))
2327 kfree_skb(nskb);
2329 read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header + command header
 * + dlen bytes of payload, fragmented to the ACL MTU via frag_list.
 * Returns NULL on allocation failure. */
2333 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2334 u8 code, u8 ident, u16 dlen, void *data)
2336 struct sk_buff *skb, **frag;
2337 struct l2cap_cmd_hdr *cmd;
2338 struct l2cap_hdr *lh;
2339 int len, count;
2341 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2342 conn, code, ident, dlen);
2344 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2345 count = min_t(unsigned int, conn->mtu, len);
2347 skb = bt_skb_alloc(count, GFP_ATOMIC);
2348 if (!skb)
2349 return NULL;
2351 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2352 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2353 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2355 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2356 cmd->code = code;
2357 cmd->ident = ident;
2358 cmd->len = cpu_to_le16(dlen);
/* First fragment carries as much payload as fits after the headers. */
2360 if (dlen) {
2361 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2362 memcpy(skb_put(skb, count), data, count);
2363 data += count;
2366 len -= skb->len;
2368 /* Continuation fragments (no L2CAP header) */
2369 frag = &skb_shinfo(skb)->frag_list;
2370 while (len) {
2371 count = min_t(unsigned int, conn->mtu, len);
2373 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2374 if (!*frag)
2375 goto fail;
2377 memcpy(skb_put(*frag, count), data, count);
2379 len -= count;
2380 data += count;
2382 frag = &(*frag)->next;
2385 return skb;
2387 fail:
/* kfree_skb() also frees any fragments already chained on. */
2388 kfree_skb(skb);
2389 return NULL;
2392 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2394 struct l2cap_conf_opt *opt = *ptr;
2395 int len;
2397 len = L2CAP_CONF_OPT_SIZE + opt->len;
2398 *ptr += len;
2400 *type = opt->type;
2401 *olen = opt->len;
2403 switch (opt->len) {
2404 case 1:
2405 *val = *((u8 *) opt->val);
2406 break;
2408 case 2:
2409 *val = __le16_to_cpu(*((__le16 *) opt->val));
2410 break;
2412 case 4:
2413 *val = __le32_to_cpu(*((__le32 *) opt->val));
2414 break;
2416 default:
2417 *val = (unsigned long) opt->val;
2418 break;
2421 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2422 return len;
2425 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2427 struct l2cap_conf_opt *opt = *ptr;
2429 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2431 opt->type = type;
2432 opt->len = len;
2434 switch (len) {
2435 case 1:
2436 *((u8 *) opt->val) = val;
2437 break;
2439 case 2:
2440 *((__le16 *) opt->val) = cpu_to_le16(val);
2441 break;
2443 case 4:
2444 *((__le32 *) opt->val) = cpu_to_le32(val);
2445 break;
2447 default:
2448 memcpy(opt->val, (void *) val, len);
2449 break;
2452 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer: flush a pending acknowledgement for received I-frames.
 * Runs in timer (softirq) context, hence bh_lock_sock(). */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2464 static inline void l2cap_ertm_init(struct sock *sk)
2466 l2cap_pi(sk)->expected_ack_seq = 0;
2467 l2cap_pi(sk)->unacked_frames = 0;
2468 l2cap_pi(sk)->buffer_seq = 0;
2469 l2cap_pi(sk)->num_acked = 0;
2470 l2cap_pi(sk)->frames_sent = 0;
2472 setup_timer(&l2cap_pi(sk)->retrans_timer,
2473 l2cap_retrans_timeout, (unsigned long) sk);
2474 setup_timer(&l2cap_pi(sk)->monitor_timer,
2475 l2cap_monitor_timeout, (unsigned long) sk);
2476 setup_timer(&l2cap_pi(sk)->ack_timer,
2477 l2cap_ack_timeout, (unsigned long) sk);
2479 __skb_queue_head_init(SREJ_QUEUE(sk));
2480 __skb_queue_head_init(BUSY_QUEUE(sk));
2482 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2484 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2487 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2489 switch (mode) {
2490 case L2CAP_MODE_STREAMING:
2491 case L2CAP_MODE_ERTM:
2492 if (l2cap_mode_supported(mode, remote_feat_mask))
2493 return mode;
2494 /* fall through */
2495 default:
2496 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into `data`: choose the
 * channel mode (falling back via l2cap_select_mode() on the first
 * request), then emit MTU/RFC/FCS options accordingly.  Returns the
 * number of bytes written. */
2500 static int l2cap_build_conf_req(struct sock *sk, void *data)
2502 struct l2cap_pinfo *pi = l2cap_pi(sk);
2503 struct l2cap_conf_req *req = data;
2504 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2505 void *ptr = req->data;
2507 BT_DBG("sk %p", sk);
/* Mode selection only happens on the very first config exchange. */
2509 if (pi->num_conf_req || pi->num_conf_rsp)
2510 goto done;
2512 switch (pi->mode) {
2513 case L2CAP_MODE_STREAMING:
2514 case L2CAP_MODE_ERTM:
/* STATE2_DEVICE means the mode was pinned via setsockopt. */
2515 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2516 break;
2518 /* fall through */
2519 default:
2520 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2521 break;
2524 done:
2525 switch (pi->mode) {
2526 case L2CAP_MODE_BASIC:
/* Only advertise MTU when it differs from the spec default. */
2527 if (pi->imtu != L2CAP_DEFAULT_MTU)
2528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2530 rfc.mode = L2CAP_MODE_BASIC;
2531 rfc.txwin_size = 0;
2532 rfc.max_transmit = 0;
2533 rfc.retrans_timeout = 0;
2534 rfc.monitor_timeout = 0;
2535 rfc.max_pdu_size = 0;
2537 break;
2539 case L2CAP_MODE_ERTM:
2540 rfc.mode = L2CAP_MODE_ERTM;
2541 rfc.txwin_size = pi->tx_win;
2542 rfc.max_transmit = pi->max_tx;
2543 rfc.retrans_timeout = 0;
2544 rfc.monitor_timeout = 0;
2545 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so a full PDU (plus overhead) fits the ACL MTU. */
2546 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2547 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2549 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2550 break;
/* Offer "no FCS" when we prefer it or the peer asked for it. */
2552 if (pi->fcs == L2CAP_FCS_NONE ||
2553 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2554 pi->fcs = L2CAP_FCS_NONE;
2555 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2557 break;
2559 case L2CAP_MODE_STREAMING:
2560 rfc.mode = L2CAP_MODE_STREAMING;
2561 rfc.txwin_size = 0;
2562 rfc.max_transmit = 0;
2563 rfc.retrans_timeout = 0;
2564 rfc.monitor_timeout = 0;
2565 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2566 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2567 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2569 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2570 break;
2572 if (pi->fcs == L2CAP_FCS_NONE ||
2573 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2574 pi->fcs = L2CAP_FCS_NONE;
2575 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2577 break;
2580 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2581 (unsigned long) &rfc);
2583 /* FIXME: Need actual value of the flush timeout */
2584 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2585 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2587 req->dcid = cpu_to_le16(pi->dcid);
2588 req->flags = cpu_to_le16(0);
2590 return ptr - data;
2593 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2595 struct l2cap_pinfo *pi = l2cap_pi(sk);
2596 struct l2cap_conf_rsp *rsp = data;
2597 void *ptr = rsp->data;
2598 void *req = pi->conf_req;
2599 int len = pi->conf_len;
2600 int type, hint, olen;
2601 unsigned long val;
2602 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2603 u16 mtu = L2CAP_DEFAULT_MTU;
2604 u16 result = L2CAP_CONF_SUCCESS;
2606 BT_DBG("sk %p", sk);
2608 while (len >= L2CAP_CONF_OPT_SIZE) {
2609 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2611 hint = type & L2CAP_CONF_HINT;
2612 type &= L2CAP_CONF_MASK;
2614 switch (type) {
2615 case L2CAP_CONF_MTU:
2616 mtu = val;
2617 break;
2619 case L2CAP_CONF_FLUSH_TO:
2620 pi->flush_to = val;
2621 break;
2623 case L2CAP_CONF_QOS:
2624 break;
2626 case L2CAP_CONF_RFC:
2627 if (olen == sizeof(rfc))
2628 memcpy(&rfc, (void *) val, olen);
2629 break;
2631 case L2CAP_CONF_FCS:
2632 if (val == L2CAP_FCS_NONE)
2633 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2635 break;
2637 default:
2638 if (hint)
2639 break;
2641 result = L2CAP_CONF_UNKNOWN;
2642 *((u8 *) ptr++) = type;
2643 break;
2647 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2648 goto done;
2650 switch (pi->mode) {
2651 case L2CAP_MODE_STREAMING:
2652 case L2CAP_MODE_ERTM:
2653 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2654 pi->mode = l2cap_select_mode(rfc.mode,
2655 pi->conn->feat_mask);
2656 break;
2659 if (pi->mode != rfc.mode)
2660 return -ECONNREFUSED;
2662 break;
2665 done:
2666 if (pi->mode != rfc.mode) {
2667 result = L2CAP_CONF_UNACCEPT;
2668 rfc.mode = pi->mode;
2670 if (pi->num_conf_rsp == 1)
2671 return -ECONNREFUSED;
2673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2674 sizeof(rfc), (unsigned long) &rfc);
2678 if (result == L2CAP_CONF_SUCCESS) {
2679 /* Configure output options and let the other side know
2680 * which ones we don't like. */
2682 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2683 result = L2CAP_CONF_UNACCEPT;
2684 else {
2685 pi->omtu = mtu;
2686 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2688 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2690 switch (rfc.mode) {
2691 case L2CAP_MODE_BASIC:
2692 pi->fcs = L2CAP_FCS_NONE;
2693 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2694 break;
2696 case L2CAP_MODE_ERTM:
2697 pi->remote_tx_win = rfc.txwin_size;
2698 pi->remote_max_tx = rfc.max_transmit;
2699 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2700 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2702 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2704 rfc.retrans_timeout =
2705 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2706 rfc.monitor_timeout =
2707 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2709 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2711 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2712 sizeof(rfc), (unsigned long) &rfc);
2714 break;
2716 case L2CAP_MODE_STREAMING:
2717 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2718 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2720 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2722 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2724 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2725 sizeof(rfc), (unsigned long) &rfc);
2727 break;
2729 default:
2730 result = L2CAP_CONF_UNACCEPT;
2732 memset(&rfc, 0, sizeof(rfc));
2733 rfc.mode = pi->mode;
2736 if (result == L2CAP_CONF_SUCCESS)
2737 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2739 rsp->scid = cpu_to_le16(pi->dcid);
2740 rsp->result = cpu_to_le16(result);
2741 rsp->flags = cpu_to_le16(0x0000);
2743 return ptr - data;
2746 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2748 struct l2cap_pinfo *pi = l2cap_pi(sk);
2749 struct l2cap_conf_req *req = data;
2750 void *ptr = req->data;
2751 int type, olen;
2752 unsigned long val;
2753 struct l2cap_conf_rfc rfc;
2755 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2757 while (len >= L2CAP_CONF_OPT_SIZE) {
2758 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2760 switch (type) {
2761 case L2CAP_CONF_MTU:
2762 if (val < L2CAP_DEFAULT_MIN_MTU) {
2763 *result = L2CAP_CONF_UNACCEPT;
2764 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2765 } else
2766 pi->omtu = val;
2767 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2768 break;
2770 case L2CAP_CONF_FLUSH_TO:
2771 pi->flush_to = val;
2772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2773 2, pi->flush_to);
2774 break;
2776 case L2CAP_CONF_RFC:
2777 if (olen == sizeof(rfc))
2778 memcpy(&rfc, (void *)val, olen);
2780 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2781 rfc.mode != pi->mode)
2782 return -ECONNREFUSED;
2784 pi->fcs = 0;
2786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2787 sizeof(rfc), (unsigned long) &rfc);
2788 break;
2792 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2793 return -ECONNREFUSED;
2795 pi->mode = rfc.mode;
2797 if (*result == L2CAP_CONF_SUCCESS) {
2798 switch (rfc.mode) {
2799 case L2CAP_MODE_ERTM:
2800 pi->remote_tx_win = rfc.txwin_size;
2801 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2802 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2803 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2804 break;
2805 case L2CAP_MODE_STREAMING:
2806 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2810 req->dcid = cpu_to_le16(pi->dcid);
2811 req->flags = cpu_to_le16(0x0000);
2813 return ptr - data;
2816 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2818 struct l2cap_conf_rsp *rsp = data;
2819 void *ptr = rsp->data;
2821 BT_DBG("sk %p", sk);
2823 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2824 rsp->result = cpu_to_le16(result);
2825 rsp->flags = cpu_to_le16(flags);
2827 return ptr - data;
2830 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2832 struct l2cap_pinfo *pi = l2cap_pi(sk);
2833 int type, olen;
2834 unsigned long val;
2835 struct l2cap_conf_rfc rfc;
2837 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2839 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2840 return;
2842 while (len >= L2CAP_CONF_OPT_SIZE) {
2843 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2845 switch (type) {
2846 case L2CAP_CONF_RFC:
2847 if (olen == sizeof(rfc))
2848 memcpy(&rfc, (void *)val, olen);
2849 goto done;
2853 done:
2854 switch (rfc.mode) {
2855 case L2CAP_MODE_ERTM:
2856 pi->remote_tx_win = rfc.txwin_size;
2857 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2858 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2859 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2860 break;
2861 case L2CAP_MODE_STREAMING:
2862 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2866 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2868 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2870 if (rej->reason != 0x0000)
2871 return 0;
2873 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2874 cmd->ident == conn->info_ident) {
2875 del_timer(&conn->info_timer);
2877 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2878 conn->info_ident = 0;
2880 l2cap_conn_start(conn);
2883 return 0;
/* Handle an incoming L2CAP Connection Request.
 *
 * Looks up a listening socket for the requested PSM, allocates a child
 * socket, and answers with a Connection Response whose result/status
 * depend on security state, defer-setup, and whether the feature-mask
 * exchange has completed. Always returns 0; failures are reported to the
 * peer via the response's result field.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *uninitialized_var(sk);
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* NOTE(review): "response:" below does bh_unlock_sock(parent), but
	 * no matching bh_lock_sock(parent) is visible in this function —
	 * presumably it is taken inside l2cap_get_sock_by_psm(); verify. */

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* remembered for disconnect reporting */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	/* The peer's source CID becomes our destination CID. */
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	/* Our freshly assigned CID, reported back to the peer. */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept() before we finish. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange still pending: answer PEND and
		 * trigger the Information Request below. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, kick off configuration right away. sk is
	 * only touched here when result == SUCCESS, i.e. it was allocated
	 * above (hence the uninitialized_var() annotation). */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
/* Handle an incoming Connection Response to a request we sent.
 * Returns 0, or -EFAULT when no matching channel can be found.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Match by our CID when present, otherwise by command ident (a
	 * PEND response may arrive before a CID is echoed back).
	 * NOTE(review): the lookups presumably return the socket locked —
	 * it is unlocked below; verify in l2cap_get_chan_by_scid(). */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Don't send a second Configure Request. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Refused or security block: tear the channel down. */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming Configure Request for one of our channels.
 *
 * Requests may be fragmented (continuation flag 0x0001): fragments are
 * accumulated in pi->conf_req and only parsed once complete. Returns 0,
 * or -ENOENT when the CID matches no channel.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* reason 0x0002: invalid CID in request */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Irreconcilable options: abort the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions are configured: enable CRC16 FCS unless
		 * both sides agreed on none, then bring the channel up. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer but haven't sent our own request yet. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3158 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3160 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3161 u16 scid, flags, result;
3162 struct sock *sk;
3163 int len = cmd->len - sizeof(*rsp);
3165 scid = __le16_to_cpu(rsp->scid);
3166 flags = __le16_to_cpu(rsp->flags);
3167 result = __le16_to_cpu(rsp->result);
3169 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3170 scid, flags, result);
3172 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3173 if (!sk)
3174 return 0;
3176 switch (result) {
3177 case L2CAP_CONF_SUCCESS:
3178 l2cap_conf_rfc_get(sk, rsp->data, len);
3179 break;
3181 case L2CAP_CONF_UNACCEPT:
3182 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3183 char req[64];
3185 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3186 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3187 goto done;
3190 /* throw out any old stored conf requests */
3191 result = L2CAP_CONF_SUCCESS;
3192 len = l2cap_parse_conf_rsp(sk, rsp->data,
3193 len, req, &result);
3194 if (len < 0) {
3195 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3196 goto done;
3199 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3200 L2CAP_CONF_REQ, len, req);
3201 l2cap_pi(sk)->num_conf_req++;
3202 if (result != L2CAP_CONF_SUCCESS)
3203 goto done;
3204 break;
3207 default:
3208 sk->sk_err = ECONNRESET;
3209 l2cap_sock_set_timer(sk, HZ * 5);
3210 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3211 goto done;
3214 if (flags & 0x01)
3215 goto done;
3217 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3219 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3220 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3221 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3222 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3224 sk->sk_state = BT_CONNECTED;
3225 l2cap_pi(sk)->next_tx_seq = 0;
3226 l2cap_pi(sk)->expected_tx_seq = 0;
3227 __skb_queue_head_init(TX_QUEUE(sk));
3228 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3229 l2cap_ertm_init(sk);
3231 l2cap_chan_ready(sk);
3234 done:
3235 bh_unlock_sock(sk);
3236 return 0;
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down. Always returns 0.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's destination CID is our source CID.
	 * NOTE(review): the lookup presumably returns the socket locked —
	 * it is unlocked below; verify in l2cap_get_chan_by_scid(). */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming Disconnection Response to a request we sent: the
 * peer has acknowledged, so finish tearing down our side. Always
 * returns 0.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* NOTE(review): the lookup presumably returns the socket locked —
	 * it is unlocked below; verify in l2cap_get_chan_by_scid(). */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* err 0: clean, peer-acknowledged disconnect. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3290 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3292 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3293 u16 type;
3295 type = __le16_to_cpu(req->type);
3297 BT_DBG("type 0x%4.4x", type);
3299 if (type == L2CAP_IT_FEAT_MASK) {
3300 u8 buf[8];
3301 u32 feat_mask = l2cap_feat_mask;
3302 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3303 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3304 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3305 if (!disable_ertm)
3306 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3307 | L2CAP_FEAT_FCS;
3308 put_unaligned_le32(feat_mask, rsp->data);
3309 l2cap_send_cmd(conn, cmd->ident,
3310 L2CAP_INFO_RSP, sizeof(buf), buf);
3311 } else if (type == L2CAP_IT_FIXED_CHAN) {
3312 u8 buf[12];
3313 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3314 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3315 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3316 memcpy(buf + 4, l2cap_fixed_chan, 8);
3317 l2cap_send_cmd(conn, cmd->ident,
3318 L2CAP_INFO_RSP, sizeof(buf), buf);
3319 } else {
3320 struct l2cap_info_rsp rsp;
3321 rsp.type = cpu_to_le16(type);
3322 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3323 l2cap_send_cmd(conn, cmd->ident,
3324 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3327 return 0;
/* Handle an incoming Information Response.
 *
 * A feature-mask response that advertises fixed channels triggers a
 * follow-up fixed-channel request; otherwise the info exchange is marked
 * done and pending channels are started. Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask which ones. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Exchange complete after the fixed-channel round-trip. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Process all signaling commands contained in a single skb received on
 * the L2CAP signaling channel, dispatching each to its handler. Handler
 * failures are answered with a Command Reject. Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A length past the end of the packet, or ident 0 (reserved),
		 * means a corrupt or malicious frame: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the request payload verbatim. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3459 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3461 u16 our_fcs, rcv_fcs;
3462 int hdr_size = L2CAP_HDR_SIZE + 2;
3464 if (pi->fcs == L2CAP_FCS_CRC16) {
3465 skb_trim(skb, skb->len - 2);
3466 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3467 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3469 if (our_fcs != rcv_fcs)
3470 return -EBADMSG;
3472 return 0;
/* Send whatever best conveys our current receive state: an RNR if we are
 * locally busy, retransmissions/I-frames if any are pending, or a plain
 * RR acknowledgment when nothing else got sent.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out and we're not busy: send an RR so the peer
	 * still learns our ReqSeq. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq (computed as an offset from buffer_seq to cope
 * with the modulo-64 sequence-number wraparound).
 *
 * Returns 0 on success, -EINVAL if a frame with this tx_seq is already
 * queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Offset from buffer_seq, normalized into [0, 63]. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* Duplicate of a frame already queued. */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* Insert before the first queued frame that sequences
		 * after us, preserving sorted order. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Sequences after everything queued: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
/* Reassemble ERTM SDU fragments according to the SAR bits of @control
 * and deliver completed SDUs to the socket receive queue.
 *
 * Returns 0 when the fragment was consumed (or the channel was aborted),
 * -ENOMEM / the sock_queue_rcv_skb() error when delivery must be retried
 * later (L2CAP_CONN_SAR_RETRY is set so the END fragment is re-processed
 * without re-appending its payload).
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU in the middle of reassembly is a
		 * protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two bytes of a START fragment carry the SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry the payload was already appended: only
		 * re-attempt delivery below. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* drop deliberately falls through: any reassembly error here
	 * aborts the whole channel. */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
/* Try to drain the local-busy queue into the socket. On full success,
 * leave the local-busy condition: if we previously sent RNR, poll the
 * peer with an RR+P frame. Returns 0 when the queue is drained, -EBUSY
 * when delivery is still blocked.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for a later try. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer "not ready" earlier: poll it (RR with P bit)
	 * and wait for the F-bit reply under the monitor timer. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
/* Workqueue handler for the local-busy condition: repeatedly retry
 * pushing queued frames to the socket, sleeping ~HZ/5 between attempts,
 * until the queue drains, a signal/socket error occurs, or
 * L2CAP_LOCAL_BUSY_TRIES is exceeded (which disconnects the channel).
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* NOTE(review): relies on the pinfo being the start of the sock
	 * allocation (standard bluetooth sock layout). */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock while sleeping so the receiver can make
		 * progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
/* Deliver an in-sequence I-frame to the socket, entering the local-busy
 * state (RNR sent, frame parked on BUSY_QUEUE, busy work scheduled) when
 * the receive path cannot accept it.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	/* Already busy: just park the frame and retry the whole queue. */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending (RNR with our ReqSeq). */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3779 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3781 struct l2cap_pinfo *pi = l2cap_pi(sk);
3782 struct sk_buff *_skb;
3783 int err = -EINVAL;
3786 * TODO: We have to notify the userland if some data is lost with the
3787 * Streaming Mode.
3790 switch (control & L2CAP_CTRL_SAR) {
3791 case L2CAP_SDU_UNSEGMENTED:
3792 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3793 kfree_skb(pi->sdu);
3794 break;
3797 err = sock_queue_rcv_skb(sk, skb);
3798 if (!err)
3799 return 0;
3801 break;
3803 case L2CAP_SDU_START:
3804 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3805 kfree_skb(pi->sdu);
3806 break;
3809 pi->sdu_len = get_unaligned_le16(skb->data);
3810 skb_pull(skb, 2);
3812 if (pi->sdu_len > pi->imtu) {
3813 err = -EMSGSIZE;
3814 break;
3817 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3818 if (!pi->sdu) {
3819 err = -ENOMEM;
3820 break;
3823 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3825 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3826 pi->partial_sdu_len = skb->len;
3827 err = 0;
3828 break;
3830 case L2CAP_SDU_CONTINUE:
3831 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3832 break;
3834 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3836 pi->partial_sdu_len += skb->len;
3837 if (pi->partial_sdu_len > pi->sdu_len)
3838 kfree_skb(pi->sdu);
3839 else
3840 err = 0;
3842 break;
3844 case L2CAP_SDU_END:
3845 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3846 break;
3848 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3850 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3851 pi->partial_sdu_len += skb->len;
3853 if (pi->partial_sdu_len > pi->imtu)
3854 goto drop;
3856 if (pi->partial_sdu_len == pi->sdu_len) {
3857 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3858 err = sock_queue_rcv_skb(sk, _skb);
3859 if (err < 0)
3860 kfree_skb(_skb);
3862 err = 0;
3864 drop:
3865 kfree_skb(pi->sdu);
3866 break;
3869 kfree_skb(skb);
3870 return err;
3873 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3875 struct sk_buff *skb;
3876 u16 control;
3878 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3879 if (bt_cb(skb)->tx_seq != tx_seq)
3880 break;
3882 skb = skb_dequeue(SREJ_QUEUE(sk));
3883 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3884 l2cap_ertm_reassembly_sdu(sk, skb, control);
3885 l2cap_pi(sk)->buffer_seq_srej =
3886 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3887 tx_seq = (tx_seq + 1) % 64;
/* A previously SREJ'd frame @tx_seq has arrived. Walk the SREJ list up
 * to its entry: the matching entry is removed; every entry passed on the
 * way (still missing) gets its SREJ re-sent and is rotated to the tail,
 * preserving the list's order.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* The awaited frame arrived: drop its entry. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Still missing: ask for it again and move to the tail. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3911 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3913 struct l2cap_pinfo *pi = l2cap_pi(sk);
3914 struct srej_list *new;
3915 u16 control;
3917 while (tx_seq != pi->expected_tx_seq) {
3918 control = L2CAP_SUPER_SELECT_REJECT;
3919 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3920 l2cap_send_sframe(pi, control);
3922 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3923 new->tx_seq = pi->expected_tx_seq;
3924 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3925 list_add_tail(&new->list, SREJ_LIST(sk));
3927 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Handle a received I-frame on an ERTM channel.
 *
 * Validates tx_seq against the receive window, processes the
 * piggy-backed acknowledgement (reqseq), and performs SREJ-based
 * recovery when a sequence gap is detected.  Returns 0; the skb is
 * consumed on every path (delivered, buffered for reordering, or
 * dropped). */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	/* Send an explicit ack roughly every sixth of the tx window. */
	int num_to_ack = (pi->tx_win/6) + 1;
	int err = 0;

	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
								rx_control);

	/* An F bit answers an outstanding poll: stop the monitor timer
	 * and resume retransmission timing if frames are still unacked. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* reqseq acknowledges frames we sent earlier. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= pi->tx_win) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* NOTE(review): this compares the whole conn_state word with ==
	 * instead of testing the LOCAL_BUSY bit with & — it only matches
	 * when no other flag is set.  Verify against how
	 * L2CAP_CONN_LOCAL_BUSY is set elsewhere in the file. */
	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: buffer it and see
			 * how much of the gap it closes. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Gap fully recovered: leave SREJ state. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(pi);
				BT_DBG("sk %p, Exit SREJ_SENT", sk);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
				goto drop;

			/* Already requested?  Then just re-issue the SREJs
			 * recorded in front of it. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(pi->expected_tx_seq - pi->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("sk %p, Enter SREJ", sk);

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		__skb_queue_head_init(BUSY_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);

		del_timer(&pi->ack_timer);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* In-sequence frame while a gap is still being recovered:
		 * park it on the SREJ queue so delivery stays ordered. */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	err = l2cap_push_rx_skb(sk, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	}

	__mod_ack_timer();

	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
	if (pi->num_acked == num_to_ack - 1)
		l2cap_send_ack(pi);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Handle a received RR (Receiver Ready) S-frame.
 *
 * The reqseq field acknowledges previously sent frames; the rest of the
 * behaviour depends on the P/F bits:
 *  - P bit set: the peer polls us, so answer with an F-bit response.
 *  - F bit set: the final response to an earlier poll of ours.
 *  - neither:   a plain acknowledgement; resume sending. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	/* Process the acknowledgement carried in reqseq. */
	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			/* Peer was busy and still owes retransmissions:
			 * restart the retransmission timer if needed. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Retransmit only if the pending REJ was not already
		 * acted upon. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
4111 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4113 struct l2cap_pinfo *pi = l2cap_pi(sk);
4114 u8 tx_seq = __get_reqseq(rx_control);
4116 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4118 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4120 pi->expected_ack_seq = tx_seq;
4121 l2cap_drop_acked_frames(sk);
4123 if (rx_control & L2CAP_CTRL_FINAL) {
4124 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4125 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4126 else
4127 l2cap_retransmit_frames(sk);
4128 } else {
4129 l2cap_retransmit_frames(sk);
4131 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4132 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a received SREJ (Selective Reject) S-frame: the peer asks for
 * retransmission of the single I-frame with sequence number reqseq. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P bit: reqseq also acknowledges everything
		 * before it; retransmit the requested frame, answer the
		 * poll with an F bit, and resume sending. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* SREJ with F bit: retransmit unless this exact request
		 * was already handled while waiting for the F bit. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a received RNR (Receiver Not Ready) S-frame: the peer is busy
 * and cannot accept new I-frames; reqseq still acknowledges frames. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No SREJ recovery in progress: stop the retransmission
		 * timer and answer a poll if one was requested. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress: either re-request the tail of the
	 * gap (poll) or just indicate we are ready to receive. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4199 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4201 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4203 if (L2CAP_CTRL_FINAL & rx_control &&
4204 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4205 del_timer(&l2cap_pi(sk)->monitor_timer);
4206 if (l2cap_pi(sk)->unacked_frames > 0)
4207 __mod_retrans_timer();
4208 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4211 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4212 case L2CAP_SUPER_RCV_READY:
4213 l2cap_data_channel_rrframe(sk, rx_control);
4214 break;
4216 case L2CAP_SUPER_REJECT:
4217 l2cap_data_channel_rejframe(sk, rx_control);
4218 break;
4220 case L2CAP_SUPER_SELECT_REJECT:
4221 l2cap_data_channel_srejframe(sk, rx_control);
4222 break;
4224 case L2CAP_SUPER_RCV_NOT_READY:
4225 l2cap_data_channel_rnrframe(sk, rx_control);
4226 break;
4229 kfree_skb(skb);
4230 return 0;
/* Entry point for a PDU received on an ERTM channel.  Validates the
 * FCS and header fields, then dispatches to the I-frame or S-frame
 * handler.  The skb is consumed on every path. */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* The first fragment of a segmented SDU carries a 2-byte SDU
	 * length field that is not payload. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* Neither is the FCS trailer, when enabled. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq: it may not acknowledge frames we
	 * never sent. */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative: frame shorter than its mandatory
		 * fields — protocol violation. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Dispatch an incoming frame on a connection-oriented channel (CID)
 * according to the channel mode.  The skb is consumed on every path. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	/* NOTE(review): the lookup appears to return the socket locked —
	 * the done: label below unlocks it.  Confirm against
	 * l2cap_get_chan_by_scid(). */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly unless a user context owns the socket,
		 * in which case defer via the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Strip the SDU length field and FCS trailer from the
		 * payload length before checking it against the MPS. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never retransmits: on a gap, just
		 * resynchronize the expected sequence number. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4391 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4393 struct sock *sk;
4395 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4396 if (!sk)
4397 goto drop;
4399 BT_DBG("sk %p, len %d", sk, skb->len);
4401 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4402 goto drop;
4404 if (l2cap_pi(sk)->imtu < skb->len)
4405 goto drop;
4407 if (!sock_queue_rcv_skb(sk, skb))
4408 goto done;
4410 drop:
4411 kfree_skb(skb);
4413 done:
4414 if (sk)
4415 bh_unlock_sock(sk);
4416 return 0;
4419 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4421 struct l2cap_hdr *lh = (void *) skb->data;
4422 u16 cid, len;
4423 __le16 psm;
4425 skb_pull(skb, L2CAP_HDR_SIZE);
4426 cid = __le16_to_cpu(lh->cid);
4427 len = __le16_to_cpu(lh->len);
4429 if (len != skb->len) {
4430 kfree_skb(skb);
4431 return;
4434 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4436 switch (cid) {
4437 case L2CAP_CID_SIGNALING:
4438 l2cap_sig_channel(conn, skb);
4439 break;
4441 case L2CAP_CID_CONN_LESS:
4442 psm = get_unaligned_le16(skb->data);
4443 skb_pull(skb, 2);
4444 l2cap_conless_channel(conn, psm, skb);
4445 break;
4447 default:
4448 l2cap_data_channel(conn, cid, skb);
4449 break;
4453 /* ---- L2CAP interface with lower layer (HCI) ---- */
4455 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4457 int exact = 0, lm1 = 0, lm2 = 0;
4458 register struct sock *sk;
4459 struct hlist_node *node;
4461 if (type != ACL_LINK)
4462 return -EINVAL;
4464 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4466 /* Find listening sockets and check their link_mode */
4467 read_lock(&l2cap_sk_list.lock);
4468 sk_for_each(sk, node, &l2cap_sk_list.head) {
4469 if (sk->sk_state != BT_LISTEN)
4470 continue;
4472 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4473 lm1 |= HCI_LM_ACCEPT;
4474 if (l2cap_pi(sk)->role_switch)
4475 lm1 |= HCI_LM_MASTER;
4476 exact++;
4477 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4478 lm2 |= HCI_LM_ACCEPT;
4479 if (l2cap_pi(sk)->role_switch)
4480 lm2 |= HCI_LM_MASTER;
4483 read_unlock(&l2cap_sk_list.lock);
4485 return exact ? lm1 : lm2;
4488 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4490 struct l2cap_conn *conn;
4492 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4494 if (hcon->type != ACL_LINK)
4495 return -EINVAL;
4497 if (!status) {
4498 conn = l2cap_conn_add(hcon, status);
4499 if (conn)
4500 l2cap_conn_ready(conn);
4501 } else
4502 l2cap_conn_del(hcon, bt_err(status));
4504 return 0;
/* HCI callback: report the reason code to use when disconnecting. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	/* 0x13 is the HCI "Remote User Terminated Connection" reason —
	 * the default when we hold no L2CAP state for this link. */
	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
/* HCI callback: the link was disconnected — tear down all L2CAP state
 * for it, translating the HCI reason into an errno via bt_err(). */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4531 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4533 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4534 return;
4536 if (encrypt == 0x00) {
4537 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4538 l2cap_sock_clear_timer(sk);
4539 l2cap_sock_set_timer(sk, HZ * 5);
4540 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4541 __l2cap_sock_close(sk, ECONNREFUSED);
4542 } else {
4543 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4544 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption completed for a link.
 * Walks every channel on the connection and, depending on its state,
 * continues a pending outgoing connect, answers a deferred incoming
 * connect, or re-checks encryption on established channels. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting on their own connect response
		 * are handled when that response arrives. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security succeeded: now send the actual
				 * L2CAP connection request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short grace period before
				 * the channel is torn down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Deferred incoming connect: answer it now that the
			 * security procedure has a result. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: ACL data arrived.  Reassembles fragmented L2CAP frames:
 * an ACL_START fragment carries the basic header with the total frame
 * length, continuation fragments are appended until rx_len reaches
 * zero, then the complete frame is dispatched.  Consumes the skb. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* A previous frame was left half-assembled:
			 * discard it and flag the link unreliable. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 2-byte length field of the header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			/* Fragment overflows the frame: drop the whole
			 * reassembly and flag the link unreliable. */
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
/* debugfs "l2cap" seq_file: one line per L2CAP socket with addresses,
 * state, PSM, channel ids, MTUs and security level. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
/* debugfs open handler: wire the seq_file show callback. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* dentry for the debugfs "l2cap" file, created in l2cap_init(). */
static struct dentry *l2cap_debugfs;
/* Userspace-facing socket operations for PF_BLUETOOTH / BTPROTO_L2CAP. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* socket(2) creation hook for the Bluetooth address family. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Registration with the HCI core: callbacks for link events and
 * incoming ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4784 static int __init l2cap_init(void)
4786 int err;
4788 err = proto_register(&l2cap_proto, 0);
4789 if (err < 0)
4790 return err;
4792 _busy_wq = create_singlethread_workqueue("l2cap");
4793 if (!_busy_wq)
4794 goto error;
4796 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4797 if (err < 0) {
4798 BT_ERR("L2CAP socket registration failed");
4799 goto error;
4802 err = hci_register_proto(&l2cap_hci_proto);
4803 if (err < 0) {
4804 BT_ERR("L2CAP protocol registration failed");
4805 bt_sock_unregister(BTPROTO_L2CAP);
4806 goto error;
4809 if (bt_debugfs) {
4810 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4811 bt_debugfs, NULL, &l2cap_debugfs_fops);
4812 if (!l2cap_debugfs)
4813 BT_ERR("Failed to create L2CAP debug file");
4816 BT_INFO("L2CAP ver %s", VERSION);
4817 BT_INFO("L2CAP socket layer initialized");
4819 return 0;
4821 error:
4822 proto_unregister(&l2cap_proto);
4823 return err;
/* Module exit: tear down in reverse order of l2cap_init(). */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain pending busy work before destroying the workqueue. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

/* Allow disabling enhanced retransmission mode at load time. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");